author     Tom Lane <tgl@sss.pgh.pa.us>  2022-02-08 15:30:38 -0500
committer  Tom Lane <tgl@sss.pgh.pa.us>  2022-02-08 15:30:38 -0500
commit     cc50080a828dd4791b43539f5a0f976e535d147c (patch)
tree       787184da35163d8be525b7f84af85083e50d152a /src/test/regress/sql/hash_index.sql
parent     ba15f16107bea8a93edc505f3013cd7df4ac90fc (diff)
download   postgresql-cc50080a828dd4791b43539f5a0f976e535d147c.tar.gz
Rearrange core regression tests to reduce cross-script dependencies.
The idea behind this patch is to make it possible to run individual test
scripts without running the entire core test suite.  Making all the scripts
completely independent would involve a massive rewrite, and would probably
be worse for coverage of things like concurrent DDL.  So this patch just
does what seems practical with limited changes.

The net effect is that any test script can be run after running limited
earlier dependencies:

* all scripts depend on test_setup
* many scripts depend on create_index
* other dependencies are few in number, and are documented in the
  parallel_schedule file.

To accomplish this, I chose a small number of commonly-used tables and moved
their creation and filling into test_setup.  Later scripts are expected not
to modify these tables' data contents, for fear of affecting other scripts'
results.  Also, our former habit of declaring all C functions in one place
is now gone in favor of declaring them where they're used, if that's just
one script, or in test_setup if necessary.

There's more that could be done to remove some of the remaining inter-script
dependencies, but significantly more-invasive changes would be needed, and
at least for now it doesn't seem worth it.

Discussion: https://postgr.es/m/1114748.1640383217@sss.pgh.pa.us
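As a rough illustration of the dependency rule above (not part of this
commit; the file name below is made up), a cut-down schedule for running
just this script could list its prerequisites first, using the same
"test: <name>" line format that parallel_schedule uses:

    # minimal_schedule  (hypothetical file name)
    test: test_setup
    # add "test: create_index" here if parallel_schedule documents it as a
    # prerequisite of the script being run
    test: hash_index

Such a file could then be passed to pg_regress through its --schedule
option, together with whatever connection and build options a particular
installation needs, instead of running the full parallel_schedule.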
Diffstat (limited to 'src/test/regress/sql/hash_index.sql')
-rw-r--r--  src/test/regress/sql/hash_index.sql  64
1 file changed, 63 insertions, 1 deletion
diff --git a/src/test/regress/sql/hash_index.sql b/src/test/regress/sql/hash_index.sql
index 4d1aa020a9..527024f710 100644
--- a/src/test/regress/sql/hash_index.sql
+++ b/src/test/regress/sql/hash_index.sql
@@ -1,8 +1,70 @@
--
-- HASH_INDEX
--- grep 843938989 hash.data
--
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+
+CREATE TABLE hash_i4_heap (
+ seqno int4,
+ random int4
+);
+
+CREATE TABLE hash_name_heap (
+ seqno int4,
+ random name
+);
+
+CREATE TABLE hash_txt_heap (
+ seqno int4,
+ random text
+);
+
+CREATE TABLE hash_f8_heap (
+ seqno int4,
+ random float8
+);
+
+\set filename :abs_srcdir '/data/hash.data'
+COPY hash_i4_heap FROM :'filename';
+COPY hash_name_heap FROM :'filename';
+COPY hash_txt_heap FROM :'filename';
+COPY hash_f8_heap FROM :'filename';
+
+-- the data in this file has a lot of duplicates in the index key
+-- fields, leading to long bucket chains and lots of table expansion.
+-- this is therefore a stress test of the bucket overflow code (unlike
+-- the data in hash.data, which has unique index keys).
+--
+-- \set filename :abs_srcdir '/data/hashovfl.data'
+-- COPY hash_ovfl_heap FROM :'filename';
+
+ANALYZE hash_i4_heap;
+ANALYZE hash_name_heap;
+ANALYZE hash_txt_heap;
+ANALYZE hash_f8_heap;
+
+CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops);
+
+CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops);
+
+CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops);
+
+CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops)
+ WITH (fillfactor=60);
+
+--
+-- Also try building functional, expressional, and partial indexes on
+-- tables that already contain data.
+--
+create unique index hash_f8_index_1 on hash_f8_heap(abs(random));
+create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
+create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
+
+--
+-- hash index
+-- grep 843938989 hash.data
+--
SELECT * FROM hash_i4_heap
WHERE hash_i4_heap.random = 843938989;
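The query above is the original smoke test carried over from the old version
of the script: per the "grep 843938989 hash.data" comment, the looked-up
value is known to exist in the loaded data file. As an informal check
outside the regression harness (not part of this commit), one could confirm
that the lookup is able to use the new hash index with something like:

    -- hypothetical interactive check, not part of the committed test
    EXPLAIN (COSTS OFF)
    SELECT * FROM hash_i4_heap WHERE hash_i4_heap.random = 843938989;

which would be expected to show a scan of hash_i4_index whenever the planner
considers the index cheaper than a sequential scan.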