The following issues were found:
tsl/test/sql/dist_triggers.sql
17 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\unset ECHO
\o /dev/null
\ir include/filter_exec.sql
\ir include/remote_exec.sql
Reported by SQLint.
Line: 7
Column: 1
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\unset ECHO
\o /dev/null
\ir include/filter_exec.sql
\ir include/remote_exec.sql
\o
\set ECHO all
Reported by SQLint.
Line: 30
Column: 1
GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC;
-- Import testsupport.sql file to data nodes
\unset ECHO
\o /dev/null
\c :DATA_NODE_1
SET client_min_messages TO ERROR;
\ir :TEST_SUPPORT_FILE
\c :DATA_NODE_2
Reported by SQLint.
Line: 34
Column: 1
\o /dev/null
\c :DATA_NODE_1
SET client_min_messages TO ERROR;
\ir :TEST_SUPPORT_FILE
\c :DATA_NODE_2
SET client_min_messages TO ERROR;
\ir :TEST_SUPPORT_FILE
\c :DATA_NODE_3
SET client_min_messages TO ERROR;
Reported by SQLint.
Line: 37
Column: 1
\ir :TEST_SUPPORT_FILE
\c :DATA_NODE_2
SET client_min_messages TO ERROR;
\ir :TEST_SUPPORT_FILE
\c :DATA_NODE_3
SET client_min_messages TO ERROR;
\ir :TEST_SUPPORT_FILE
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\o
Reported by SQLint.
Line: 40
Column: 1
\ir :TEST_SUPPORT_FILE
\c :DATA_NODE_3
SET client_min_messages TO ERROR;
\ir :TEST_SUPPORT_FILE
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\o
SET client_min_messages TO NOTICE;
\set ECHO all
Reported by SQLint.
Line: 42
Column: 1
SET client_min_messages TO ERROR;
\ir :TEST_SUPPORT_FILE
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\o
SET client_min_messages TO NOTICE;
\set ECHO all
SET ROLE :ROLE_1;
Reported by SQLint.
Line: 44
Column: 1
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\o
SET client_min_messages TO NOTICE;
\set ECHO all
SET ROLE :ROLE_1;
CREATE TABLE hyper (
time BIGINT NOT NULL,
Reported by SQLint.
Line: 126
Column: 1
SELECT * FROM create_distributed_hypertable('hyper', 'time', 'device_id', 3, chunk_time_interval => 10, data_nodes => ARRAY[:'DATA_NODE_1', :'DATA_NODE_2']);
-- FAILURE cases
\set ON_ERROR_STOP 0
-- Check that CREATE TRIGGER fails if a trigger already exists on a data node.
CALL distributed_exec($$
CREATE TRIGGER _0_test_trigger_insert_s_before
BEFORE INSERT ON hyper
Reported by SQLint.
Line: 147
Column: 1
-- exist on all nodes. Insert should fail
INSERT INTO hyper(time, device_id,sensor_1) VALUES
(1257987600000000000, 'dev1', 1);
\set ON_ERROR_STOP 1
-- Now, create trigger_events on the other nodes
CALL distributed_exec($$
CREATE TABLE trigger_events (
tg_when text,
Reported by SQLint.
tsl/test/sql/dist_util.sql
17 issues
Line: 7
Column: 1
----------------------------------------------------------------
-- Test version compatibility function
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
CREATE OR REPLACE FUNCTION compatible_version(version CSTRING, reference CSTRING)
RETURNS TABLE(is_compatible BOOLEAN, is_old_version BOOLEAN)
AS :TSL_MODULE_PATHNAME, 'ts_test_compatible_version'
LANGUAGE C VOLATILE;
Reported by SQLint.
Line: 24
Column: 1
SELECT * FROM compatible_version('2.1.0', reference => '2.1.19-beta3.19');
-- These should not parse and instead generate an error.
\set ON_ERROR_STOP 0
SELECT * FROM compatible_version('2.1.*', reference => '2.1.19-beta3.19');
SELECT * FROM compatible_version('2.1.0', reference => '2.1.*');
\set ON_ERROR_STOP 1
----------------------------------------------------------------
Reported by SQLint.
Line: 27
Column: 1
\set ON_ERROR_STOP 0
SELECT * FROM compatible_version('2.1.*', reference => '2.1.19-beta3.19');
SELECT * FROM compatible_version('2.1.0', reference => '2.1.*');
\set ON_ERROR_STOP 1
----------------------------------------------------------------
-- Create two distributed databases
CREATE DATABASE frontend_1;
Reported by SQLint.
Line: 35
Column: 1
CREATE DATABASE frontend_1;
CREATE DATABASE frontend_2;
\c frontend_1 :ROLE_CLUSTER_SUPERUSER
SET client_min_messages TO ERROR;
CREATE EXTENSION timescaledb;
UPDATE _timescaledb_catalog.metadata SET value = '87c235e9-d857-4f16-b59f-7fbac9b87664' WHERE key = 'uuid';
SELECT key, value FROM _timescaledb_catalog.metadata WHERE key LIKE '%uuid';
SELECT * FROM add_data_node('data_node_1', host => 'localhost', database => 'backend_1_1');
Reported by SQLint.
Line: 45
Column: 1
SET client_min_messages TO NOTICE;
-- Create a second frontend database and add a backend to it
\c frontend_2 :ROLE_CLUSTER_SUPERUSER
SET client_min_messages TO ERROR;
CREATE EXTENSION timescaledb;
UPDATE _timescaledb_catalog.metadata SET value = '77348176-09da-4a80-bc78-e31bdf5e63ec' WHERE key = 'uuid';
SELECT key, value FROM _timescaledb_catalog.metadata WHERE key LIKE '%uuid';
SELECT * FROM add_data_node('data_node_1', host => 'localhost', database => 'backend_2_1');
Reported by SQLint.
Line: 54
Column: 1
SELECT key, value FROM _timescaledb_catalog.metadata WHERE key LIKE '%uuid';
SET client_min_messages TO NOTICE;
\set ON_ERROR_STOP 0
----------------------------------------------------------------
-- Adding frontend as backend to a different frontend should fail
\c frontend_1 :ROLE_CLUSTER_SUPERUSER
SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'frontend_2', bootstrap => true);
Reported by SQLint.
Line: 64
Column: 1
----------------------------------------------------------------
-- Adding backend from a different group as a backend should fail
\c frontend_1 :ROLE_CLUSTER_SUPERUSER
SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'backend_2_1', bootstrap => true);
SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'backend_2_1', bootstrap => false);
----------------------------------------------------------------
-- Adding a valid backend target but to an existing backend should fail
Reported by SQLint.
Line: 70
Column: 1
----------------------------------------------------------------
-- Adding a valid backend target but to an existing backend should fail
\c backend_1_1 :ROLE_CLUSTER_SUPERUSER
SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'backend_2_1', bootstrap => true);
SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'backend_2_1', bootstrap => false);
----------------------------------------------------------------
-- Adding a frontend (frontend 1) as a backend to a nondistributed node (TEST_DBNAME) should fail
Reported by SQLint.
Line: 76
Column: 1
----------------------------------------------------------------
-- Adding a frontend (frontend 1) as a backend to a nondistributed node (TEST_DBNAME) should fail
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'frontend_1', bootstrap => true);
SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'frontend_1', bootstrap => false);
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 80
Column: 1
SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'frontend_1', bootstrap => true);
SELECT * FROM add_data_node('invalid_data_node', host => 'localhost', database => 'frontend_1', bootstrap => false);
\set ON_ERROR_STOP 1
----------------------------------------------------------------
-- Test that a data node can be moved to a different frontend if it is
-- removed first.
\c frontend_1 :ROLE_CLUSTER_SUPERUSER
Reported by SQLint.
tsl/test/sql/compression_bgw.sql
16 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE ROLE NOLOGIN_ROLE WITH nologin noinherit;
GRANT NOLOGIN_ROLE TO :ROLE_DEFAULT_PERM_USER WITH ADMIN OPTION;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
Reported by SQLint.
Line: 10
Column: 1
CREATE ROLE NOLOGIN_ROLE WITH nologin noinherit;
GRANT NOLOGIN_ROLE TO :ROLE_DEFAULT_PERM_USER WITH ADMIN OPTION;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE conditions (
time TIMESTAMPTZ NOT NULL,
location TEXT NOT NULL,
location2 char(10) NOT NULL,
Reported by SQLint.
Line: 23
Column: 1
--TEST 1--
--cannot set policy without enabling compression --
\set ON_ERROR_STOP 0
select add_compression_policy('conditions', '60d'::interval);
\set ON_ERROR_STOP 1
-- TEST2 --
--add a policy to compress chunks --
Reported by SQLint.
Line: 25
Column: 1
--cannot set policy without enabling compression --
\set ON_ERROR_STOP 0
select add_compression_policy('conditions', '60d'::interval);
\set ON_ERROR_STOP 1
-- TEST2 --
--add a policy to compress chunks --
alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time');
insert into conditions
Reported by SQLint.
Line: 34
Column: 1
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
select add_compression_policy('conditions', '60d'::interval) AS compressjob_id
\gset
select * from _timescaledb_config.bgw_job where id = :compressjob_id;
select * from alter_job(:compressjob_id, schedule_interval=>'1s');
select * from _timescaledb_config.bgw_job where id >= 1000 ORDER BY id;
insert into conditions
Reported by SQLint.
Line: 52
Column: 1
-- TEST 4 --
--cannot set another policy
\set ON_ERROR_STOP 0
select add_compression_policy('conditions', '60d'::interval, if_not_exists=>true);
select add_compression_policy('conditions', '60d'::interval);
select add_compression_policy('conditions', '30d'::interval, if_not_exists=>true);
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 56
Column: 1
select add_compression_policy('conditions', '60d'::interval, if_not_exists=>true);
select add_compression_policy('conditions', '60d'::interval);
select add_compression_policy('conditions', '30d'::interval, if_not_exists=>true);
\set ON_ERROR_STOP 1
--TEST 5 --
-- drop the policy --
select remove_compression_policy('conditions');
select count(*) from _timescaledb_config.bgw_job WHERE id>=1000;
Reported by SQLint.
Line: 65
Column: 1
--TEST 6 --
-- try to execute the policy after it has been dropped --
\set ON_ERROR_STOP 0
CALL run_job(:compressjob_id);
\set ON_ERROR_STOP 1
-- We're done with the table, so drop it.
DROP TABLE IF EXISTS conditions CASCADE;
Reported by SQLint.
Line: 67
Column: 1
-- try to execute the policy after it has been dropped --
\set ON_ERROR_STOP 0
CALL run_job(:compressjob_id);
\set ON_ERROR_STOP 1
-- We're done with the table, so drop it.
DROP TABLE IF EXISTS conditions CASCADE;
--TEST 7
Reported by SQLint.
Line: 82
Column: 1
insert into test_table_int select generate_series(1,5), 10;
alter table test_table_int set (timescaledb.compress);
select add_compression_policy('test_table_int', 2::int) AS compressjob_id
\gset
select * from _timescaledb_config.bgw_job where id=:compressjob_id;
\gset
CALL run_job(:compressjob_id);
CALL run_job(:compressjob_id);
Reported by SQLint.
tsl/test/shared/sql/include/dist_distinct_run.sql
16 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\echo '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
\echo '%%% RUNNING TESTS on table:' :TABLE_NAME
\echo '%%% PREFIX:' :PREFIX
\echo '%%% ORDER_BY_1:' :ORDER_BY_1
\echo '%%% ORDER_BY_1_2:' :ORDER_BY_1_2
\echo '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
Reported by SQLint.
Line: 15
Column: 1
-- Test SkipScan with SELECT DISTINCT in multi-node environment
-- Ensure that a Unique plan gets chosen on the access node
SET enable_hashagg TO false;
\qecho Unique plan on access node for SELECT DISTINCT
:PREFIX
SELECT DISTINCT device_id
FROM :TABLE_NAME
:ORDER_BY_1
LIMIT 10;
Reported by SQLint.
Line: 25
Column: 1
SET timescaledb.enable_per_data_node_queries = true;
-- SELECT DISTINCT on expressions won't be pushed down
\qecho SELECT DISTINCT on expressions is not pushed down
:PREFIX
SELECT DISTINCT device_id*v1
FROM :TABLE_NAME
:ORDER_BY_1
LIMIT 10;
Reported by SQLint.
Line: 34
Column: 1
SET timescaledb.enable_remote_explain = ON;
-- SELECT DISTINCT on column with index should use SkipScan
\qecho SELECT DISTINCT on column with index uses SkipScan
:PREFIX
SELECT DISTINCT device_id
FROM :TABLE_NAME
:ORDER_BY_1
LIMIT 10;
Reported by SQLint.
Line: 42
Column: 1
LIMIT 10;
-- SELECT DISTINCT with constants and NULLs in targetlist should use SkipScan
\qecho SELECT DISTINCT with constants and NULLs in targetlist uses SkipScan
:PREFIX
SELECT DISTINCT device_id, NULL, 'const1'
FROM :TABLE_NAME
:ORDER_BY_1
LIMIT 10;
Reported by SQLint.
Line: 52
Column: 1
-- SELECT DISTINCT with a mix of constants and columns should send only
-- the columns to the remote side. However SkipScan won't be used because
-- right now only single column is supported in SkipScans
\qecho SELECT DISTINCT only sends columns to the data nodes
:PREFIX
SELECT DISTINCT device_id, time, NULL, 'const1'
FROM :TABLE_NAME
:ORDER_BY_1_2
LIMIT 10;
Reported by SQLint.
Line: 62
Column: 1
-- SELECT DISTINCT will be pushed down in the attribute attno order. This
-- is ok because "DISTINCT SELECT col1, col2" returns the same values
-- (subject to ORDER BY clauses) as "DISTINCT SELECT col2, col1"
\qecho SELECT DISTINCT is pushed down in attribute attno order
:PREFIX
SELECT DISTINCT device_id, time
FROM :TABLE_NAME
:ORDER_BY_1_2
LIMIT 10;
Reported by SQLint.
Line: 71
Column: 1
-- SELECT DISTINCT ON on multiple columns will be pushed to the remote side.
-- However SkipScan won't be used since only one column is supported
\qecho SELECT DISTINCT ON multiple columns is pushed to data nodes
:PREFIX
SELECT DISTINCT ON (device_id, time) device_id, time
FROM :TABLE_NAME
:ORDER_BY_1_2
LIMIT 10;
Reported by SQLint.
Line: 79
Column: 1
LIMIT 10;
-- Another variation with SELECT DISTINCT
\qecho SELECT DISTINCT within a sub-select
:PREFIX
SELECT device_id, time, 'const1' FROM (SELECT DISTINCT ON (device_id) device_id, time
FROM :TABLE_NAME
:ORDER_BY_1_2
LIMIT 10) a;
Reported by SQLint.
Line: 88
Column: 1
-- Ensure that SELECT DISTINCT pushdown happens even with below disabled
SET timescaledb.enable_per_data_node_queries = false;
\qecho SELECT DISTINCT works with enable_per_data_node_queries disabled
:PREFIX
SELECT DISTINCT device_id
FROM :TABLE_NAME
:ORDER_BY_1
LIMIT 10;
Reported by SQLint.
test/sql/alternate_users.sql
14 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\ir include/insert_single.sql
\c :TEST_DBNAME :ROLE_SUPERUSER
-- make sure tablespace1 exists
-- since there is no CREATE TABLESPACE IF NOT EXISTS we drop with IF EXISTS and recreate
Reported by SQLint.
Line: 24
Column: 1
GRANT CREATE ON SCHEMA "one_Partition" TO :ROLE_DEFAULT_PERM_USER_2;
--test creating and using schema as non-superuser
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
\dt
\set ON_ERROR_STOP 0
SELECT * FROM "one_Partition";
SELECT set_chunk_time_interval('"one_Partition"', 1::bigint);
Reported by SQLint.
Line: 32
Column: 1
SELECT set_chunk_time_interval('"one_Partition"', 1::bigint);
select add_dimension('"one_Partition"', 'device_id', 2);
select attach_tablespace('tablespace1', '"one_Partition"');
\set ON_ERROR_STOP 1
CREATE TABLE "1dim"(time timestamp, temp float);
SELECT create_hypertable('"1dim"', 'time');
INSERT INTO "1dim" VALUES('2017-01-20T09:00:01', 22.5);
INSERT INTO "1dim" VALUES('2017-01-20T09:00:21', 21.2);
Reported by SQLint.
Line: 42
Column: 1
SELECT * FROM "1dim";
\ir include/ddl_ops_1.sql
\ir include/ddl_ops_2.sql
--test proper denials for all security definer functions:
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE TABLE plain_table_su (time timestamp, temp float);
Reported by SQLint.
Line: 53
Column: 1
CREATE INDEX "ind_1" ON hypertable_su (time);
INSERT INTO hypertable_su VALUES('2017-01-20T09:00:01', 22.5);
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
--all of the following should produce errors
\set ON_ERROR_STOP 0
SELECT create_hypertable('plain_table_su', 'time');
CREATE INDEX ON plain_table_su (time, temp);
CREATE INDEX ON hypertable_su (time, temp);
Reported by SQLint.
Line: 61
Column: 1
CREATE INDEX ON hypertable_su (time, temp);
DROP INDEX "ind_1";
ALTER INDEX "ind_1" RENAME TO "ind_2";
\set ON_ERROR_STOP 1
--test that I can't do anything to a non-owned hypertable.
\set ON_ERROR_STOP 0
CREATE INDEX ON hypertable_su (time, temp);
SELECT * FROM hypertable_su;
Reported by SQLint.
Line: 69
Column: 1
SELECT * FROM hypertable_su;
INSERT INTO hypertable_su VALUES('2017-01-20T09:00:01', 22.5);
ALTER TABLE hypertable_su ADD COLUMN val2 integer;
\set ON_ERROR_STOP 1
--grant read permissions
\c :TEST_DBNAME :ROLE_SUPERUSER
GRANT SELECT ON hypertable_su TO :ROLE_DEFAULT_PERM_USER_2;
Reported by SQLint.
Line: 75
Column: 1
\c :TEST_DBNAME :ROLE_SUPERUSER
GRANT SELECT ON hypertable_su TO :ROLE_DEFAULT_PERM_USER_2;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
SELECT * FROM hypertable_su;
\set ON_ERROR_STOP 0
CREATE INDEX ON hypertable_su (time, temp);
INSERT INTO hypertable_su VALUES('2017-01-20T09:00:01', 22.5);
ALTER TABLE hypertable_su ADD COLUMN val2 integer;
Reported by SQLint.
Line: 77
Column: 1
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
SELECT * FROM hypertable_su;
\set ON_ERROR_STOP 0
CREATE INDEX ON hypertable_su (time, temp);
INSERT INTO hypertable_su VALUES('2017-01-20T09:00:01', 22.5);
ALTER TABLE hypertable_su ADD COLUMN val2 integer;
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 81
Column: 1
CREATE INDEX ON hypertable_su (time, temp);
INSERT INTO hypertable_su VALUES('2017-01-20T09:00:01', 22.5);
ALTER TABLE hypertable_su ADD COLUMN val2 integer;
\set ON_ERROR_STOP 1
--grant read, insert permissions
\c :TEST_DBNAME :ROLE_SUPERUSER
GRANT SELECT, INSERT ON hypertable_su TO :ROLE_DEFAULT_PERM_USER_2;
Reported by SQLint.
test/sql/index.sql
14 issues
Line: 18
Column: 1
-- Create index before create_hypertable()
CREATE UNIQUE INDEX index_test_time_idx ON index_test (time);
\set ON_ERROR_STOP 0
-- Creating a hypertable from a table with an index that doesn't cover
-- all partitioning columns should fail
SELECT create_hypertable('index_test', 'time', 'device', 2);
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 22
Column: 1
-- Creating a hypertable from a table with an index that doesn't cover
-- all partitioning columns should fail
SELECT create_hypertable('index_test', 'time', 'device', 2);
\set ON_ERROR_STOP 1
-- Partitioning on only time should work
SELECT create_hypertable('index_test', 'time');
INSERT INTO index_test VALUES ('2017-01-20T09:00:01', 1, 17.5);
Reported by SQLint.
Line: 60
Column: 1
INSERT INTO index_test VALUES ('2017-01-20T09:00:01', 1, 17.5);
\set ON_ERROR_STOP 0
-- Create unique index without all partitioning columns should fail
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time);
\set ON_ERROR_STOP 1
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time, device);
Reported by SQLint.
Line: 63
Column: 1
\set ON_ERROR_STOP 0
-- Create unique index without all partitioning columns should fail
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time);
\set ON_ERROR_STOP 1
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time, device);
-- Regular index need not cover all partitioning columns
CREATE INDEX ON index_test (time, temp);
Reported by SQLint.
Line: 103
Column: 1
DROP INDEX a_hypertable_index_with_a_very_very_long_name_that_truncates;
DROP INDEX a_hypertable_index_with_a_very_very_long_name_that_truncates_2;
\set ON_ERROR_STOP 0
-- Create index CONCURRENTLY
CREATE UNIQUE INDEX CONCURRENTLY index_test_time_device_idx ON index_test (time, device);
\set ON_ERROR_STOP 1
-- Test tablespaces. Chunk indexes should end up in same tablespace as
Reported by SQLint.
Line: 106
Column: 1
\set ON_ERROR_STOP 0
-- Create index CONCURRENTLY
CREATE UNIQUE INDEX CONCURRENTLY index_test_time_device_idx ON index_test (time, device);
\set ON_ERROR_STOP 1
-- Test tablespaces. Chunk indexes should end up in same tablespace as
-- main index.
\c :TEST_DBNAME :ROLE_SUPERUSER
SET client_min_messages = ERROR;
Reported by SQLint.
Line: 117
Column: 1
SET client_min_messages = NOTICE;
CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE INDEX index_test_time_idx ON index_test (time) TABLESPACE tablespace1;
SELECT * FROM test.show_indexes('index_test');
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
Reported by SQLint.
Line: 123
Column: 1
SELECT * FROM test.show_indexes('index_test');
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
ALTER INDEX index_test_time_idx SET TABLESPACE tablespace2;
SELECT * FROM test.show_indexes('index_test');
Reported by SQLint.
Line: 125
Column: 1
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
ALTER INDEX index_test_time_idx SET TABLESPACE tablespace2;
SELECT * FROM test.show_indexes('index_test');
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
Reported by SQLint.
Line: 177
Column: 1
-- Cleanup
DROP TABLE index_test CASCADE;
\c :TEST_DBNAME :ROLE_SUPERUSER
DROP TABLESPACE tablespace1;
DROP TABLESPACE tablespace2;
-- Test expression indexes
CREATE TABLE index_expr_test(id serial, time timestamptz, temp float, meta jsonb);
Reported by SQLint.
tsl/test/sql/dist_partial_agg.sql
13 issues
Line: 6
Column: 1
-- LICENSE-TIMESCALE for a copy of the license.
-- Need to be super user to create extension and add data nodes
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\ir include/remote_exec.sql
SET ROLE :ROLE_1;
\set TEST_TABLE 'conditions'
\ir 'include/aggregate_table_create.sql'
Reported by SQLint.
Line: 7
Column: 1
-- Need to be super user to create extension and add data nodes
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\ir include/remote_exec.sql
SET ROLE :ROLE_1;
\set TEST_TABLE 'conditions'
\ir 'include/aggregate_table_create.sql'
Reported by SQLint.
Line: 10
Column: 1
\ir include/remote_exec.sql
SET ROLE :ROLE_1;
\set TEST_TABLE 'conditions'
\ir 'include/aggregate_table_create.sql'
SET ROLE :ROLE_CLUSTER_SUPERUSER;
\set DN_DBNAME_1 :TEST_DBNAME _1
\set DN_DBNAME_2 :TEST_DBNAME _2
Reported by SQLint.
Line: 14
Column: 1
\ir 'include/aggregate_table_create.sql'
SET ROLE :ROLE_CLUSTER_SUPERUSER;
\set DN_DBNAME_1 :TEST_DBNAME _1
\set DN_DBNAME_2 :TEST_DBNAME _2
\set DN_DBNAME_3 :TEST_DBNAME _3
-- Add data nodes using the TimescaleDB node management API
SELECT * FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1');
Reported by SQLint.
Line: 33
Column: 1
SELECT table_name FROM create_distributed_hypertable( 'conditions', 'timec', 'location', 3, chunk_time_interval => INTERVAL '1 day');
-- We need a lot of data and a lot of chunks to make the planner push down all of the aggregates
\ir 'include/aggregate_table_populate.sql'
SET enable_partitionwise_aggregate = ON;
-- Run an explain on the aggregate queries to make sure expected aggregates are being pushed down.
-- Grouping by the partitioning column should result in full aggregate pushdown where possible,
Reported by SQLint.
Line: 40
Column: 1
-- Run an explain on the aggregate queries to make sure expected aggregates are being pushed down.
-- Grouping by the partitioning column should result in full aggregate pushdown where possible,
-- while using a non-partitioning column should result in a partial pushdown
\set PREFIX 'EXPLAIN (VERBOSE, COSTS OFF)'
\set GROUPING 'location'
\ir 'include/aggregate_queries.sql'
\set GROUPING 'region'
Reported by SQLint.
Line: 60
Column: 1
\set ECHO errors
SET client_min_messages TO error;
--make output contain query results
\set PREFIX ''
\o :RESULTS_CONTROL1
SET enable_partitionwise_aggregate = OFF;
\ir 'include/aggregate_queries.sql'
\o
\o :RESULTS_TEST1
Reported by SQLint.
Line: 63
Column: 1
\set PREFIX ''
\o :RESULTS_CONTROL1
SET enable_partitionwise_aggregate = OFF;
\ir 'include/aggregate_queries.sql'
\o
\o :RESULTS_TEST1
SET enable_partitionwise_aggregate = ON;
\ir 'include/aggregate_queries.sql'
\o
Reported by SQLint.
Line: 67
Column: 1
\o
\o :RESULTS_TEST1
SET enable_partitionwise_aggregate = ON;
\ir 'include/aggregate_queries.sql'
\o
\set ECHO all
:DIFF_CMD1
Reported by SQLint.
Line: 85
Column: 1
\set ECHO errors
SET client_min_messages TO error;
--make output contain query results
\set PREFIX ''
\o :RESULTS_CONTROL2
SET enable_partitionwise_aggregate = OFF;
\ir 'include/aggregate_queries.sql'
\o
\o :RESULTS_TEST2
Reported by SQLint.
test/sql/pg_dump.sql
13 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\o /dev/null
\ir include/insert_two_partitions.sql
\o
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE SCHEMA test_schema AUTHORIZATION :ROLE_DEFAULT_PERM_USER;
\c :TEST_DBNAME
Reported by SQLint.
Line: 10
Column: 1
\o
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE SCHEMA test_schema AUTHORIZATION :ROLE_DEFAULT_PERM_USER;
\c :TEST_DBNAME
ALTER TABLE PUBLIC."two_Partitions" SET SCHEMA "test_schema";
-- Test that we can restore constraints
ALTER TABLE "test_schema"."two_Partitions"
ADD CONSTRAINT timeCustom_device_id_series_2_key
Reported by SQLint.
Line: 57
Column: 1
FROM pg_depend
WHERE refclassid = 'pg_extension'::regclass
AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'timescaledb')
\gset
SELECT * FROM test.show_columns('"test_schema"."two_Partitions"');
SELECT * FROM test.show_columns('_timescaledb_internal._hyper_1_1_chunk');
SELECT * FROM test.show_indexes('"test_schema"."two_Partitions"');
SELECT * FROM test.show_indexes('_timescaledb_internal._hyper_1_1_chunk');
Reported by SQLint.
Line: 79
Column: 1
--force a value to exist for exported_uuid
INSERT INTO _timescaledb_catalog.metadata VALUES ('exported_uuid', 'original_uuid', true);
\c postgres :ROLE_SUPERUSER
-- We shell out to a script in order to grab the correct hostname from the
-- environmental variables that originally called this psql command. Sadly
-- vars passed to psql do not work in \! commands so we can't do it that way.
\! utils/pg_dump_aux_dump.sh dump/pg_dump.sql
Reported by SQLint.
Line: 96
Column: 1
SELECT timescaledb_pre_restore();
SHOW timescaledb.restoring;
\! utils/pg_dump_aux_restore.sh dump/pg_dump.sql
-- Inserting with restoring ON in current session causes tuples to be
-- inserted on main table, but this should be protected by the insert
-- blocking trigger.
Reported by SQLint.
Line: 105
Column: 1
\set ON_ERROR_STOP 0
INSERT INTO "test_schema"."two_Partitions"("timeCustom", device_id, series_0, series_1)
VALUES (1357894000000000000, 'dev5', 1.5, 2);
\set ON_ERROR_STOP 1
-- Now run our post-restore function.
SELECT timescaledb_post_restore();
SHOW timescaledb.restoring;
-- timescaledb_post_restore restarts background worker so we have to stop them
Reported by SQLint.
Line: 165
Column: 1
ORDER BY objid::text DESC;
-- Make sure we can't run our restoring functions as a normal perm user as that would disable functionality for the whole db
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- Hides error messages in cases where error messages differ between Postgres versions
create or replace function get_sqlstate(in_text TEXT) RETURNS TEXT AS
$$
BEGIN
BEGIN
Reported by SQLint.
Line: 172
Column: 5
BEGIN
BEGIN
EXECUTE in_text;
EXCEPTION WHEN others THEN GET STACKED DIAGNOSTICS in_text = RETURNED_SQLSTATE;
END;
RETURN in_text;
END;
$$
LANGUAGE PLPGSQL;
Reported by SQLint.
Line: 174
Column: 5
EXECUTE in_text;
EXCEPTION WHEN others THEN GET STACKED DIAGNOSTICS in_text = RETURNED_SQLSTATE;
END;
RETURN in_text;
END;
$$
LANGUAGE PLPGSQL;
SELECT get_sqlstate('SELECT timescaledb_pre_restore()');
Reported by SQLint.
Line: 176
Column: 1
END;
RETURN in_text;
END;
$$
LANGUAGE PLPGSQL;
SELECT get_sqlstate('SELECT timescaledb_pre_restore()');
SELECT get_sqlstate('SELECT timescaledb_post_restore()');
Reported by SQLint.
tsl/test/shared/sql/with_clause_parser.sql
13 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE OR REPLACE FUNCTION test_with_clause_filter(with_clauses TEXT[][])
RETURNS TABLE(namespace TEXT, name TEXT, value TEXT, filtered BOOLEAN)
AS :MODULE_PATHNAME, 'ts_test_with_clause_filter' LANGUAGE C VOLATILE STRICT;
CREATE OR REPLACE FUNCTION test_with_clause_parse(with_clauses TEXT[][])
Reported by SQLint.
Line: 13
Column: 1
CREATE OR REPLACE FUNCTION test_with_clause_parse(with_clauses TEXT[][])
RETURNS TABLE(name TEXT, unimpl INT8, bool BOOLEAN, int32 INT4, def INT4, name_arg NAME, regc REGCLASS)
AS :MODULE_PATHNAME, 'ts_test_with_clause_parse' LANGUAGE C VOLATILE STRICT;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
SELECT * FROM test_with_clause_filter(
'{
{"baz", "bar", "foo"},
Reported by SQLint.
Line: 46
Column: 1
{"baz"}
}');
\set ON_ERROR_STOP 0
-- unrecognized argument
SELECT * FROM test_with_clause_parse('{{"timescaledb", "fakearg", "bar"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "fakearg"}}');
-- unimplemented handled gracefully
Reported by SQLint.
Line: 55
Column: 1
SELECT * FROM test_with_clause_parse('{{"timescaledb", "unimplemented", "bar"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "unimplemented", "true"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "unimplemented"}}');
\set ON_ERROR_STOP 1
-- bool parsing
SELECT * FROM test_with_clause_parse('{{"timescaledb", "bool", "true"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "bool", "false"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "bool", "on"}}');
Reported by SQLint.
Line: 70
Column: 1
SELECT * FROM test_with_clause_parse('{{"timescaledb", "int32", "1"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "int32", "572"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "int32", "-10"}}');
\set ON_ERROR_STOP 0
SELECT * FROM test_with_clause_parse('{{"timescaledb", "int32", "true"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "int32", "bar"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "int32"}}');
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 74
Column: 1
SELECT * FROM test_with_clause_parse('{{"timescaledb", "int32", "true"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "int32", "bar"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "int32"}}');
\set ON_ERROR_STOP 1
-- name parsing
SELECT * FROM test_with_clause_parse('{{"timescaledb", "name", "1"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "name", "572"}}');
Reported by SQLint.
Line: 83
Column: 1
SELECT * FROM test_with_clause_parse('{{"timescaledb", "name", "-10"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "name", "true"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "name", "bar"}}');
\set ON_ERROR_STOP 0
SELECT * FROM test_with_clause_parse('{{"timescaledb", "name"}}');
\set ON_ERROR_STOP 1
-- REGCLASS parsing
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "pg_type"}}');
Reported by SQLint.
Line: 85
Column: 1
SELECT * FROM test_with_clause_parse('{{"timescaledb", "name", "bar"}}');
\set ON_ERROR_STOP 0
SELECT * FROM test_with_clause_parse('{{"timescaledb", "name"}}');
\set ON_ERROR_STOP 1
-- REGCLASS parsing
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "pg_type"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "1"}}');
\set ON_ERROR_STOP 0
Reported by SQLint.
Line: 90
Column: 1
-- REGCLASS parsing
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "pg_type"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "1"}}');
\set ON_ERROR_STOP 0
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "-10"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "true"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "bar"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass"}}');
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 95
Column: 1
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "true"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass", "bar"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "regclass"}}');
\set ON_ERROR_STOP 1
-- defaults get overridden
SELECT * FROM test_with_clause_parse('{{"timescaledb", "default", "1"}}');
SELECT * FROM test_with_clause_parse('{{"timescaledb", "default", "572"}}');
Reported by SQLint.
test/sql/bgw_launcher.sql
13 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\set TEST_DBNAME_2 :TEST_DBNAME _2
\c :TEST_DBNAME :ROLE_SUPERUSER
-- start bgw since they are stopped for tests by default
SELECT _timescaledb_internal.start_background_workers();
Reported by SQLint.
Line: 13
Column: 1
CREATE DATABASE :TEST_DBNAME_2;
\c :TEST_DBNAME_2 :ROLE_SUPERUSER
\ir include/bgw_launcher_utils.sql
-- When we've connected to test db 2, we should be able to see the cluster launcher
-- and the scheduler for test db in pg_stat_activity
Reported by SQLint.
Line: 182
Column: 1
-- Test that background workers are stopped with DROP OWNED
ALTER ROLE :ROLE_DEFAULT_PERM_USER WITH SUPERUSER;
\c :TEST_DBNAME_2 :ROLE_DEFAULT_PERM_USER
SET client_min_messages = ERROR;
CREATE EXTENSION timescaledb CASCADE;
RESET client_min_messages;
-- Make sure there is 1 launcher and 1 bgw in test db 2
SELECT wait_worker_counts(launcher_ct=>1, scheduler1_ct=> 0, scheduler2_ct=>1, template1_ct=>0);
Reported by SQLint.
Line: 195
Column: 1
DROP OWNED BY :ROLE_DEFAULT_PERM_USER;
-- The worker in test db 2 is dead. Note that 0s are respected
SELECT wait_worker_counts(launcher_ct=>1, scheduler1_ct=>0, scheduler2_ct=>0, template1_ct=>0);
\c :TEST_DBNAME_2 :ROLE_SUPERUSER
ALTER ROLE :ROLE_DEFAULT_PERM_USER WITH NOSUPERUSER;
-- Connect to the template1 database
\c template1
\ir include/bgw_launcher_utils.sql
Reported by SQLint.
Line: 199
Column: 1
ALTER ROLE :ROLE_DEFAULT_PERM_USER WITH NOSUPERUSER;
-- Connect to the template1 database
\c template1
\ir include/bgw_launcher_utils.sql
BEGIN;
-- Then create extension there in a txn and make sure we see a scheduler start
SET client_min_messages = ERROR;
Reported by SQLint.
Line: 212
Column: 1
-- End our transaction and it should immediately exit because it's a template database.
SELECT wait_worker_counts(1,0,0,0);
-- Clean up the template database, removing our test utilities etc
\ir include/bgw_launcher_utils_cleanup.sql
\c :TEST_DBNAME_2
-- Now try creating a DB from a template with the extension already installed.
-- Make sure we see a scheduler start.
CREATE DATABASE :TEST_DBNAME;
Reported by SQLint.
Line: 222
Column: 1
DROP DATABASE :TEST_DBNAME;
-- Now make sure that there's no race between create database and create extension.
-- Although to be honest, this race probably wouldn't manifest in this test.
\c template1
DROP EXTENSION timescaledb;
\c :TEST_DBNAME_2
CREATE DATABASE :TEST_DBNAME;
\c :TEST_DBNAME
SET client_min_messages = ERROR;
Reported by SQLint.
Line: 224
Column: 1
-- Although to be honest, this race probably wouldn't manifest in this test.
\c template1
DROP EXTENSION timescaledb;
\c :TEST_DBNAME_2
CREATE DATABASE :TEST_DBNAME;
\c :TEST_DBNAME
SET client_min_messages = ERROR;
CREATE EXTENSION timescaledb;
RESET client_min_messages;
Reported by SQLint.
Line: 226
Column: 1
DROP EXTENSION timescaledb;
\c :TEST_DBNAME_2
CREATE DATABASE :TEST_DBNAME;
\c :TEST_DBNAME
SET client_min_messages = ERROR;
CREATE EXTENSION timescaledb;
RESET client_min_messages;
\c :TEST_DBNAME_2
SELECT wait_worker_counts(1,1,0,0);
Reported by SQLint.
Line: 230
Column: 1
SET client_min_messages = ERROR;
CREATE EXTENSION timescaledb;
RESET client_min_messages;
\c :TEST_DBNAME_2
SELECT wait_worker_counts(1,1,0,0);
-- test rename database
CREATE DATABASE db_rename_test;
Reported by SQLint.