The following issues were found:
test/sql/loader.sql
37 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\set TEST_DBNAME_2 :TEST_DBNAME _2
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE DATABASE :TEST_DBNAME_2;
DROP EXTENSION timescaledb;
Reported by SQLint.
Line: 12
Column: 1
DROP EXTENSION timescaledb;
--no extension
\dx
SELECT 1;
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE EXTENSION timescaledb VERSION 'mock-1';
SELECT 1;
Reported by SQLint.
Line: 15
Column: 1
\dx
SELECT 1;
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE EXTENSION timescaledb VERSION 'mock-1';
SELECT 1;
\dx
CREATE EXTENSION IF NOT EXISTS timescaledb VERSION 'mock-1';
Reported by SQLint.
Line: 18
Column: 1
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE EXTENSION timescaledb VERSION 'mock-1';
SELECT 1;
\dx
CREATE EXTENSION IF NOT EXISTS timescaledb VERSION 'mock-1';
CREATE EXTENSION IF NOT EXISTS timescaledb VERSION 'mock-2';
DROP EXTENSION timescaledb;
Reported by SQLint.
Line: 24
Column: 1
CREATE EXTENSION IF NOT EXISTS timescaledb VERSION 'mock-2';
DROP EXTENSION timescaledb;
\set ON_ERROR_STOP 0
--test that we cannot accidentally load another library version
CREATE EXTENSION IF NOT EXISTS timescaledb VERSION 'mock-2';
\set ON_ERROR_STOP 1
\c :TEST_DBNAME :ROLE_SUPERUSER
Reported by SQLint.
Line: 27
Column: 1
\set ON_ERROR_STOP 0
--test that we cannot accidentally load another library version
CREATE EXTENSION IF NOT EXISTS timescaledb VERSION 'mock-2';
\set ON_ERROR_STOP 1
\c :TEST_DBNAME :ROLE_SUPERUSER
--no extension
\dx
SELECT 1;
Reported by SQLint.
Line: 37
Column: 1
CREATE EXTENSION timescaledb VERSION 'mock-1';
--same backend as create extension;
SELECT 1;
\dx
--start new backend;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
SELECT 1;
Reported by SQLint.
Line: 40
Column: 1
\dx
--start new backend;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
SELECT 1;
SELECT 1;
--test fn call after load
SELECT mock_function();
Reported by SQLint.
Line: 46
Column: 1
SELECT 1;
--test fn call after load
SELECT mock_function();
\dx
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
--test fn call as first command
SELECT mock_function();
Reported by SQLint.
Line: 53
Column: 1
SELECT mock_function();
--use guc to prevent loading
\c :TEST_DBNAME :ROLE_SUPERUSER
SET timescaledb.disable_load = 'on';
SELECT 1;
SELECT 1;
SET timescaledb.disable_load = 'off';
SELECT 1;
Reported by SQLint.
scripts/gh_ci_summary.py
37 issues
Line: 14
Column: 14
message=list()
def get_json(url):
    response = requests.get(url)
    return response.json()
# get runs from last 24 hours
def process_runs(runs):
Reported by Pylint.
Line: 20
Column: 3
# get runs from last 24 hours
def process_runs(runs):
    failed=list()
    for run in runs:
        start = datetime.strptime(run['created_at'], "%Y-%m-%dT%H:%M:%SZ")
        delta = datetime.now() - start
Reported by Pylint.
Line: 40
Column: 19
        if job['conclusion'] != 'success':
            message.append("<{html_url}|{workflow_name} {name}>".format(workflow_name=run['name'], **job))
def print_summary(failed):
    if len(failed) > 0:
        message.append("Failed scheduled CI runs in last 24 hours:")
        for run in failed:
            print_run_details(run)
    else:
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/python3
from datetime import datetime
import requests
import json
import os
# API reference: https://docs.github.com/en/rest/reference/actions#workflow-runs
# Slack message formatting: https://api.slack.com/reference/surfaces/formatting
Reported by Pylint.
Line: 5
Column: 1
from datetime import datetime
import requests
import json
import os
# API reference: https://docs.github.com/en/rest/reference/actions#workflow-runs
# Slack message formatting: https://api.slack.com/reference/surfaces/formatting
url = 'https://api.github.com/repos/timescale/timescaledb/actions/runs?event=schedule&status=completed'
Reported by Pylint.
Line: 6
Column: 1
from datetime import datetime
import requests
import json
import os
# API reference: https://docs.github.com/en/rest/reference/actions#workflow-runs
# Slack message formatting: https://api.slack.com/reference/surfaces/formatting
url = 'https://api.github.com/repos/timescale/timescaledb/actions/runs?event=schedule&status=completed'
Reported by Pylint.
Line: 10
Column: 1
# API reference: https://docs.github.com/en/rest/reference/actions#workflow-runs
# Slack message formatting: https://api.slack.com/reference/surfaces/formatting
url = 'https://api.github.com/repos/timescale/timescaledb/actions/runs?event=schedule&status=completed'
message=list()
def get_json(url):
    response = requests.get(url)
Reported by Pylint.
Line: 10
Column: 1
# API reference: https://docs.github.com/en/rest/reference/actions#workflow-runs
# Slack message formatting: https://api.slack.com/reference/surfaces/formatting
url = 'https://api.github.com/repos/timescale/timescaledb/actions/runs?event=schedule&status=completed'
message=list()
def get_json(url):
    response = requests.get(url)
Reported by Pylint.
Line: 14
Column: 1
message=list()
def get_json(url):
    response = requests.get(url)
    return response.json()
# get runs from last 24 hours
def process_runs(runs):
Reported by Pylint.
Line: 15
Column: 1
message=list()
def get_json(url):
    response = requests.get(url)
    return response.json()
# get runs from last 24 hours
def process_runs(runs):
    failed=list()
Reported by Pylint.
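Note: the report gives only file positions, not the Pylint message texts, but the excerpts show patterns Pylint routinely flags: no module or function docstrings, message=list() written without surrounding whitespace, a module-level url shadowed by the get_json(url) parameter, and imports (json, os) that never appear in the excerpted code. Below is a minimal sketch of how such findings are usually resolved; the constant name RUNS_URL and the request timeout are illustrative assumptions, not the repository's actual fix.

"""Summarize failed scheduled GitHub Actions runs for Slack."""
import requests

# Renaming the module-level constant (the name here is assumed) removes
# the shadowing between it and the get_json(url) parameter.
RUNS_URL = (
    "https://api.github.com/repos/timescale/timescaledb"
    "/actions/runs?event=schedule&status=completed"
)

message = []  # "[]" instead of "list()", with whitespace around "="

def get_json(url):
    """Fetch a URL and return the decoded JSON body."""
    # The explicit timeout is an addition for robustness, not in the original.
    response = requests.get(url, timeout=30)
    return response.json()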
tsl/test/sql/bgw_policy.sql
36 issues
Line: 27
Column: 82
-- Make sure reorder correctly selects chunks to reorder
-- by starting with oldest chunks
select add_reorder_policy('test_table', 'test_table_time_idx') as reorder_job_id \gset
select * from _timescaledb_config.bgw_job WHERE id >= 1000 ORDER BY id;
select job_id, chunk_id, num_times_job_run from _timescaledb_internal.bgw_policy_chunk_stats;
-- Make a manual calls to reorder: make sure the correct chunk is called
Reported by SQLint.
Line: 71
Column: 92
select remove_reorder_policy('test_table');
-- Now do drop_chunks test
select add_retention_policy('test_table', INTERVAL '4 months', true) as drop_chunks_job_id \gset
SELECT count(*) FROM _timescaledb_catalog.chunk as c, _timescaledb_catalog.hypertable as ht where c.hypertable_id = ht.id and ht.table_name='test_table';
-- Now simulate drop_chunks running automatically by calling it explicitly
CALL run_job(:drop_chunks_job_id);
Reported by SQLint.
Line: 79
Column: 154
CALL run_job(:drop_chunks_job_id);
-- Should have 4 chunks left
SELECT count(*) FROM _timescaledb_catalog.chunk as c, _timescaledb_catalog.hypertable as ht where c.hypertable_id = ht.id and ht.table_name='test_table' \gset before_
select :before_count=4;
-- Make sure this second call does nothing
CALL run_job(:drop_chunks_job_id);
SELECT count(*) FROM _timescaledb_catalog.chunk as c, _timescaledb_catalog.hypertable as ht where c.hypertable_id = ht.id and ht.table_name='test_table' \gset after_
Reported by SQLint.
Line: 84
Column: 154
-- Make sure this second call does nothing
CALL run_job(:drop_chunks_job_id);
SELECT count(*) FROM _timescaledb_catalog.chunk as c, _timescaledb_catalog.hypertable as ht where c.hypertable_id = ht.id and ht.table_name='test_table' \gset after_
-- Should be true
select :before_count=:after_count;
INSERT INTO test_table VALUES (now() - INTERVAL '2 weeks', 1);
Reported by SQLint.
Line: 90
Column: 154
select :before_count=:after_count;
INSERT INTO test_table VALUES (now() - INTERVAL '2 weeks', 1);
SELECT count(*) FROM _timescaledb_catalog.chunk as c, _timescaledb_catalog.hypertable as ht where c.hypertable_id = ht.id and ht.table_name='test_table' \gset before_
-- This call should also do nothing
CALL run_job(:drop_chunks_job_id);
SELECT count(*) FROM _timescaledb_catalog.chunk as c, _timescaledb_catalog.hypertable as ht where c.hypertable_id = ht.id and ht.table_name='test_table' \gset after_
Reported by SQLint.
Line: 94
Column: 154
-- This call should also do nothing
CALL run_job(:drop_chunks_job_id);
SELECT count(*) FROM _timescaledb_catalog.chunk as c, _timescaledb_catalog.hypertable as ht where c.hypertable_id = ht.id and ht.table_name='test_table' \gset after_
-- Should be true
select :before_count=:after_count;
select remove_retention_policy('test_table');
Reported by SQLint.
Line: 113
Column: 82
INSERT INTO test_table VALUES (now() - INTERVAL '8 months', 5);
INSERT INTO test_table VALUES (now() - INTERVAL '8 months', -5);
select add_reorder_policy('test_table', 'test_table_time_idx') as reorder_job_id \gset
-- Should be nothing in the chunk_stats table
select count(*) from _timescaledb_internal.bgw_policy_chunk_stats where job_id=:reorder_job_id;
-- Make a manual calls to reorder: make sure the correct (oldest) chunk is called
select chunk_id from _timescaledb_catalog.dimension_slice as ds, _timescaledb_catalog.chunk_constraint as cc where ds.dimension_id=1 and ds.id=cc.dimension_slice_id ORDER BY ds.range_start LIMIT 1 \gset oldest_
Reported by SQLint.
Line: 118
Column: 198
select count(*) from _timescaledb_internal.bgw_policy_chunk_stats where job_id=:reorder_job_id;
-- Make a manual calls to reorder: make sure the correct (oldest) chunk is called
select chunk_id from _timescaledb_catalog.dimension_slice as ds, _timescaledb_catalog.chunk_constraint as cc where ds.dimension_id=1 and ds.id=cc.dimension_slice_id ORDER BY ds.range_start LIMIT 1 \gset oldest_
CALL run_job(:reorder_job_id);
select job_id, chunk_id, num_times_job_run from _timescaledb_internal.bgw_policy_chunk_stats where job_id=:reorder_job_id and chunk_id=:oldest_chunk_id;
-- Confirm that reorder was called on the correct chunk Oid
Reported by SQLint.
Line: 127
Column: 292
SELECT reorder_called(:oldest_chunk_id);
-- Now run reorder again and pick the next oldest chunk
select cc.chunk_id from _timescaledb_catalog.dimension_slice as ds, _timescaledb_catalog.chunk_constraint as cc where ds.dimension_id=1 and ds.id=cc.dimension_slice_id and cc.chunk_id NOT IN (select chunk_id from _timescaledb_internal.bgw_policy_chunk_stats) ORDER BY ds.range_start LIMIT 1 \gset oldest_
CALL run_job(:reorder_job_id);
select job_id, chunk_id, num_times_job_run from _timescaledb_internal.bgw_policy_chunk_stats where job_id=:reorder_job_id and chunk_id=:oldest_chunk_id;
-- Confirm that reorder was called on the correct chunk Oid
Reported by SQLint.
Line: 136
Column: 292
SELECT reorder_called(:oldest_chunk_id);
-- Again
select cc.chunk_id from _timescaledb_catalog.dimension_slice as ds, _timescaledb_catalog.chunk_constraint as cc where ds.dimension_id=1 and ds.id=cc.dimension_slice_id and cc.chunk_id NOT IN (select chunk_id from _timescaledb_internal.bgw_policy_chunk_stats) ORDER BY ds.range_start LIMIT 1 \gset oldest_
CALL run_job(:reorder_job_id);
select job_id, chunk_id, num_times_job_run from _timescaledb_internal.bgw_policy_chunk_stats where job_id=:reorder_job_id and chunk_id=:oldest_chunk_id;
SELECT reorder_called(:oldest_chunk_id);
Reported by SQLint.
scripts/gh_matrix_builder.py
34 issues
Line: 1
Column: 1
# Copyright (c) 2016-2021 Timescale, Inc. All Rights Reserved.
#
# This file is licensed under the Apache License, see LICENSE-APACHE
# at the top level directory of the timescaledb distribution.
# Python script to dynamically generate matrix for github action
# Since we want to run additional test configurations when triggered
# by a push to prerelease_test or by cron but github actions don't
Reported by Pylint.
Line: 33
Column: 1
# helper functions to generate matrix entries
# the release and apache config inherit from the
# debug config to reduce repetition
def build_debug_config(overrides):
    # llvm version and clang versions must match otherwise
    # there will be build errors this is true even when compiling
    # with gcc as clang is used to compile the llvm parts.
    #
    # Strictly speaking, WARNINGS_AS_ERRORS=ON is not needed here, but
Reported by Pylint.
Line: 43
Column: 1
    # release and one debug build with WARNINGS_AS_ERRORS=ON so that we
    # capture warnings generated due to changes in the code base or the
    # compiler.
    base_config = dict({
        "name": "Debug",
        "build_type": "Debug",
        "pg_build_args": "--enable-debug --enable-cassert",
        "tsdb_build_args": "-DCODECOVERAGE=ON -DWARNINGS_AS_ERRORS=ON",
        "installcheck_args": "IGNORES='bgw_db_scheduler'",
Reported by Pylint.
Line: 57
Column: 1
"cc": "gcc",
"cxx": "g++",
})
base_config.update(overrides)
return base_config
# We build this release configuration with WARNINGS_AS_ERRORS=ON to
# make sure that we can build with -Werrors even for release
# builds. This will capture some cases where warnings are generated
Reported by Pylint.
Line: 58
Column: 1
"cxx": "g++",
})
base_config.update(overrides)
return base_config
# We build this release configuration with WARNINGS_AS_ERRORS=ON to
# make sure that we can build with -Werrors even for release
# builds. This will capture some cases where warnings are generated
# for release builds but not for debug builds.
Reported by Pylint.
Line: 64
Column: 1
# make sure that we can build with -Werrors even for release
# builds. This will capture some cases where warnings are generated
# for release builds but not for debug builds.
def build_release_config(overrides):
    base_config = build_debug_config({})
    release_config = dict({
        "name": "Release",
        "build_type": "Release",
        "pg_build_args": "",
Reported by Pylint.
Line: 65
Column: 1
# builds. This will capture some cases where warnings are generated
# for release builds but not for debug builds.
def build_release_config(overrides):
    base_config = build_debug_config({})
    release_config = dict({
        "name": "Release",
        "build_type": "Release",
        "pg_build_args": "",
        "tsdb_build_args": "-DWARNINGS_AS_ERRORS=ON",
Reported by Pylint.
Line: 66
Column: 1
# for release builds but not for debug builds.
def build_release_config(overrides):
    base_config = build_debug_config({})
    release_config = dict({
        "name": "Release",
        "build_type": "Release",
        "pg_build_args": "",
        "tsdb_build_args": "-DWARNINGS_AS_ERRORS=ON",
        "coverage": False,
Reported by Pylint.
Line: 73
Column: 1
"tsdb_build_args": "-DWARNINGS_AS_ERRORS=ON",
"coverage": False,
})
base_config.update(release_config)
base_config.update(overrides)
return base_config
def build_apache_config(overrides):
    base_config = build_debug_config({})
Reported by Pylint.
Line: 74
Column: 1
"coverage": False,
})
base_config.update(release_config)
base_config.update(overrides)
return base_config
def build_apache_config(overrides):
    base_config = build_debug_config({})
    apache_config = dict({
Reported by Pylint.
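Note: here again the message texts are omitted, but the excerpts contain two patterns Pylint commonly reports: functions without docstrings, and dict({...}) wrapping what is already a dict literal (a construction newer Pylint versions flag, assuming that check was enabled in this run). A hedged sketch of one helper with both addressed, keeping only the keys visible in the excerpts:

def build_debug_config(overrides):
    """Return the Debug matrix entry, with per-entry overrides applied."""
    # A plain dict literal replaces the redundant dict({...}) wrapper;
    # the keys below are copied verbatim from the excerpts above.
    base_config = {
        "name": "Debug",
        "build_type": "Debug",
        "pg_build_args": "--enable-debug --enable-cassert",
        "tsdb_build_args": "-DCODECOVERAGE=ON -DWARNINGS_AS_ERRORS=ON",
        "installcheck_args": "IGNORES='bgw_db_scheduler'",
        "cc": "gcc",
        "cxx": "g++",
    }
    base_config.update(overrides)
    return base_config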
tsl/test/sql/read_only.sql
34 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
-- Following tests checks that API functions which modify data (including catalog)
-- properly recognize read-only transaction state
--
Reported by SQLint.
Line: 11
Column: 1
-- properly recognize read-only transaction state
--
\set DATA_NODE_1 :TEST_DBNAME _1
\set DATA_NODE_2 :TEST_DBNAME _2
-- create_hypertable()
--
CREATE TABLE test_table(time bigint NOT NULL, device int);
Reported by SQLint.
Line: 20
Column: 1
SET default_transaction_read_only TO on;
\set ON_ERROR_STOP 0
SELECT * FROM create_hypertable('test_table', 'time');
\set ON_ERROR_STOP 1
SET default_transaction_read_only TO off;
SELECT * FROM create_hypertable('test_table', 'time', chunk_time_interval => 1000000::bigint);
Reported by SQLint.
Line: 22
Column: 1
\set ON_ERROR_STOP 0
SELECT * FROM create_hypertable('test_table', 'time');
\set ON_ERROR_STOP 1
SET default_transaction_read_only TO off;
SELECT * FROM create_hypertable('test_table', 'time', chunk_time_interval => 1000000::bigint);
SET default_transaction_read_only TO on;
Reported by SQLint.
Line: 29
Column: 1
SET default_transaction_read_only TO on;
\set ON_ERROR_STOP 0
-- set_chunk_time_interval()
--
SELECT * FROM set_chunk_time_interval('test_table', 2000000000::bigint);
Reported by SQLint.
Line: 51
Column: 1
--
SELECT * FROM add_dimension('test_table', 'device', chunk_time_interval => 100);
\set ON_ERROR_STOP 1
-- tablespaces
--
SET default_transaction_read_only TO off;
Reported by SQLint.
Line: 66
Column: 1
-- attach_tablespace()
--
\set ON_ERROR_STOP 0
SELECT * FROM attach_tablespace('tablespace1', 'test_table');
\set ON_ERROR_STOP 1
SET default_transaction_read_only TO off;
SELECT * FROM attach_tablespace('tablespace1', 'test_table');
Reported by SQLint.
Line: 68
Column: 1
--
\set ON_ERROR_STOP 0
SELECT * FROM attach_tablespace('tablespace1', 'test_table');
\set ON_ERROR_STOP 1
SET default_transaction_read_only TO off;
SELECT * FROM attach_tablespace('tablespace1', 'test_table');
-- detach_tablespace()
Reported by SQLint.
Line: 76
Column: 1
-- detach_tablespace()
--
SET default_transaction_read_only TO on;
\set ON_ERROR_STOP 0
SELECT * FROM detach_tablespace('tablespace1', 'test_table');
\set ON_ERROR_STOP 1
-- detach_tablespaces()
--
Reported by SQLint.
Line: 78
Column: 1
SET default_transaction_read_only TO on;
\set ON_ERROR_STOP 0
SELECT * FROM detach_tablespace('tablespace1', 'test_table');
\set ON_ERROR_STOP 1
-- detach_tablespaces()
--
\set ON_ERROR_STOP 0
SELECT * FROM detach_tablespaces('test_table');
Reported by SQLint.
test/sql/updates/setup.continuous_aggs.v2.sql
33 issues
Line: 17
Column: 32
       extversion >= '2.0.0' AS has_create_mat_view,
       extversion >= '2.0.0' AS has_continuous_aggs_policy
FROM pg_extension
WHERE extname = 'timescaledb' \gset
-- disable background workers to prevent deadlocks between background processes
-- on timescaledb 1.7.x
CALL _timescaledb_testing.stop_workers();
Reported by SQLint.
Line: 161
Column: 1
ALTER MATERIALIZED VIEW rename_cols RENAME COLUMN bucket to "time";
\if :WITH_SUPERUSER
GRANT SELECT ON mat_before TO cagg_user WITH GRANT OPTION;
\endif
\if :has_refresh_mat_view
REFRESH MATERIALIZED VIEW rename_cols;
Reported by SQLint.
Line: 163
Column: 1
\if :WITH_SUPERUSER
GRANT SELECT ON mat_before TO cagg_user WITH GRANT OPTION;
\endif
\if :has_refresh_mat_view
REFRESH MATERIALIZED VIEW rename_cols;
REFRESH MATERIALIZED VIEW mat_before;
\else
Reported by SQLint.
Line: 168
Column: 1
\if :has_refresh_mat_view
REFRESH MATERIALIZED VIEW rename_cols;
REFRESH MATERIALIZED VIEW mat_before;
\else
CALL refresh_continuous_aggregate('rename_cols',NULL,NULL);
CALL refresh_continuous_aggregate('mat_before',NULL,NULL);
\endif
-- we create separate schema for realtime agg since we dump all view definitions in public schema
Reported by SQLint.
Line: 171
Column: 1
\else
CALL refresh_continuous_aggregate('rename_cols',NULL,NULL);
CALL refresh_continuous_aggregate('mat_before',NULL,NULL);
\endif
-- we create separate schema for realtime agg since we dump all view definitions in public schema
-- but realtime agg view definition is not stable across versions
CREATE SCHEMA cagg;
Reported by SQLint.
Line: 275
Column: 1
END IF;
END $$;
\if :WITH_SUPERUSER
GRANT SELECT ON cagg.realtime_mat TO cagg_user;
\endif
\if :has_refresh_mat_view
REFRESH MATERIALIZED VIEW cagg.realtime_mat;
Reported by SQLint.
Line: 277
Column: 1
\if :WITH_SUPERUSER
GRANT SELECT ON cagg.realtime_mat TO cagg_user;
\endif
\if :has_refresh_mat_view
REFRESH MATERIALIZED VIEW cagg.realtime_mat;
\else
CALL refresh_continuous_aggregate('cagg.realtime_mat',NULL,NULL);
Reported by SQLint.
Line: 281
Column: 1
\if :has_refresh_mat_view
REFRESH MATERIALIZED VIEW cagg.realtime_mat;
\else
CALL refresh_continuous_aggregate('cagg.realtime_mat',NULL,NULL);
\endif
-- test ignore_invalidation_older_than migration --
DO LANGUAGE PLPGSQL $$
Reported by SQLint.
Line: 283
Column: 1
REFRESH MATERIALIZED VIEW cagg.realtime_mat;
\else
CALL refresh_continuous_aggregate('cagg.realtime_mat',NULL,NULL);
\endif
-- test ignore_invalidation_older_than migration --
DO LANGUAGE PLPGSQL $$
DECLARE
ts_version TEXT;
Reported by SQLint.
Line: 290
Column: 3
DECLARE
  ts_version TEXT;
BEGIN
  SELECT extversion INTO ts_version FROM pg_extension WHERE extname = 'timescaledb';
  IF ts_version < '2.0.0' THEN
    CREATE VIEW mat_ignoreinval
    WITH ( timescaledb.continuous, timescaledb.materialized_only=true,
           timescaledb.refresh_lag='-30 day',
Reported by SQLint.
tsl/test/sql/tsl_tables.sql
33 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
SET timescaledb.license_key='CommunityLicense';
CREATE OR REPLACE FUNCTION ts_test_chunk_stats_insert(job_id INTEGER, chunk_id INTEGER, num_times_run INTEGER, last_time_run TIMESTAMPTZ = NULL) RETURNS VOID
AS :TSL_MODULE_PATHNAME LANGUAGE C VOLATILE;
Reported by SQLint.
Line: 11
Column: 1
CREATE OR REPLACE FUNCTION ts_test_chunk_stats_insert(job_id INTEGER, chunk_id INTEGER, num_times_run INTEGER, last_time_run TIMESTAMPTZ = NULL) RETURNS VOID
AS :TSL_MODULE_PATHNAME LANGUAGE C VOLATILE;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
SELECT * FROM _timescaledb_config.bgw_job WHERE id >= 1000 ORDER BY id;
CREATE TABLE test_table(time timestamptz, junk int);
CREATE TABLE test_table_int(time bigint, junk int);
Reported by SQLint.
Line: 24
Column: 74
CREATE INDEX second_index on test_table (time);
CREATE INDEX third_index on test_table (time);
select add_reorder_policy('test_table', 'test_table_time_idx') as job_id \gset
-- Noop for duplicate policy
select add_reorder_policy('test_table', 'test_table_time_idx', true);
select add_reorder_policy('test_table', 'second_index', true);
select add_reorder_policy('test_table', 'third_index', true);
Reported by SQLint.
Line: 30
Column: 1
select add_reorder_policy('test_table', 'second_index', true);
select add_reorder_policy('test_table', 'third_index', true);
\set ON_ERROR_STOP 0
-- Error whenever incorrect arguments are applied (must have table and index)
select add_reorder_policy('test_table', 'bad_index');
select add_reorder_policy('test_table', '');
select add_reorder_policy('test_table');
Reported by SQLint.
Line: 40
Column: 1
select add_reorder_policy('test_table', 'third_index');
select add_reorder_policy(NULL, 'third_index');
select add_reorder_policy(2, 'third_index');
\set ON_ERROR_STOP 1
select * from _timescaledb_config.bgw_job where id=:job_id;
-- Now check that default scheduling interval for reorder policy is calculated correctly
-- Should be 1/2 default chunk interval length
Reported by SQLint.
Line: 57
Column: 1
SELECT * FROM _timescaledb_config.bgw_job WHERE id >= 1000 ORDER BY id;
-- Error whenever incorrect arguments are applied (must have table and interval)
\set ON_ERROR_STOP 0
select add_retention_policy();
select add_retention_policy('test_table');
select add_retention_policy(INTERVAL '3 hours');
select add_retention_policy('test_table', INTERVAL 'haha');
select add_retention_policy('test_table', 'haha');
Reported by SQLint.
Line: 65
Column: 1
select add_retention_policy('test_table', 'haha');
select add_retention_policy('test_table', 42);
select add_retention_policy('fake_table', INTERVAL '3 month');
\set ON_ERROR_STOP 1
select add_retention_policy('test_table', INTERVAL '3 month', true);
-- Should not add new policy with different parameters
select add_retention_policy('test_table', INTERVAL '3 month', true);
select add_retention_policy('test_table', INTERVAL '1 year', if_not_exists => true);
Reported by SQLint.
Line: 76
Column: 1
SELECT * FROM _timescaledb_config.bgw_job WHERE proc_name = 'policy_retention' ORDER BY id;
\set ON_ERROR_STOP 0
select add_retention_policy('test_table', INTERVAL '1 year');
select add_retention_policy('test_table', INTERVAL '3 days');
\set ON_ERROR_STOP 1
SELECT * FROM _timescaledb_config.bgw_job WHERE proc_name = 'policy_retention' ORDER BY id;
Reported by SQLint.
Line: 79
Column: 1
\set ON_ERROR_STOP 0
select add_retention_policy('test_table', INTERVAL '1 year');
select add_retention_policy('test_table', INTERVAL '3 days');
\set ON_ERROR_STOP 1
SELECT * FROM _timescaledb_config.bgw_job WHERE proc_name = 'policy_retention' ORDER BY id;
select remove_retention_policy('test_table');
Reported by SQLint.
Line: 89
Column: 1
-- hypertables that have integer time dimension
select * from _timescaledb_catalog.dimension;
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE SCHEMA IF NOT EXISTS my_new_schema;
create or replace function my_new_schema.dummy_now2() returns BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 1::BIGINT';
grant execute on ALL FUNCTIONS IN SCHEMA my_new_schema to public;
select set_integer_now_func('test_table_int', 'my_new_schema.dummy_now2');
Reported by SQLint.
tsl/test/sql/continuous_aggs_bgw.sql
32 issues
Line: 8
Column: 1
--
-- Setup
--
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID
AS :MODULE_PATHNAME LANGUAGE C VOLATILE;
CREATE OR REPLACE FUNCTION ts_bgw_db_scheduler_test_run(timeout INT = -1, mock_start_time INT = 0) RETURNS VOID
AS :MODULE_PATHNAME LANGUAGE C VOLATILE;
Reported by SQLint.
Line: 38
Column: 1
$BODY$;
REVOKE EXECUTE ON FUNCTION get_constant_no_perms() FROM PUBLIC;
\set WAIT_ON_JOB 0
\set IMMEDIATELY_SET_UNTIL 1
\set WAIT_FOR_OTHER_TO_ADVANCE 2
CREATE OR REPLACE FUNCTION ts_bgw_params_mock_wait_returns_immediately(new_val INTEGER) RETURNS VOID
AS :MODULE_PATHNAME LANGUAGE C VOLATILE;
Reported by SQLint.
Line: 49
Column: 1
DELETE FROM _timescaledb_config.bgw_job WHERE TRUE;
TRUNCATE _timescaledb_internal.bgw_job_stat;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE public.bgw_log(
    msg_no INT,
    mock_time BIGINT,
    application_name TEXT,
Reported by SQLint.
Line: 72
Column: 1
SELECT * FROM timescaledb_information.job_stats;
SELECT * FROM _timescaledb_catalog.continuous_agg;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE test_continuous_agg_table(time int, data int);
SELECT create_hypertable('test_continuous_agg_table', 'time', chunk_time_interval => 10);
CREATE OR REPLACE FUNCTION integer_now_test() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), 0) FROM test_continuous_agg_table $$;
SELECT set_integer_now_func('test_continuous_agg_table', 'integer_now_test');
Reported by SQLint.
Line: 86
Column: 109
SELECT add_continuous_aggregate_policy('test_continuous_agg_view', NULL, 4::integer, '12 h'::interval);
SELECT id as raw_table_id FROM _timescaledb_catalog.hypertable WHERE table_name='test_continuous_agg_table' \gset
-- min distance from end should be 1
SELECT mat_hypertable_id, user_view_schema, user_view_name, bucket_width
FROM _timescaledb_catalog.continuous_agg;
SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset
Reported by SQLint.
Line: 91
Column: 67
-- min distance from end should be 1
SELECT mat_hypertable_id, user_view_schema, user_view_name, bucket_width
FROM _timescaledb_catalog.continuous_agg;
SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg \gset
SELECT id AS job_id FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id \gset
-- job was created
SELECT * FROM _timescaledb_config.bgw_job where hypertable_id=:mat_hypertable_id;
Reported by SQLint.
Line: 129
Column: 5
$BODY$
DECLARE
    num_runs INTEGER;
    message TEXT;
BEGIN
    select format('[TESTING] Wait until %%, started at %s', started_at) into message;
    FOR i in 1..spins
    LOOP
        SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs;
Reported by SQLint.
Line: 131
Column: 5
    num_runs INTEGER;
    message TEXT;
BEGIN
    select format('[TESTING] Wait until %%, started at %s', started_at) into message;
    FOR i in 1..spins
    LOOP
        SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs;
        if (num_runs > 0) THEN
            RETURN true;
Reported by SQLint.
Line: 132
Column: 5
    message TEXT;
BEGIN
    select format('[TESTING] Wait until %%, started at %s', started_at) into message;
    FOR i in 1..spins
    LOOP
        SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs;
        if (num_runs > 0) THEN
            RETURN true;
        ELSE
Reported by SQLint.
Line: 135
Column: 5
    FOR i in 1..spins
    LOOP
        SELECT COUNT(*) from bgw_log where msg LIKE message INTO num_runs;
        if (num_runs > 0) THEN
            RETURN true;
        ELSE
            PERFORM pg_sleep(0.1);
        END IF;
    END LOOP;
Reported by SQLint.
tsl/test/sql/continuous_aggs_ddl.sql
31 issues
Line: 7
Column: 1
-- Set this variable to avoid using a hard-coded path each time query
-- results are compared
\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../test/sql/include/query_result_test_equal.sql'
\set ON_ERROR_STOP 0
--DDL commands on continuous aggregates
Reported by SQLint.
Line: 26
Column: 1
-- schema tests
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH;
CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH;
CREATE SCHEMA rename_schema;
GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER;
Reported by SQLint.
Line: 62
Column: 1
FROM _timescaledb_catalog.continuous_agg ca
INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id)
WHERE user_view_name = 'rename_test'
\gset
RESET ROLE;
SELECT current_user;
ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public;
Reported by SQLint.
Line: 151
Column: 84
CREATE TABLE drop_chunks_table(time BIGINT, data INTEGER);
SELECT hypertable_id AS drop_chunks_table_id
FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset
CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$;
SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test');
CREATE MATERIALIZED VIEW drop_chunks_view
Reported by SQLint.
Line: 185
Column: 1
-- cannot drop directly from the materialization table without specifying
-- cont. aggregate view name explicitly
\set ON_ERROR_STOP 0
SELECT drop_chunks(:'drop_chunks_mat_table',
       newer_than => -20,
       verbose => true);
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 189
Column: 1
SELECT drop_chunks(:'drop_chunks_mat_table',
       newer_than => -20,
       verbose => true);
\set ON_ERROR_STOP 1
SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
SELECT count(c) FROM show_chunks('drop_chunks_view') AS c;
SELECT * FROM drop_chunks_view ORDER BY 1;
Reported by SQLint.
Line: 200
Column: 85
DROP TABLE drop_chunks_table CASCADE;
CREATE TABLE drop_chunks_table_u(time BIGINT, data INTEGER);
SELECT hypertable_id AS drop_chunks_table_u_id
FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset
CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$;
SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1');
CREATE MATERIALIZED VIEW drop_chunks_view
Reported by SQLint.
Line: 233
Column: 1
-- TRUNCATE test
-- Can truncate regular hypertables that have caggs
TRUNCATE drop_chunks_table_u;
\set ON_ERROR_STOP 0
-- Can't truncate materialized hypertables directly
TRUNCATE :drop_chunks_mat_table_u;
\set ON_ERROR_STOP 1
-- Check that we don't interfere with TRUNCATE of normal table and
Reported by SQLint.
Line: 236
Column: 1
\set ON_ERROR_STOP 0
-- Can't truncate materialized hypertables directly
TRUNCATE :drop_chunks_mat_table_u;
\set ON_ERROR_STOP 1
-- Check that we don't interfere with TRUNCATE of normal table and
-- partitioned table
CREATE TABLE truncate (value int);
INSERT INTO truncate VALUES (1), (2);
Reported by SQLint.
Line: 253
Column: 1
SELECT * FROM truncate_partitioned;
-- ALTER TABLE tests
\set ON_ERROR_STOP 0
-- test a variety of ALTER TABLE statements
ALTER TABLE :drop_chunks_mat_table_u RENAME chunk_id TO bad_name;
ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(chunk_id);
ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED;
ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY;
Reported by SQLint.
tsl/test/sql/compression.sql
28 issues
Line: 7
Column: 1
SET timescaledb.enable_transparent_decompression to OFF;
\ir include/rand_generator.sql
--test_collation ---
--basic test with count
create table foo (a integer, b integer, c integer, d integer);
select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10);
Reported by SQLint.
Line: 39
Column: 1
select tgname , tgtype, tgenabled , relname
from pg_trigger t, pg_class rel
where t.tgrelid = rel.oid and rel.relname like '_hyper_1_2_chunk' order by tgname;
\x
select * from chunk_compression_stats('foo')
order by chunk_name limit 2;
\x
select compress_chunk( '_timescaledb_internal._hyper_1_1_chunk');
\x
Reported by SQLint.
Line: 42
Column: 1
\x
select * from chunk_compression_stats('foo')
order by chunk_name limit 2;
\x
select compress_chunk( '_timescaledb_internal._hyper_1_1_chunk');
\x
select * from _timescaledb_catalog.compression_chunk_size
order by chunk_id;
\x
Reported by SQLint.
Line: 44
Column: 1
order by chunk_name limit 2;
\x
select compress_chunk( '_timescaledb_internal._hyper_1_1_chunk');
\x
select * from _timescaledb_catalog.compression_chunk_size
order by chunk_id;
\x
select ch1.id, ch1.schema_name, ch1.table_name , ch2.table_name as compress_table
from
Reported by SQLint.
Line: 47
Column: 1
\x
select * from _timescaledb_catalog.compression_chunk_size
order by chunk_id;
\x
select ch1.id, ch1.schema_name, ch1.table_name , ch2.table_name as compress_table
from
_timescaledb_catalog.chunk ch1, _timescaledb_catalog.chunk ch2
where ch1.compressed_chunk_id = ch2.id;
Reported by SQLint.
Line: 53
Column: 1
_timescaledb_catalog.chunk ch1, _timescaledb_catalog.chunk ch2
where ch1.compressed_chunk_id = ch2.id;
\set ON_ERROR_STOP 0
--cannot recompress the chunk the second time around
select compress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
--TEST2a try DML on a compressed chunk
insert into foo values( 11 , 10 , 20, 120);
Reported by SQLint.
Line: 132
Column: 9
SELECT ch1.schema_name|| '.' || ch1.table_name as "CHUNK_NAME", ch1.id "CHUNK_ID"
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'
ORDER BY ch1.id
LIMIT 1 \gset
SELECT count(*) from :CHUNK_NAME;
SELECT count(*) as "ORIGINAL_CHUNK_COUNT" from :CHUNK_NAME \gset
select tableoid::regclass, count(*) from conditions group by tableoid order by tableoid;
Reported by SQLint.
Line: 160
Column: 1
SELECT sum(_ts_meta_count) from :COMPRESSED_CHUNK_NAME;
SELECT _ts_meta_sequence_num from :COMPRESSED_CHUNK_NAME;
\x
SELECT chunk_id, numrows_pre_compression, numrows_post_compression
FROM _timescaledb_catalog.chunk srcch,
     _timescaledb_catalog.compression_chunk_size map,
     _timescaledb_catalog.hypertable srcht
WHERE map.chunk_id = srcch.id and srcht.id = srcch.hypertable_id
Reported by SQLint.
Line: 187
Column: 1
select * from timescaledb_information.hypertables
where hypertable_name like 'foo' or hypertable_name like 'conditions'
order by hypertable_name;
\x
SELECT decompress_chunk(ch1.schema_name|| '.' || ch1.table_name) AS chunk
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
WHERE ch1.hypertable_id = ht.id and ht.table_name LIKE 'conditions'
ORDER BY chunk;
Reported by SQLint.
Line: 196
Column: 1
SELECT count(*), count(*) = :'ORIGINAL_CHUNK_COUNT' from :CHUNK_NAME;
--check that the compressed chunk is dropped
\set ON_ERROR_STOP 0
SELECT count(*) from :COMPRESSED_CHUNK_NAME;
\set ON_ERROR_STOP 1
--size information is gone too
select count(*)
Reported by SQLint.