The following issues were found:
test/sql/multi_transaction_index.sql
6 issues
Line: 18
Column: 1
INSERT INTO index_test VALUES ('2017-01-20T09:00:01', 1, 17.5);
\set ON_ERROR_STOP 0
-- cannot create a UNIQUE index with transaction_per_chunk
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time) WITH (timescaledb.transaction_per_chunk);
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time, device) WITH(timescaledb.transaction_per_chunk);
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 22
Column: 1
-- cannot create a UNIQUE index with transaction_per_chunk
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time) WITH (timescaledb.transaction_per_chunk);
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time, device) WITH(timescaledb.transaction_per_chunk);
\set ON_ERROR_STOP 1
CREATE INDEX index_test_time_device_idx ON index_test (time, device) WITH (timescaledb.transaction_per_chunk);
-- Regular index need not cover all partitioning columns
CREATE INDEX ON index_test (time, temp) WITH (timescaledb.transaction_per_chunk);
Reported by SQLint.
Line: 104
Column: 1
SET enable_seqscan TO default;
SET enable_bitmapscan TO default;
\set ON_ERROR_STOP 0
-- cannot create a transaction_per_chunk index within a transaction block
BEGIN;
CREATE INDEX ON index_expr_test (temp) WITH (timescaledb.transaction_per_chunk);
ROLLBACK;
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 109
Column: 1
BEGIN;
CREATE INDEX ON index_expr_test (temp) WITH (timescaledb.transaction_per_chunk);
ROLLBACK;
\set ON_ERROR_STOP 1
DROP TABLE index_expr_test CASCADE;
CREATE TABLE partial_index_test(time INTEGER);
SELECT create_hypertable('partial_index_test', 'time', chunk_time_interval => 1, create_default_indexes => false);
Reported by SQLint.
Line: 146
Column: 1
SET enable_seqscan TO true;
SET enable_bitmapscan TO true;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
\set ON_ERROR_STOP 0
CREATE INDEX ON partial_index_test (time) WITH (timescaledb.transaction_per_chunk, timescaledb.max_chunks='1');
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 149
Column: 1
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
\set ON_ERROR_STOP 0
CREATE INDEX ON partial_index_test (time) WITH (timescaledb.transaction_per_chunk, timescaledb.max_chunks='1');
\set ON_ERROR_STOP 1
Reported by SQLint.
test/sql/misc.sql
6 issues
Line: 12
Column: 1
"time" bigint NOT NULL,
"value" double precision NOT NULL
);
\COPY copy_golden (time, value) FROM data/copy_data.csv WITH CSV HEADER
SELECT * FROM copy_golden ORDER BY TIME;
CREATE TABLE "copy_control" (
"time" bigint NOT NULL,
"value" double precision NOT NULL
Reported by SQLint.
Line: 19
Column: 1
"time" bigint NOT NULL,
"value" double precision NOT NULL
);
\COPY copy_control (time, value) FROM data/copy_data.csv WITH CSV HEADER WHERE time > 10;
SELECT * FROM copy_control ORDER BY TIME;
CREATE TABLE "copy_test" (
"time" bigint NOT NULL,
"value" double precision NOT NULL
Reported by SQLint.
Line: 27
Column: 1
"value" double precision NOT NULL
);
SELECT create_hypertable('copy_test', 'time', chunk_time_interval => 10);
\COPY copy_test (time, value) FROM data/copy_data.csv WITH CSV HEADER WHERE time > 10;
SELECT * FROM copy_test ORDER BY TIME;
-- Verify attempting to use subqueries fails the same as non-hypertables
\set ON_ERROR_STOP 0
\COPY copy_control (time, value) FROM data/copy_data.csv WITH CSV HEADER WHERE time IN (SELECT time FROM copy_golden);
Reported by SQLint.
Line: 31
Column: 1
SELECT * FROM copy_test ORDER BY TIME;
-- Verify attempting to use subqueries fails the same as non-hypertables
\set ON_ERROR_STOP 0
\COPY copy_control (time, value) FROM data/copy_data.csv WITH CSV HEADER WHERE time IN (SELECT time FROM copy_golden);
\COPY copy_test (time, value) FROM data/copy_data.csv WITH CSV HEADER WHERE time IN (SELECT time FROM copy_golden);
\set ON_ERROR_STOP 1
DROP TABLE copy_golden;
Reported by SQLint.
Line: 33
Column: 1
-- Verify attempting to use subqueries fails the same as non-hypertables
\set ON_ERROR_STOP 0
\COPY copy_control (time, value) FROM data/copy_data.csv WITH CSV HEADER WHERE time IN (SELECT time FROM copy_golden);
\COPY copy_test (time, value) FROM data/copy_data.csv WITH CSV HEADER WHERE time IN (SELECT time FROM copy_golden);
\set ON_ERROR_STOP 1
DROP TABLE copy_golden;
DROP TABLE copy_control;
DROP TABLE copy_test;
Reported by SQLint.
Line: 34
Column: 1
\set ON_ERROR_STOP 0
\COPY copy_control (time, value) FROM data/copy_data.csv WITH CSV HEADER WHERE time IN (SELECT time FROM copy_golden);
\COPY copy_test (time, value) FROM data/copy_data.csv WITH CSV HEADER WHERE time IN (SELECT time FROM copy_golden);
\set ON_ERROR_STOP 1
DROP TABLE copy_golden;
DROP TABLE copy_control;
DROP TABLE copy_test;
Reported by SQLint.
test/sql/metadata.sql
6 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE OR REPLACE FUNCTION _timescaledb_internal.test_uuid() RETURNS UUID
AS :MODULE_PATHNAME, 'ts_test_uuid' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION _timescaledb_internal.test_exported_uuid() RETURNS UUID
AS :MODULE_PATHNAME, 'ts_test_exported_uuid' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION _timescaledb_internal.test_install_timestamp() RETURNS TIMESTAMPTZ
Reported by SQLint.
Line: 12
Column: 1
AS :MODULE_PATHNAME, 'ts_test_exported_uuid' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
CREATE OR REPLACE FUNCTION _timescaledb_internal.test_install_timestamp() RETURNS TIMESTAMPTZ
AS :MODULE_PATHNAME, 'ts_test_install_timestamp' LANGUAGE C IMMUTABLE STRICT PARALLEL SAFE;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- uuid and install_timestamp should already be in the table before we generate
SELECT COUNT(*) from _timescaledb_catalog.metadata;
SELECT _timescaledb_internal.test_uuid() as uuid_1 \gset
SELECT _timescaledb_internal.test_exported_uuid() as uuid_ex_1 \gset
Reported by SQLint.
Line: 16
Column: 52
-- uuid and install_timestamp should already be in the table before we generate
SELECT COUNT(*) from _timescaledb_catalog.metadata;
SELECT _timescaledb_internal.test_uuid() as uuid_1 \gset
SELECT _timescaledb_internal.test_exported_uuid() as uuid_ex_1 \gset
SELECT _timescaledb_internal.test_install_timestamp() as timestamp_1 \gset
-- Check that there is exactly 1 UUID row
SELECT COUNT(*) from _timescaledb_catalog.metadata where key='uuid';
Reported by SQLint.
Line: 37
Column: 1
SELECT _timescaledb_internal.test_install_timestamp() = :'timestamp_1' as timestamps_equal;
-- Now make sure that only the exported_uuid is exported on pg_dump
\c postgres :ROLE_SUPERUSER
\setenv PGOPTIONS '--client-min-messages=warning'
\! utils/pg_dump_aux_dump.sh dump/instmeta.sql
ALTER DATABASE :TEST_DBNAME SET timescaledb.restoring='on';
-- Redirect to /dev/null to suppress NOTICE
Reported by SQLint.
Line: 43
Column: 1
\! utils/pg_dump_aux_dump.sh dump/instmeta.sql
ALTER DATABASE :TEST_DBNAME SET timescaledb.restoring='on';
-- Redirect to /dev/null to suppress NOTICE
\! utils/pg_dump_aux_restore.sh dump/instmeta.sql
ALTER DATABASE :TEST_DBNAME SET timescaledb.restoring='off';
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- Should have all 3 rows, because pg_dump includes the insertion of uuid and timestamp.
Reported by SQLint.
Line: 46
Column: 1
\! utils/pg_dump_aux_restore.sh dump/instmeta.sql
ALTER DATABASE :TEST_DBNAME SET timescaledb.restoring='off';
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- Should have all 3 rows, because pg_dump includes the insertion of uuid and timestamp.
SELECT COUNT(*) FROM _timescaledb_catalog.metadata;
-- Verify that this is the old exported_uuid
SELECT _timescaledb_internal.test_exported_uuid() = :'uuid_ex_1' as exported_uuids_equal;
Reported by SQLint.
src/dimension.c
6 issues
Line: 171
Column: 2
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
d->fd.aligned = DatumGetBool(values[AttrNumberGetAttrOffset(Anum_dimension_aligned)]);
d->fd.column_type =
DatumGetObjectId(values[AttrNumberGetAttrOffset(Anum_dimension_column_type)]);
memcpy(&d->fd.column_name,
DatumGetName(values[AttrNumberGetAttrOffset(Anum_dimension_column_name)]),
NAMEDATALEN);
if (!isnull[Anum_dimension_partitioning_func_schema - 1] &&
!isnull[Anum_dimension_partitioning_func - 1])
Reported by FlawFinder.
Line: 183
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
d->fd.num_slices =
DatumGetInt16(values[AttrNumberGetAttrOffset(Anum_dimension_num_slices)]);
memcpy(&d->fd.partitioning_func_schema,
DatumGetName(
values[AttrNumberGetAttrOffset(Anum_dimension_partitioning_func_schema)]),
NAMEDATALEN);
memcpy(&d->fd.partitioning_func,
DatumGetName(values[AttrNumberGetAttrOffset(Anum_dimension_partitioning_func)]),
Reported by FlawFinder.
Line: 187
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
DatumGetName(
values[AttrNumberGetAttrOffset(Anum_dimension_partitioning_func_schema)]),
NAMEDATALEN);
memcpy(&d->fd.partitioning_func,
DatumGetName(values[AttrNumberGetAttrOffset(Anum_dimension_partitioning_func)]),
NAMEDATALEN);
old = MemoryContextSwitchTo(ti->mctx);
d->partitioning = ts_partitioning_info_create(NameStr(d->fd.partitioning_func_schema),
Reported by FlawFinder.
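The three memcpy findings above copy a Name value into a NameData field of the same fixed size (NAMEDATALEN bytes), a size relationship FlawFinder cannot infer from the call site. A minimal standalone sketch of making that guarantee explicit, using stand-in types rather than the actual TimescaleDB catalog structs:

#include <string.h>

#define NAMEDATALEN 64                     /* PostgreSQL's default name length */

/* stand-in for PostgreSQL's NameData; not the actual catalog struct */
typedef struct NameData { char data[NAMEDATALEN]; } NameData;

typedef struct FormData_dimension
{
    NameData column_name;                  /* fixed-size destination field */
} FormData_dimension;

/*
 * A Name is always exactly NAMEDATALEN bytes, so copying one into a
 * NameData field cannot overflow the destination. The assertion states
 * the size relationship that the bare memcpy does not show.
 */
static void
copy_column_name(FormData_dimension *fd, const NameData *src)
{
    _Static_assert(sizeof(NameData) == NAMEDATALEN, "Name must be NAMEDATALEN bytes");
    memcpy(&fd->column_name, src, sizeof(fd->column_name));
}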
Line: 1603
Column: 34
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
if (namestrcmp(schemaname, names[0]) == 0)
{
namestrcpy(schemaname, (const char *) names[1]);
values[AttrNumberGetAttrOffset(Anum_dimension_partitioning_func_schema)] =
NameGetDatum(schemaname);
doReplace[AttrNumberGetAttrOffset(Anum_dimension_partitioning_func_schema)] = true;
}
}
Reported by FlawFinder.
Line: 1616
Column: 34
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
DatumGetName(values[AttrNumberGetAttrOffset(Anum_dimension_integer_now_func_schema)]);
if (namestrcmp(schemaname, names[0]) == 0)
{
namestrcpy(schemaname, (const char *) names[1]);
values[AttrNumberGetAttrOffset(Anum_dimension_integer_now_func_schema)] =
NameGetDatum(schemaname);
doReplace[AttrNumberGetAttrOffset(Anum_dimension_integer_now_func_schema)] = true;
}
}
Reported by FlawFinder.
Line: 1640
Column: 2
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
NameData old_schema_name;
ScanKeyData scankey[1];
Catalog *catalog = ts_catalog_get();
char *names[2] = { (char *) old_name, (char *) new_name };
ScannerCtx scanctx = {
.table = catalog_get_table_id(catalog, DIMENSION),
.index = InvalidOid,
.nkeys = 1,
.scankey = scankey,
Reported by FlawFinder.
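The namestrcpy findings ask for a copy that limits length to the destination. PostgreSQL's namestrcpy() is already bounded to NAMEDATALEN and NUL-terminates; the sketch below only restates that bounded-copy pattern in standard C, assuming a plain char buffer instead of the catalog's NameData (helper name is hypothetical):

#include <stdio.h>

#define NAMEDATALEN 64

/*
 * Hypothetical helper: truncating, always-NUL-terminated copy into a
 * fixed-size name buffer. snprintf() writes at most NAMEDATALEN bytes
 * including the terminator, which is the "function that limits length"
 * the finding asks for.
 */
static void
set_name_bounded(char dst[NAMEDATALEN], const char *src)
{
    snprintf(dst, NAMEDATALEN, "%s", src);
}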
tsl/src/compression/datum_serialize.c
6 issues
Line: 190
Column: 4
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
/* no alignment for short varlenas */
data_length = VARSIZE_SHORT(val);
check_allowed_data_len(data_length, *max_size);
memcpy(ptr, val, data_length);
}
else if (TYPE_IS_PACKABLE(serializer->type_len, serializer->type_storage) &&
VARATT_CAN_MAKE_SHORT(val))
{
/* convert to short varlena -- no alignment */
Reported by FlawFinder.
Line: 199
Column: 4
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
data_length = VARATT_CONVERTED_SHORT_SIZE(val);
check_allowed_data_len(data_length, *max_size);
SET_VARSIZE_SHORT(ptr, data_length);
memcpy(ptr + 1, VARDATA(val), data_length - 1);
}
else
{
/* full 4-byte header varlena */
ptr = align_and_zero(ptr, serializer->type_align, max_size);
Reported by FlawFinder.
Line: 207
Column: 4
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
ptr = align_and_zero(ptr, serializer->type_align, max_size);
data_length = VARSIZE(val);
check_allowed_data_len(data_length, *max_size);
memcpy(ptr, val, data_length);
}
}
else if (serializer->type_len == -2)
{
/* cstring ... never needs alignment */
Reported by FlawFinder.
Line: 216
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
Assert(serializer->type_align == 'c');
data_length = strlen(DatumGetCString(datum)) + 1;
check_allowed_data_len(data_length, *max_size);
memcpy(ptr, DatumGetPointer(datum), data_length);
}
else
{
/* fixed-length pass-by-reference */
ptr = align_and_zero(ptr, serializer->type_align, max_size);
Reported by FlawFinder.
Line: 225
Column: 3
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
Assert(serializer->type_len > 0);
data_length = serializer->type_len;
check_allowed_data_len(data_length, *max_size);
memcpy(ptr, DatumGetPointer(datum), data_length);
}
ptr += data_length;
*max_size = *max_size - data_length;
Reported by FlawFinder.
Line: 214
Column: 17
CWE codes:
126
{
/* cstring ... never needs alignment */
Assert(serializer->type_align == 'c');
data_length = strlen(DatumGetCString(datum)) + 1;
check_allowed_data_len(data_length, *max_size);
memcpy(ptr, DatumGetPointer(datum), data_length);
}
else
{
Reported by FlawFinder.
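Each memcpy flagged above is preceded by a check_allowed_data_len() call comparing the write against the remaining buffer space. A standalone sketch of that check-then-copy pattern follows; the helper name and boolean error path are placeholders, not the serializer's actual API, which raises an error instead of returning:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Copy len bytes into the output buffer only if the remaining space can
 * hold them; refuse the write otherwise instead of overflowing.
 */
static bool
append_checked(uint8_t *dst, size_t *remaining, const void *src, size_t len)
{
    if (len > *remaining)
        return false;               /* destination cannot hold the source */

    memcpy(dst, src, len);
    *remaining -= len;
    return true;
}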
tsl/test/sql/telemetry_distributed.sql
6 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\set DN_DBNAME_1 :TEST_DBNAME _1
\set DN_DBNAME_2 :TEST_DBNAME _2
SELECT json_object_field(get_telemetry_report(always_display_report := true)::json, 'distributed_db');
Reported by SQLint.
Line: 6
Column: 1
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\set DN_DBNAME_1 :TEST_DBNAME _1
\set DN_DBNAME_2 :TEST_DBNAME _2
SELECT json_object_field(get_telemetry_report(always_display_report := true)::json, 'distributed_db');
-- Become an access node
Reported by SQLint.
Line: 17
Column: 1
SELECT json_object_field(get_telemetry_report(always_display_report := true)::json, 'distributed_db');
-- See telemetry report from a data node
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
SELECT json_object_field(get_telemetry_report(always_display_report := true)::json, 'distributed_db');
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
SELECT * FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2');
Reported by SQLint.
Line: 20
Column: 1
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
SELECT json_object_field(get_telemetry_report(always_display_report := true)::json, 'distributed_db');
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
SELECT * FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2');
-- Add hypertables
CREATE TABLE test_ht(time timestamptz, device int, PRIMARY KEY (time, device));
SELECT * FROM create_hypertable('test_ht', 'time', 'device', 1);
Reported by SQLint.
Line: 34
Column: 1
SELECT * FROM create_distributed_hypertable('disttable2', 'time', 'device', 2, replication_factor => 2);
-- See telemetry report update from the data node
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
SELECT json_object_field(get_telemetry_report(always_display_report := true)::json, 'distributed_db');
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
-- See a number of distributed and distributed and replicated hypertables update
SELECT json_object_field(get_telemetry_report(always_display_report := true)::json, 'distributed_db');
Reported by SQLint.
Line: 37
Column: 1
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
SELECT json_object_field(get_telemetry_report(always_display_report := true)::json, 'distributed_db');
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
-- See a number of distributed and distributed and replicated hypertables update
SELECT json_object_field(get_telemetry_report(always_display_report := true)::json, 'distributed_db');
DROP DATABASE :DN_DBNAME_1;
DROP DATABASE :DN_DBNAME_2;
Reported by SQLint.
test/sql/generated_as_identity.sql
6 issues
Line: 16
Column: 1
select * from test_gen;
\set ON_ERROR_STOP 0
insert into test_gen values('1', 'a');
\set ON_ERROR_STOP 1
ALTER TABLE test_gen ALTER COLUMN id DROP IDENTITY;
\set ON_ERROR_STOP 0
Reported by SQLint.
Line: 18
Column: 1
\set ON_ERROR_STOP 0
insert into test_gen values('1', 'a');
\set ON_ERROR_STOP 1
ALTER TABLE test_gen ALTER COLUMN id DROP IDENTITY;
\set ON_ERROR_STOP 0
insert into test_gen (payload) select generate_series(15,20) returning *;
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 21
Column: 1
\set ON_ERROR_STOP 1
ALTER TABLE test_gen ALTER COLUMN id DROP IDENTITY;
\set ON_ERROR_STOP 0
insert into test_gen (payload) select generate_series(15,20) returning *;
\set ON_ERROR_STOP 1
ALTER TABLE test_gen ALTER COLUMN id ADD GENERATED BY DEFAULT AS IDENTITY;
\set ON_ERROR_STOP 0
Reported by SQLint.
Line: 23
Column: 1
ALTER TABLE test_gen ALTER COLUMN id DROP IDENTITY;
\set ON_ERROR_STOP 0
insert into test_gen (payload) select generate_series(15,20) returning *;
\set ON_ERROR_STOP 1
ALTER TABLE test_gen ALTER COLUMN id ADD GENERATED BY DEFAULT AS IDENTITY;
\set ON_ERROR_STOP 0
insert into test_gen (payload) select generate_series(15,20) returning *;
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 26
Column: 1
\set ON_ERROR_STOP 1
ALTER TABLE test_gen ALTER COLUMN id ADD GENERATED BY DEFAULT AS IDENTITY;
\set ON_ERROR_STOP 0
insert into test_gen (payload) select generate_series(15,20) returning *;
\set ON_ERROR_STOP 1
ALTER TABLE test_gen ALTER COLUMN id SET GENERATED BY DEFAULT RESTART 100;
insert into test_gen (payload) select generate_series(15,20) returning *;
Reported by SQLint.
Line: 28
Column: 1
ALTER TABLE test_gen ALTER COLUMN id ADD GENERATED BY DEFAULT AS IDENTITY;
\set ON_ERROR_STOP 0
insert into test_gen (payload) select generate_series(15,20) returning *;
\set ON_ERROR_STOP 1
ALTER TABLE test_gen ALTER COLUMN id SET GENERATED BY DEFAULT RESTART 100;
insert into test_gen (payload) select generate_series(15,20) returning *;
select * from test_gen;
Reported by SQLint.
test/sql/drop_hypertable.sql
6 issues
Line: 18
Column: 1
INSERT INTO hyper_with_dependencies VALUES (now(), 1.0);
\set ON_ERROR_STOP 0
DROP TABLE hyper_with_dependencies;
\set ON_ERROR_STOP 1
DROP TABLE hyper_with_dependencies CASCADE;
\dv
Reported by SQLint.
Line: 20
Column: 1
\set ON_ERROR_STOP 0
DROP TABLE hyper_with_dependencies;
\set ON_ERROR_STOP 1
DROP TABLE hyper_with_dependencies CASCADE;
\dv
CREATE TABLE chunk_with_dependencies (time timestamp, temp float8);
SELECT create_hypertable('chunk_with_dependencies', 'time');
Reported by SQLint.
Line: 22
Column: 1
DROP TABLE hyper_with_dependencies;
\set ON_ERROR_STOP 1
DROP TABLE hyper_with_dependencies CASCADE;
\dv
CREATE TABLE chunk_with_dependencies (time timestamp, temp float8);
SELECT create_hypertable('chunk_with_dependencies', 'time');
INSERT INTO chunk_with_dependencies VALUES (now(), 1.0);
Reported by SQLint.
Line: 31
Column: 1
CREATE VIEW dependent_view_chunk AS SELECT * FROM _timescaledb_internal._hyper_3_2_chunk;
\set ON_ERROR_STOP 0
DROP TABLE chunk_with_dependencies;
\set ON_ERROR_STOP 1
DROP TABLE chunk_with_dependencies CASCADE;
\dv
Reported by SQLint.
Line: 33
Column: 1
\set ON_ERROR_STOP 0
DROP TABLE chunk_with_dependencies;
\set ON_ERROR_STOP 1
DROP TABLE chunk_with_dependencies CASCADE;
\dv
-- Calling create hypertable again will increment hypertable ID
-- although no new hypertable is created. Make sure we can handle this.
Reported by SQLint.
Line: 35
Column: 1
DROP TABLE chunk_with_dependencies;
\set ON_ERROR_STOP 1
DROP TABLE chunk_with_dependencies CASCADE;
\dv
-- Calling create hypertable again will increment hypertable ID
-- although no new hypertable is created. Make sure we can handle this.
SELECT create_hypertable('should_drop', 'time', if_not_exists => true);
SELECT * from _timescaledb_catalog.hypertable;
Reported by SQLint.
tsl/test/sql/include/dist_query_run.sql
5 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\echo '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
\echo '%%% RUNNING TESTS on table:' :TABLE_NAME
\echo '%%% PREFIX:' :PREFIX
\echo '%%% WHERE_CLAUSE:' :WHERE_CLAUSE
\echo '%%% ORDER_BY_1:' :ORDER_BY_1
\echo '%%% ORDER_BY_1_2:' :ORDER_BY_1_2
Reported by SQLint.
Line: 18
Column: 1
-----------------------------------------------------------------
-- GROUP on time (partial aggregation)
-----------------------------------------------------------------
\set TEST_DESC '\n######### Grouping on time only (partial aggregation)\n'
\qecho :TEST_DESC
:PREFIX
SELECT time, avg(temp)
FROM :TABLE_NAME
WHERE :WHERE_CLAUSE
Reported by SQLint.
Line: 203
Column: 1
-----------------------------------------------------------------
-- LIMIT push down support
-----------------------------------------------------------------
\set TEST_DESC '\n######### LIMIT push down cases\n'
-- Basic query (should be pushed)
\qecho :TEST_DESC
:PREFIX
SELECT time, device
Reported by SQLint.
Line: 282
Column: 1
-- JOIN with a local table
CREATE TABLE join_test (device int);
\qecho :TEST_DESC
:PREFIX
SELECT t.time
FROM :TABLE_NAME t, join_test
WHERE t.device = join_test.device
LIMIT 10;
Reported by SQLint.
Line: 295
Column: 1
-- Test CTE / sub-queries. Data from two sub-queries on the same data
-- node is joined on the access node.
-----------------------------------------------------------------
\set TEST_DESC '\n######### CTEs/Sub-queries\n'
-- CTE / subquery
\qecho :TEST_DESC
:PREFIX
WITH top_n AS (
Reported by SQLint.
tsl/src/fdw/deparse.c
5 issues
Line: 2104
Column: 9
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
foreach (lc, *context->params_list)
{
pindex++;
if (equal(node, (Node *) lfirst(lc)))
break;
}
if (lc == NULL)
{
/* not in list, so add it */
Reported by FlawFinder.
Line: 2166
Column: 45
CWE codes:
126
* No need to quote unless it's a special value such as 'NaN'.
* See comments in get_const_expr().
*/
if (strspn(extval, "0123456789+-eE.") == strlen(extval))
{
if (extval[0] == '+' || extval[0] == '-')
appendStringInfo(buf, "(%s)", extval);
else
appendStringInfoString(buf, extval);
Reported by FlawFinder.
Line: 2172
Column: 35
CWE codes:
126
appendStringInfo(buf, "(%s)", extval);
else
appendStringInfoString(buf, extval);
if (strcspn(extval, "eE.") != strlen(extval))
isfloat = true; /* it looks like a float */
}
else
appendStringInfo(buf, "'%s'", extval);
}
Reported by FlawFinder.
Line: 2244
Column: 8
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
foreach (lc, *context->params_list)
{
pindex++;
if (equal(node, (Node *) lfirst(lc)))
break;
}
if (lc == NULL)
{
/* not in list, so add it */
Reported by FlawFinder.
Line: 3095
Column: 7
CWE codes:
126
Suggestion:
This function is often discouraged by most C++ coding standards in favor of its safer alternatives provided since C++14. Consider using a form of this function that checks the second iterator before potentially overflowing it
i = 1;
foreach (lc, foreignrel->reltarget->exprs)
{
if (equal(lfirst(lc), (Node *) node))
{
*colno = i;
return;
}
i++;
Reported by FlawFinder.
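The strspn()/strcspn() findings concern a potential over-read if extval lacked a NUL terminator; in the surrounding deparse code extval appears to come from the type's output function, which returns a terminated C string. A defensive, bounded version of the same numeric-literal check, written as a standalone sketch with an assumed length cap:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/*
 * Bounded restatement of the "does this literal look numeric?" test.
 * The explicit cap prevents reading past maxlen even if the terminator
 * were missing (CWE-126); the names and the cap are assumptions, not
 * part of deparse.c.
 */
static bool
looks_numeric_bounded(const char *extval, size_t maxlen)
{
    size_t i;

    for (i = 0; i < maxlen && extval[i] != '\0'; i++)
    {
        if (strchr("0123456789+-eE.", extval[i]) == NULL)
            return false;
    }

    /* non-empty and NUL-terminated within the bound */
    return i > 0 && i < maxlen;
}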