The following issues were found:
test/sql/license.sql
4 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
\set ECHO queries
\set VERBOSITY default
SHOW timescaledb.license;
Reported by SQLint.
Line: 14
Column: 1
SELECT _timescaledb_internal.tsl_loaded();
-- User shouldn't be able to change the license in the session
\set ON_ERROR_STOP 0
SET timescaledb.license='apache';
SET timescaledb.license='timescale';
SET timescaledb.license='something_else';
\set ON_ERROR_STOP 1
Reported by SQLint.
Line: 18
Column: 1
SET timescaledb.license='apache';
SET timescaledb.license='timescale';
SET timescaledb.license='something_else';
\set ON_ERROR_STOP 1
-- make sure apache license blocks tsl features
\set ON_ERROR_STOP 0
SELECT locf(1);
Reported by SQLint.
Line: 40
Column: 1
ALTER TABLE metrics SET (timescaledb.compress);
DROP TABLE metrics;
\set ON_ERROR_STOP 1
Reported by SQLint.
tsl/test/sql/dist_policy.sql
4 issues
Line: 6
Column: 1
-- LICENSE-TIMESCALE for a copy of the license.
-- Need to be super user to create extension and add data nodes
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\unset ECHO
\o /dev/null
\ir include/remote_exec.sql
\o
Reported by SQLint.
Line: 8
Column: 1
-- Need to be super user to create extension and add data nodes
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\unset ECHO
\o /dev/null
\ir include/remote_exec.sql
\o
\set ECHO all
Reported by SQLint.
Line: 22
Column: 1
database => 'dist_policy_data_node_2');
SELECT * FROM add_data_node('dist_policy_data_node_3', host => 'localhost',
database => 'dist_policy_data_node_3');
\x off
GRANT USAGE ON FOREIGN SERVER dist_policy_data_node_1, dist_policy_data_node_2, dist_policy_data_node_3 TO PUBLIC;
-- Create a fake clock that we can use below and make sure that it is
-- defined on the data nodes as well.
CREATE TABLE time_table (time BIGINT);
Reported by SQLint.
Line: 62
Column: 72
generate_series(1,3) AS device
ORDER BY time, device;
SELECT add_retention_policy('conditions', 5, true) as retention_job_id \gset
-- Now simulate drop_chunks running automatically by calling it
-- explicitly. Show chunks before and after.
SELECT show_chunks('conditions');
SELECT * FROM test.remote_exec(NULL, $$ SELECT show_chunks('conditions'); $$);
Reported by SQLint.
src/debug_guc.c
4 issues
Line: 75
Column: 5
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
        mask |= STAGE_FINAL;
    else
    {
        char buf[20] = { 0 };
        char *ptr;
        strncpy(buf, beg, sizeof(buf));
        /* If the path name was long, make it clear that it is
         * incomplete in the printout */
Reported by FlawFinder.
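For reference, a minimal sketch of the bounds-checked copy the suggestion describes. The flagged code already zero-fills buf and tests buf[19] afterwards, so this only illustrates the generic pattern; beg is the source pointer from the excerpt above:

char buf[20];

/* Copy at most sizeof(buf) - 1 bytes and terminate explicitly, so buf can
 * never be left without a trailing '\0'. */
strncpy(buf, beg, sizeof(buf) - 1);
buf[sizeof(buf) - 1] = '\0';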
Line: 77
Column: 5
CWE codes:
120
    {
        char buf[20] = { 0 };
        char *ptr;
        strncpy(buf, beg, sizeof(buf));
        /* If the path name was long, make it clear that it is
         * incomplete in the printout */
        if (buf[19] != '\0')
        {
Reported by FlawFinder.
Line: 150
Column: 6
CWE codes:
126
    Assert(string && flags);
    if (strlen(string) == 0)
        return true;
    rawname = pstrdup(string);
    if (!SplitIdentifierString(rawname, ':', &namelist))
    {
Reported by FlawFinder.
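CWE-126 here concerns reading past the end of string if it were ever passed without a terminator. A minimal alternative that avoids strlen() for the emptiness test, shown only as a hedged sketch:

/* An empty-string check does not need the full length; inspecting the first
 * byte avoids scanning an (in theory) unterminated buffer. */
if (string[0] == '\0')
    return true;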
Line: 166
Column: 36
CWE codes:
126
    foreach (cell, namelist)
    {
        char *flag_string = (char *) lfirst(cell);
        if (!set_debug_flag(flag_string, strlen(flag_string), &local_flags))
        {
            GUC_check_errdetail("Unrecognized flag setting \"%s\".", flag_string);
            GUC_check_errhint("Allowed values are: show_upper_paths show_rel_pathlist");
            pfree(rawname);
            list_free(namelist);
Reported by FlawFinder.
tsl/src/compression/create.c
4 issues
Line: 67
Column: 13
CWE codes:
134
Suggestion:
Use a constant for the format specification
#define PRINT_COMPRESSION_TABLE_NAME(buf, prefix, hypertable_id) \
    do \
    { \
        int ret = snprintf(buf, NAMEDATALEN, prefix, hypertable_id); \
        if (ret < 0 || ret > NAMEDATALEN) \
        { \
            ereport(ERROR, \
                    (errcode(ERRCODE_INTERNAL_ERROR), \
                     errmsg("bad compression hypertable internal name"))); \
Reported by FlawFinder.
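FlawFinder flags this because the format is a macro parameter (prefix) rather than a literal at the snprintf call, even though callers presumably pass string literals. A sketch of what the suggestion asks for, keeping the conversion specifier in a constant format; the literal below is a placeholder, not the project's actual table-name pattern:

/* Hypothetical constant format; the real prefix macro would take its place. */
#define COMPRESSED_TABLE_NAME_FMT "_compressed_hypertable_%d"

int ret = snprintf(buf, NAMEDATALEN, COMPRESSED_TABLE_NAME_FMT, hypertable_id);

/* >= catches truncation: snprintf returns the length it would have written. */
if (ret < 0 || ret >= NAMEDATALEN)
    ereport(ERROR,
            (errcode(ERRCODE_INTERNAL_ERROR),
             errmsg("bad compression hypertable internal name")));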
Line: 116
Column: 8
CWE codes:
134
Suggestion:
Use a constant for the format specification
    int ret;
    Assert(fd->orderby_column_index > 0);
    ret = snprintf(buf,
                   NAMEDATALEN,
                   COMPRESSION_COLUMN_METADATA_PREFIX "%s_%d",
                   type,
                   fd->orderby_column_index);
    if (ret < 0 || ret > NAMEDATALEN)
Reported by FlawFinder.
Line: 532
Column: 2
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
create_compression_table(Oid owner, CompressColInfo *compress_cols)
{
    ObjectAddress tbladdress;
    char relnamebuf[NAMEDATALEN];
    CatalogSecurityContext sec_ctx;
    Datum toast_options;
    static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
    Oid compress_relid;
Reported by FlawFinder.
Line: 279
Column: 6
CWE codes:
126
            continue;
        if (strncmp(NameStr(attr->attname),
                    COMPRESSION_COLUMN_METADATA_PREFIX,
                    strlen(COMPRESSION_COLUMN_METADATA_PREFIX)) == 0)
            elog(ERROR,
                 "cannot compress tables with reserved column prefix '%s'",
                 COMPRESSION_COLUMN_METADATA_PREFIX);
        namestrcpy(&cc->col_meta[colno].attname, NameStr(attr->attname));
Reported by FlawFinder.
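If COMPRESSION_COLUMN_METADATA_PREFIX expands to a string literal (an assumption; its definition is not in the excerpt), the comparison length is known at compile time and the runtime strlen() can be avoided, which also sidesteps the over-read concern:

/* sizeof on a string literal includes the trailing '\0', hence the - 1.
 * This assumes the prefix macro is a literal, not a char pointer. */
if (strncmp(NameStr(attr->attname),
            COMPRESSION_COLUMN_METADATA_PREFIX,
            sizeof(COMPRESSION_COLUMN_METADATA_PREFIX) - 1) == 0)
    elog(ERROR,
         "cannot compress tables with reserved column prefix '%s'",
         COMPRESSION_COLUMN_METADATA_PREFIX);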
src/net/http_response.c
4 issues
Line: 35
Column: 2
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
typedef struct HttpResponseState
{
    MemoryContext context;
    char version[HTTP_VERSION_BUFFER_SIZE];
    char raw_buffer[MAX_RAW_BUFFER_SIZE];
    /* The next read should copy data into the buffer starting here */
    off_t offset;
    off_t parse_offset;
    size_t cur_header_name_len;
Reported by FlawFinder.
Line: 36
Column: 2
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
{
    MemoryContext context;
    char version[HTTP_VERSION_BUFFER_SIZE];
    char raw_buffer[MAX_RAW_BUFFER_SIZE];
    /* The next read should copy data into the buffer starting here */
    off_t offset;
    off_t parse_offset;
    size_t cur_header_name_len;
    size_t cur_header_value_len;
Reported by FlawFinder.
Line: 180
Column: 4
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
     * Need a second %s inside the sscanf so that we make sure to get
     * all of the digits of the status code
     */
    memcpy(raw_buf, state->raw_buffer, state->parse_offset);
    raw_buf[state->parse_offset] = '\0';
    state->state = HTTP_STATE_ERROR;
    memset(state->version, '\0', sizeof(state->version));
    if (sscanf(raw_buf, "%127s%*[ ]%d%*[ ]%*s", state->version, &state->status_code) == 2)
Reported by FlawFinder.
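A sketch of the destination check the suggestion refers to, assuming raw_buf is a local array (its declaration is not in the excerpt) so sizeof() yields its capacity; the error handling shown is hypothetical, since the real function's control flow is not visible here:

/* Refuse to copy more than the destination can hold, leaving one byte for
 * the terminator that is appended right after the copy. */
if (state->parse_offset < sizeof(raw_buf))
{
    memcpy(raw_buf, state->raw_buffer, state->parse_offset);
    raw_buf[state->parse_offset] = '\0';
}
else
    state->state = HTTP_STATE_ERROR;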
Line: 185
Column: 8
CWE codes:
120
Suggestion:
Check that the limit is sufficiently small, or use a different input function
    state->state = HTTP_STATE_ERROR;
    memset(state->version, '\0', sizeof(state->version));
    if (sscanf(raw_buf, "%127s%*[ ]%d%*[ ]%*s", state->version, &state->status_code) == 2)
    {
        if (http_parse_version(state))
            state->state = HTTP_STATE_INTERM;
        else
            state->state = HTTP_STATE_ERROR;
Reported by FlawFinder.
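The concern is that the %127s field width is a separate constant from HTTP_VERSION_BUFFER_SIZE, the declared size of state->version. One hedged way to satisfy the check is to assert the relationship at compile time (C11 shown; PostgreSQL's StaticAssertStmt would serve the same purpose):

/* %127s writes at most 127 characters plus a terminating '\0', so the
 * destination must be at least 128 bytes. */
_Static_assert(HTTP_VERSION_BUFFER_SIZE >= 128,
               "sscanf field width does not fit in state->version");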
test/sql/updates/post.continuous_aggs.v2.sql
4 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\ir post.continuous_aggs.sql
\d cagg.*
\x on
SELECT * FROM cagg.realtime_mat ORDER BY bucket, location;
Reported by SQLint.
Line: 11
Column: 1
\x on
SELECT * FROM cagg.realtime_mat ORDER BY bucket, location;
\x off
CALL refresh_continuous_aggregate('cagg.realtime_mat',NULL,NULL);
\x on
SELECT * FROM cagg.realtime_mat ORDER BY bucket, location;
Reported by SQLint.
Line: 15
Column: 1
CALL refresh_continuous_aggregate('cagg.realtime_mat',NULL,NULL);
\x on
SELECT * FROM cagg.realtime_mat ORDER BY bucket, location;
\x off
SELECT view_name, materialized_only, materialization_hypertable_name
FROM timescaledb_information.continuous_aggregates
Reported by SQLint.
Line: 17
Column: 1
\x on
SELECT * FROM cagg.realtime_mat ORDER BY bucket, location;
\x off
SELECT view_name, materialized_only, materialization_hypertable_name
FROM timescaledb_information.continuous_aggregates
ORDER BY view_name::text;
Reported by SQLint.
test/sql/include/ddl_ops_1.sql
4 issues
Line: 54
Column: 1
SELECT * FROM _timescaledb_catalog.chunk_index ORDER BY hypertable_id, hypertable_index_name, chunk_id;
--expect error cases
\set ON_ERROR_STOP 0
INSERT INTO "customSchema"."Hypertable_1"(time, "Device_id", temp_c, humidity, sensor_1, sensor_2, sensor_3, sensor_4)
VALUES(1257894000000000000, 'dev1', 31, 71, 72, 4, 1, 102);
CREATE UNIQUE INDEX "Unique2" ON PUBLIC."Hypertable_1" ("Device_id");
CREATE UNIQUE INDEX "Unique2" ON PUBLIC."Hypertable_1" (time);
CREATE UNIQUE INDEX "Unique2" ON PUBLIC."Hypertable_1" (sensor_1);
Reported by SQLint.
Line: 62
Column: 1
CREATE UNIQUE INDEX "Unique2" ON PUBLIC."Hypertable_1" (sensor_1);
UPDATE ONLY PUBLIC."Hypertable_1" SET time = 0 WHERE TRUE;
DELETE FROM ONLY PUBLIC."Hypertable_1" WHERE "Device_id" = 'dev1';
\set ON_ERROR_STOP 1
CREATE TABLE my_ht (time BIGINT, val integer);
SELECT * FROM create_hypertable('my_ht', 'time', chunk_time_interval=>_timescaledb_internal.interval_to_usec('1 month'));
ALTER TABLE my_ht ADD COLUMN val2 integer;
Reported by SQLint.
Line: 71
Column: 1
SELECT * FROM test.show_columns('my_ht');
-- Should error when adding again
\set ON_ERROR_STOP 0
ALTER TABLE my_ht ADD COLUMN val2 integer;
\set ON_ERROR_STOP 1
-- Should create
ALTER TABLE my_ht ADD COLUMN IF NOT EXISTS val3 integer;
Reported by SQLint.
Line: 73
Column: 1
-- Should error when adding again
\set ON_ERROR_STOP 0
ALTER TABLE my_ht ADD COLUMN val2 integer;
\set ON_ERROR_STOP 1
-- Should create
ALTER TABLE my_ht ADD COLUMN IF NOT EXISTS val3 integer;
SELECT * FROM test.show_columns('my_ht');
Reported by SQLint.
test/sql/util.sql
4 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\set ECHO errors
\set VERBOSITY default
DO $$
BEGIN
  ASSERT( _timescaledb_internal.get_partition_for_key(''::text) = 669664877 );
Reported by SQLint.
Line: 11
Column: 3
DO $$
BEGIN
  ASSERT( _timescaledb_internal.get_partition_for_key(''::text) = 669664877 );
  ASSERT( _timescaledb_internal.get_partition_for_key('dev1'::text) = 1129986420 );
  ASSERT( _timescaledb_internal.get_partition_for_key('longlonglonglongpartitionkey'::text) = 1169179734);
END$$;
Reported by SQLint.
Line: 12
Column: 3
BEGIN
  ASSERT( _timescaledb_internal.get_partition_for_key(''::text) = 669664877 );
  ASSERT( _timescaledb_internal.get_partition_for_key('dev1'::text) = 1129986420 );
  ASSERT( _timescaledb_internal.get_partition_for_key('longlonglonglongpartitionkey'::text) = 1169179734);
END$$;
Reported by SQLint.
Line: 13
Column: 1
  ASSERT( _timescaledb_internal.get_partition_for_key(''::text) = 669664877 );
  ASSERT( _timescaledb_internal.get_partition_for_key('dev1'::text) = 1129986420 );
  ASSERT( _timescaledb_internal.get_partition_for_key('longlonglonglongpartitionkey'::text) = 1169179734);
END$$;
Reported by SQLint.
test/src/net/conn_mock.c
4 issues
Line: 72
Column: 2
CWE codes:
327
Suggestion:
Use a more secure technique for acquiring random values
static int
mock_init(Connection *conn)
{
    srand(time(0));
    return 0;
}
static ConnOps mock_ops = {
    .size = sizeof(MockConnection),
Reported by FlawFinder.
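srand(time(0)) is flagged as a weak randomness source (CWE-327). For a test mock this is likely acceptable, but a sketch of the suggested stronger technique, seeding from the kernel entropy pool, might look like the following (PostgreSQL's pg_strong_random() is another option):

#include <stdio.h>
#include <time.h>

/* Hypothetical helper: read a seed from /dev/urandom, falling back to the
 * clock if the device cannot be opened or read. */
static unsigned int
secure_seed(void)
{
    unsigned int seed = (unsigned int) time(NULL);
    FILE *f = fopen("/dev/urandom", "rb");

    if (f != NULL)
    {
        (void) fread(&seed, sizeof(seed), 1, f);
        fclose(f);
    }
    return seed;
}

mock_init() would then call srand(secure_seed()) instead of srand(time(0)).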
Line: 22
Column: 2
CWE codes:
119
120
Suggestion:
Perform bounds checking, use functions that limit length, or ensure that the size is larger than the maximum possible length
typedef struct MockConnection
{
    Connection conn;
    char recv_buf[MOCK_MAX_BUF_SIZE];
    int recv_buf_offset;
    int recv_buf_len;
} MockConnection;
static int
static int
Reported by FlawFinder.
Line: 63
Column: 2
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
    {
        bytes_to_read = rand() % (max + 1);
    }
    memcpy(buf, mock->recv_buf + mock->recv_buf_offset, bytes_to_read);
    mock->recv_buf_offset += bytes_to_read;
    return bytes_to_read;
}
Reported by FlawFinder.
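Whether bytes_to_read is already clamped to the data remaining in recv_buf is not visible in the excerpt; a hedged sketch of the bound the suggestion asks for:

/* Never copy past the end of the mock's buffered data, regardless of what
 * the randomized read size came out to. */
int remaining = mock->recv_buf_len - mock->recv_buf_offset;

if (bytes_to_read > remaining)
    bytes_to_read = remaining;
memcpy(buf, mock->recv_buf + mock->recv_buf_offset, bytes_to_read);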
Line: 93
Column: 2
CWE codes:
120
Suggestion:
Make sure destination can always hold the source data
    if (buf_len > MOCK_MAX_BUF_SIZE)
        return -1;
    memcpy(mock->recv_buf, buf, buf_len);
    mock->recv_buf_len = buf_len;
    return mock->recv_buf_len;
}
extern void _conn_mock_init(void);
Reported by FlawFinder.
test/sql/drop_rename_hypertable.sql
4 issues
Line: 5
Column: 1
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\o /dev/null
\ir include/insert_two_partitions.sql
\o
SELECT * FROM test.show_columnsp('_timescaledb_internal.%_hyper%');
Reported by SQLint.
Line: 17
Column: 1
SELECT * FROM "newname";
SELECT * FROM _timescaledb_catalog.hypertable;
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE SCHEMA "newschema" AUTHORIZATION :ROLE_DEFAULT_PERM_USER;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
ALTER TABLE "newname" SET SCHEMA "newschema";
SELECT * FROM "newschema"."newname";
Reported by SQLint.
Line: 19
Column: 1
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE SCHEMA "newschema" AUTHORIZATION :ROLE_DEFAULT_PERM_USER;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
ALTER TABLE "newname" SET SCHEMA "newschema";
SELECT * FROM "newschema"."newname";
SELECT * FROM _timescaledb_catalog.hypertable;
Reported by SQLint.
Line: 28
Column: 1
DROP TABLE "newschema"."newname";
SELECT * FROM _timescaledb_catalog.hypertable;
\dt "public".*
\dt "_timescaledb_catalog".*
\dt "_timescaledb_internal".*
-- Test that renaming ordinary table works
Reported by SQLint.