Skip to content

Commit

Permalink
Enable -Wextra
Browse files Browse the repository at this point in the history
Our code mostly has warnings about comparison with different
signedness.
  • Loading branch information
akuzm committed Oct 27, 2022
1 parent 864da20 commit fef3823
Show file tree
Hide file tree
Showing 52 changed files with 137 additions and 142 deletions.
7 changes: 7 additions & 0 deletions CMakeLists.txt
Expand Up @@ -200,6 +200,13 @@ if(CMAKE_C_COMPILER_ID MATCHES "GNU|AppleClang|Clang")
-Wempty-body
-Wvla
-Wall
-Wextra
# The SQL function arguments macro PG_FUNCTION_ARGS often introduces unused
# arguments.
-Wno-unused-parameter
-Wno-clobbered
# Seems to be broken in GCC 11 with designated initializers.
-Wno-missing-field-initializers
-Wundef
-Wmissing-prototypes
-Wpointer-arith
Expand Down
3 changes: 1 addition & 2 deletions src/adts/bit_array_impl.h
Expand Up @@ -128,10 +128,9 @@ bit_array_recv(const StringInfo buffer)
static inline void
bit_array_send(StringInfo buffer, const BitArray *data)
{
int i;
pq_sendint32(buffer, data->buckets.num_elements);
pq_sendbyte(buffer, data->bits_used_in_last_bucket);
for (i = 0; i < data->buckets.num_elements; i++)
for (uint32 i = 0; i < data->buckets.num_elements; i++)
pq_sendint64(buffer, data->buckets.data[i]);
}

Expand Down
13 changes: 5 additions & 8 deletions src/chunk.c
Expand Up @@ -118,7 +118,7 @@ typedef struct ChunkStubScanCtx
} ChunkStubScanCtx;

static bool
chunk_stub_is_valid(const ChunkStub *stub, unsigned int expected_slices)
chunk_stub_is_valid(const ChunkStub *stub, int16 expected_slices)
{
return stub && stub->id > 0 && stub->constraints && expected_slices == stub->cube->num_slices &&
stub->cube->num_slices == stub->constraints->num_dimension_constraints;
Expand Down Expand Up @@ -3793,10 +3793,8 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
List **affected_data_nodes)

{
uint64 i = 0;
uint64 num_chunks = 0;
Chunk *chunks;
List *dropped_chunk_names = NIL;
const char *schema_name, *table_name;
const int32 hypertable_id = ht->fd.id;
bool has_continuous_aggs;
Expand Down Expand Up @@ -3866,8 +3864,6 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3

if (has_continuous_aggs)
{
int i;

/* Exclusively lock all chunks, and invalidate the continuous
* aggregates in the regions covered by the chunks. We do this in two
* steps: first lock all the chunks and then invalidate the
Expand All @@ -3878,7 +3874,7 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
* this transaction, which allows moving the invalidation threshold
* without having to worry about new invalidations while
* refreshing. */
for (i = 0; i < num_chunks; i++)
for (uint64 i = 0; i < num_chunks; i++)
{
LockRelationOid(chunks[i].table_id, ExclusiveLock);

Expand All @@ -3893,7 +3889,7 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
* The invalidation will allow the refresh command on a continuous
* aggregate to see that this region was dropped and will
* therefore be able to refresh accordingly.*/
for (i = 0; i < num_chunks; i++)
for (uint64 i = 0; i < num_chunks; i++)
{
int64 start = ts_chunk_primary_dimension_start(&chunks[i]);
int64 end = ts_chunk_primary_dimension_end(&chunks[i]);
Expand All @@ -3902,7 +3898,8 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
}
}

for (i = 0; i < num_chunks; i++)
List *dropped_chunk_names = NIL;
for (uint64 i = 0; i < num_chunks; i++)
{
char *chunk_name;
ListCell *lc;
Expand Down
4 changes: 2 additions & 2 deletions src/chunk_scan.c
Expand Up @@ -46,8 +46,8 @@ ts_chunk_scan_by_chunk_ids(const Hyperspace *hs, const List *chunk_ids, unsigned
MemoryContext orig_mcxt;
Chunk **locked_chunks = NULL;
Chunk **unlocked_chunks = NULL;
unsigned int locked_chunk_count = 0;
unsigned int unlocked_chunk_count = 0;
int locked_chunk_count = 0;
int unlocked_chunk_count = 0;
ListCell *lc;
int remote_chunk_count = 0;

Expand Down
4 changes: 2 additions & 2 deletions src/compression_with_clause.c
Expand Up @@ -76,7 +76,6 @@ parse_segment_collist(char *inpstr, Hypertable *hypertable)
List *parsed;
ListCell *lc;
SelectStmt *select;
short index = 0;
List *collist = NIL;
RawStmt *raw;

Expand Down Expand Up @@ -119,6 +118,7 @@ parse_segment_collist(char *inpstr, Hypertable *hypertable)
if (select->sortClause != NIL)
throw_segment_by_error(inpstr);

short index = 0;
foreach (lc, select->groupClause)
{
ColumnRef *cf;
Expand Down Expand Up @@ -161,7 +161,6 @@ parse_order_collist(char *inpstr, Hypertable *hypertable)
List *parsed;
ListCell *lc;
SelectStmt *select;
short index = 0;
List *collist = NIL;
RawStmt *raw;

Expand Down Expand Up @@ -203,6 +202,7 @@ parse_order_collist(char *inpstr, Hypertable *hypertable)
if (select->groupClause != NIL)
throw_order_by_error(inpstr);

short index = 0;
foreach (lc, select->sortClause)
{
SortBy *sort_by;
Expand Down
3 changes: 1 addition & 2 deletions src/debug_guc.c
Expand Up @@ -107,7 +107,6 @@ get_show_upper_mask(const char *paths, size_t paths_len)
static bool
set_debug_flag(const char *flag_string, size_t length, DebugOptimizerFlags *flags)
{
int i;
char *end;
size_t flag_length;

Expand All @@ -121,7 +120,7 @@ set_debug_flag(const char *flag_string, size_t length, DebugOptimizerFlags *flag
flag_length = length;
}

for (i = 0; i < sizeof(g_flag_names) / sizeof(*g_flag_names); ++i)
for (size_t i = 0; i < sizeof(g_flag_names) / sizeof(*g_flag_names); ++i)
if (strncmp(g_flag_names[i].name, flag_string, flag_length) == 0)
switch (g_flag_names[i].flag)
{
Expand Down
2 changes: 1 addition & 1 deletion src/dimension.c
Expand Up @@ -1493,7 +1493,7 @@ ts_dimension_add(PG_FUNCTION_ARGS)
.colname = PG_ARGISNULL(1) ? NULL : PG_GETARG_NAME(1),
.num_slices = PG_ARGISNULL(2) ? DatumGetInt32(-1) : PG_GETARG_INT32(2),
.num_slices_is_set = !PG_ARGISNULL(2),
.interval_datum = PG_ARGISNULL(3) ? DatumGetInt32(-1) : PG_GETARG_DATUM(3),
.interval_datum = PG_ARGISNULL(3) ? Int32GetDatum(-1) : PG_GETARG_DATUM(3),
.interval_type = PG_ARGISNULL(3) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 3),
.partitioning_func = PG_ARGISNULL(4) ? InvalidOid : PG_GETARG_OID(4),
.if_not_exists = PG_ARGISNULL(5) ? false : PG_GETARG_BOOL(5),
Expand Down
6 changes: 4 additions & 2 deletions src/extension_utils.c
Expand Up @@ -122,12 +122,14 @@ get_proxy_table_relid()
return get_relname_relid(EXTENSION_PROXY_TABLE, nsid);
}

static bool inline extension_exists()
inline static bool
extension_exists()
{
return OidIsValid(get_extension_oid(EXTENSION_NAME, true));
}

static bool inline extension_is_transitioning()
inline static bool
extension_is_transitioning()
{
/*
* Determine whether the extension is being created or upgraded (as a
Expand Down
3 changes: 1 addition & 2 deletions src/func_cache.c
Expand Up @@ -501,7 +501,6 @@ initialize_func_info()
Oid pg_nsp = get_namespace_oid("pg_catalog", false);
HeapTuple tuple;
Relation rel;
int i;

func_hash = hash_create("func_cache",
_MAX_CACHE_FUNCTIONS,
Expand All @@ -510,7 +509,7 @@ initialize_func_info()

rel = table_open(ProcedureRelationId, AccessShareLock);

for (i = 0; i < _MAX_CACHE_FUNCTIONS; i++)
for (size_t i = 0; i < _MAX_CACHE_FUNCTIONS; i++)
{
FuncInfo *finfo = &funcinfo[i];
Oid namespaceoid = pg_nsp;
Expand Down
11 changes: 2 additions & 9 deletions src/guc.c
Expand Up @@ -16,13 +16,6 @@
#endif

#ifdef USE_TELEMETRY
typedef enum TelemetryLevel
{
TELEMETRY_OFF,
TELEMETRY_NO_FUNCTIONS,
TELEMETRY_BASIC,
} TelemetryLevel;

/* Define which level means on. We use this object to have at least one object
* of type TelemetryLevel in the code, otherwise pgindent won't work for the
* type */
Expand Down Expand Up @@ -87,7 +80,7 @@ TSDLLEXPORT bool ts_guc_enable_skip_scan = true;
int ts_guc_max_open_chunks_per_insert = 10;
int ts_guc_max_cached_chunks_per_hypertable = 10;
#ifdef USE_TELEMETRY
int ts_guc_telemetry_level = TELEMETRY_DEFAULT;
TelemetryLevel ts_guc_telemetry_level = TELEMETRY_DEFAULT;
char *ts_telemetry_cloud = NULL;
#endif

Expand Down Expand Up @@ -453,7 +446,7 @@ _guc_init(void)
DefineCustomEnumVariable("timescaledb.telemetry_level",
"Telemetry settings level",
"Level used to determine which telemetry to send",
&ts_guc_telemetry_level,
(int *) &ts_guc_telemetry_level,
TELEMETRY_DEFAULT,
telemetry_level_options,
PGC_USERSET,
Expand Down
11 changes: 10 additions & 1 deletion src/guc.h
Expand Up @@ -32,10 +32,19 @@ extern TSDLLEXPORT bool ts_guc_enable_skip_scan;
extern bool ts_guc_restoring;
extern int ts_guc_max_open_chunks_per_insert;
extern int ts_guc_max_cached_chunks_per_hypertable;

#ifdef USE_TELEMETRY
extern int ts_guc_telemetry_level;
typedef enum TelemetryLevel
{
TELEMETRY_OFF,
TELEMETRY_NO_FUNCTIONS,
TELEMETRY_BASIC,
} TelemetryLevel;

extern TelemetryLevel ts_guc_telemetry_level;
extern char *ts_telemetry_cloud;
#endif

extern TSDLLEXPORT char *ts_guc_license;
extern char *ts_last_tune_time;
extern char *ts_last_tune_version;
Expand Down
7 changes: 2 additions & 5 deletions src/histogram.c
Expand Up @@ -144,8 +144,6 @@ ts_hist_combinefunc(PG_FUNCTION_ARGS)
}
else
{
Size i;

/* Since number of buckets is part of the aggregation call the initialization
* might be different in the partials so we error out if they are not identical. */
if (state1->nbuckets != state2->nbuckets)
Expand All @@ -154,7 +152,7 @@ ts_hist_combinefunc(PG_FUNCTION_ARGS)
result = copy_state(aggcontext, state1);

/* Combine values from state1 and state2 when both states are non-null */
for (i = 0; i < state1->nbuckets; i++)
for (int32 i = 0; i < state1->nbuckets; i++)
{
/* Perform addition using int64 to check for overflow */
int64 val = (int64) DatumGetInt32(result->buckets[i]);
Expand All @@ -174,7 +172,6 @@ Datum
ts_hist_serializefunc(PG_FUNCTION_ARGS)
{
Histogram *state;
Size i;
StringInfoData buf;

Assert(!PG_ARGISNULL(0));
Expand All @@ -183,7 +180,7 @@ ts_hist_serializefunc(PG_FUNCTION_ARGS)
pq_begintypsend(&buf);
pq_sendint32(&buf, state->nbuckets);

for (i = 0; i < state->nbuckets; i++)
for (int32 i = 0; i < state->nbuckets; i++)
pq_sendint32(&buf, DatumGetInt32(state->buckets[i]));

PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
Expand Down
14 changes: 9 additions & 5 deletions src/hypertable.c
Expand Up @@ -853,7 +853,7 @@ ts_hypertable_set_num_dimensions(Hypertable *ht, int16 num_dimensions)

#define DEFAULT_ASSOCIATED_TABLE_PREFIX_FORMAT "_hyper_%d"
#define DEFAULT_ASSOCIATED_DISTRIBUTED_TABLE_PREFIX_FORMAT "_dist_hyper_%d"
static const int MAXIMUM_PREFIX_LENGTH = NAMEDATALEN - 16;
static const size_t MAXIMUM_PREFIX_LENGTH = NAMEDATALEN - 16;

static void
hypertable_insert_relation(Relation rel, FormData_hypertable *fd)
Expand Down Expand Up @@ -1328,7 +1328,11 @@ table_has_replica_identity(const Relation rel)
return rel->rd_rel->relreplident != REPLICA_IDENTITY_DEFAULT;
}

static bool inline table_has_rules(Relation rel) { return rel->rd_rules != NULL; }
inline static bool
table_has_rules(Relation rel)
{
return rel->rd_rules != NULL;
}

bool
ts_hypertable_has_chunks(Oid table_relid, LOCKMODE lockmode)
Expand Down Expand Up @@ -2386,7 +2390,7 @@ typedef struct AccumHypertable
} AccumHypertable;

bool
ts_is_partitioning_column(const Hypertable *ht, Index column_attno)
ts_is_partitioning_column(const Hypertable *ht, AttrNumber column_attno)
{
uint16 i;

Expand Down Expand Up @@ -2551,7 +2555,7 @@ ts_hypertable_create_compressed(Oid table_relid, int32 hypertable_id)
ChunkSizingInfo *chunk_sizing_info;
Relation rel;
rel = table_open(table_relid, AccessExclusiveLock);
int32 row_size = MAXALIGN(SizeofHeapTupleHeader);
Size row_size = MAXALIGN(SizeofHeapTupleHeader);
/* estimate tuple width of compressed hypertable */
for (int i = 1; i <= RelationGetNumberOfAttributes(rel); i++)
{
Expand All @@ -2568,7 +2572,7 @@ ts_hypertable_create_compressed(Oid table_relid, int32 hypertable_id)
{
ereport(WARNING,
(errmsg("compressed row size might exceed maximum row size"),
errdetail("Estimated row size of compressed hypertable is %u. This exceeds the "
errdetail("Estimated row size of compressed hypertable is %zu. This exceeds the "
"maximum size of %zu and can cause compression of chunks to fail.",
row_size,
MaxHeapTupleSize)));
Expand Down
2 changes: 1 addition & 1 deletion src/hypertable.h
Expand Up @@ -145,7 +145,7 @@ extern Tablespace *ts_hypertable_get_tablespace_at_offset_from(int32 hypertable_
Oid tablespace_oid, int16 offset);
extern bool ts_hypertable_has_chunks(Oid table_relid, LOCKMODE lockmode);
extern void ts_hypertables_rename_schema_name(const char *old_name, const char *new_name);
extern bool ts_is_partitioning_column(const Hypertable *ht, Index column_attno);
extern bool ts_is_partitioning_column(const Hypertable *ht, AttrNumber column_attno);
extern TSDLLEXPORT bool ts_hypertable_set_compressed(Hypertable *ht,
int32 compressed_hypertable_id);
extern TSDLLEXPORT bool ts_hypertable_unset_compressed(Hypertable *ht);
Expand Down
4 changes: 2 additions & 2 deletions src/import/allpaths.c
Expand Up @@ -153,7 +153,7 @@ ts_set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeT
RelOptInfo *childrel;

/* append_rel_list contains all append rels; ignore others */
if (appinfo->parent_relid != parentRTindex)
if (appinfo->parent_relid != (Index) parentRTindex)
continue;

/* Re-locate the child RTE and RelOptInfo */
Expand Down Expand Up @@ -565,7 +565,7 @@ ts_set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEn
ListCell *childvars;

/* append_rel_list contains all append rels; ignore others */
if (appinfo->parent_relid != parentRTindex)
if (appinfo->parent_relid != (Index) parentRTindex)
continue;

childRTindex = appinfo->child_relid;
Expand Down
8 changes: 5 additions & 3 deletions src/loader/loader.c
Expand Up @@ -122,7 +122,7 @@ static ProcessUtility_hook_type prev_ProcessUtility_hook;
/* This is timescaleDB's versioned-extension's post_parse_analyze_hook */
static post_parse_analyze_hook_type extension_post_parse_analyze_hook = NULL;

static void inline extension_check(void);
inline static void extension_check(void);
#if PG14_LT
static void call_extension_post_parse_analyze_hook(ParseState *pstate, Query *query);
#else
Expand Down Expand Up @@ -708,7 +708,8 @@ _PG_init(void)
ProcessUtility_hook = loader_process_utility_hook;
}

static void inline do_load()
inline static void
do_load()
{
char *version = extension_version();
char soname[MAX_SO_NAME_LEN];
Expand Down Expand Up @@ -782,7 +783,8 @@ static void inline do_load()
post_parse_analyze_hook = old_hook;
}

static void inline extension_check()
inline static void
extension_check()
{
enum ExtensionState state = extension_current_state();

Expand Down
2 changes: 1 addition & 1 deletion src/net/http.c
Expand Up @@ -73,7 +73,7 @@ ts_http_send_and_recv(Connection *conn, HttpRequest *req, HttpResponseState *sta
{
ret = ts_connection_write(conn, built_request + write_off, request_len);

if (ret < 0 || ret > request_len)
if (ret < 0 || (size_t) ret > request_len)
return HTTP_ERROR_WRITE;

if (ret == 0)
Expand Down
2 changes: 1 addition & 1 deletion src/net/http_request.c
Expand Up @@ -233,7 +233,7 @@ ts_http_request_build(HttpRequest *req, size_t *buf_size)
if (content_length != -1)
{
/* make sure it's equal to body_len */
if (content_length != req->body_len)
if ((size_t) content_length != req->body_len)
{
return NULL;
}
Expand Down

0 comments on commit fef3823

Please sign in to comment.