Skip to content

Commit 56948ee

Browse files
grooverdanLinuxJedi
authored and committed
clang15 warnings - unused vars and old prototypes
clang15 finally errors on old prototype definitions. It is also a lot fussier about variables that aren't used, as is the case a number of times with loop counters that aren't examined. RocksDB was complaining that its get_range function was declared without the array length in ha_rocksdb.h. A constant is used rather than trying to import the Rdb_key_def::INDEX_NUMBER_SIZE header (which was causing a lot of errors on the definition of other orders). If the constant does change, we can be assured that the same compile warnings will tell us of the error. The ha_rocksdb::index_read_map_impl DBUG_EXECUTE_IF was made similar to the existing endless-loop functions used in replication tests. It's a rather moot point, as the rocksdb.force_shutdown test that uses myrocks_busy_loop_on_row_read is currently disabled.
1 parent d7f4479 commit 56948ee

File tree

9 files changed

+14
-41
lines changed

9 files changed

+14
-41
lines changed

storage/archive/azio.c

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -249,8 +249,7 @@ int azdopen(azio_stream *s, File fd, int Flags)
249249
for end of file.
250250
IN assertion: the stream s has been sucessfully opened for reading.
251251
*/
252-
int get_byte(s)
253-
azio_stream *s;
252+
int get_byte(azio_stream *s)
254253
{
255254
if (s->z_eof) return EOF;
256255
if (s->stream.avail_in == 0)
@@ -427,8 +426,7 @@ void read_header(azio_stream *s, unsigned char *buffer)
427426
* Cleanup then free the given azio_stream. Return a zlib error code.
428427
Try freeing in the reverse order of allocations.
429428
*/
430-
int destroy (s)
431-
azio_stream *s;
429+
int destroy (azio_stream *s)
432430
{
433431
int err = Z_OK;
434432

@@ -679,9 +677,7 @@ int do_flush (azio_stream *s, int flush)
679677
return s->z_err == Z_STREAM_END ? Z_OK : s->z_err;
680678
}
681679

682-
int ZEXPORT azflush (s, flush)
683-
azio_stream *s;
684-
int flush;
680+
int ZEXPORT azflush (azio_stream *s, int flush)
685681
{
686682
int err;
687683

@@ -708,8 +704,7 @@ int ZEXPORT azflush (s, flush)
708704
/* ===========================================================================
709705
Rewinds input file.
710706
*/
711-
int azrewind (s)
712-
azio_stream *s;
707+
int azrewind (azio_stream *s)
713708
{
714709
if (s == NULL || s->mode != 'r') return -1;
715710

@@ -733,10 +728,7 @@ int azrewind (s)
733728
SEEK_END is not implemented, returns error.
734729
In this version of the library, azseek can be extremely slow.
735730
*/
736-
my_off_t azseek (s, offset, whence)
737-
azio_stream *s;
738-
my_off_t offset;
739-
int whence;
731+
my_off_t azseek (azio_stream *s, my_off_t offset, int whence)
740732
{
741733

742734
if (s == NULL || whence == SEEK_END ||
@@ -812,8 +804,7 @@ my_off_t azseek (s, offset, whence)
812804
given compressed file. This position represents a number of bytes in the
813805
uncompressed data stream.
814806
*/
815-
my_off_t ZEXPORT aztell (file)
816-
azio_stream *file;
807+
my_off_t ZEXPORT aztell (azio_stream *file)
817808
{
818809
return azseek(file, 0L, SEEK_CUR);
819810
}

storage/connect/connect.cc

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -237,7 +237,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
237237
bool del, PHC)
238238
{
239239
char *p;
240-
int i, n;
240+
int n;
241241
bool rcop= true;
242242
PCOL colp;
243243
//PCOLUMN cp;
@@ -276,7 +276,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
276276
n = strlen(p) + 1;
277277
} // endfor p
278278

279-
for (i = 0, colp = tdbp->GetColumns(); colp; i++, colp = colp->GetNext()) {
279+
for (colp = tdbp->GetColumns(); colp; colp = colp->GetNext()) {
280280
if (colp->InitValue(g))
281281
throw 2;
282282

@@ -310,7 +310,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2,
310310
n = strlen(p) + 1;
311311
} // endfor p
312312

313-
for (i = 0, colp = utp->GetColumns(); colp; i++, colp = colp->GetNext()) {
313+
for (colp = utp->GetColumns(); colp; colp = colp->GetNext()) {
314314
if (colp->InitValue(g))
315315
throw 5;
316316

storage/connect/ioapi.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -221,8 +221,7 @@ static int ZCALLBACK ferror_file_func (voidpf opaque __attribute__((unused)), vo
221221
return ret;
222222
}
223223

224-
void fill_fopen_filefunc (pzlib_filefunc_def)
225-
zlib_filefunc_def* pzlib_filefunc_def;
224+
void fill_fopen_filefunc (zlib_filefunc_def* pzlib_filefunc_def)
226225
{
227226
pzlib_filefunc_def->zopen_file = fopen_file_func;
228227
pzlib_filefunc_def->zread_file = fread_file_func;

storage/connect/tabtbl.cpp

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -230,7 +230,6 @@ PCOL TDBTBL::InsertSpecialColumn(PCOL scp)
230230
/***********************************************************************/
231231
bool TDBTBL::InitTableList(PGLOBAL g)
232232
{
233-
int n;
234233
uint sln;
235234
const char *scs;
236235
PTABLE tp, tabp;
@@ -243,7 +242,7 @@ bool TDBTBL::InitTableList(PGLOBAL g)
243242
sln = hc->get_table()->s->connect_string.length;
244243
// PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath());
245244

246-
for (n = 0, tp = tdp->Tablep; tp; tp = tp->GetNext()) {
245+
for (tp = tdp->Tablep; tp; tp = tp->GetNext()) {
247246
if (TestFil(g, To_CondFil, tp)) {
248247
tabp = new(g) XTAB(tp);
249248

@@ -276,7 +275,6 @@ bool TDBTBL::InitTableList(PGLOBAL g)
276275
else
277276
Tablist = tabp;
278277

279-
n++;
280278
} // endif filp
281279

282280
} // endfor tp

storage/connect/zip.c

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1471,11 +1471,6 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in
14711471
{
14721472
uLong uTotalOutBefore = zi->ci.stream.total_out;
14731473
err=deflate(&zi->ci.stream, Z_NO_FLUSH);
1474-
if(uTotalOutBefore > zi->ci.stream.total_out)
1475-
{
1476-
int bBreak = 0;
1477-
bBreak++;
1478-
}
14791474

14801475
zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ;
14811476
}

storage/rocksdb/ha_rocksdb.cc

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8468,8 +8468,7 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key,
84688468
const key_range *end_key) {
84698469
DBUG_ENTER_FUNC();
84708470

8471-
DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", int debug_i = 0;
8472-
while (1) { debug_i++; });
8471+
DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", my_sleep(50000););
84738472

84748473
int rc = 0;
84758474

@@ -12124,7 +12123,6 @@ static int calculate_stats(
1212412123
}
1212512124
}
1212612125

12127-
int num_sst = 0;
1212812126
for (const auto &it : props) {
1212912127
std::vector<Rdb_index_stats> sst_stats;
1213012128
Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats);
@@ -12153,7 +12151,6 @@ static int calculate_stats(
1215312151
stats[it1.m_gl_index_id].merge(
1215412152
it1, true, it_index->second->max_storage_fmt_length());
1215512153
}
12156-
num_sst++;
1215712154
}
1215812155

1215912156
if (include_memtables) {

storage/rocksdb/ha_rocksdb.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -401,7 +401,7 @@ class ha_rocksdb : public my_core::handler {
401401
void free_key_buffers();
402402

403403
// the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE
404-
rocksdb::Range get_range(const int i, uchar buf[]) const;
404+
rocksdb::Range get_range(const int i, uchar buf[2 * 4]) const;
405405

406406
/*
407407
Perf timers for data reads

storage/tokudb/PerconaFT/ft/loader/loader.cc

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2423,7 +2423,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
24232423
// The pivots file will contain all the pivot strings (in the form <size(32bits)> <data>)
24242424
// The pivots_fname is the name of the pivots file.
24252425
// Note that the pivots file will have one extra pivot in it (the last key in the dictionary) which will not appear in the tree.
2426-
int64_t n_pivots=0; // number of pivots in pivots_file
24272426
FIDX pivots_file; // the file
24282427

24292428
r = ft_loader_open_temp_file (bl, &pivots_file);
@@ -2539,8 +2538,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
25392538

25402539
allocate_node(&sts, lblock);
25412540

2542-
n_pivots++;
2543-
25442541
invariant(maxkey.data != NULL);
25452542
if ((r = bl_write_dbt(&maxkey, pivots_stream, NULL, nullptr, bl))) {
25462543
ft_loader_set_panic(bl, r, true, which_db, nullptr, nullptr);
@@ -2616,8 +2613,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl,
26162613
// We haven't paniced, so the sum should add up.
26172614
invariant(used_estimate == total_disksize_estimate);
26182615

2619-
n_pivots++;
2620-
26212616
{
26222617
DBT key = make_dbt(0,0); // must write an extra DBT into the pivots file.
26232618
r = bl_write_dbt(&key, pivots_stream, NULL, nullptr, bl);
@@ -3302,7 +3297,7 @@ static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, st
33023297
int height = 1;
33033298

33043299
// Watch out for the case where we saved the last pivot but didn't write any more nodes out.
3305-
// The trick is not to look at n_pivots, but to look at blocks.n_blocks
3300+
// The trick is to look at blocks.n_blocks
33063301
while (sts->n_subtrees > 1) {
33073302
// If there is more than one block in blocks, then we must build another level of the tree.
33083303

storage/tokudb/PerconaFT/ft/txn/txn_manager.cc

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -208,12 +208,10 @@ verify_snapshot_system(TXN_MANAGER txn_manager UU()) {
208208
{
209209
//verify neither pair->begin_id nor end_id is in snapshot_xids
210210
TOKUTXN curr_txn = txn_manager->snapshot_head;
211-
uint32_t curr_index = 0;
212211
while (curr_txn != NULL) {
213212
invariant(tuple->begin_id != curr_txn->txnid.parent_id64);
214213
invariant(tuple->end_id != curr_txn->txnid.parent_id64);
215214
curr_txn = curr_txn->snapshot_next;
216-
curr_index++;
217215
}
218216
}
219217
{

0 commit comments

Comments
 (0)