Skip to content

Commit

Permalink
Merge bfd3502 into 504af68
Browse files Browse the repository at this point in the history
  • Loading branch information
CyberDem0n committed Jul 26, 2021
2 parents 504af68 + bfd3502 commit e8e2951
Show file tree
Hide file tree
Showing 3 changed files with 122 additions and 58 deletions.
15 changes: 11 additions & 4 deletions bg_mon.c
Original file line number Diff line number Diff line change
Expand Up @@ -262,13 +262,20 @@ static void device_io_output(struct evbuffer *evb, device_stat *stats, int id)
evbuffer_add_printf(evb, "\"name\":\"%s\",\"io\":{", d.name);
evbuffer_add_printf(evb, "\"read\":%.2f,\"reads_ps\":%.2f", d.read_diff, d.read_completed_diff);
evbuffer_add_printf(evb, ",\"write\":%.2f,\"writes_ps\":%.2f", d.write_diff, d.write_completed_diff);
if (d.extended) {
if (HAS_EXTENDED_STATS(d)) {
evbuffer_add_printf(evb, ",\"read_merges\":%.2f,\"write_merges\":%.2f", d.read_merges_diff, d.write_merges_diff);
evbuffer_add_printf(evb, ",\"average_queue_length\":%.2f", d.average_queue_length);
evbuffer_add_printf(evb, ",\"average_request_size\":%.2f", d.average_request_size);
evbuffer_add_printf(evb, ",\"average_service_time\":%.2f", d.average_service_time);
evbuffer_add_printf(evb, ",\"await\":%.2f,\"read_await\":%.2f", d.await, d.read_await);
evbuffer_add_printf(evb, ",\"write_await\":%.2f,\"util\":%.2f", d.write_await, d.util);
evbuffer_add_printf(evb, ",\"read_average_request_size\":%.2f", d.read_average_request_size);
evbuffer_add_printf(evb, ",\"write_average_request_size\":%.2f", d.write_average_request_size);
if (HAS_DISCARD_STATS(d))
evbuffer_add_printf(evb, ",\"discard_average_request_size\":%.2f", d.discard_average_request_size);
evbuffer_add_printf(evb, ",\"average_service_time\":%.2f,\"util\":%.2f", d.average_service_time, d.util);
evbuffer_add_printf(evb, ",\"await\":%.2f,\"read_await\":%.2f,\"write_await\":%.2f", d.await, d.read_await, d.write_await);
if (HAS_DISCARD_STATS(d))
evbuffer_add_printf(evb, ",\"discard_await\":%.2f", d.discard_await);
if (HAS_FLUSH_STATS(d))
evbuffer_add_printf(evb, ",\"flush_await\":%.2f", d.flush_await);
}
if (d.slave_size > 0) {
int n;
Expand Down
140 changes: 89 additions & 51 deletions disk_stats.c
Original file line number Diff line number Diff line change
Expand Up @@ -299,46 +299,58 @@ static void read_io_stats(device_stats *ds)
unsigned long read_sectors_or_write_completed, read_time_or_write_sectors;
unsigned long write_completed, write_merges, write_sectors;
unsigned int ios_in_progress, write_time, total_time, weighted_time;
unsigned long discard_completed, discard_merges, discard_sectors, discard_time;
unsigned long flush_completed, flush_time;
device_stat *stats = ds->values;
FILE *io = fopen("/proc/diskstats", "r");

if (io == NULL) return;

while (i < ds->size && fgets(buf, sizeof(buf), io)) {
n = sscanf(buf, "%*u %*u %s %lu %lu %lu %lu %lu %lu %lu %u %u %u %u",
while (fgets(buf, sizeof(buf), io)) {
i = sscanf(buf, "%*u %*u %s %lu %lu %lu %lu %lu %lu %lu %u %u %u %u %lu %lu %lu %lu %lu %lu",
device_name, &read_completed, &read_merges_or_read_sectors,
&read_sectors_or_write_completed, &read_time_or_write_sectors,
&write_completed, &write_merges, &write_sectors, &write_time,
&ios_in_progress, &total_time, &weighted_time);
if (n == 12) {
for (n = 0; n < ds->size; ++n) {
if (strcmp(stats[n].name, device_name) == 0) {
stats[n].is_used = true;
stats[n].read_completed = read_completed;
stats[n].read_merges = read_merges_or_read_sectors;
stats[n].read_sectors = read_sectors_or_write_completed;
stats[n].read_time = (unsigned int)read_time_or_write_sectors;
stats[n].write_completed = write_completed;
stats[n].write_merges = write_merges;
stats[n].write_sectors = write_sectors;
stats[n].write_time = write_time;
stats[n].ios_in_progress = ios_in_progress;
stats[n].total_time = total_time;
stats[n].weighted_time = weighted_time;
stats[n].extended = true;
}
}
} else if (n == 5) {
for (n = 0; n < ds->size; ++n) {
if (strcmp(stats[n].name, device_name) == 0) {
stats[n].is_used = true;
stats[n].read_completed = read_completed;
stats[n].read_sectors = read_merges_or_read_sectors;
stats[n].write_completed = read_sectors_or_write_completed;
stats[n].write_sectors = read_time_or_write_sectors;
&ios_in_progress, &total_time, &weighted_time,
&discard_completed, &discard_merges, &discard_sectors,
&discard_time, &flush_completed, &flush_time);
if (i < 12 && i != 5) continue;

for (n = 0; n < ds->size; ++n) {
if (strcmp(stats[n].name, device_name) != 0) continue;

stats[n].fields = i;
if (i == 5) {
stats[n].read_completed = read_completed;
stats[n].read_sectors = read_merges_or_read_sectors;
stats[n].write_completed = read_sectors_or_write_completed;
stats[n].write_sectors = read_time_or_write_sectors;
} else {
stats[n].read_completed = read_completed;
stats[n].read_merges = read_merges_or_read_sectors;
stats[n].read_sectors = read_sectors_or_write_completed;
stats[n].read_time = (unsigned int)read_time_or_write_sectors;
stats[n].write_completed = write_completed;
stats[n].write_merges = write_merges;
stats[n].write_sectors = write_sectors;
stats[n].write_time = write_time;
stats[n].ios_in_progress = ios_in_progress;
stats[n].total_time = total_time;
stats[n].weighted_time = weighted_time;

if (i >= 16) {
stats[n].discard_completed = discard_completed;
stats[n].discard_merges = discard_merges;
stats[n].discard_sectors = discard_sectors;
stats[n].discard_time = discard_time;

if (i >= 18) {
stats[n].flush_completed = flush_completed;
stats[n].flush_time = flush_time;
}
}
}
} else continue;
}
}
fclose(io);
}
Expand All @@ -356,36 +368,62 @@ static void diff_disk_stats(device_stats *new_stats)
itv = new_stats->uptime - disk_stats_old.dstats.uptime;

for (i = 0; i < new_stats->size; ++i) {
double cmpl_diff, tput;
device_stat *n = new_stats->values + i;

while (j < disk_stats_old.dstats.size && !disk_stats_old.dstats.values[j].is_used)
while (j < disk_stats_old.dstats.size && disk_stats_old.dstats.values[j].fields == 0)
++j;

if (j >= disk_stats_old.dstats.size)
break;

o = disk_stats_old.dstats.values[j++];

if (n->extended && o.extended) {
double cmpl_diff = n->read_completed + n->write_completed - o.read_completed - o.write_completed;
double tput = cmpl_diff * SC_CLK_TCK / itv / 10.0;
n->util = MINIMUM(S_VALUE(o.total_time, n->total_time, itv) / 10.0, 100.0);
n->average_service_time = tput ? n->util / tput : 0.0;
n->average_request_size = cmpl_diff ? (n->read_sectors - o.read_sectors
+ n->write_sectors - o.write_sectors) / cmpl_diff : 0.0;
n->average_queue_length = S_VALUE(o.weighted_time, n->weighted_time, itv) / 1000.0;
n->await = cmpl_diff ? (n->read_time - o.read_time + n->write_time - o.write_time) / cmpl_diff : 0.0;
cmpl_diff = n->read_completed - o.read_completed;
n->read_await = cmpl_diff ? (n->read_time - o.read_time) / cmpl_diff : 0.0;
cmpl_diff = n->write_completed - o.write_completed;
n->write_await = cmpl_diff ? (n->write_time - o.write_time) / cmpl_diff : 0.0;
n->read_merges_diff = S_VALUE(o.read_merges, n->read_merges, itv);
n->write_merges_diff = S_VALUE(o.write_merges, n->write_merges, itv);
}
/* IOPS */
n->read_completed_diff = S_VALUE(o.read_completed, n->read_completed, itv);
n->write_completed_diff = S_VALUE(o.write_completed, n->write_completed, itv);
n->read_diff = S_VALUE(o.read_sectors, n->read_sectors, itv) / 2.0; /* to obtain diffs in kB */
n->write_diff = S_VALUE(o.write_sectors, n->write_sectors, itv) / 2.0; /* to obtain diffs in kB */
n->discard_completed_diff = S_VALUE(o.discard_completed, n->discard_completed, itv);
n->flush_completed_diff = S_VALUE(o.flush_completed, n->flush_completed, itv);

/* The sector size is 512 bytes. By dividing by 2.0 we get the throughput in kB/s */
n->read_diff = S_VALUE(o.read_sectors, n->read_sectors, itv) / 2.0;
n->write_diff = S_VALUE(o.write_sectors, n->write_sectors, itv) / 2.0;
n->discard_diff = S_VALUE(o.discard_sectors, n->discard_sectors, itv) / 2.0;

/* Merges/s */
n->read_merges_diff = S_VALUE(o.read_merges, n->read_merges, itv);
n->write_merges_diff = S_VALUE(o.write_merges, n->write_merges, itv);
n->discard_merges_diff = S_VALUE(o.discard_merges, n->discard_merges, itv);

cmpl_diff = (n->read_completed - o.read_completed) +
(n->write_completed - o.write_completed) +
(n->discard_completed - o.discard_completed);
tput = cmpl_diff * SC_CLK_TCK / itv / 10.0;

n->util = MINIMUM(S_VALUE(o.total_time, n->total_time, itv) / 10.0, 100.0);
n->average_service_time = tput ? n->util / tput : 0.0;
n->average_request_size = cmpl_diff ? (n->read_sectors - o.read_sectors +
n->write_sectors - o.write_sectors +
n->discard_sectors - o.discard_sectors) / cmpl_diff / 2.0: 0.0;
n->average_queue_length = S_VALUE(o.weighted_time, n->weighted_time, itv) / 1000.0;
n->await = cmpl_diff ? (n->read_time - o.read_time +
n->write_time - o.write_time +
n->discard_time - o.discard_time) / cmpl_diff : 0.0;

cmpl_diff = n->read_completed - o.read_completed;
n->read_await = cmpl_diff ? (n->read_time - o.read_time) / cmpl_diff : 0.0;
n->read_average_request_size = cmpl_diff ? (n->read_sectors - o.read_sectors) / cmpl_diff / 2.0 : 0.0;

cmpl_diff = n->write_completed - o.write_completed;
n->write_await = cmpl_diff ? (n->write_time - o.write_time) / cmpl_diff : 0.0;
n->write_average_request_size = cmpl_diff ? (n->write_sectors - o.write_sectors) / cmpl_diff / 2.0 : 0.0;

cmpl_diff = n->discard_completed - o.discard_completed;
n->discard_await = cmpl_diff ? (n->discard_time - o.discard_time) / cmpl_diff : 0.0;
n->discard_average_request_size = cmpl_diff ? (n->discard_sectors - o.discard_sectors) / cmpl_diff / 2.0 : 0.0;

cmpl_diff = n->flush_completed - o.flush_completed;
n->flush_await = cmpl_diff ? (n->flush_time - o.flush_time) / cmpl_diff : 0.0;
}
}

Expand Down Expand Up @@ -456,7 +494,7 @@ static bool copy_device_stats(device_stats o, device_stats *n)
n->size = 0;

for (i = 0; i < o.size; ++i)
if (o.values[i].is_used)
if (o.values[i].fields > 0)
++len;
else {
FREE(o.values[i].name);
Expand All @@ -468,7 +506,7 @@ static bool copy_device_stats(device_stats o, device_stats *n)
n->values = repalloc(n->values, (n->len = len)*sizeof(device_stat));

for (i = 0; i < o.size; ++i)
if (o.values[i].is_used) {
if (o.values[i].fields > 0) {
memset(n->values + n->size, 0, sizeof(device_stat));
if (ret) n->values[n->size].slave_size = 0;
else {
Expand Down
25 changes: 22 additions & 3 deletions disk_stats.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@
typedef struct device_stat {
char *name;
size_t name_len;
bool is_used;
size_t fields;

unsigned long read_completed;
unsigned long read_merges;
unsigned long read_sectors;
Expand All @@ -16,30 +17,48 @@ typedef struct device_stat {
unsigned int ios_in_progress;
unsigned int total_time;
unsigned int weighted_time;
unsigned long discard_completed;
unsigned long discard_merges;
unsigned long discard_sectors;
unsigned long discard_time;
unsigned long flush_completed;
unsigned long flush_time;

double read_merges_diff;
double write_merges_diff;
double discard_merges_diff;
double read_completed_diff;
double write_completed_diff;
double discard_completed_diff;
double flush_completed_diff;

double read_diff;
double write_diff;
double discard_diff;

double average_service_time;
double average_queue_length;
double average_request_size;
double read_average_request_size;
double write_average_request_size;
double discard_average_request_size;
double await;
double read_await;
double write_await;
double discard_await;
double flush_await;

double util;

bool extended;

size_t slave_size;
char slaves[64];
} device_stat;


/*
 * Feature-detection helpers for a device_stat entry.
 *
 * device_stat.fields holds the number of columns successfully parsed
 * from the device's /proc/diskstats line:
 *   >= 12 - full extended statistics (merges, I/O times, queue length, ...)
 *   >= 16 - discard statistics were also present (presumably newer
 *           kernels - confirm against Documentation/admin-guide/iostats.rst)
 *   >= 18 - flush statistics were also present
 * A partition-style 5-field line sets fields == 5 and matches none of these.
 *
 * The parameter is parenthesized so arguments such as *p or a ternary
 * expression expand safely (PRE01-C).
 */
#define HAS_EXTENDED_STATS(e) ((e).fields >= 12)
#define HAS_DISCARD_STATS(e) ((e).fields >= 16)
#define HAS_FLUSH_STATS(e) ((e).fields >= 18)

typedef struct {
unsigned long long uptime;
device_stat *values;
Expand Down

0 comments on commit e8e2951

Please sign in to comment.