Skip to content

Commit

Permalink
external/trace: Introduce structure for reading traces
Browse files Browse the repository at this point in the history
Currently the trace_get and trace_empty functions operate on a tracebuf
struct. This requires being able to write to that struct.  If dump_trace
were able to use these functions it would be convenient as it would
reduce code duplication and these functions are already unit tested.
However, a tracebuf accessed via mmapping will not be writable.

The tracebuf struct fields that need to be written are only to be used
by a reader.  The fields are never used by a writer. Add a new structure
for readers, trace_reader, which contains these fields and remove them
from the tracebuf struct.  Change trace_get and trace_empty to use the
trace_reader struct and update the unit tests accordingly.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
  • Loading branch information
iamjpn authored and stewartsmith committed May 20, 2019
1 parent 8c47630 commit 3f61c83
Show file tree
Hide file tree
Showing 4 changed files with 73 additions and 48 deletions.
46 changes: 25 additions & 21 deletions core/test/run-trace.c
Expand Up @@ -102,9 +102,9 @@ extern struct dt_node *opal_node;

#include "../trace.c"

#define rmb() lwsync()

#include "../external/trace/trace.c"
static struct trace_reader trace_readers[CPUS];
struct trace_reader *my_trace_reader;
#include "../device.c"

char __rodata_start[1], __rodata_end[1];
Expand Down Expand Up @@ -185,6 +185,8 @@ static void test_parallel(void)
fake_cpus[i].trace->tb.buf_size = cpu_to_be64(TBUF_SZ);
fake_cpus[i].trace->tb.max_size = cpu_to_be32(sizeof(union trace));
fake_cpus[i].is_secondary = false;
memset(&trace_readers[i], 0, sizeof(struct trace_reader));
trace_readers[i].tb = &fake_cpus[i].trace->tb;
}

for (i = 0; i < CPUS; i++) {
Expand All @@ -199,7 +201,7 @@ static void test_parallel(void)
union trace t;

for (i = 0; i < CPUS; i++) {
if (trace_get(&t, &fake_cpus[(i+last) % CPUS].trace->tb))
if (trace_get(&t, &trace_readers[(i+last) % CPUS]))
break;
}

Expand Down Expand Up @@ -276,17 +278,19 @@ int main(void)
fake_cpus[i].primary = &fake_cpus[i & ~0x1];
}
my_fake_cpu = &fake_cpus[0];
my_trace_reader = &trace_readers[0];
init_trace_buffers();

for (i = 0; i < CPUS; i++) {
assert(trace_empty(&fake_cpus[i].trace->tb));
assert(!trace_get(&trace, &fake_cpus[i].trace->tb));
trace_readers[i].tb = &fake_cpus[i].trace->tb;
assert(trace_empty(&trace_readers[i]));
assert(!trace_get(&trace, &trace_readers[i]));
}

assert(sizeof(trace.hdr) % 8 == 0);
timestamp = 1;
trace_add(&minimal, 100, sizeof(trace.hdr));
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
assert(be64_to_cpu(trace.hdr.timestamp) == timestamp);

Expand All @@ -296,19 +300,19 @@ int main(void)
trace_add(&minimal, 99 + (i%2), sizeof(trace.hdr));
}

assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
/* First one must be overflow marker. */
assert(trace.hdr.type == TRACE_OVERFLOW);
assert(trace.hdr.len_div_8 * 8 == sizeof(trace.overflow));
assert(be64_to_cpu(trace.overflow.bytes_missed) == minimal.hdr.len_div_8 * 8);

for (i = 0; i < TBUF_SZ / (minimal.hdr.len_div_8 * 8); i++) {
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
assert(be64_to_cpu(trace.hdr.timestamp) == i+1);
assert(trace.hdr.type == 99 + ((i+1)%2));
}
assert(!trace_get(&trace, &my_fake_cpu->trace->tb));
assert(!trace_get(&trace, my_trace_reader));

/* Now put in some weird-length ones, to test overlap.
* Last power of 2, minus 8. */
Expand All @@ -317,12 +321,12 @@ int main(void)
timestamp = i;
trace_add(&large, 100 + (i%2), (1 << (j-1)));
}
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(trace.hdr.type == TRACE_OVERFLOW);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(trace.hdr.len_div_8 == large.hdr.len_div_8);
i = be64_to_cpu(trace.hdr.timestamp);
while (trace_get(&trace, &my_fake_cpu->trace->tb))
while (trace_get(&trace, my_trace_reader))
assert(be64_to_cpu(trace.hdr.timestamp) == ++i);

/* Test repeats. */
Expand All @@ -335,30 +339,30 @@ int main(void)
timestamp = i+1;
trace_add(&minimal, 101, sizeof(trace.hdr));

assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(trace.hdr.timestamp == 0);
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
assert(trace.hdr.type == 100);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(trace.hdr.type == TRACE_REPEAT);
assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
assert(be16_to_cpu(trace.repeat.num) == 65535);
assert(be64_to_cpu(trace.repeat.timestamp) == 65535);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(be64_to_cpu(trace.hdr.timestamp) == 65536);
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
assert(trace.hdr.type == 100);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(trace.hdr.type == TRACE_REPEAT);
assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
assert(be16_to_cpu(trace.repeat.num) == 1);
assert(be64_to_cpu(trace.repeat.timestamp) == 65537);

assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(be64_to_cpu(trace.hdr.timestamp) == 65538);
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
assert(trace.hdr.type == 101);
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(trace.hdr.type == TRACE_REPEAT);
assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
assert(be16_to_cpu(trace.repeat.num) == 1);
Expand All @@ -367,15 +371,15 @@ int main(void)
/* Now, test adding repeat while we're reading... */
timestamp = 0;
trace_add(&minimal, 100, sizeof(trace.hdr));
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
assert(be64_to_cpu(trace.hdr.timestamp) == 0);
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
assert(trace.hdr.type == 100);

for (i = 1; i < TBUF_SZ; i++) {
timestamp = i;
trace_add(&minimal, 100, sizeof(trace.hdr));
assert(trace_get(&trace, &my_fake_cpu->trace->tb));
assert(trace_get(&trace, my_trace_reader));
if (i % 65536 == 0) {
assert(trace.hdr.type == 100);
assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
Expand All @@ -385,7 +389,7 @@ int main(void)
assert(be16_to_cpu(trace.repeat.num) == 1);
}
assert(be64_to_cpu(trace.repeat.timestamp) == i);
assert(!trace_get(&trace, &my_fake_cpu->trace->tb));
assert(!trace_get(&trace, my_trace_reader));
}

for (i = 0; i < CPUS; i++)
Expand Down
49 changes: 28 additions & 21 deletions external/trace/trace.c
Expand Up @@ -17,57 +17,64 @@
#include <external/trace/trace.h>
#include "../ccan/endian/endian.h"
#include "../ccan/short_types/short_types.h"
#include "trace.h"
#include <trace_types.h>
#include <errno.h>

bool trace_empty(const struct tracebuf *tb)
#if defined(__powerpc__) || defined(__powerpc64__)
#define rmb() lwsync()
#else
#define rmb()
#endif

bool trace_empty(const struct trace_reader *tr)
{
const struct trace_repeat *rep;

if (tb->rpos == tb->end)
if (tr->rpos == be64_to_cpu(tr->tb->end))
return true;

/*
* If we have a single element only, and it's a repeat buffer
* we've already seen every repeat for (yet which may be
* incremented in future), we're also empty.
*/
rep = (void *)tb->buf + be64_to_cpu(tb->rpos) % be64_to_cpu(tb->buf_size);
if (be64_to_cpu(tb->end) != be64_to_cpu(tb->rpos) + sizeof(*rep))
rep = (void *)tr->tb->buf + tr->rpos % be64_to_cpu(tr->tb->buf_size);
if (be64_to_cpu(tr->tb->end) != tr->rpos + sizeof(*rep))
return false;

if (rep->type != TRACE_REPEAT)
return false;

if (be16_to_cpu(rep->num) != be32_to_cpu(tb->last_repeat))
if (be16_to_cpu(rep->num) != tr->last_repeat)
return false;

return true;
}

/* You can't read in parallel, so some locking required in caller. */
bool trace_get(union trace *t, struct tracebuf *tb)
bool trace_get(union trace *t, struct trace_reader *tr)
{
u64 start, rpos;
size_t len;

len = sizeof(*t) < be32_to_cpu(tb->max_size) ? sizeof(*t) :
be32_to_cpu(tb->max_size);
len = sizeof(*t) < be32_to_cpu(tr->tb->max_size) ? sizeof(*t) :
be32_to_cpu(tr->tb->max_size);

if (trace_empty(tb))
if (trace_empty(tr))
return false;

again:
/*
* The actual buffer is slightly larger than tbsize, so this
* memcpy is always valid.
*/
memcpy(t, tb->buf + be64_to_cpu(tb->rpos) % be64_to_cpu(tb->buf_size), len);
memcpy(t, tr->tb->buf + tr->rpos % be64_to_cpu(tr->tb->buf_size), len);

rmb(); /* read barrier, so we read tb->start after copying record. */
rmb(); /* read barrier, so we read tr->tb->start after copying record. */

start = be64_to_cpu(tb->start);
rpos = be64_to_cpu(tb->rpos);
start = be64_to_cpu(tr->tb->start);
rpos = tr->rpos;

/* Now, was that overwritten? */
if (rpos < start) {
Expand All @@ -76,7 +83,7 @@ bool trace_get(union trace *t, struct tracebuf *tb)
t->overflow.type = TRACE_OVERFLOW;
t->overflow.len_div_8 = sizeof(t->overflow) / 8;
t->overflow.bytes_missed = cpu_to_be64(start - rpos);
tb->rpos = cpu_to_be64(start);
tr->rpos = start;
return true;
}

Expand All @@ -85,27 +92,27 @@ bool trace_get(union trace *t, struct tracebuf *tb)
u32 num = be16_to_cpu(t->repeat.num);

/* In case we've read some already... */
t->repeat.num = cpu_to_be16(num - be32_to_cpu(tb->last_repeat));
t->repeat.num = cpu_to_be16(num - tr->last_repeat);

/* Record how many repeats we saw this time. */
tb->last_repeat = cpu_to_be32(num);
tr->last_repeat = num;

/* Don't report an empty repeat buffer. */
if (t->repeat.num == 0) {
/*
* This can't be the last buffer, otherwise
* trace_empty would have returned true.
*/
assert(be64_to_cpu(tb->end) >
assert(be64_to_cpu(tr->tb->end) >
rpos + t->hdr.len_div_8 * 8);
/* Skip to next entry. */
tb->rpos = cpu_to_be64(rpos + t->hdr.len_div_8 * 8);
tb->last_repeat = 0;
tr->rpos = rpos + t->hdr.len_div_8 * 8;
tr->last_repeat = 0;
goto again;
}
} else {
tb->last_repeat = 0;
tb->rpos = cpu_to_be64(rpos + t->hdr.len_div_8 * 8);
tr->last_repeat = 0;
tr->rpos = rpos + t->hdr.len_div_8 * 8;
}

return true;
Expand Down
22 changes: 20 additions & 2 deletions external/trace/trace.h
Expand Up @@ -13,8 +13,26 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef E_TRACE_H
#define E_TRACE_H

#include <stdbool.h>
#include <types.h>
#include <trace.h>
#include <trace_types.h>

/* Per-reader cursor state for consuming a tracebuf.
 * Kept separate from struct tracebuf so a reader can operate on a
 * read-only (e.g. mmapped) buffer without writing to it. */
struct trace_reader {
	/* This is where the reader is up to. */
	u64 rpos;
	/* If the last one we read was a repeat, this shows how many. */
	u32 last_repeat;
	/* The trace buffer this reader consumes entries from.
	 * NOTE(review): assumed non-NULL by trace_get/trace_empty — confirm
	 * callers always initialize it. */
	struct tracebuf *tb;
};

/* Is this tracebuf empty? */
bool trace_empty(const struct tracebuf *tracebuf);
bool trace_empty(const struct trace_reader *tr);

/* Get the next trace from this buffer (false if empty). */
bool trace_get(union trace *t, struct tracebuf *tb);
bool trace_get(union trace *t, struct trace_reader *tr);

#endif
4 changes: 0 additions & 4 deletions include/trace_types.h
Expand Up @@ -36,10 +36,6 @@ struct tracebuf {
__be64 end;
/* This is where the writer wrote to previously. */
__be64 last;
/* This is where the reader is up to. */
__be64 rpos;
/* If the last one we read was a repeat, this shows how many. */
__be32 last_repeat;
/* Maximum possible size of a record. */
__be32 max_size;

Expand Down

0 comments on commit 3f61c83

Please sign in to comment.