net: buf: add linearize, append_bytes and skip APIs to net_buf
This change moves the logic for linearize and append_bytes from
the net_pkt sources into the net_buf sources, where it can be
made available to layers which do not depend on net_pkt. It also
adds a new net_buf_skip() function which can be used to iterate
through a list of net_buf (freeing the buffers as it goes).

For the append_bytes function to be generic in nature, a net_buf
allocator callback was created.  Callers of append_bytes pass in
the callback which determines where the resulting net_buf is
allocated from.

Also, the dst buffer in linearize is now cleared prior to the copy
(this is an addition relative to the code moved from net_pkt).

In order to preserve existing callers, the original functions are
left in the net_pkt layer, but now merely act as wrappers.

Signed-off-by: Michael Scott <mike@foundries.io>
mike-scott authored and jukkar committed Aug 6, 2018
1 parent 4016136 commit db577f0
Showing 3 changed files with 172 additions and 71 deletions.
84 changes: 84 additions & 0 deletions include/net/buf.h
@@ -1299,6 +1299,90 @@ struct net_buf *net_buf_frag_del_debug(struct net_buf *parent,
struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag);
#endif

/**
* @brief Copy len bytes from src starting from offset to dst buffer
*
* This routine assumes that dst is large enough to store @a len bytes
* starting from offset at src.
*
* @param dst Destination buffer
* @param dst_len Destination buffer max length
* @param src Source buffer that may be fragmented
* @param offset Starting point to copy from
* @param len Number of bytes to copy
* @return number of bytes copied if everything is ok
* @return -ENOMEM on error
*/
int net_buf_linearize(void *dst, size_t dst_len,
struct net_buf *src, u16_t offset, u16_t len);
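
For illustration only (not part of the diff): a minimal sketch of flattening a
fragmented chain into a local array with net_buf_linearize(). The helper name,
buffer size, and length value below are hypothetical.

#include <net/buf.h>

/* Hypothetical helper: copy up to 'total_len' bytes of a fragmented
 * chain into a flat stack buffer, starting at offset 0.
 */
static int flatten_chain(struct net_buf *chain, u16_t total_len)
{
	u8_t flat[128];
	int copied;

	/* net_buf_linearize() clears flat[] before copying and walks the
	 * fragment chain until 'total_len' bytes have been copied.
	 */
	copied = net_buf_linearize(flat, sizeof(flat), chain, 0, total_len);
	if (copied < 0) {
		/* -ENOMEM: flat[] is too small or the chain holds fewer
		 * than 'total_len' bytes.
		 */
		return copied;
	}

	/* 'copied' bytes of contiguous data are now available in flat[] */
	return copied;
}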

/**
* @typedef net_buf_allocator_cb
* @brief Network buffer allocator callback.
*
* @details The allocator callback is called when net_buf_append_bytes
* needs to allocate a new net_buf.
*
* @param timeout Affects the action taken should the net buf pool be empty.
* If K_NO_WAIT, then return immediately. If K_FOREVER, then
* wait as long as necessary. Otherwise, wait up to the specified
* number of milliseconds before timing out.
* @param user_data The user data given in net_buf_append_bytes call.
* @return pointer to allocated net_buf or NULL on error.
*/
typedef struct net_buf *(*net_buf_allocator_cb)(s32_t timeout, void *user_data);

/**
* @brief Append data to a list of net_buf
*
* @details Append data to a net_buf. If there is not enough space in the
* net_buf, more net_bufs will be added to the chain, unless no free net_bufs
* are available and the timeout expires.
*
* @param buf Network buffer.
* @param len Total length of input data
* @param value Data to be added
* @param timeout Timeout is passed to the net_buf allocator callback.
* @param allocate_cb When a new net_buf is required, use this callback.
* @param user_data A user data pointer to be supplied to the allocate_cb.
* This pointer can be anything, from a mem_pool to a net_pkt; the
* logic is left up to the allocate_cb function.
*
* @return Length of data actually added. This may be less than the input
* length if a timeout other than K_FOREVER was used and there
* were no free fragments in a pool to accommodate all data.
*/
u16_t net_buf_append_bytes(struct net_buf *buf, u16_t len,
const u8_t *value, s32_t timeout,
net_buf_allocator_cb allocate_cb, void *user_data);
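
For illustration only (not part of the diff): a sketch of the caller-supplied
allocator pattern. The pool name, pool sizes, and function names below are
hypothetical; fragments are simply drawn from a net_buf pool via
net_buf_alloc().

#include <zephyr.h>
#include <net/buf.h>

/* Hypothetical pool: 8 buffers of 128 bytes each. */
NET_BUF_POOL_DEFINE(tx_pool, 8, 128, 0, NULL);

/* Allocator callback: decides where new fragments come from. */
static struct net_buf *tx_alloc_cb(s32_t timeout, void *user_data)
{
	ARG_UNUSED(user_data);

	return net_buf_alloc(&tx_pool, timeout);
}

/* Append a payload to an existing chain, growing it from tx_pool as
 * needed. The return value may be less than payload_len if the pool
 * runs out of free buffers.
 */
static u16_t send_payload(struct net_buf *chain,
			  const u8_t *payload, u16_t payload_len)
{
	return net_buf_append_bytes(chain, payload_len, payload,
				    K_NO_WAIT, tx_alloc_cb, NULL);
}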

/**
* @brief Skip N number of bytes in a net_buf
*
* @details Skip N bytes starting from the fragment's offset. If the data
* spans multiple fragments, this function skips across fragments until
* N bytes have been skipped. Any fully skipped buffers are removed from
* the net_buf list.
*
* @param buf Network buffer.
* @param len Total length of data to be skipped.
*
* @return Pointer to the fragment holding the remaining data after the
* skip, or NULL if all data was skipped or the fragment chain
* was exhausted.
*/
static inline struct net_buf *net_buf_skip(struct net_buf *buf, u16_t len)
{
while (buf && len--) {
net_buf_pull_u8(buf);
if (!buf->len) {
buf = net_buf_frag_del(NULL, buf);
}
}

return buf;
}
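
For illustration only (not part of the diff): a sketch of skipping a
fixed-size header with net_buf_skip() and reading the byte that follows.
The 8-byte header length and function name are hypothetical.

#include <errno.h>
#include <net/buf.h>

static int read_after_header(struct net_buf *chain, u8_t *out)
{
	/* Discard a hypothetical 8-byte header; fragments that become
	 * empty along the way are removed from the list.
	 */
	chain = net_buf_skip(chain, 8);
	if (!chain) {
		/* The chain held no data beyond the header (or less). */
		return -ENODATA;
	}

	*out = net_buf_pull_u8(chain);
	return 0;
}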

/** @brief Calculate amount of bytes stored in fragments.
*
* Calculates the total amount of data stored in the given buffer and the
81 changes: 81 additions & 0 deletions subsys/net/buf.c
@@ -672,6 +672,87 @@ struct net_buf *net_buf_frag_del(struct net_buf *parent, struct net_buf *frag)
return next_frag;
}

int net_buf_linearize(void *dst, size_t dst_len, struct net_buf *src,
u16_t offset, u16_t len)
{
struct net_buf *frag;
u16_t to_copy;
u16_t copied;

if (dst_len < (size_t)len) {
return -ENOMEM;
}

frag = src;

/* clear dst */
memset(dst, 0, dst_len);

/* find the right fragment to start copying from */
while (frag && offset >= frag->len) {
offset -= frag->len;
frag = frag->frags;
}

/* traverse the fragment chain until len bytes are copied */
copied = 0;
while (frag && len > 0) {
to_copy = min(len, frag->len - offset);
memcpy(dst + copied, frag->data + offset, to_copy);

copied += to_copy;

/* to_copy is always <= len */
len -= to_copy;
frag = frag->frags;

/* after the first iteration, this value will be 0 */
offset = 0;
}

if (len > 0) {
return -ENOMEM;
}

return copied;
}

/* This helper routine will append multiple bytes, if there is no place for
* the data in current fragment then create new fragment and add it to
* the buffer. It assumes that the buffer has at least one fragment.
*/
u16_t net_buf_append_bytes(struct net_buf *buf, u16_t len,
const u8_t *value, s32_t timeout,
net_buf_allocator_cb allocate_cb, void *user_data)
{
struct net_buf *frag = net_buf_frag_last(buf);
u16_t added_len = 0;

do {
u16_t count = min(len, net_buf_tailroom(frag));
void *data = net_buf_add(frag, count);

memcpy(data, value, count);
len -= count;
added_len += count;
value += count;

if (len == 0) {
return added_len;
}

frag = allocate_cb(timeout, user_data);
if (!frag) {
return added_len;
}

net_buf_frag_add(buf, frag);
} while (1);

/* Unreachable */
return 0;
}

#if defined(CONFIG_NET_BUF_SIMPLE_LOG)
#define NET_BUF_SIMPLE_DBG(fmt, ...) NET_BUF_DBG(fmt, ##__VA_ARGS__)
#define NET_BUF_SIMPLE_ERR(fmt, ...) NET_BUF_ERR(fmt, ##__VA_ARGS__)
78 changes: 7 additions & 71 deletions subsys/net/ip/net_pkt.c
@@ -1079,43 +1079,7 @@ int net_frag_linear_copy(struct net_buf *dst, struct net_buf *src,
int net_frag_linearize(u8_t *dst, size_t dst_len, struct net_pkt *src,
u16_t offset, u16_t len)
{
struct net_buf *frag;
u16_t to_copy;
u16_t copied;

if (dst_len < (size_t)len) {
return -ENOMEM;
}

frag = src->frags;

/* find the right fragment to start copying from */
while (frag && offset >= frag->len) {
offset -= frag->len;
frag = frag->frags;
}

/* traverse the fragment chain until len bytes are copied */
copied = 0;
while (frag && len > 0) {
to_copy = min(len, frag->len - offset);
memcpy(dst + copied, frag->data + offset, to_copy);

copied += to_copy;

/* to_copy is always <= len */
len -= to_copy;
frag = frag->frags;

/* after the first iteration, this value will be 0 */
offset = 0;
}

if (len > 0) {
return -ENOMEM;
}

return copied;
return net_buf_linearize(dst, dst_len, src->frags, offset, len);
}

bool net_pkt_compact(struct net_pkt *pkt)
@@ -1178,40 +1142,10 @@ bool net_pkt_compact(struct net_pkt *pkt)
return true;
}

/* This helper routine will append multiple bytes, if there is no place for
* the data in current fragment then create new fragment and add it to
* the buffer. It assumes that the buffer has at least one fragment.
*/
static inline u16_t net_pkt_append_bytes(struct net_pkt *pkt,
const u8_t *value,
u16_t len, s32_t timeout)
static inline struct net_buf *net_pkt_append_allocator(s32_t timeout,
void *user_data)
{
struct net_buf *frag = net_buf_frag_last(pkt->frags);
u16_t added_len = 0;

do {
u16_t count = min(len, net_buf_tailroom(frag));
void *data = net_buf_add(frag, count);

memcpy(data, value, count);
len -= count;
added_len += count;
value += count;

if (len == 0) {
return added_len;
}

frag = net_pkt_get_frag(pkt, timeout);
if (!frag) {
return added_len;
}

net_pkt_frag_add(pkt, frag);
} while (1);

/* Unreachable */
return 0;
return net_pkt_get_frag((struct net_pkt *)user_data, timeout);
}

u16_t net_pkt_append(struct net_pkt *pkt, u16_t len, const u8_t *data,
@@ -1256,7 +1190,9 @@ u16_t net_pkt_append(struct net_pkt *pkt, u16_t len, const u8_t *data,
}
}

appended = net_pkt_append_bytes(pkt, data, len, timeout);
appended = net_buf_append_bytes(net_buf_frag_last(pkt->frags),
len, data, timeout,
net_pkt_append_allocator, pkt);

if (ctx) {
pkt->data_len -= appended;