Skip to content
Permalink
Browse files

Bluetooth: Add dedicated pool for HCI_Num_Completed_Packets HCI event

This event is a priority one, so it's not safe to have it use the RX
buffer pool which may be depleted due to non-priority events (e.g.
advertising events). Since the event is consumed synchronously it's
safe to have a single-buffer pool for it. Also introduce a new
bt_buf_get_evt() API for HCI drivers to simplify the driver-side code,
this effectively also deprecates bt_buf_get_cmd_complete() which now
has no in-tree HCI driver users anymore.

Fixes #16864

Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
  • Loading branch information...
jhedberg committed Jun 17, 2019
1 parent 766ad9f commit fc2fcd10cf482023bee9b2b97ca74ed21dd09f8b
@@ -161,16 +161,11 @@ static struct net_buf *get_rx(int timeout)
{
BT_DBG("type 0x%02x, evt 0x%02x", rx.type, rx.evt.evt);

if (rx.type == H4_EVT && (rx.evt.evt == BT_HCI_EVT_CMD_COMPLETE ||
rx.evt.evt == BT_HCI_EVT_CMD_STATUS)) {
return bt_buf_get_cmd_complete(timeout);
if (rx.type == H4_EVT) {
return bt_buf_get_evt(rx.evt.evt, timeout);
}

if (rx.type == H4_ACL) {
return bt_buf_get_rx(BT_BUF_ACL_IN, timeout);
} else {
return bt_buf_get_rx(BT_BUF_EVT, timeout);
}
return bt_buf_get_rx(BT_BUF_ACL_IN, timeout);
}

static void rx_thread(void *p1, void *p2, void *p3)
@@ -408,16 +408,7 @@ static inline struct net_buf *get_evt_buf(u8_t evt)
{
struct net_buf *buf;

switch (evt) {
case BT_HCI_EVT_CMD_COMPLETE:
case BT_HCI_EVT_CMD_STATUS:
buf = bt_buf_get_cmd_complete(K_NO_WAIT);
break;
default:
buf = bt_buf_get_rx(BT_BUF_EVT, K_NO_WAIT);
break;
}

buf = bt_buf_get_evt(evt, K_NO_WAIT);
if (buf) {
net_buf_add_u8(h5.rx_buf, evt);
}
@@ -114,12 +114,8 @@ void TM_EvtReceivedCb(TL_EvtPacket_t *hcievt)
BT_ERR("Unknown evtcode type 0x%02x",
hcievt->evtserial.evt.evtcode);
goto out;
case BT_HCI_EVT_CMD_COMPLETE:
case BT_HCI_EVT_CMD_STATUS:
buf = bt_buf_get_cmd_complete(K_FOREVER);
break;
default:
buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
buf = bt_buf_get_evt(evtserial.evt.evtcode, K_FOREVER);
break;
}
net_buf_add_mem(buf, &hcievt->evtserial.evt,
@@ -353,12 +353,9 @@ static void bt_spi_rx_thread(void)
/* Vendor events are currently unsupported */
bt_spi_handle_vendor_evt(rxmsg);
continue;
case BT_HCI_EVT_CMD_COMPLETE:
case BT_HCI_EVT_CMD_STATUS:
buf = bt_buf_get_cmd_complete(K_FOREVER);
break;
default:
buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
buf = bt_buf_get_evt(rxmsg[EVT_HEADER_EVENT],
K_FOREVER);
break;
}

@@ -57,16 +57,11 @@ static int bt_dev_index = -1;

/* Allocate an RX buffer for a packet read from the user-channel HCI
 * socket, based on the H:4 packet-type byte in buf[0].
 *
 * Defect fixed: the scrape merged the diff's removed and added lines,
 * leaving both the old CMD_COMPLETE/CMD_STATUS special case and the new
 * bt_buf_get_evt() call (plus a duplicated ACL/EVT tail) in the same
 * function. This is the reconstructed post-commit version.
 *
 * @param buf Raw packet; buf[0] is the H:4 type, buf[1] the event code
 *            when the packet is an event.
 * @return A new buffer (blocks until one is available).
 */
static struct net_buf *get_rx(const u8_t *buf)
{
	if (buf[0] == H4_EVT) {
		/* Event-code-aware allocation so priority events use their
		 * dedicated pool.
		 */
		return bt_buf_get_evt(buf[1], K_FOREVER);
	}

	return bt_buf_get_rx(BT_BUF_ACL_IN, K_FOREVER);
}

static bool uc_ready(void)
@@ -64,6 +64,18 @@ struct net_buf *bt_buf_get_rx(enum bt_buf_type type, s32_t timeout);
*/
struct net_buf *bt_buf_get_cmd_complete(s32_t timeout);

/** Allocate a buffer for an HCI Event
*
* This will set the buffer type so bt_buf_set_type() does not need to
* be explicitly called before bt_recv_prio() or bt_recv().
*
* @param evt HCI event code
* @param timeout Timeout in milliseconds, or one of the special values
* K_NO_WAIT and K_FOREVER.
* @return A new buffer.
*/
struct net_buf *bt_buf_get_evt(u8_t evt, s32_t timeout);

/** Set the buffer type
*
* @param buf Bluetooth buffer
@@ -120,7 +120,7 @@ void *hci_cmd_complete(struct net_buf **buf, u8_t plen)
{
struct bt_hci_evt_cmd_complete *cc;

*buf = bt_buf_get_cmd_complete(K_FOREVER);
*buf = bt_buf_get_evt(BT_HCI_EVT_CMD_COMPLETE, K_FOREVER);

hci_evt_create(*buf, BT_HCI_EVT_CMD_COMPLETE, sizeof(*cc) + plen);

@@ -137,7 +137,7 @@ static struct net_buf *cmd_status(u8_t status)
struct bt_hci_evt_cmd_status *cs;
struct net_buf *buf;

buf = bt_buf_get_cmd_complete(K_FOREVER);
buf = bt_buf_get_evt(BT_HCI_EVT_CMD_STATUS, K_FOREVER);
hci_evt_create(buf, BT_HCI_EVT_CMD_STATUS, sizeof(*cs));

cs = net_buf_add(buf, sizeof(*cs));
@@ -90,7 +90,8 @@ static void prio_recv_thread(void *p1, void *p2, void *p3)
#if defined(CONFIG_BT_CONN)
struct net_buf *buf;

buf = bt_buf_get_rx(BT_BUF_EVT, K_FOREVER);
buf = bt_buf_get_evt(BT_HCI_EVT_NUM_COMPLETED_PACKETS,
K_FOREVER);
hci_num_cmplt_encode(buf, handle, num_cmplt);
BT_DBG("Num Complete: 0x%04x:%u", handle, num_cmplt);
bt_recv_prio(buf);
@@ -138,6 +138,16 @@ NET_BUF_POOL_DEFINE(hci_cmd_pool, CONFIG_BT_HCI_CMD_COUNT,
/* General-purpose RX pool shared by incoming ACL data and non-priority
 * HCI events.
 */
NET_BUF_POOL_DEFINE(hci_rx_pool, CONFIG_BT_RX_BUF_COUNT,
BT_BUF_RX_SIZE, BT_BUF_USER_DATA_MIN, NULL);

#if defined(CONFIG_BT_CONN)
/* Dedicated pool for HCI_Number_of_Completed_Packets. This event is always
 * consumed synchronously by bt_recv_prio() so a single buffer is enough.
 * Having a dedicated pool for it ensures that exhaustion of the RX pool
 * cannot block the delivery of this priority event.
 */
NET_BUF_POOL_DEFINE(num_complete_pool, 1, BT_BUF_RX_SIZE,
BT_BUF_USER_DATA_MIN, NULL);
#endif /* CONFIG_BT_CONN */

struct event_handler {
u8_t event;
u8_t min_len;
@@ -5662,6 +5672,31 @@ struct net_buf *bt_buf_get_cmd_complete(s32_t timeout)
return bt_buf_get_rx(BT_BUF_EVT, timeout);
}

/* Allocate a buffer for an incoming HCI event, choosing the pool by
 * event code. Num_Completed_Packets comes from its dedicated
 * single-buffer pool (it is consumed synchronously by bt_recv_prio(), so
 * RX-pool exhaustion must not delay it); Command Complete/Status use the
 * command-complete path; everything else uses the normal RX pool. The
 * buffer type is set so callers need not call bt_buf_set_type().
 *
 * @param evt     HCI event code.
 * @param timeout Allocation timeout, or K_NO_WAIT / K_FOREVER.
 * @return A new buffer, or NULL on allocation failure/timeout.
 */
struct net_buf *bt_buf_get_evt(u8_t evt, s32_t timeout)
{
#if defined(CONFIG_BT_CONN)
	if (evt == BT_HCI_EVT_NUM_COMPLETED_PACKETS) {
		struct net_buf *nc_buf = net_buf_alloc(&num_complete_pool,
						       timeout);

		if (nc_buf) {
			net_buf_reserve(nc_buf, CONFIG_BT_HCI_RESERVE);
			bt_buf_set_type(nc_buf, BT_BUF_EVT);
		}

		return nc_buf;
	}
#endif /* CONFIG_BT_CONN */

	if (evt == BT_HCI_EVT_CMD_COMPLETE || evt == BT_HCI_EVT_CMD_STATUS) {
		return bt_buf_get_cmd_complete(timeout);
	}

	return bt_buf_get_rx(BT_BUF_EVT, timeout);
}

#if defined(CONFIG_BT_BREDR)
static int br_start_inquiry(const struct bt_br_discovery_param *param)
{
@@ -84,7 +84,7 @@ static void send_cmd_status(u16_t opcode, u8_t status)

BT_DBG("opcode %x status %x", opcode, status);

buf = bt_buf_get_cmd_complete(K_FOREVER);
buf = bt_buf_get_evt(BT_HCI_EVT_CMD_STATUS, K_FOREVER);
bt_buf_set_type(buf, BT_BUF_EVT);

hdr = net_buf_add(buf, sizeof(*hdr));
@@ -72,6 +72,18 @@ struct net_buf *bt_buf_get_cmd_complete(s32_t timeout)
return buf;
}

/* Raw-mode implementation of bt_buf_get_evt(): all events share the
 * single RX pool, so the event code does not influence pool selection
 * here. The buffer type is pre-set to BT_BUF_EVT for the caller.
 *
 * @param evt     HCI event code (unused in raw mode; kept for API
 *                compatibility with the host-stack implementation).
 * @param timeout Allocation timeout, or K_NO_WAIT / K_FOREVER.
 * @return A new buffer, or NULL on allocation failure/timeout.
 */
struct net_buf *bt_buf_get_evt(u8_t evt, s32_t timeout)
{
	struct net_buf *buf;

	/* Fix: explicitly discard the unused parameter so the build stays
	 * clean under -Wunused-parameter.
	 */
	(void)evt;

	buf = net_buf_alloc(&hci_rx_pool, timeout);
	if (buf) {
		bt_buf_set_type(buf, BT_BUF_EVT);
	}

	return buf;
}

int bt_recv(struct net_buf *buf)
{
BT_DBG("buf %p len %u", buf, buf->len);
@@ -53,7 +53,7 @@ static void *cmd_complete(struct net_buf **buf, u8_t plen, u16_t opcode)
{
struct bt_hci_evt_cmd_complete *cc;

*buf = bt_buf_get_cmd_complete(K_FOREVER);
*buf = bt_buf_get_evt(BT_HCI_EVT_CMD_COMPLETE, K_FOREVER);
evt_create(*buf, BT_HCI_EVT_CMD_COMPLETE, sizeof(*cc) + plen);
cc = net_buf_add(*buf, sizeof(*cc));
cc->ncmd = 1U;

0 comments on commit fc2fcd1

Please sign in to comment.
You can’t perform that action at this time.