-
Notifications
You must be signed in to change notification settings - Fork 651
/
main.h
616 lines (545 loc) · 14.5 KB
/
main.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
/*
* Broadcom NetXtreme-E User Space RoCE driver
*
* Copyright (c) 2015-2017, Broadcom. All rights reserved. The term
* Broadcom refers to Broadcom Limited and/or its subsidiaries.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Description: Basic device data structures needed for book-keeping
*/
#ifndef __MAIN_H__
#define __MAIN_H__
#include <endian.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/param.h>
#include <time.h>

#include <infiniband/driver.h>
#include <util/mmio.h>
#include <util/udma_barrier.h>
#include <util/util.h>

#include "bnxt_re-abi.h"
#include "memory.h"
#include "flush.h"
#define DEV "bnxt_re : "
#define BNXT_RE_UD_QP_HW_STALL 0x400000
#define CHIP_NUM_57508 0x1750
#define CHIP_NUM_57504 0x1751
#define CHIP_NUM_57502 0x1752
#define CHIP_NUM_58818 0xd818
#define CHIP_NUM_57608 0x1760
#define BNXT_RE_MAX_DO_PACING 0xFFFF
#define BNXT_NSEC_PER_SEC 1000000000UL
#define BNXT_RE_PAGE_MASK(pg_size) (~((__u64)(pg_size) - 1))
/* Identity of the underlying chip, as reported by the kernel driver. */
struct bnxt_re_chip_ctx {
__u16 chip_num;		/* device id, e.g. CHIP_NUM_57508 */
__u8 chip_rev;
__u8 chip_metal;
__u8 gen_p5_p7;		/* non-zero on P5/P7 generation parts — TODO confirm */
__u8 gen_p7;		/* non-zero on P7 generation parts — TODO confirm */
};
/*
 * Doorbell page info. The wc* members are the write-combining (push)
 * variants of the index/page — presumably only valid when push mode is
 * enabled; verify against the kernel ABI.
 */
struct bnxt_re_dpi {
__u32 dpindx;		/* doorbell page index */
__u32 wcdpi;		/* write-combining doorbell page index */
__u64 *dbpage;		/* mapped doorbell page */
__u64 *wcdbpg;		/* mapped write-combining doorbell page */
};
/* Protection domain: the verbs PD plus the HW PD id. */
struct bnxt_re_pd {
struct ibv_pd ibvpd;
uint32_t pdid;
};
/* Completion queue bookkeeping for the user-space provider. */
struct bnxt_re_cq {
struct ibv_cq ibvcq;
uint32_t cqid;
struct bnxt_re_context *cntx;
struct bnxt_re_queue cqq;		/* active CQE ring */
struct bnxt_re_queue resize_cqq;	/* staging ring used during CQ resize */
struct bnxt_re_dpi *udpi;
struct list_head sfhead;		/* send-side flush list — see flush.h */
struct list_head rfhead;		/* recv-side flush list — see flush.h */
struct list_head prev_cq_head;
uint32_t cqe_size;
uint8_t phase;		/* current valid-phase bit; compared in bnxt_re_is_cqe_valid() */
struct xorshift32_state rand;
uint32_t mem_handle;
void *toggle_map;
uint32_t toggle_size;
bool deffered_db_sup;	/* sic: "deferred" doorbell support; name kept for source compat */
uint32_t hw_cqes;
};
/* One write-combining push buffer and the WQEs staged into it. */
struct bnxt_re_push_buffer {
uintptr_t pbuf; /* push wc buffer */
uintptr_t *wqe; /* hwqe addresses */
uintptr_t ucdb;	/* user copy doorbell address — TODO confirm */
__u32 st_idx;	/* starting index of staged WQEs */
__u32 qpid;
__u16 wcdpi;
__u16 nbit;	/* bit of this buffer in bnxt_re_push_rec.pbmap */
__u32 tail;
};
/* Packing of the push size field: 5-bit size at bit 24. */
enum bnxt_re_push_info_mask {
BNXT_RE_PUSH_SIZE_MASK = 0x1FUL,
BNXT_RE_PUSH_SIZE_SHIFT = 0x18UL	/* 24 */
};
/* Doorbell header for push-per-packet: common DB header plus one extra
 * word (reserved bits / push size / producer index, per the field name —
 * TODO confirm bit layout against the HW spec). */
struct bnxt_re_db_ppp_hdr {
struct bnxt_re_db_hdr db_hdr;
__u64 rsv_psz_pidx;
};
/* Per-context pool of push buffers; pbmap tracks which are in use. */
struct bnxt_re_push_rec {
struct bnxt_re_dpi *udpi;
struct bnxt_re_push_buffer *pbuf;
__u32 pbmap; /* only 16 bits in use */
};
/* Software bookkeeping kept per posted WQE. */
struct bnxt_re_wrid {
struct bnxt_re_psns_ext *psns_ext;
struct bnxt_re_psns *psns;
uint64_t wrid;		/* caller's wr_id, reported back in the CQE */
uint32_t bytes;
int next_idx;		/* link to the next software WQE slot */
uint32_t st_slot_idx;	/* first HW slot used by this WQE */
uint8_t slots;		/* number of HW slots consumed */
uint8_t sig;		/* signaled completion requested */
uint8_t wc_opcd;	/* ibverbs WC opcode to report on completion */
};
/* Negotiated QP capabilities/limits. */
struct bnxt_re_qpcap {
uint32_t max_swr;	/* max send work requests */
uint32_t max_rwr;	/* max recv work requests */
uint32_t max_ssge;	/* max send SGEs per WR */
uint32_t max_rsge;	/* max recv SGEs per WR */
uint32_t max_inline;
uint8_t sqsig;		/* all send WRs signaled (sq_sig_all) — TODO confirm */
uint8_t is_atomic_cap;
};
/* Shared receive queue state. */
struct bnxt_re_srq {
struct ibv_srq ibvsrq;
struct ibv_srq_attr cap;
struct bnxt_re_context *cntx;
struct bnxt_re_queue *srqq;	/* HW ring */
struct bnxt_re_wrid *srwrid;	/* per-WQE software bookkeeping */
struct bnxt_re_dpi *udpi;
struct xorshift32_state rand;
uint32_t srqid;
int start_idx;
int last_idx;
bool arm_req;	/* deferred SRQ-limit arm request pending */
};
/* Pairs a HW ring with its parallel software (wrid) queue. */
struct bnxt_re_joint_queue {
struct bnxt_re_queue *hwque;
struct bnxt_re_wrid *swque;
uint32_t start_idx;	/* producer side of swque */
uint32_t last_idx;	/* consumer side of swque */
};
/* Queue pair state for the user-space provider. */
struct bnxt_re_qp {
struct ibv_qp ibvqp;
struct bnxt_re_chip_ctx *cctx;
struct bnxt_re_context *cntx;
struct xorshift32_state rand;
struct bnxt_re_joint_queue *jsqq;	/* send queue (HW + SW) */
struct bnxt_re_joint_queue *jrqq;	/* recv queue (HW + SW) */
struct bnxt_re_srq *srq;	/* non-NULL when attached to an SRQ */
struct bnxt_re_cq *scq;
struct bnxt_re_cq *rcq;
struct bnxt_re_dpi *udpi;
struct bnxt_re_qpcap cap;
struct bnxt_re_fque_node snode;	/* send-side flush-queue linkage */
struct bnxt_re_fque_node rnode;	/* recv-side flush-queue linkage */
uint32_t qpid;
uint32_t tbl_indx;
uint32_t sq_psn;
uint32_t pending_db;	/* doorbell value deferred for later ringing — TODO confirm */
uint64_t wqe_cnt;
uint16_t mtu;
uint16_t qpst;		/* current QP state */
uint32_t qpmode;	/* WQE mode (static vs variable) — TODO confirm */
uint8_t push_st_en;
uint16_t max_push_sz;
uint8_t qptyp;
/* TODO(review): original note "irdord?" — ird/ord tracking unresolved */
};
/* Memory region: nothing beyond the common verbs MR is needed. */
struct bnxt_re_mr {
struct verbs_mr vmr;
};
/* Address handle: verbs AH plus the HW AH id. */
struct bnxt_re_ah {
struct ibv_ah ibvah;
uint32_t avid;
};
/* Per-device (not per-open-context) data. */
struct bnxt_re_dev {
struct verbs_device vdev;
uint8_t abi_version;
uint32_t pg_size;	/* system page size used for mappings */
uint32_t cqe_size;
uint32_t max_cq_depth;
struct ibv_device_attr devattr;
};
/* Per-process device context (one per ibv_open_device). */
struct bnxt_re_context {
struct verbs_context ibvctx;
struct bnxt_re_dev *rdev;
uint32_t dev_id;
uint32_t max_qp;
struct bnxt_re_chip_ctx cctx;
uint64_t comp_mask;	/* feature flags negotiated with the kernel */
uint32_t max_srq;
struct bnxt_re_dpi udpi;
void *shpg;		/* shared page mapped from the kernel */
uint32_t wqe_mode;
pthread_mutex_t shlock;	/* protects shpg access — TODO confirm scope */
struct bnxt_re_push_rec *pbrec;
uint32_t wc_handle;
void *dbr_page;
void *bar_map;
};
/* Doorbell pacing parameters shared with the kernel driver. */
struct bnxt_re_pacing_data {
uint32_t do_pacing;	/* current pacing value, up to BNXT_RE_MAX_DO_PACING */
uint32_t pacing_th;
uint32_t alarm_th;
uint32_t fifo_max_depth;
uint32_t fifo_room_mask;
uint32_t fifo_room_shift;
uint32_t grc_reg_offset;
};
/* Parameters for mapping kernel-allocated pages (see bnxt_re_alloc_page). */
struct bnxt_re_mmap_info {
__u32 type;		/* which resource kind is being mapped */
__u32 dpi;
__u64 alloc_offset;	/* mmap offset cookie returned by the kernel */
__u32 alloc_size;
__u32 pg_offset;
__u32 res_id;
};
/* Doorbell ring helpers (internal to the provider library). */
void bnxt_re_ring_rq_db(struct bnxt_re_qp *qp);
void bnxt_re_ring_sq_db(struct bnxt_re_qp *qp);
void bnxt_re_ring_srq_arm(struct bnxt_re_srq *srq);
void bnxt_re_ring_srq_db(struct bnxt_re_srq *srq);
void bnxt_re_ring_cq_db(struct bnxt_re_cq *cq);
void bnxt_re_ring_cq_arm_db(struct bnxt_re_cq *cq, uint8_t aflag);
/* Push (write-combining) doorbell path. */
void bnxt_re_ring_pstart_db(struct bnxt_re_qp *qp,
struct bnxt_re_push_buffer *pbuf);
void bnxt_re_ring_pend_db(struct bnxt_re_qp *qp,
struct bnxt_re_push_buffer *pbuf);
void bnxt_re_fill_push_wcb(struct bnxt_re_qp *qp,
struct bnxt_re_push_buffer *pbuf,
uint32_t idx);
/* Push-buffer pool management on the context. */
int bnxt_re_init_pbuf_list(struct bnxt_re_context *cntx);
void bnxt_re_destroy_pbuf_list(struct bnxt_re_context *cntx);
struct bnxt_re_push_buffer *bnxt_re_get_pbuf(uint8_t *push_st_en,
struct bnxt_re_context *cntx);
void bnxt_re_put_pbuf(struct bnxt_re_context *cntx,
struct bnxt_re_push_buffer *pbuf);
/* Kernel interaction: page allocation, driver notify, toggle memory. */
int bnxt_re_alloc_page(struct ibv_context *ibvctx,
struct bnxt_re_mmap_info *minfo,
uint32_t *page_handle);
int bnxt_re_notify_drv(struct ibv_context *ibvctx);
int bnxt_re_get_toggle_mem(struct ibv_context *ibvctx,
struct bnxt_re_mmap_info *minfo,
uint32_t *page_handle);
/* Container-of style converters from generic verbs objects. */
static inline struct bnxt_re_dev *to_bnxt_re_dev(struct ibv_device *ibvdev)
{
	struct bnxt_re_dev *rdev;

	rdev = container_of(ibvdev, struct bnxt_re_dev, vdev.device);
	return rdev;
}
/* Convert a verbs context to the provider context. */
static inline struct bnxt_re_context *to_bnxt_re_context(
struct ibv_context *ibvctx)
{
	struct bnxt_re_context *cntx;

	cntx = container_of(ibvctx, struct bnxt_re_context, ibvctx.context);
	return cntx;
}
/* Convert a verbs PD to the provider PD. */
static inline struct bnxt_re_pd *to_bnxt_re_pd(struct ibv_pd *ibvpd)
{
	struct bnxt_re_pd *pd;

	pd = container_of(ibvpd, struct bnxt_re_pd, ibvpd);
	return pd;
}
/* Convert a verbs CQ to the provider CQ. */
static inline struct bnxt_re_cq *to_bnxt_re_cq(struct ibv_cq *ibvcq)
{
	struct bnxt_re_cq *cq;

	cq = container_of(ibvcq, struct bnxt_re_cq, ibvcq);
	return cq;
}
/* Convert a verbs QP to the provider QP. */
static inline struct bnxt_re_qp *to_bnxt_re_qp(struct ibv_qp *ibvqp)
{
	struct bnxt_re_qp *qp;

	qp = container_of(ibvqp, struct bnxt_re_qp, ibvqp);
	return qp;
}
/* Convert a verbs SRQ to the provider SRQ. */
static inline struct bnxt_re_srq *to_bnxt_re_srq(struct ibv_srq *ibvsrq)
{
	struct bnxt_re_srq *srq;

	srq = container_of(ibvsrq, struct bnxt_re_srq, ibvsrq);
	return srq;
}
/* Convert a verbs AH to the provider AH. */
static inline struct bnxt_re_ah *to_bnxt_re_ah(struct ibv_ah *ibvah)
{
	struct bnxt_re_ah *ah;

	ah = container_of(ibvah, struct bnxt_re_ah, ibvah);
	return ah;
}
/* Worst-case SQ WQE size: base + send header + max inline payload. */
static inline uint32_t bnxt_re_get_sqe_sz(void)
{
	uint32_t size;

	size = sizeof(struct bnxt_re_bsqe);
	size += sizeof(struct bnxt_re_send);
	size += BNXT_RE_MAX_INLINE_SIZE;
	return size;
}
/* SQ WQE header size (no inline payload). */
static inline uint32_t bnxt_re_get_sqe_hdr_sz(void)
{
	uint32_t hdr_size;

	hdr_size = sizeof(struct bnxt_re_bsqe);
	hdr_size += sizeof(struct bnxt_re_send);
	return hdr_size;
}
/* Worst-case RQ WQE size: base + rqe header + max inline payload. */
static inline uint32_t bnxt_re_get_rqe_sz(void)
{
	uint32_t size;

	size = sizeof(struct bnxt_re_brqe);
	size += sizeof(struct bnxt_re_rqe);
	size += BNXT_RE_MAX_INLINE_SIZE;
	return size;
}
/* RQ WQE header size (no inline payload). */
static inline uint32_t bnxt_re_get_rqe_hdr_sz(void)
{
	uint32_t hdr_size;

	hdr_size = sizeof(struct bnxt_re_brqe);
	hdr_size += sizeof(struct bnxt_re_rqe);
	return hdr_size;
}
/* Worst-case SRQ WQE size: base + srqe header + max inline payload. */
static inline uint32_t bnxt_re_get_srqe_sz(void)
{
	uint32_t size;

	size = sizeof(struct bnxt_re_brqe);
	size += sizeof(struct bnxt_re_srqe);
	size += BNXT_RE_MAX_INLINE_SIZE;
	return size;
}
/* SRQ WQE header size (no inline payload). */
static inline uint32_t bnxt_re_get_srqe_hdr_sz(void)
{
	uint32_t hdr_size;

	hdr_size = sizeof(struct bnxt_re_brqe);
	hdr_size += sizeof(struct bnxt_re_srqe);
	return hdr_size;
}
/* CQE size: request CQE plus the base CQE trailer. */
static inline uint32_t bnxt_re_get_cqe_sz(void)
{
	uint32_t size;

	size = sizeof(struct bnxt_re_req_cqe);
	size += sizeof(struct bnxt_re_bcqe);
	return size;
}
/*
 * Map an ibverbs send-WR opcode to the device SQ WQE opcode.
 * Unsupported opcodes map to BNXT_RE_WR_OPCD_INVAL so callers can
 * reject the work request.
 * (Also drops the stray null statement the original left after the
 * switch's closing brace.)
 */
static inline uint8_t bnxt_re_ibv_to_bnxt_wr_opcd(uint8_t ibv_opcd)
{
	switch (ibv_opcd) {
	case IBV_WR_SEND:
		return BNXT_RE_WR_OPCD_SEND;
	case IBV_WR_SEND_WITH_IMM:
		return BNXT_RE_WR_OPCD_SEND_IMM;
	case IBV_WR_RDMA_WRITE:
		return BNXT_RE_WR_OPCD_RDMA_WRITE;
	case IBV_WR_RDMA_WRITE_WITH_IMM:
		return BNXT_RE_WR_OPCD_RDMA_WRITE_IMM;
	case IBV_WR_RDMA_READ:
		return BNXT_RE_WR_OPCD_RDMA_READ;
	case IBV_WR_ATOMIC_CMP_AND_SWP:
		return BNXT_RE_WR_OPCD_ATOMIC_CS;
	case IBV_WR_ATOMIC_FETCH_AND_ADD:
		return BNXT_RE_WR_OPCD_ATOMIC_FA;
	/* TODO: Add other opcodes */
	default:
		return BNXT_RE_WR_OPCD_INVAL;
	}
}
/*
 * Map a send-WR opcode to the WC opcode reported on completion.
 * Unknown opcodes yield the 0xFF sentinel.
 */
static inline uint8_t bnxt_re_ibv_wr_to_wc_opcd(uint8_t wr_opcd)
{
	switch (wr_opcd) {
	case IBV_WR_SEND_WITH_IMM:
	case IBV_WR_SEND:
		return IBV_WC_SEND;
	case IBV_WR_RDMA_WRITE_WITH_IMM:
	case IBV_WR_RDMA_WRITE:
		return IBV_WC_RDMA_WRITE;
	case IBV_WR_RDMA_READ:
		return IBV_WC_RDMA_READ;
	case IBV_WR_ATOMIC_CMP_AND_SWP:
		return IBV_WC_COMP_SWAP;
	case IBV_WR_ATOMIC_FETCH_AND_ADD:
		return IBV_WC_FETCH_ADD;
	default:
		return 0xFF;
	}
}
/*
 * Translate a device completion status code to the ibverbs WC status.
 * @is_req selects the requester (send side) table; otherwise the
 * responder table is used. Unknown codes map to IBV_WC_GENERAL_ERR.
 */
static inline uint8_t bnxt_re_to_ibv_wc_status(uint8_t bnxt_wcst,
					       uint8_t is_req)
{
	if (is_req) {
		switch (bnxt_wcst) {
		case BNXT_RE_REQ_ST_BAD_RESP:
			return IBV_WC_BAD_RESP_ERR;
		case BNXT_RE_REQ_ST_LOC_LEN:
			return IBV_WC_LOC_LEN_ERR;
		case BNXT_RE_REQ_ST_LOC_QP_OP:
			return IBV_WC_LOC_QP_OP_ERR;
		case BNXT_RE_REQ_ST_PROT:
			return IBV_WC_LOC_PROT_ERR;
		case BNXT_RE_REQ_ST_MEM_OP:
			return IBV_WC_MW_BIND_ERR;
		case BNXT_RE_REQ_ST_REM_INVAL:
			return IBV_WC_REM_INV_REQ_ERR;
		case BNXT_RE_REQ_ST_REM_ACC:
			return IBV_WC_REM_ACCESS_ERR;
		case BNXT_RE_REQ_ST_REM_OP:
			return IBV_WC_REM_OP_ERR;
		case BNXT_RE_REQ_ST_RNR_NAK_XCED:
			return IBV_WC_RNR_RETRY_EXC_ERR;
		case BNXT_RE_REQ_ST_TRNSP_XCED:
			return IBV_WC_RETRY_EXC_ERR;
		case BNXT_RE_REQ_ST_WR_FLUSH:
			return IBV_WC_WR_FLUSH_ERR;
		default:
			return IBV_WC_GENERAL_ERR;
		}
	}

	switch (bnxt_wcst) {
	case BNXT_RE_RSP_ST_LOC_ACC:
		return IBV_WC_LOC_ACCESS_ERR;
	case BNXT_RE_RSP_ST_LOC_LEN:
		return IBV_WC_LOC_LEN_ERR;
	case BNXT_RE_RSP_ST_LOC_PROT:
		return IBV_WC_LOC_PROT_ERR;
	case BNXT_RE_RSP_ST_LOC_QP_OP:
		return IBV_WC_LOC_QP_OP_ERR;
	case BNXT_RE_RSP_ST_MEM_OP:
		return IBV_WC_MW_BIND_ERR;
	case BNXT_RE_RSP_ST_REM_INVAL:
		return IBV_WC_REM_INV_REQ_ERR;
	case BNXT_RE_RSP_ST_WR_FLUSH:
		return IBV_WC_WR_FLUSH_ERR;
	case BNXT_RE_RSP_ST_HW_FLUSH:
		return IBV_WC_FATAL_ERR;
	default:
		return IBV_WC_GENERAL_ERR;
	}
}
/*
 * Return non-zero when the CQE at @hdr has been written by HW: its
 * phase bits must match the CQ's current consumer phase.
 * The barrier after the check prevents the caller's subsequent reads
 * of the CQE payload from being reordered before the validity check.
 * Code left untouched — the barrier placement is order-sensitive.
 */
static inline uint8_t bnxt_re_is_cqe_valid(struct bnxt_re_cq *cq,
struct bnxt_re_bcqe *hdr)
{
uint8_t valid = 0;
valid = ((le32toh(hdr->flg_st_typ_ph) &
BNXT_RE_BCQE_PH_MASK) == cq->phase);
udma_from_device_barrier();
return valid;
}
/* Flip the CQ valid-phase bit whenever the consumer index wraps to 0. */
static inline void bnxt_re_change_cq_phase(struct bnxt_re_cq *cq)
{
	if (cq->cqq.head)
		return;

	cq->phase = ~cq->phase & BNXT_RE_BCQE_PH_MASK;
}
/*
 * Return the next free software WQE slot; optionally report its index
 * through @wqe_idx.
 */
static inline void *bnxt_re_get_swqe(struct bnxt_re_joint_queue *jqq,
uint32_t *wqe_idx)
{
	uint32_t head = jqq->start_idx;

	if (wqe_idx)
		*wqe_idx = head;
	return &jqq->swque[head];
}
/* Advance the producer index past the software WQE at @idx. */
static inline void bnxt_re_jqq_mod_start(struct bnxt_re_joint_queue *jqq,
uint32_t idx)
{
	uint32_t next = jqq->swque[idx].next_idx;

	jqq->start_idx = next;
}
/* Advance the consumer index past the software WQE at @idx. */
static inline void bnxt_re_jqq_mod_last(struct bnxt_re_joint_queue *jqq,
uint32_t idx)
{
	uint32_t next = jqq->swque[idx].next_idx;

	jqq->last_idx = next;
}
/*
 * Compute a queue depth: round up to a power of two unless the kernel
 * reported that the power-of-two requirement is disabled.
 */
static inline uint32_t bnxt_re_init_depth(uint32_t ent, uint64_t cmask)
{
	if (cmask & BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED)
		return ent;

	return roundup_pow_of_two(ent);
}
/*
 * Copy staged WQE data into the write-combining push buffer, two
 * 64-bit MMIO writes (16 bytes) per slot.
 * @offset: starting slot within the push buffer; @idx: slot count.
 * NOTE(review): pbuf->pbuf is a uintptr_t scalar, so "+ 2 * (indx +
 * offset)" advances by BYTES here, not 8-byte words — confirm the
 * intended scaling against the push-buffer layout.
 */
static inline void bnxt_re_copy_data_to_pb(struct bnxt_re_push_buffer *pbuf,
uint8_t offset, uint32_t idx)
{
uintptr_t *src;
uintptr_t *dst;
int indx; /* signed loop var vs uint32_t bound: fine for realistic counts */
for (indx = 0; indx < idx; indx++) {
dst = (uintptr_t *)(pbuf->pbuf + 2 * (indx + offset));
src = (uintptr_t *)(pbuf->wqe[indx]);
mmio_write64(dst, *src);
dst++;
src++;
mmio_write64(dst, *src);
}
}
/*
 * res = a - b, normalizing a negative nanosecond component.
 * Assumes a >= b (callers subtract a later sample from an earlier one).
 * Declared static inline: this is a header included by multiple
 * translation units, and a plain static definition would be duplicated
 * and trigger -Wunused-function in TUs that never call it (every other
 * helper in this header is already static inline).
 */
static inline void timespec_sub(const struct timespec *a,
				const struct timespec *b,
				struct timespec *res)
{
	res->tv_sec = a->tv_sec - b->tv_sec;
	res->tv_nsec = a->tv_nsec - b->tv_nsec;
	if (res->tv_nsec < 0) {
		res->tv_sec--;
		res->tv_nsec += BNXT_NSEC_PER_SEC;
	}
}
/*
* Function waits in a busy loop for a given nano seconds
* The maximum wait period allowed is less than one second
*/
static inline void bnxt_re_sub_sec_busy_wait(uint32_t nsec)
{
struct timespec start, cur, res;
if (nsec >= BNXT_NSEC_PER_SEC)
return;
if (clock_gettime(CLOCK_REALTIME, &start))
return;
while (1) {
if (clock_gettime(CLOCK_REALTIME, &cur))
return;
timespec_sub(&cur, &start, &res);
if (res.tv_nsec >= nsec)
break;
}
}
/* True when the context's comp_mask advertises HW retransmission support. */
#define BNXT_RE_HW_RETX(a) ((a)->comp_mask & BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED)
#endif