1 From 36588f5844af4ef1e5b0d6ad002fa1adf9032653 Mon Sep 17 00:00:00 2001
2 From: Potnuri Bharat Teja <bharat@chelsio.com>
3 Date: Mon, 21 Oct 2019 14:01:25 +0530
4 Subject: [PATCH] libcxgb3: Remove libcxgb3 from rdma-core
6 Remove the userspace provider for iw_cxgb3 after removing it from the kernel.
8 Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
9 Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
14 debian/control | 10 +-
15 debian/copyright | 3 +-
16 kernel-boot/rdma-description.rules | 1 -
17 kernel-boot/rdma-hw-modules.rules | 1 -
18 libibverbs/verbs.h | 1 -
19 providers/cxgb3/CMakeLists.txt | 6 -
20 providers/cxgb3/cq.c | 442 -----------------
21 providers/cxgb3/cxio_wr.h | 758 -----------------------------
22 providers/cxgb3/firmware_exports.h | 148 ------
23 providers/cxgb3/iwch-abi.h | 51 --
24 providers/cxgb3/iwch.c | 269 ----------
25 providers/cxgb3/iwch.h | 218 ---------
26 providers/cxgb3/qp.c | 560 ---------------------
27 providers/cxgb3/verbs.c | 476 ------------------
28 redhat/rdma-core.spec | 3 -
29 redhat/rdma.kernel-init | 4 -
30 suse/rdma-core.spec | 2 -
31 20 files changed, 4 insertions(+), 2956 deletions(-)
32 delete mode 100644 providers/cxgb3/CMakeLists.txt
33 delete mode 100644 providers/cxgb3/cq.c
34 delete mode 100644 providers/cxgb3/cxio_wr.h
35 delete mode 100644 providers/cxgb3/firmware_exports.h
36 delete mode 100644 providers/cxgb3/iwch-abi.h
37 delete mode 100644 providers/cxgb3/iwch.c
38 delete mode 100644 providers/cxgb3/iwch.h
39 delete mode 100644 providers/cxgb3/qp.c
40 delete mode 100644 providers/cxgb3/verbs.c
42 diff --git a/CMakeLists.txt b/CMakeLists.txt
43 index 7abeea4fe..85485ba00 100644
46 @@ -615,7 +615,6 @@ add_subdirectory(librdmacm/man)
48 if (HAVE_COHERENT_DMA)
49 add_subdirectory(providers/bnxt_re)
50 -add_subdirectory(providers/cxgb3) # NO SPARSE
51 add_subdirectory(providers/cxgb4) # NO SPARSE
52 add_subdirectory(providers/efa)
53 add_subdirectory(providers/efa/man)
54 diff --git a/README.md b/README.md
55 index 451ff7fcb..a96351933 100644
58 @@ -15,7 +15,6 @@ under the providers/ directory. Support for the following Kernel RDMA drivers
66 diff --git a/kernel-boot/rdma-description.rules b/kernel-boot/rdma-description.rules
67 index bb33dce40..4ea59ba19 100644
68 --- a/kernel-boot/rdma-description.rules
69 +++ b/kernel-boot/rdma-description.rules
70 @@ -22,7 +22,6 @@ DRIVERS=="ib_qib", ENV{ID_RDMA_INFINIBAND}="1"
71 DRIVERS=="hfi1", ENV{ID_RDMA_OPA}="1"
73 # Hardware that supports iWarp
74 -DRIVERS=="cxgb3", ENV{ID_RDMA_IWARP}="1"
75 DRIVERS=="cxgb4", ENV{ID_RDMA_IWARP}="1"
76 DRIVERS=="i40e", ENV{ID_RDMA_IWARP}="1"
77 DRIVERS=="nes", ENV{ID_RDMA_IWARP}="1"
78 diff --git a/kernel-boot/rdma-hw-modules.rules b/kernel-boot/rdma-hw-modules.rules
79 index dde0ab8da..da4bbe363 100644
80 --- a/kernel-boot/rdma-hw-modules.rules
81 +++ b/kernel-boot/rdma-hw-modules.rules
82 @@ -8,7 +8,6 @@ SUBSYSTEM!="net", GOTO="rdma_hw_modules_end"
84 ENV{ID_NET_DRIVER}=="be2net", RUN{builtin}+="kmod load ocrdma"
85 ENV{ID_NET_DRIVER}=="bnxt_en", RUN{builtin}+="kmod load bnxt_re"
86 -ENV{ID_NET_DRIVER}=="cxgb3", RUN{builtin}+="kmod load iw_cxgb3"
87 ENV{ID_NET_DRIVER}=="cxgb4", RUN{builtin}+="kmod load iw_cxgb4"
88 ENV{ID_NET_DRIVER}=="hns", RUN{builtin}+="kmod load hns_roce"
89 ENV{ID_NET_DRIVER}=="i40e", RUN{builtin}+="kmod load i40iw"
90 diff --git a/libibverbs/verbs.h b/libibverbs/verbs.h
91 index c411722b1..12a33a99a 100644
92 --- a/libibverbs/verbs.h
93 +++ b/libibverbs/verbs.h
94 @@ -2144,7 +2144,6 @@ struct ibv_device **ibv_get_device_list(int *num_devices);
96 struct verbs_devices_ops;
97 extern const struct verbs_device_ops verbs_provider_bnxt_re;
98 -extern const struct verbs_device_ops verbs_provider_cxgb3;
99 extern const struct verbs_device_ops verbs_provider_cxgb4;
100 extern const struct verbs_device_ops verbs_provider_efa;
101 extern const struct verbs_device_ops verbs_provider_hfi1verbs;
102 diff --git a/providers/cxgb3/CMakeLists.txt b/providers/cxgb3/CMakeLists.txt
103 deleted file mode 100644
104 index a578105e7..000000000
105 --- a/providers/cxgb3/CMakeLists.txt
114 diff --git a/providers/cxgb3/cq.c b/providers/cxgb3/cq.c
115 deleted file mode 100644
116 index 6cb4fe74d..000000000
117 --- a/providers/cxgb3/cq.c
121 - * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
123 - * This software is available to you under a choice of one of two
124 - * licenses. You may choose to be licensed under the terms of the GNU
125 - * General Public License (GPL) Version 2, available from the file
126 - * COPYING in the main directory of this source tree, or the
127 - * OpenIB.org BSD license below:
129 - * Redistribution and use in source and binary forms, with or
130 - * without modification, are permitted provided that the following
131 - * conditions are met:
133 - * - Redistributions of source code must retain the above
134 - * copyright notice, this list of conditions and the following
137 - * - Redistributions in binary form must reproduce the above
138 - * copyright notice, this list of conditions and the following
139 - * disclaimer in the documentation and/or other materials
140 - * provided with the distribution.
142 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
143 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
144 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
145 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
146 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
147 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
148 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
154 -#include <pthread.h>
155 -#include <sys/errno.h>
157 -#include <infiniband/opcode.h>
160 -#include "iwch-abi.h"
162 -int iwch_arm_cq(struct ibv_cq *ibcq, int solicited)
165 - struct iwch_cq *chp = to_iwch_cq(ibcq);
167 - pthread_spin_lock(&chp->lock);
168 - ret = ibv_cmd_req_notify_cq(ibcq, solicited);
169 - pthread_spin_unlock(&chp->lock);
174 -static inline void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
176 - struct t3_swsq *sqp;
177 - uint32_t ptr = wq->sq_rptr;
178 - int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
180 - sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
182 - if (!sqp->signaled) {
184 - sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
185 - } else if (sqp->complete) {
188 - * Insert this completed cqe into the swcq.
190 - sqp->cqe.header |= htobe32(V_CQE_SWCQE(1));
191 - *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
201 -static inline void create_read_req_cqe(struct t3_wq *wq,
202 - struct t3_cqe *hw_cqe,
203 - struct t3_cqe *read_cqe)
205 - CQE_WRID_SQ_WPTR(*read_cqe) = wq->oldest_read->sq_wptr;
206 - read_cqe->len = wq->oldest_read->read_len;
207 - read_cqe->header = htobe32(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
208 - V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
209 - V_CQE_OPCODE(T3_READ_REQ) |
214 - * Return a ptr to the next read wr in the SWSQ or NULL.
216 -static inline void advance_oldest_read(struct t3_wq *wq)
219 - uint32_t rptr = wq->oldest_read - wq->sq + 1;
220 - uint32_t wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
222 - while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
223 - wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
225 - if (wq->oldest_read->opcode == T3_READ_REQ) {
230 - wq->oldest_read = NULL;
233 -static inline int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq,
234 - struct t3_cqe *cqe, uint8_t *cqe_flushed,
238 - struct t3_cqe *hw_cqe, read_cqe;
241 - hw_cqe = cxio_next_cqe(cq);
242 - udma_from_device_barrier();
245 - * Skip cqes not affiliated with a QP.
253 - * Gotta tweak READ completions:
254 - * 1) the cqe doesn't contain the sq_wptr from the wr.
255 - * 2) opcode not reflected from the wr.
256 - * 3) read_len not reflected from the wr.
257 - * 4) cq_type is RQ_TYPE not SQ_TYPE.
259 - if (CQE_OPCODE(*hw_cqe) == T3_READ_RESP) {
262 - * If this is an unsolicited read response to local stag 1,
263 - * then the read was generated by the kernel driver as part
264 - * of peer-2-peer connection setup. So ignore the completion.
266 - if (CQE_WRID_STAG(*hw_cqe) == 1) {
267 - if (CQE_STATUS(*hw_cqe))
274 - * Don't write to the HWCQ, so create a new read req CQE
277 - create_read_req_cqe(wq, hw_cqe, &read_cqe);
278 - hw_cqe = &read_cqe;
279 - advance_oldest_read(wq);
285 - if (CQE_STATUS(*hw_cqe) || t3_wq_in_error(wq)) {
286 - *cqe_flushed = t3_wq_in_error(wq);
287 - t3_set_wq_in_error(wq);
294 - if (RQ_TYPE(*hw_cqe)) {
297 - * HW only validates 4 bits of MSN. So we must validate that
298 - * the MSN in the SEND is the next expected MSN. If its not,
299 - * then we complete this with TPT_ERR_MSN and mark the wq in
302 - if ((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1))) {
303 - t3_set_wq_in_error(wq);
304 - hw_cqe->header |= htobe32(V_CQE_STATUS(TPT_ERR_MSN));
310 - * If we get here its a send completion.
312 - * Handle out of order completion. These get stuffed
313 - * in the SW SQ. Then the SW SQ is walked to move any
314 - * now in-order completions into the SW CQ. This handles
316 - * 1) reaping unsignaled WRs when the first subsequent
317 - * signaled WR is completed.
318 - * 2) out of order read completions.
320 - if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
321 - struct t3_swsq *sqp;
324 - Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
325 - sqp->cqe = *hw_cqe;
335 - * Reap the associated WR(s) that are freed up with this
338 - if (SQ_TYPE(*hw_cqe)) {
339 - wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
340 - *cookie = (wq->sq +
341 - Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
344 - *cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
350 - * Flush any completed cqes that are now in-order.
352 - flush_completed_wrs(wq, cq);
355 - if (SW_CQE(*hw_cqe)) {
356 - PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
357 - __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
360 - PDBG("%s cq %p cqid 0x%x skip hw cqe sw_rptr 0x%x\n",
361 - __FUNCTION__, cq, cq->cqid, cq->rptr);
369 - * Get one cq entry from cxio and map it to openib.
374 - * -EAGAIN caller must try again
375 - * any other -errno fatal error
377 -static int iwch_poll_cq_one(struct iwch_device *rhp, struct iwch_cq *chp,
380 - struct iwch_qp *qhp = NULL;
381 - struct t3_cqe cqe, *hw_cqe;
383 - uint8_t cqe_flushed;
387 - hw_cqe = cxio_next_cqe(&chp->cq);
388 - udma_from_device_barrier();
393 - qhp = rhp->qpid2ptr[CQE_QPID(*hw_cqe)];
397 - pthread_spin_lock(&qhp->lock);
400 - ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie);
407 - wc->wr_id = cookie;
408 - wc->qp_num = qhp->wq.qpid;
409 - wc->vendor_err = CQE_STATUS(cqe);
412 - PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
413 - "lo 0x%x cookie 0x%" PRIx64 "\n",
414 - __FUNCTION__, CQE_QPID(cqe), CQE_TYPE(cqe),
415 - CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
416 - CQE_WRID_LOW(cqe), cookie);
418 - if (CQE_TYPE(cqe) == 0) {
419 - if (!CQE_STATUS(cqe))
420 - wc->byte_len = CQE_LEN(cqe);
423 - wc->opcode = IBV_WC_RECV;
425 - switch (CQE_OPCODE(cqe)) {
426 - case T3_RDMA_WRITE:
427 - wc->opcode = IBV_WC_RDMA_WRITE;
430 - wc->opcode = IBV_WC_RDMA_READ;
431 - wc->byte_len = CQE_LEN(cqe);
434 - case T3_SEND_WITH_SE:
435 - wc->opcode = IBV_WC_SEND;
438 - wc->opcode = IBV_WC_BIND_MW;
441 - /* these aren't supported yet */
442 - case T3_SEND_WITH_INV:
443 - case T3_SEND_WITH_SE_INV:
445 - case T3_FAST_REGISTER:
447 - PDBG("%s Unexpected opcode %d CQID 0x%x QPID 0x%x\n",
448 - __FUNCTION__, CQE_OPCODE(cqe), chp->cq.cqid,
456 - wc->status = IBV_WC_WR_FLUSH_ERR;
459 - switch (CQE_STATUS(cqe)) {
460 - case TPT_ERR_SUCCESS:
461 - wc->status = IBV_WC_SUCCESS;
464 - wc->status = IBV_WC_LOC_ACCESS_ERR;
467 - wc->status = IBV_WC_LOC_PROT_ERR;
470 - case TPT_ERR_ACCESS:
471 - wc->status = IBV_WC_LOC_ACCESS_ERR;
474 - wc->status = IBV_WC_GENERAL_ERR;
476 - case TPT_ERR_BOUND:
477 - wc->status = IBV_WC_LOC_LEN_ERR;
479 - case TPT_ERR_INVALIDATE_SHARED_MR:
480 - case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
481 - wc->status = IBV_WC_MW_BIND_ERR;
484 - case TPT_ERR_MARKER:
485 - case TPT_ERR_PDU_LEN_ERR:
486 - case TPT_ERR_OUT_OF_RQE:
487 - case TPT_ERR_DDP_VERSION:
488 - case TPT_ERR_RDMA_VERSION:
489 - case TPT_ERR_DDP_QUEUE_NUM:
493 - case TPT_ERR_MSN_RANGE:
494 - case TPT_ERR_IRD_OVERFLOW:
495 - case TPT_ERR_OPCODE:
496 - wc->status = IBV_WC_FATAL_ERR;
498 - case TPT_ERR_SWFLUSH:
499 - wc->status = IBV_WC_WR_FLUSH_ERR;
502 - PDBG("%s Unexpected status 0x%x CQID 0x%x QPID 0x%0x\n",
503 - __FUNCTION__, CQE_STATUS(cqe), chp->cq.cqid,
510 - pthread_spin_unlock(&qhp->lock);
514 -int t3b_poll_cq(struct ibv_cq *ibcq, int num_entries, struct ibv_wc *wc)
516 - struct iwch_device *rhp;
517 - struct iwch_cq *chp;
521 - chp = to_iwch_cq(ibcq);
524 - if (rhp->abi_version > 0 && t3_cq_in_error(&chp->cq)) {
525 - t3_reset_cq_in_error(&chp->cq);
526 - iwch_flush_qps(rhp);
529 - pthread_spin_lock(&chp->lock);
530 - for (npolled = 0; npolled < num_entries; ++npolled) {
533 - * Because T3 can post CQEs that are out of order,
534 - * we might have to poll again after removing
538 - err = iwch_poll_cq_one(rhp, chp, wc + npolled);
539 - } while (err == -EAGAIN);
543 - pthread_spin_unlock(&chp->lock);
552 -int t3a_poll_cq(struct ibv_cq *ibcq, int num_entries, struct ibv_wc *wc)
555 - struct iwch_cq *chp = to_iwch_cq(ibcq);
557 - pthread_spin_lock(&chp->lock);
558 - ret = ibv_cmd_poll_cq(ibcq, num_entries, wc);
559 - pthread_spin_unlock(&chp->lock);
562 diff --git a/providers/cxgb3/cxio_wr.h b/providers/cxgb3/cxio_wr.h
563 deleted file mode 100644
564 index 042bd9414..000000000
565 --- a/providers/cxgb3/cxio_wr.h
569 - * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
571 - * This software is available to you under a choice of one of two
572 - * licenses. You may choose to be licensed under the terms of the GNU
573 - * General Public License (GPL) Version 2, available from the file
574 - * COPYING in the main directory of this source tree, or the
575 - * OpenIB.org BSD license below:
577 - * Redistribution and use in source and binary forms, with or
578 - * without modification, are permitted provided that the following
579 - * conditions are met:
581 - * - Redistributions of source code must retain the above
582 - * copyright notice, this list of conditions and the following
585 - * - Redistributions in binary form must reproduce the above
586 - * copyright notice, this list of conditions and the following
587 - * disclaimer in the documentation and/or other materials
588 - * provided with the distribution.
590 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
591 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
592 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
593 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
594 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
595 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
596 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
599 -#ifndef __CXIO_WR_H__
600 -#define __CXIO_WR_H__
605 -#include <util/udma_barrier.h>
606 -#include "firmware_exports.h"
608 -#define T3_MAX_NUM_QP (1<<15)
609 -#define T3_MAX_NUM_CQ (1<<15)
610 -#define T3_MAX_NUM_PD (1<<15)
611 -#define T3_MAX_NUM_STAG (1<<15)
612 -#define T3_MAX_SGE 4
613 -#define T3_MAX_INLINE 64
615 -#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
616 -#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
618 -#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
619 -#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
620 -#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
621 -#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
623 -/* FIXME: Move me to a generic PCI mmio accessor */
624 -#define cpu_to_pci32(val) htole32(val)
626 -#define RING_DOORBELL(doorbell, QPID) { \
627 - *doorbell = cpu_to_pci32(QPID); \
630 -#define SEQ32_GE(x,y) (!( (((uint32_t) (x)) - ((uint32_t) (y))) & 0x80000000 ))
633 - T3_COMPLETION_FLAG = 0x01,
634 - T3_NOTIFY_FLAG = 0x02,
635 - T3_SOLICITED_EVENT_FLAG = 0x04,
636 - T3_READ_FENCE_FLAG = 0x08,
637 - T3_LOCAL_FENCE_FLAG = 0x10
638 -} __attribute__ ((packed));
641 - T3_WR_BP = FW_WROPCODE_RI_BYPASS,
642 - T3_WR_SEND = FW_WROPCODE_RI_SEND,
643 - T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
644 - T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
645 - T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
646 - T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
647 - T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
648 - T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
649 - T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
650 -} __attribute__ ((packed));
652 -enum t3_rdma_opcode {
653 - T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
659 - T3_SEND_WITH_SE_INV,
661 - T3_RDMA_INIT, /* CHELSIO RI specific ... */
667 -} __attribute__ ((packed));
669 -static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
672 - case T3_WR_BP: return T3_BYPASS;
673 - case T3_WR_SEND: return T3_SEND;
674 - case T3_WR_WRITE: return T3_RDMA_WRITE;
675 - case T3_WR_READ: return T3_READ_REQ;
676 - case T3_WR_INV_STAG: return T3_LOCAL_INV;
677 - case T3_WR_BIND: return T3_BIND_MW;
678 - case T3_WR_INIT: return T3_RDMA_INIT;
679 - case T3_WR_QP_MOD: return T3_QP_MOD;
686 -/* Work request id */
695 -#define WRID(wrid) (wrid.id1)
696 -#define WRID_GEN(wrid) (wrid.id0.wr_gen)
697 -#define WRID_IDX(wrid) (wrid.id0.wr_idx)
698 -#define WRID_LO(wrid) (wrid.id0.wr_lo)
701 - uint32_t op_seop_flags;
702 - uint32_t gen_tid_len;
705 -#define S_FW_RIWR_OP 24
706 -#define M_FW_RIWR_OP 0xff
707 -#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
708 -#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
710 -#define S_FW_RIWR_SOPEOP 22
711 -#define M_FW_RIWR_SOPEOP 0x3
712 -#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
714 -#define S_FW_RIWR_FLAGS 8
715 -#define M_FW_RIWR_FLAGS 0x3fffff
716 -#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
717 -#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
719 -#define S_FW_RIWR_TID 8
720 -#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
722 -#define S_FW_RIWR_LEN 0
723 -#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
725 -#define S_FW_RIWR_GEN 31
726 -#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
734 -/* If num_sgle is zero, flit 5+ contains immediate data.*/
736 - struct fw_riwrh wrh; /* 0 */
737 - union t3_wrid wrid; /* 1 */
739 - enum t3_rdma_opcode rdmaop:8;
740 - uint32_t reserved:24; /* 2 */
741 - uint32_t rem_stag; /* 2 */
742 - uint32_t plen; /* 3 */
744 - struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
747 -struct t3_local_inv_wr {
748 - struct fw_riwrh wrh; /* 0 */
749 - union t3_wrid wrid; /* 1 */
750 - uint32_t stag; /* 2 */
751 - uint32_t reserved3;
754 -struct t3_rdma_write_wr {
755 - struct fw_riwrh wrh; /* 0 */
756 - union t3_wrid wrid; /* 1 */
757 - enum t3_rdma_opcode rdmaop:8; /* 2 */
758 - uint32_t reserved:24; /* 2 */
759 - uint32_t stag_sink;
760 - uint64_t to_sink; /* 3 */
761 - uint32_t plen; /* 4 */
763 - struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
766 -struct t3_rdma_read_wr {
767 - struct fw_riwrh wrh; /* 0 */
768 - union t3_wrid wrid; /* 1 */
769 - enum t3_rdma_opcode rdmaop:8; /* 2 */
770 - uint32_t reserved:24;
772 - uint64_t rem_to; /* 3 */
773 - uint32_t local_stag; /* 4 */
774 - uint32_t local_len;
775 - uint64_t local_to; /* 5 */
779 - T3_VA_BASED_TO = 0x0,
780 - T3_ZERO_BASED_TO = 0x1
781 -} __attribute__ ((packed));
784 - T3_MEM_ACCESS_LOCAL_READ = 0x1,
785 - T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
786 - T3_MEM_ACCESS_REM_READ = 0x4,
787 - T3_MEM_ACCESS_REM_WRITE = 0x8
788 -} __attribute__ ((packed));
790 -struct t3_bind_mw_wr {
791 - struct fw_riwrh wrh; /* 0 */
792 - union t3_wrid wrid; /* 1 */
793 - uint32_t reserved:16;
794 - enum t3_addr_type type:8;
795 - enum t3_mem_perms perms:8; /* 2 */
797 - uint32_t mw_stag; /* 3 */
799 - uint64_t mw_va; /* 4 */
800 - uint32_t mr_pbl_addr; /* 5 */
801 - uint32_t reserved2:24;
802 - uint32_t mr_pagesz:8;
805 -struct t3_receive_wr {
806 - struct fw_riwrh wrh; /* 0 */
807 - union t3_wrid wrid; /* 1 */
808 - uint8_t pagesz[T3_MAX_SGE];
809 - uint32_t num_sgle; /* 2 */
810 - struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
811 - uint32_t pbl_addr[T3_MAX_SGE];
814 -struct t3_bypass_wr {
815 - struct fw_riwrh wrh;
816 - union t3_wrid wrid; /* 1 */
819 -struct t3_modify_qp_wr {
820 - struct fw_riwrh wrh; /* 0 */
821 - union t3_wrid wrid; /* 1 */
822 - uint32_t flags; /* 2 */
823 - uint32_t quiesce; /* 2 */
824 - uint32_t max_ird; /* 3 */
825 - uint32_t max_ord; /* 3 */
826 - uint64_t sge_cmd; /* 4 */
827 - uint64_t ctx1; /* 5 */
828 - uint64_t ctx0; /* 6 */
831 -enum t3_modify_qp_flags {
832 - MODQP_QUIESCE = 0x01,
833 - MODQP_MAX_IRD = 0x02,
834 - MODQP_MAX_ORD = 0x04,
835 - MODQP_WRITE_EC = 0x08,
836 - MODQP_READ_EC = 0x10,
841 - uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
842 - uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
843 - uP_RI_MPA_CRC_ENABLE = 0x4,
844 - uP_RI_MPA_IETF_ENABLE = 0x8
845 -} __attribute__ ((packed));
848 - uP_RI_QP_RDMA_READ_ENABLE = 0x01,
849 - uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
850 - uP_RI_QP_BIND_ENABLE = 0x04,
851 - uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
852 - uP_RI_QP_STAG0_ENABLE = 0x10
853 -} __attribute__ ((packed));
855 -struct t3_rdma_init_attr {
863 - enum t3_mpa_attrs mpaattrs;
864 - enum t3_qp_caps qpcaps;
868 - uint64_t qp_dma_addr;
869 - uint32_t qp_dma_size;
870 - uint8_t rqes_posted;
873 -struct t3_rdma_init_wr {
874 - struct fw_riwrh wrh; /* 0 */
875 - union t3_wrid wrid; /* 1 */
876 - uint32_t qpid; /* 2 */
878 - uint32_t scqid; /* 3 */
880 - uint32_t rq_addr; /* 4 */
882 - enum t3_mpa_attrs mpaattrs:8; /* 5 */
883 - enum t3_qp_caps qpcaps:8;
884 - uint32_t ulpdu_size:16;
885 - uint32_t rqes_posted; /* bits 31-1 - reservered */
886 - /* bit 0 - set if RECV posted */
887 - uint32_t ord; /* 6 */
889 - uint64_t qp_dma_addr; /* 7 */
890 - uint32_t qp_dma_size; /* 8 */
895 - struct t3_send_wr send;
896 - struct t3_rdma_write_wr write;
897 - struct t3_rdma_read_wr read;
898 - struct t3_receive_wr recv;
899 - struct t3_local_inv_wr local_inv;
900 - struct t3_bind_mw_wr bind;
901 - struct t3_bypass_wr bypass;
902 - struct t3_rdma_init_wr init;
903 - struct t3_modify_qp_wr qp_mod;
907 -#define T3_SQ_CQE_FLIT 13
908 -#define T3_SQ_COOKIE_FLIT 14
910 -#define T3_RQ_COOKIE_FLIT 13
911 -#define T3_RQ_CQE_FLIT 14
913 -static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
914 - enum t3_wr_flags flags, uint8_t genbit,
915 - uint32_t tid, uint8_t len)
917 - wqe->op_seop_flags = htobe32(V_FW_RIWR_OP(op) |
918 - V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
919 - V_FW_RIWR_FLAGS(flags));
920 - udma_to_device_barrier();
921 - wqe->gen_tid_len = htobe32(V_FW_RIWR_GEN(genbit) | V_FW_RIWR_TID(tid) |
922 - V_FW_RIWR_LEN(len));
923 - /* 2nd gen bit... */
924 - ((union t3_wr *)wqe)->flit[15] = htobe64(genbit);
928 - * T3 ULP2_TX commands
930 -enum t3_utx_mem_op {
931 - T3_UTX_MEM_READ = 2,
932 - T3_UTX_MEM_WRITE = 3
935 -/* T3 MC7 RDMA TPT entry format */
938 - TPT_NON_SHARED_MR = 0x0,
939 - TPT_SHARED_MR = 0x1,
941 - TPT_MW_RELAXED_PROTECTION = 0x3
944 -enum tpt_addr_type {
950 - TPT_LOCAL_READ = 0x8,
951 - TPT_LOCAL_WRITE = 0x4,
952 - TPT_REMOTE_READ = 0x2,
953 - TPT_REMOTE_WRITE = 0x1
957 - uint32_t valid_stag_pdid;
958 - uint32_t flags_pagesize_qpid;
960 - uint32_t rsvd_pbl_addr;
963 - uint32_t va_low_or_fbo;
965 - uint32_t rsvd_bind_cnt_or_pstag;
966 - uint32_t rsvd_pbl_size;
969 -#define S_TPT_VALID 31
970 -#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
971 -#define F_TPT_VALID V_TPT_VALID(1U)
973 -#define S_TPT_STAG_KEY 23
974 -#define M_TPT_STAG_KEY 0xFF
975 -#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
976 -#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
978 -#define S_TPT_STAG_STATE 22
979 -#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
980 -#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
982 -#define S_TPT_STAG_TYPE 20
983 -#define M_TPT_STAG_TYPE 0x3
984 -#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
985 -#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
987 -#define S_TPT_PDID 0
988 -#define M_TPT_PDID 0xFFFFF
989 -#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
990 -#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
992 -#define S_TPT_PERM 28
993 -#define M_TPT_PERM 0xF
994 -#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
995 -#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
997 -#define S_TPT_REM_INV_DIS 27
998 -#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
999 -#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
1001 -#define S_TPT_ADDR_TYPE 26
1002 -#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
1003 -#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
1005 -#define S_TPT_MW_BIND_ENABLE 25
1006 -#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
1007 -#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
1009 -#define S_TPT_PAGE_SIZE 20
1010 -#define M_TPT_PAGE_SIZE 0x1F
1011 -#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
1012 -#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
1014 -#define S_TPT_PBL_ADDR 0
1015 -#define M_TPT_PBL_ADDR 0x1FFFFFFF
1016 -#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
1017 -#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
1019 -#define S_TPT_QPID 0
1020 -#define M_TPT_QPID 0xFFFFF
1021 -#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
1022 -#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
1024 -#define S_TPT_PSTAG 0
1025 -#define M_TPT_PSTAG 0xFFFFFF
1026 -#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
1027 -#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
1029 -#define S_TPT_PBL_SIZE 0
1030 -#define M_TPT_PBL_SIZE 0xFFFFF
1031 -#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
1032 -#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
1038 - uint32_t header:32;
1040 - uint32_t wrid_hi_stag:32;
1041 - uint32_t wrid_low_msn:32;
1044 -#define S_CQE_OOO 31
1045 -#define M_CQE_OOO 0x1
1046 -#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
1047 -#define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)
1049 -#define S_CQE_QPID 12
1050 -#define M_CQE_QPID 0x7FFFF
1051 -#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
1052 -#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
1054 -#define S_CQE_SWCQE 11
1055 -#define M_CQE_SWCQE 0x1
1056 -#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
1057 -#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
1059 -#define S_CQE_GENBIT 10
1060 -#define M_CQE_GENBIT 0x1
1061 -#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
1062 -#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
1064 -#define S_CQE_STATUS 5
1065 -#define M_CQE_STATUS 0x1F
1066 -#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
1067 -#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
1069 -#define S_CQE_TYPE 4
1070 -#define M_CQE_TYPE 0x1
1071 -#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
1072 -#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
1074 -#define S_CQE_OPCODE 0
1075 -#define M_CQE_OPCODE 0xF
1076 -#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
1077 -#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
1079 -#define SW_CQE(x) (G_CQE_SWCQE(be32toh((x).header)))
1080 -#define CQE_OOO(x) (G_CQE_OOO(be32toh((x).header)))
1081 -#define CQE_QPID(x) (G_CQE_QPID(be32toh((x).header)))
1082 -#define CQE_GENBIT(x) (G_CQE_GENBIT(be32toh((x).header)))
1083 -#define CQE_TYPE(x) (G_CQE_TYPE(be32toh((x).header)))
1084 -#define SQ_TYPE(x) (CQE_TYPE((x)))
1085 -#define RQ_TYPE(x) (!CQE_TYPE((x)))
1086 -#define CQE_STATUS(x) (G_CQE_STATUS(be32toh((x).header)))
1087 -#define CQE_OPCODE(x) (G_CQE_OPCODE(be32toh((x).header)))
1089 -#define CQE_LEN(x) (be32toh((x).len))
1091 -#define CQE_WRID_HI(x) (be32toh((x).wrid_hi_stag))
1092 -#define CQE_WRID_LOW(x) (be32toh((x).wrid_low_msn))
1094 -/* used for RQ completion processing */
1095 -#define CQE_WRID_STAG(x) (be32toh((x).wrid_hi_stag))
1096 -#define CQE_WRID_MSN(x) (be32toh((x).wrid_low_msn))
1098 -/* used for SQ completion processing */
1099 -#define CQE_WRID_SQ_WPTR(x) ((x).wrid_hi_stag)
1100 -#define CQE_WRID_WPTR(x) ((x).wrid_low_msn)
1102 -#define TPT_ERR_SUCCESS 0x0
1103 -#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
1104 - /* STAG is offlimt, being 0, */
1105 - /* or STAG_key mismatch */
1106 -#define TPT_ERR_PDID 0x2 /* PDID mismatch */
1107 -#define TPT_ERR_QPID 0x3 /* QPID mismatch */
1108 -#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
1109 -#define TPT_ERR_WRAP 0x5 /* Wrap error */
1110 -#define TPT_ERR_BOUND 0x6 /* base and bounds voilation */
1111 -#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
1112 - /* shared memory region */
1113 -#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
1114 - /* shared memory region */
1115 -#define TPT_ERR_ECC 0x9 /* ECC error detected */
1116 -#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
1117 - /* reading PSTAG for a MW */
1119 -#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
1120 - /* software error */
1121 -#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
1122 -#define TPT_ERR_CRC 0x10 /* CRC error */
1123 -#define TPT_ERR_MARKER 0x11 /* Marker error */
1124 -#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
1125 -#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
1126 -#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
1127 -#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
1128 -#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
1129 -#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
1130 -#define TPT_ERR_MSN 0x18 /* MSN error */
1131 -#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
1132 -#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
1134 -#define TPT_ERR_MSN_GAP 0x1B
1135 -#define TPT_ERR_MSN_RANGE 0x1C
1136 -#define TPT_ERR_IRD_OVERFLOW 0x1D
1137 -#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
1138 - /* software error */
1139 -#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
1144 - struct t3_cqe cqe;
1146 - uint32_t read_len;
1153 - * A T3 WQ implements both the SQ and RQ.
1156 - union t3_wr *queue; /* DMA Mapped work queue */
1157 - uint32_t error; /* 1 once we go to ERROR */
1159 - uint32_t wptr; /* idx to next available WR slot */
1160 - uint32_t size_log2; /* total wq size */
1161 - struct t3_swsq *sq; /* SW SQ */
1162 - struct t3_swsq *oldest_read; /* tracks oldest pending read */
1163 - uint32_t sq_wptr; /* sq_wptr - sq_rptr == count of */
1164 - uint32_t sq_rptr; /* pending wrs */
1165 - uint32_t sq_size_log2; /* sq size */
1166 - uint64_t *rq; /* SW RQ (holds consumer wr_ids) */
1167 - uint32_t rq_wptr; /* rq_wptr - rq_rptr == count of */
1168 - uint32_t rq_rptr; /* pending wrs */
1169 - uint32_t rq_size_log2; /* rq size */
1170 - volatile uint32_t *doorbell; /* mapped adapter doorbell register */
1178 - uint32_t size_log2;
1179 - struct t3_cqe *queue;
1180 - struct t3_cqe *sw_queue;
1186 -static inline unsigned t3_wq_depth(struct t3_wq *wq)
1188 - return (1UL<<wq->size_log2);
1191 -static inline unsigned t3_sq_depth(struct t3_wq *wq)
1193 - return (1UL<<wq->sq_size_log2);
1196 -static inline unsigned t3_rq_depth(struct t3_wq *wq)
1198 - return (1UL<<wq->rq_size_log2);
1201 -static inline unsigned t3_cq_depth(struct t3_cq *cq)
1203 - return (1UL<<cq->size_log2);
1206 -extern unsigned long iwch_page_size;
1207 -extern unsigned long iwch_page_shift;
1208 -extern unsigned long iwch_page_mask;
1210 -#define PAGE_ALIGN(x) (((x) + iwch_page_mask) & ~iwch_page_mask)
1212 -static inline unsigned t3_wq_memsize(struct t3_wq *wq)
1214 - return PAGE_ALIGN((1UL<<wq->size_log2) * sizeof (union t3_wr));
1217 -static inline unsigned t3_cq_memsize(struct t3_cq *cq)
1219 - return cq->memsize;
1222 -static inline unsigned t3_mmid(uint32_t stag)
1227 -struct t3_cq_status_page {
1231 -static inline int t3_cq_in_error(struct t3_cq *cq)
1233 - return ((struct t3_cq_status_page *)
1234 - &cq->queue[1 << cq->size_log2])->cq_err;
1237 -static inline void t3_set_cq_in_error(struct t3_cq *cq)
1239 - ((struct t3_cq_status_page *)
1240 - &cq->queue[1 << cq->size_log2])->cq_err = 1;
1243 -static inline void t3_reset_cq_in_error(struct t3_cq *cq)
1245 - ((struct t3_cq_status_page *)
1246 - &cq->queue[1 << cq->size_log2])->cq_err = 0;
1249 -static inline int t3_wq_in_error(struct t3_wq *wq)
1252 - * The kernel sets bit 0 in the first WR of the WQ memory
1253 - * when the QP moves out of RTS...
1255 - return (wq->queue->flit[13] & 1);
1258 -static inline void t3_set_wq_in_error(struct t3_wq *wq)
1260 - wq->queue->flit[13] |= 1;
1263 -static inline int t3_wq_db_enabled(struct t3_wq *wq)
1265 - return !(wq->queue->flit[13] & 2);
1268 -#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
1271 -static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
1273 - struct t3_cqe *cqe;
1275 - cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
1276 - if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
1281 -static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
1283 - struct t3_cqe *cqe;
1285 - if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
1286 - cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
1292 -static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
1294 - struct t3_cqe *cqe;
1296 - if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
1297 - cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
1300 - cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
1301 - if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
1307 - * Return a ptr to the next read wr in the SWSQ or NULL.
1309 -static inline struct t3_swsq *next_read_wr(struct t3_wq *wq)
1311 - uint32_t rptr = wq->oldest_read - wq->sq + 1;
1312 - int count = Q_COUNT(rptr, wq->sq_wptr);
1313 - struct t3_swsq *sqp;
1316 - sqp = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
1318 - if (sqp->opcode == T3_READ_REQ)
1326 diff --git a/providers/cxgb3/firmware_exports.h b/providers/cxgb3/firmware_exports.h
1327 deleted file mode 100644
1328 index 831140a4c..000000000
1329 --- a/providers/cxgb3/firmware_exports.h
1333 - * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
1335 - * This software is available to you under a choice of one of two
1336 - * licenses. You may choose to be licensed under the terms of the GNU
1337 - * General Public License (GPL) Version 2, available from the file
1338 - * COPYING in the main directory of this source tree, or the
1339 - * OpenIB.org BSD license below:
1341 - * Redistribution and use in source and binary forms, with or
1342 - * without modification, are permitted provided that the following
1343 - * conditions are met:
1345 - * - Redistributions of source code must retain the above
1346 - * copyright notice, this list of conditions and the following
1349 - * - Redistributions in binary form must reproduce the above
1350 - * copyright notice, this list of conditions and the following
1351 - * disclaimer in the documentation and/or other materials
1352 - * provided with the distribution.
1354 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1355 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1356 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1357 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
1358 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
1359 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
1360 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1363 -#ifndef _FIRMWARE_EXPORTS_H_
1364 -#define _FIRMWARE_EXPORTS_H_
1366 -/* WR OPCODES supported by the firmware.
1368 -#define FW_WROPCODE_FORWARD 0x01
1369 -#define FW_WROPCODE_BYPASS 0x05
1371 -#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
1373 -#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
1374 -#define FW_WROPCODE_ULPTX_MEM_READ 0x02
1375 -#define FW_WROPCODE_ULPTX_PKT 0x04
1376 -#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
1378 -#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
1380 -#define FW_WROPCODE_TOE_GETTCB_RPL 0x08
1381 -#define FW_WROPCODE_TOE_CLOSE_CON 0x09
1382 -#define FW_WROPCODE_TOE_TP_ABORT_CON_REQ 0x0A
1383 -#define FW_WROPCODE_TOE_HOST_ABORT_CON_RPL 0x0F
1384 -#define FW_WROPCODE_TOE_HOST_ABORT_CON_REQ 0x0B
1385 -#define FW_WROPCODE_TOE_TP_ABORT_CON_RPL 0x0C
1386 -#define FW_WROPCODE_TOE_TX_DATA 0x0D
1387 -#define FW_WROPCODE_TOE_TX_DATA_ACK 0x0E
1389 -#define FW_WROPCODE_RI_RDMA_INIT 0x10
1390 -#define FW_WROPCODE_RI_RDMA_WRITE 0x11
1391 -#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
1392 -#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
1393 -#define FW_WROPCODE_RI_SEND 0x14
1394 -#define FW_WROPCODE_RI_TERMINATE 0x15
1395 -#define FW_WROPCODE_RI_RDMA_READ 0x16
1396 -#define FW_WROPCODE_RI_RECEIVE 0x17
1397 -#define FW_WROPCODE_RI_BIND_MW 0x18
1398 -#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
1399 -#define FW_WROPCODE_RI_LOCAL_INV 0x1A
1400 -#define FW_WROPCODE_RI_MODIFY_QP 0x1B
1401 -#define FW_WROPCODE_RI_BYPASS 0x1C
1403 -#define FW_WROPOCDE_RSVD 0x1E
1405 -#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
1407 -#define FW_WROPCODE_MNGT 0x1D
1408 -#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
1410 -/* Maximum size of a WR sent from the host, limited by the SGE.
1412 - * Note: WR coming from ULP or TP are only limited by CIM.
1414 -#define FW_WR_SIZE 128
1416 -/* Maximum number of outstanding WRs sent from the host. Value must be
1417 - * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by TOM to
1418 - * limit the number of WRs per connection.
1421 -# define FW_WR_NUM 16
1423 -# define FW_WR_NUM 7
1426 -/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
1427 - * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
1428 - * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
1430 - * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
1431 - * to RESP Queue[i].
1433 -#define FW_TUNNEL_NUM 8
1434 -#define FW_TUNNEL_SGEEC_START 8
1435 -#define FW_TUNNEL_TID_START 65544
1438 -/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
1439 - * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
1440 - * (or 'uP Token') FW_CTRL_TID_START.
1442 - * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
1444 -#define FW_CTRL_NUM 8
1445 -#define FW_CTRL_SGEEC_START 65528
1446 -#define FW_CTRL_TID_START 65536
1448 -/* FW_TOE_NUM corresponds to the number of supported TOE Queues. These queues
1449 - * must start at SGE Egress Context FW_TOE_SGEEC_START.
1451 - * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
1452 - * TOE Queues, as the host is responsible for providing the correct TID in
1455 - * Ingress Trafffic for TOE Queue[i] is sent to RESP Queue[i].
1457 -#define FW_TOE_NUM 8
1458 -#define FW_TOE_SGEEC_START 0
1463 -#define FW_RI_NUM 1
1464 -#define FW_RI_SGEEC_START 65527
1465 -#define FW_RI_TID_START 65552
1470 -#define FW_RX_PKT_NUM 1
1471 -#define FW_RX_PKT_TID_START 65553
1473 -/* FW_WRC_NUM corresponds to the number of Work Request Context that supported
1474 - * by the firmware.
1476 -#define FW_WRC_NUM (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM +\
1477 - FW_RI_NUM + FW_RX_PKT_NUM)
1479 -#endif /* _FIRMWARE_EXPORTS_H_ */
1480 diff --git a/providers/cxgb3/iwch-abi.h b/providers/cxgb3/iwch-abi.h
1481 deleted file mode 100644
1482 index 047f84b7a..000000000
1483 --- a/providers/cxgb3/iwch-abi.h
1487 - * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
1489 - * This software is available to you under a choice of one of two
1490 - * licenses. You may choose to be licensed under the terms of the GNU
1491 - * General Public License (GPL) Version 2, available from the file
1492 - * COPYING in the main directory of this source tree, or the
1493 - * OpenIB.org BSD license below:
1495 - * Redistribution and use in source and binary forms, with or
1496 - * without modification, are permitted provided that the following
1497 - * conditions are met:
1499 - * - Redistributions of source code must retain the above
1500 - * copyright notice, this list of conditions and the following
1503 - * - Redistributions in binary form must reproduce the above
1504 - * copyright notice, this list of conditions and the following
1505 - * disclaimer in the documentation and/or other materials
1506 - * provided with the distribution.
1508 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1509 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1510 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1511 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
1512 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
1513 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
1514 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1520 -#include <stdint.h>
1521 -#include <infiniband/kern-abi.h>
1522 -#include <rdma/cxgb3-abi.h>
1523 -#include <kernel-abi/cxgb3-abi.h>
1525 -DECLARE_DRV_CMD(uiwch_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD,
1526 - empty, iwch_alloc_pd_resp);
1527 -DECLARE_DRV_CMD(uiwch_create_cq, IB_USER_VERBS_CMD_CREATE_CQ,
1528 - iwch_create_cq_req, iwch_create_cq_resp);
1529 -DECLARE_DRV_CMD(uiwch_create_qp, IB_USER_VERBS_CMD_CREATE_QP,
1530 - empty, iwch_create_qp_resp);
1531 -DECLARE_DRV_CMD(uiwch_alloc_ucontext, IB_USER_VERBS_CMD_GET_CONTEXT,
1533 -DECLARE_DRV_CMD(uiwch_reg_mr, IB_USER_VERBS_CMD_REG_MR,
1534 - empty, iwch_reg_user_mr_resp);
1536 -#endif /* IWCH_ABI_H */
1537 diff --git a/providers/cxgb3/iwch.c b/providers/cxgb3/iwch.c
1538 deleted file mode 100644
1539 index 6f3c8b9f1..000000000
1540 --- a/providers/cxgb3/iwch.c
1544 - * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
1546 - * This software is available to you under a choice of one of two
1547 - * licenses. You may choose to be licensed under the terms of the GNU
1548 - * General Public License (GPL) Version 2, available from the file
1549 - * COPYING in the main directory of this source tree, or the
1550 - * OpenIB.org BSD license below:
1552 - * Redistribution and use in source and binary forms, with or
1553 - * without modification, are permitted provided that the following
1554 - * conditions are met:
1556 - * - Redistributions of source code must retain the above
1557 - * copyright notice, this list of conditions and the following
1560 - * - Redistributions in binary form must reproduce the above
1561 - * copyright notice, this list of conditions and the following
1562 - * disclaimer in the documentation and/or other materials
1563 - * provided with the distribution.
1565 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1566 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1567 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1568 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
1569 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
1570 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
1571 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1574 -#include <config.h>
1577 -#include <stdlib.h>
1578 -#include <unistd.h>
1580 -#include <sys/mman.h>
1581 -#include <pthread.h>
1582 -#include <string.h>
1585 -#include "iwch-abi.h"
1587 -#define PCI_VENDOR_ID_CHELSIO 0x1425
1588 -#define PCI_DEVICE_ID_CHELSIO_PE9000_2C 0x0020
1589 -#define PCI_DEVICE_ID_CHELSIO_T302E 0x0021
1590 -#define PCI_DEVICE_ID_CHELSIO_T310E 0x0022
1591 -#define PCI_DEVICE_ID_CHELSIO_T320X 0x0023
1592 -#define PCI_DEVICE_ID_CHELSIO_T302X 0x0024
1593 -#define PCI_DEVICE_ID_CHELSIO_T320E 0x0025
1594 -#define PCI_DEVICE_ID_CHELSIO_T310X 0x0026
1595 -#define PCI_DEVICE_ID_CHELSIO_T3B10 0x0030
1596 -#define PCI_DEVICE_ID_CHELSIO_T3B20 0x0031
1597 -#define PCI_DEVICE_ID_CHELSIO_T3B02 0x0032
1598 -#define PCI_DEVICE_ID_CHELSIO_T3C20 0x0035
1599 -#define PCI_DEVICE_ID_CHELSIO_S320E 0x0036
1601 -#define HCA(v, d, t) \
1602 - VERBS_PCI_MATCH(PCI_VENDOR_ID_##v, PCI_DEVICE_ID_CHELSIO_##d, \
1603 - (void *)(CHELSIO_##t))
1604 -static const struct verbs_match_ent hca_table[] = {
1605 - HCA(CHELSIO, PE9000_2C, T3B),
1606 - HCA(CHELSIO, T302E, T3A),
1607 - HCA(CHELSIO, T302X, T3A),
1608 - HCA(CHELSIO, T310E, T3A),
1609 - HCA(CHELSIO, T310X, T3A),
1610 - HCA(CHELSIO, T320E, T3A),
1611 - HCA(CHELSIO, T320X, T3A),
1612 - HCA(CHELSIO, T3B10, T3B),
1613 - HCA(CHELSIO, T3B20, T3B),
1614 - HCA(CHELSIO, T3B02, T3B),
1615 - HCA(CHELSIO, T3C20, T3B),
1616 - HCA(CHELSIO, S320E, T3B),
1620 -static const struct verbs_context_ops iwch_ctx_common_ops = {
1621 - .query_device = iwch_query_device,
1622 - .query_port = iwch_query_port,
1623 - .alloc_pd = iwch_alloc_pd,
1624 - .dealloc_pd = iwch_free_pd,
1625 - .reg_mr = iwch_reg_mr,
1626 - .dereg_mr = iwch_dereg_mr,
1627 - .create_cq = iwch_create_cq,
1628 - .resize_cq = iwch_resize_cq,
1629 - .destroy_cq = iwch_destroy_cq,
1630 - .create_srq = iwch_create_srq,
1631 - .modify_srq = iwch_modify_srq,
1632 - .destroy_srq = iwch_destroy_srq,
1633 - .create_qp = iwch_create_qp,
1634 - .modify_qp = iwch_modify_qp,
1635 - .destroy_qp = iwch_destroy_qp,
1636 - .query_qp = iwch_query_qp,
1637 - .create_ah = iwch_create_ah,
1638 - .destroy_ah = iwch_destroy_ah,
1639 - .attach_mcast = iwch_attach_mcast,
1640 - .detach_mcast = iwch_detach_mcast,
1641 - .post_srq_recv = iwch_post_srq_recv,
1642 - .req_notify_cq = iwch_arm_cq,
1645 -static const struct verbs_context_ops iwch_ctx_t3a_ops = {
1646 - .poll_cq = t3a_poll_cq,
1647 - .post_recv = t3a_post_recv,
1648 - .post_send = t3a_post_send,
1651 -static const struct verbs_context_ops iwch_ctx_t3b_ops = {
1652 - .async_event = t3b_async_event,
1653 - .poll_cq = t3b_poll_cq,
1654 - .post_recv = t3b_post_recv,
1655 - .post_send = t3b_post_send,
1658 -unsigned long iwch_page_size;
1659 -unsigned long iwch_page_shift;
1660 -unsigned long iwch_page_mask;
1662 -static struct verbs_context *iwch_alloc_context(struct ibv_device *ibdev,
1664 - void *private_data)
1666 - struct iwch_context *context;
1667 - struct ibv_get_context cmd;
1668 - struct uiwch_alloc_ucontext_resp resp;
1669 - struct iwch_device *rhp = to_iwch_dev(ibdev);
1671 - context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
1672 - RDMA_DRIVER_CXGB3);
1676 - if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof cmd,
1677 - &resp.ibv_resp, sizeof resp))
1680 - verbs_set_ops(&context->ibv_ctx, &iwch_ctx_common_ops);
1682 - switch (rhp->hca_type) {
1684 - PDBG("%s T3B device\n", __FUNCTION__);
1685 - verbs_set_ops(&context->ibv_ctx, &iwch_ctx_t3b_ops);
1688 - PDBG("%s T3A device\n", __FUNCTION__);
1689 - verbs_set_ops(&context->ibv_ctx, &iwch_ctx_t3a_ops);
1692 - PDBG("%s unknown hca type %d\n", __FUNCTION__, rhp->hca_type);
1697 - return &context->ibv_ctx;
1700 - verbs_uninit_context(&context->ibv_ctx);
1705 -static void iwch_free_context(struct ibv_context *ibctx)
1707 - struct iwch_context *context = to_iwch_ctx(ibctx);
1709 - verbs_uninit_context(&context->ibv_ctx);
1713 -static void iwch_uninit_device(struct verbs_device *verbs_device)
1715 - struct iwch_device *dev = to_iwch_dev(&verbs_device->device);
1720 -static bool iwch_device_match(struct verbs_sysfs_dev *sysfs_dev)
1722 - char value[32], *cp;
1723 - unsigned int fw_maj, fw_min;
1725 - /* Rely on the core code to match PCI devices */
1726 - if (!sysfs_dev->match)
1730 - * Verify that the firmware major number matches. Major number
1731 - * mismatches are fatal. Minor number mismatches are tolerated.
1733 - if (ibv_get_fw_ver(value, sizeof(value), sysfs_dev))
1736 - cp = strtok(value+1, ".");
1737 - sscanf(cp, "%i", &fw_maj);
1738 - cp = strtok(NULL, ".");
1739 - sscanf(cp, "%i", &fw_min);
1741 - if (fw_maj < FW_MAJ) {
1742 - fprintf(stderr, "libcxgb3: Fatal firmware version mismatch. "
1743 - "Firmware major number is %u and libcxgb3 needs %u.\n",
1749 - DBGLOG("libcxgb3");
1751 - if ((signed int)fw_min < FW_MIN) {
1752 - PDBG("libcxgb3: non-fatal firmware version mismatch. "
1753 - "Firmware minor number is %u and libcxgb3 needs %u.\n",
1761 -static struct verbs_device *iwch_device_alloc(struct verbs_sysfs_dev *sysfs_dev)
1763 - struct iwch_device *dev;
1765 - dev = calloc(1, sizeof(*dev));
1769 - pthread_spin_init(&dev->lock, PTHREAD_PROCESS_PRIVATE);
1770 - dev->hca_type = (uintptr_t)sysfs_dev->match->driver_data;
1771 - dev->abi_version = sysfs_dev->abi_ver;
1773 - iwch_page_size = sysconf(_SC_PAGESIZE);
1774 - iwch_page_shift = long_log2(iwch_page_size);
1775 - iwch_page_mask = iwch_page_size - 1;
1777 - dev->mmid2ptr = calloc(T3_MAX_NUM_STAG, sizeof(void *));
1778 - if (!dev->mmid2ptr) {
1781 - dev->qpid2ptr = calloc(T3_MAX_NUM_QP, sizeof(void *));
1782 - if (!dev->qpid2ptr) {
1785 - dev->cqid2ptr = calloc(T3_MAX_NUM_CQ, sizeof(void *));
1786 - if (!dev->cqid2ptr)
1789 - return &dev->ibv_dev;
1792 - free(dev->qpid2ptr);
1794 - free(dev->mmid2ptr);
1800 -static const struct verbs_device_ops iwch_dev_ops = {
1802 - .match_min_abi_version = 0,
1803 - .match_max_abi_version = ABI_VERS,
1804 - .match_table = hca_table,
1805 - .match_device = iwch_device_match,
1806 - .alloc_device = iwch_device_alloc,
1807 - .uninit_device = iwch_uninit_device,
1808 - .alloc_context = iwch_alloc_context,
1809 - .free_context = iwch_free_context,
1811 -PROVIDER_DRIVER(cxgb3, iwch_dev_ops);
1812 diff --git a/providers/cxgb3/iwch.h b/providers/cxgb3/iwch.h
1813 deleted file mode 100644
1814 index c7d85d3aa..000000000
1815 --- a/providers/cxgb3/iwch.h
1819 - * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
1821 - * This software is available to you under a choice of one of two
1822 - * licenses. You may choose to be licensed under the terms of the GNU
1823 - * General Public License (GPL) Version 2, available from the file
1824 - * COPYING in the main directory of this source tree, or the
1825 - * OpenIB.org BSD license below:
1827 - * Redistribution and use in source and binary forms, with or
1828 - * without modification, are permitted provided that the following
1829 - * conditions are met:
1831 - * - Redistributions of source code must retain the above
1832 - * copyright notice, this list of conditions and the following
1835 - * - Redistributions in binary form must reproduce the above
1836 - * copyright notice, this list of conditions and the following
1837 - * disclaimer in the documentation and/or other materials
1838 - * provided with the distribution.
1840 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1841 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1842 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1843 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
1844 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
1845 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
1846 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1852 -#include <pthread.h>
1853 -#include <inttypes.h>
1854 -#include <stddef.h>
1856 -#include <infiniband/driver.h>
1857 -#include <util/udma_barrier.h>
1859 -#include "cxio_wr.h"
1861 -enum iwch_hca_type {
1870 -struct iwch_device {
1871 - struct verbs_device ibv_dev;
1872 - enum iwch_hca_type hca_type;
1873 - struct iwch_mr **mmid2ptr;
1874 - struct iwch_qp **qpid2ptr;
1875 - struct iwch_cq **cqid2ptr;
1876 - pthread_spinlock_t lock;
1880 -static inline int t3b_device(struct iwch_device *dev)
1882 - return (dev->hca_type == CHELSIO_T3B);
1885 -static inline int t3a_device(struct iwch_device *dev)
1887 - return (dev->hca_type == CHELSIO_T3A);
1890 -struct iwch_context {
1891 - struct verbs_context ibv_ctx;
1895 - struct ibv_pd ibv_pd;
1899 - struct verbs_mr vmr;
1901 - uint32_t page_size;
1902 - uint32_t pbl_addr;
1907 - struct ibv_cq ibv_cq;
1908 - struct iwch_device *rhp;
1910 - pthread_spinlock_t lock;
1914 - struct ibv_qp ibv_qp;
1915 - struct iwch_device *rhp;
1917 - pthread_spinlock_t lock;
1921 -#define to_iwch_xxx(xxx, type) \
1922 - container_of(ib##xxx, struct iwch_##type, ibv_##xxx)
1924 -static inline struct iwch_device *to_iwch_dev(struct ibv_device *ibdev)
1926 - return container_of(ibdev, struct iwch_device, ibv_dev.device);
1929 -static inline struct iwch_context *to_iwch_ctx(struct ibv_context *ibctx)
1931 - return container_of(ibctx, struct iwch_context, ibv_ctx.context);
1934 -static inline struct iwch_pd *to_iwch_pd(struct ibv_pd *ibpd)
1936 - return to_iwch_xxx(pd, pd);
1939 -static inline struct iwch_cq *to_iwch_cq(struct ibv_cq *ibcq)
1941 - return to_iwch_xxx(cq, cq);
1944 -static inline struct iwch_qp *to_iwch_qp(struct ibv_qp *ibqp)
1946 - return to_iwch_xxx(qp, qp);
1949 -static inline struct iwch_mr *to_iwch_mr(struct verbs_mr *vmr)
1951 - return container_of(vmr, struct iwch_mr, vmr);
1954 -static inline unsigned long long_log2(unsigned long x)
1956 - unsigned long r = 0;
1957 - for (x >>= 1; x > 0; x >>= 1)
1962 -extern int iwch_query_device(struct ibv_context *context,
1963 - struct ibv_device_attr *attr);
1964 -extern int iwch_query_port(struct ibv_context *context, uint8_t port,
1965 - struct ibv_port_attr *attr);
1967 -extern struct ibv_pd *iwch_alloc_pd(struct ibv_context *context);
1968 -extern int iwch_free_pd(struct ibv_pd *pd);
1970 -extern struct ibv_mr *iwch_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
1971 - uint64_t hca_va, int access);
1972 -extern int iwch_dereg_mr(struct verbs_mr *mr);
1974 -struct ibv_cq *iwch_create_cq(struct ibv_context *context, int cqe,
1975 - struct ibv_comp_channel *channel,
1977 -extern int iwch_resize_cq(struct ibv_cq *cq, int cqe);
1978 -extern int iwch_destroy_cq(struct ibv_cq *cq);
1979 -extern int t3a_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
1980 -extern int t3b_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
1981 -extern int iwch_arm_cq(struct ibv_cq *cq, int solicited);
1982 -extern void iwch_cq_event(struct ibv_cq *cq);
1983 -extern void iwch_init_cq_buf(struct iwch_cq *cq, int nent);
1985 -extern struct ibv_srq *iwch_create_srq(struct ibv_pd *pd,
1986 - struct ibv_srq_init_attr *attr);
1987 -extern int iwch_modify_srq(struct ibv_srq *srq,
1988 - struct ibv_srq_attr *attr,
1990 -extern int iwch_destroy_srq(struct ibv_srq *srq);
1991 -extern int iwch_post_srq_recv(struct ibv_srq *ibsrq,
1992 - struct ibv_recv_wr *wr,
1993 - struct ibv_recv_wr **bad_wr);
1995 -extern struct ibv_qp *iwch_create_qp(struct ibv_pd *pd,
1996 - struct ibv_qp_init_attr *attr);
1997 -extern int iwch_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
1999 -extern int iwch_destroy_qp(struct ibv_qp *qp);
2000 -extern int iwch_query_qp(struct ibv_qp *qp,
2001 - struct ibv_qp_attr *attr,
2003 - struct ibv_qp_init_attr *init_attr);
2004 -extern void iwch_flush_qp(struct iwch_qp *qhp);
2005 -extern void iwch_flush_qps(struct iwch_device *dev);
2006 -extern int t3a_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
2007 - struct ibv_send_wr **bad_wr);
2008 -extern int t3b_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
2009 - struct ibv_send_wr **bad_wr);
2010 -extern int t3a_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
2011 - struct ibv_recv_wr **bad_wr);
2012 -extern int t3b_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
2013 - struct ibv_recv_wr **bad_wr);
2014 -extern struct ibv_ah *iwch_create_ah(struct ibv_pd *pd,
2015 - struct ibv_ah_attr *ah_attr);
2016 -extern int iwch_destroy_ah(struct ibv_ah *ah);
2017 -extern int iwch_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
2019 -extern int iwch_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
2021 -extern void t3b_async_event(struct ibv_context *context,
2022 - struct ibv_async_event *event);
2024 -#include <syslog.h>
2025 -#define DBGLOG(s) openlog(s, LOG_NDELAY|LOG_PID, LOG_LOCAL7)
2026 -#define PDBG(fmt, args...) do {syslog(LOG_DEBUG, fmt, ##args);} while (0)
2029 -#define PDBG(fmt, args...) do {} while (0)
2035 -#endif /* IWCH_H */
2036 diff --git a/providers/cxgb3/qp.c b/providers/cxgb3/qp.c
2037 deleted file mode 100644
2038 index 4a1e7397c..000000000
2039 --- a/providers/cxgb3/qp.c
2043 - * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
2045 - * This software is available to you under a choice of one of two
2046 - * licenses. You may choose to be licensed under the terms of the GNU
2047 - * General Public License (GPL) Version 2, available from the file
2048 - * COPYING in the main directory of this source tree, or the
2049 - * OpenIB.org BSD license below:
2051 - * Redistribution and use in source and binary forms, with or
2052 - * without modification, are permitted provided that the following
2053 - * conditions are met:
2055 - * - Redistributions of source code must retain the above
2056 - * copyright notice, this list of conditions and the following
2059 - * - Redistributions in binary form must reproduce the above
2060 - * copyright notice, this list of conditions and the following
2061 - * disclaimer in the documentation and/or other materials
2062 - * provided with the distribution.
2064 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2065 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2066 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2067 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2068 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2069 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
2070 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
2073 -#include <config.h>
2075 -#include <stdlib.h>
2076 -#include <pthread.h>
2077 -#include <string.h>
2082 -#define ROUNDUP8(a) (((a) + 7) & ~7)
2084 -static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ibv_send_wr *wr,
2085 - uint8_t *flit_cnt)
2089 - if (wr->num_sge > T3_MAX_SGE)
2091 - if (wr->send_flags & IBV_SEND_SOLICITED)
2092 - wqe->send.rdmaop = T3_SEND_WITH_SE;
2094 - wqe->send.rdmaop = T3_SEND;
2095 - wqe->send.rem_stag = 0;
2096 - wqe->send.reserved = 0;
2097 - if ((wr->send_flags & IBV_SEND_INLINE) || wr->num_sge == 0) {
2100 - wqe->send.plen = 0;
2101 - datap = (uint8_t *)&wqe->send.sgl[0];
2102 - wqe->send.num_sgle = 0; /* indicates in-line data */
2103 - for (i = 0; i < wr->num_sge; i++) {
2104 - if ((wqe->send.plen + wr->sg_list[i].length) >
2107 - wqe->send.plen += wr->sg_list[i].length;
2109 - (void *)(unsigned long)wr->sg_list[i].addr,
2110 - wr->sg_list[i].length);
2111 - datap += wr->sg_list[i].length;
2113 - *flit_cnt = 4 + (ROUNDUP8(wqe->send.plen) >> 3);
2114 - wqe->send.plen = htobe32(wqe->send.plen);
2116 - wqe->send.plen = 0;
2117 - for (i = 0; i < wr->num_sge; i++) {
2118 - if ((wqe->send.plen + wr->sg_list[i].length) <
2122 - wqe->send.plen += wr->sg_list[i].length;
2123 - wqe->send.sgl[i].stag =
2124 - htobe32(wr->sg_list[i].lkey);
2125 - wqe->send.sgl[i].len =
2126 - htobe32(wr->sg_list[i].length);
2127 - wqe->send.sgl[i].to = htobe64(wr->sg_list[i].addr);
2129 - wqe->send.plen = htobe32(wqe->send.plen);
2130 - wqe->send.num_sgle = htobe32(wr->num_sge);
2131 - *flit_cnt = 4 + ((wr->num_sge) << 1);
2136 -static inline int iwch_build_rdma_write(union t3_wr *wqe,
2137 - struct ibv_send_wr *wr,
2138 - uint8_t *flit_cnt)
2142 - if (wr->num_sge > T3_MAX_SGE)
2144 - wqe->write.rdmaop = T3_RDMA_WRITE;
2145 - wqe->write.reserved = 0;
2146 - wqe->write.stag_sink = htobe32(wr->wr.rdma.rkey);
2147 - wqe->write.to_sink = htobe64(wr->wr.rdma.remote_addr);
2149 - wqe->write.num_sgle = wr->num_sge;
2151 - if ((wr->send_flags & IBV_SEND_INLINE) || wr->num_sge == 0) {
2154 - wqe->write.plen = 0;
2155 - datap = (uint8_t *)&wqe->write.sgl[0];
2156 - wqe->write.num_sgle = 0; /* indicates in-line data */
2157 - for (i = 0; i < wr->num_sge; i++) {
2158 - if ((wqe->write.plen + wr->sg_list[i].length) >
2161 - wqe->write.plen += wr->sg_list[i].length;
2163 - (void *)(unsigned long)wr->sg_list[i].addr,
2164 - wr->sg_list[i].length);
2165 - datap += wr->sg_list[i].length;
2167 - *flit_cnt = 5 + (ROUNDUP8(wqe->write.plen) >> 3);
2168 - wqe->write.plen = htobe32(wqe->write.plen);
2170 - wqe->write.plen = 0;
2171 - for (i = 0; i < wr->num_sge; i++) {
2172 - if ((wqe->write.plen + wr->sg_list[i].length) <
2173 - wqe->write.plen) {
2176 - wqe->write.plen += wr->sg_list[i].length;
2177 - wqe->write.sgl[i].stag =
2178 - htobe32(wr->sg_list[i].lkey);
2179 - wqe->write.sgl[i].len =
2180 - htobe32(wr->sg_list[i].length);
2181 - wqe->write.sgl[i].to =
2182 - htobe64(wr->sg_list[i].addr);
2184 - wqe->write.plen = htobe32(wqe->write.plen);
2185 - wqe->write.num_sgle = htobe32(wr->num_sge);
2186 - *flit_cnt = 5 + ((wr->num_sge) << 1);
2191 -static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ibv_send_wr *wr,
2192 - uint8_t *flit_cnt)
2194 - if (wr->num_sge > 1)
2196 - wqe->read.rdmaop = T3_READ_REQ;
2197 - wqe->read.reserved = 0;
2198 - if (wr->num_sge == 1 && wr->sg_list[0].length > 0) {
2199 - wqe->read.rem_stag = htobe32(wr->wr.rdma.rkey);
2200 - wqe->read.rem_to = htobe64(wr->wr.rdma.remote_addr);
2201 - wqe->read.local_stag = htobe32(wr->sg_list[0].lkey);
2202 - wqe->read.local_len = htobe32(wr->sg_list[0].length);
2203 - wqe->read.local_to = htobe64(wr->sg_list[0].addr);
2206 - /* build passable 0B read request */
2207 - wqe->read.rem_stag = 2;
2208 - wqe->read.rem_to = 2;
2209 - wqe->read.local_stag = 2;
2210 - wqe->read.local_len = 0;
2211 - wqe->read.local_to = 2;
2213 - *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
2217 -int t3b_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
2218 - struct ibv_send_wr **bad_wr)
2221 - uint8_t t3_wr_flit_cnt;
2222 - enum t3_wr_opcode t3_wr_opcode = 0;
2223 - enum t3_wr_flags t3_wr_flags;
2224 - struct iwch_qp *qhp;
2228 - struct t3_swsq *sqp;
2230 - qhp = to_iwch_qp(ibqp);
2231 - pthread_spin_lock(&qhp->lock);
2232 - if (t3_wq_in_error(&qhp->wq)) {
2233 - iwch_flush_qp(qhp);
2234 - pthread_spin_unlock(&qhp->lock);
2237 - num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
2238 - qhp->wq.sq_size_log2);
2239 - if (num_wrs <= 0) {
2240 - pthread_spin_unlock(&qhp->lock);
2244 - if (num_wrs == 0) {
2249 - idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
2250 - wqe = (union t3_wr *) (qhp->wq.queue + idx);
2252 - if (wr->send_flags & IBV_SEND_SOLICITED)
2253 - t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
2254 - if (wr->send_flags & IBV_SEND_FENCE)
2255 - t3_wr_flags |= T3_READ_FENCE_FLAG;
2256 - if ((wr->send_flags & IBV_SEND_SIGNALED) || qhp->sq_sig_all)
2257 - t3_wr_flags |= T3_COMPLETION_FLAG;
2258 - sqp = qhp->wq.sq +
2259 - Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
2260 - switch (wr->opcode) {
2262 - t3_wr_opcode = T3_WR_SEND;
2263 - err = iwch_build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
2265 - case IBV_WR_RDMA_WRITE:
2266 - t3_wr_opcode = T3_WR_WRITE;
2267 - err = iwch_build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
2269 - case IBV_WR_RDMA_READ:
2270 - t3_wr_opcode = T3_WR_READ;
2272 - err = iwch_build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
2275 - sqp->read_len = wqe->read.local_len;
2276 - if (!qhp->wq.oldest_read)
2277 - qhp->wq.oldest_read = sqp;
2280 - PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
2288 - wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
2289 - sqp->wr_id = wr->wr_id;
2290 - sqp->opcode = wr2opcode(t3_wr_opcode);
2291 - sqp->sq_wptr = qhp->wq.sq_wptr;
2292 - sqp->complete = 0;
2293 - sqp->signaled = (wr->send_flags & IBV_SEND_SIGNALED);
2295 - build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
2296 - Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
2297 - 0, t3_wr_flit_cnt);
2298 - PDBG("%s cookie 0x%" PRIx64
2299 - " wq idx 0x%x swsq idx %ld opcode %d\n",
2300 - __FUNCTION__, wr->wr_id, idx,
2301 - Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
2306 - ++(qhp->wq.sq_wptr);
2308 - pthread_spin_unlock(&qhp->lock);
2309 - if (t3_wq_db_enabled(&qhp->wq))
2310 - RING_DOORBELL(qhp->wq.doorbell, qhp->wq.qpid);
2314 -int t3a_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
2315 - struct ibv_send_wr **bad_wr)
2318 - struct iwch_qp *qhp = to_iwch_qp(ibqp);
2320 - pthread_spin_lock(&qhp->lock);
2321 - ret = ibv_cmd_post_send(ibqp, wr, bad_wr);
2322 - pthread_spin_unlock(&qhp->lock);
2326 -static inline int iwch_build_rdma_recv(struct iwch_device *rhp,
2328 - struct ibv_recv_wr *wr)
2331 - if (wr->num_sge > T3_MAX_SGE)
2334 - wqe->recv.num_sgle = htobe32(wr->num_sge);
2335 - for (i = 0; i < wr->num_sge; i++) {
2336 - wqe->recv.sgl[i].stag = htobe32(wr->sg_list[i].lkey);
2337 - wqe->recv.sgl[i].len = htobe32(wr->sg_list[i].length);
2338 - wqe->recv.sgl[i].to = htobe64(wr->sg_list[i].addr);
2340 - for (; i < T3_MAX_SGE; i++) {
2341 - wqe->recv.sgl[i].stag = 0;
2342 - wqe->recv.sgl[i].len = 0;
2343 - wqe->recv.sgl[i].to = 0;
2348 -static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
2350 - struct t3_cqe cqe;
2352 - PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
2353 - wq, cq, cq->sw_rptr, cq->sw_wptr);
2354 - memset(&cqe, 0, sizeof(cqe));
2355 - cqe.header = V_CQE_STATUS(TPT_ERR_SWFLUSH) |
2356 - V_CQE_OPCODE(T3_SEND) |
2359 - V_CQE_QPID(wq->qpid) |
2360 - V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr, cq->size_log2));
2361 - cqe.header = htobe32(cqe.header);
2362 - *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
2366 -static void flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
2371 - PDBG("%s rq_rptr 0x%x rq_wptr 0x%x skip count %u\n", __FUNCTION__,
2372 - wq->rq_rptr, wq->rq_wptr, count);
2373 - ptr = wq->rq_rptr + count;
2374 - while (ptr++ != wq->rq_wptr) {
2375 - insert_recv_cqe(wq, cq);
2379 -static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
2380 - struct t3_swsq *sqp)
2382 - struct t3_cqe cqe;
2384 - PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
2385 - wq, cq, cq->sw_rptr, cq->sw_wptr);
2386 - memset(&cqe, 0, sizeof(cqe));
2387 - cqe.header = V_CQE_STATUS(TPT_ERR_SWFLUSH) |
2388 - V_CQE_OPCODE(sqp->opcode) |
2391 - V_CQE_QPID(wq->qpid) |
2392 - V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr, cq->size_log2));
2393 - cqe.header = htobe32(cqe.header);
2394 - CQE_WRID_SQ_WPTR(cqe) = sqp->sq_wptr;
2396 - *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
2400 -static void flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
2403 - struct t3_swsq *sqp;
2405 - ptr = wq->sq_rptr + count;
2406 - sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
2407 - while (ptr != wq->sq_wptr) {
2408 - insert_sq_cqe(wq, cq, sqp);
2410 - sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
2415 - * Move all CQEs from the HWCQ into the SWCQ.
2417 -static void flush_hw_cq(struct t3_cq *cq)
2419 - struct t3_cqe *cqe, *swcqe;
2421 - PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
2422 - cqe = cxio_next_hw_cqe(cq);
2424 - PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
2425 - __FUNCTION__, cq->rptr, cq->sw_wptr);
2426 - swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
2428 - swcqe->header |= htobe32(V_CQE_SWCQE(1));
2431 - cqe = cxio_next_hw_cqe(cq);
2435 -static void count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
2437 - struct t3_cqe *cqe;
2441 - ptr = cq->sw_rptr;
2442 - while (!Q_EMPTY(ptr, cq->sw_wptr)) {
2443 - cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
2444 - if ((SQ_TYPE(*cqe) ||
2445 - (CQE_OPCODE(*cqe) == T3_READ_RESP && CQE_WRID_STAG(*cqe) != 1)) &&
2446 - (CQE_QPID(*cqe) == wq->qpid))
2450 - PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
2453 -static void count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
2455 - struct t3_cqe *cqe;
2459 - ptr = cq->sw_rptr;
2460 - while (!Q_EMPTY(ptr, cq->sw_wptr)) {
2461 - cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
2462 - if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
2463 - (CQE_QPID(*cqe) == wq->qpid))
2467 - PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
2471 - * Assumes qhp lock is held.
2473 -void iwch_flush_qp(struct iwch_qp *qhp)
2475 - struct iwch_cq *rchp, *schp;
2478 - if (qhp->wq.flushed)
2481 - rchp = qhp->rhp->cqid2ptr[to_iwch_cq(qhp->ibv_qp.recv_cq)->cq.cqid];
2482 - schp = qhp->rhp->cqid2ptr[to_iwch_cq(qhp->ibv_qp.send_cq)->cq.cqid];
2484 - PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
2485 - qhp->wq.flushed = 1;
2488 - /* take a ref on the qhp since we must release the lock */
2489 - atomic_inc(&qhp->refcnt);
2491 - pthread_spin_unlock(&qhp->lock);
2493 - /* locking heirarchy: cq lock first, then qp lock. */
2494 - pthread_spin_lock(&rchp->lock);
2495 - pthread_spin_lock(&qhp->lock);
2496 - flush_hw_cq(&rchp->cq);
2497 - count_rcqes(&rchp->cq, &qhp->wq, &count);
2498 - flush_rq(&qhp->wq, &rchp->cq, count);
2499 - pthread_spin_unlock(&qhp->lock);
2500 - pthread_spin_unlock(&rchp->lock);
2502 - /* locking heirarchy: cq lock first, then qp lock. */
2503 - pthread_spin_lock(&schp->lock);
2504 - pthread_spin_lock(&qhp->lock);
2505 - flush_hw_cq(&schp->cq);
2506 - count_scqes(&schp->cq, &qhp->wq, &count);
2507 - flush_sq(&qhp->wq, &schp->cq, count);
2508 - pthread_spin_unlock(&qhp->lock);
2509 - pthread_spin_unlock(&schp->lock);
2513 - if (atomic_dec_and_test(&qhp->refcnt))
2514 - wake_up(&qhp->wait);
2516 - pthread_spin_lock(&qhp->lock);
2519 -void iwch_flush_qps(struct iwch_device *dev)
2523 - pthread_spin_lock(&dev->lock);
2524 - for (i=0; i < T3_MAX_NUM_QP; i++) {
2525 - struct iwch_qp *qhp = dev->qpid2ptr[i];
2527 - if (!qhp->wq.flushed && t3_wq_in_error(&qhp->wq)) {
2528 - pthread_spin_lock(&qhp->lock);
2529 - iwch_flush_qp(qhp);
2530 - pthread_spin_unlock(&qhp->lock);
2534 - pthread_spin_unlock(&dev->lock);
2538 -int t3b_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
2539 - struct ibv_recv_wr **bad_wr)
2542 - struct iwch_qp *qhp;
2547 - qhp = to_iwch_qp(ibqp);
2548 - pthread_spin_lock(&qhp->lock);
2549 - if (t3_wq_in_error(&qhp->wq)) {
2550 - iwch_flush_qp(qhp);
2551 - pthread_spin_unlock(&qhp->lock);
2554 - num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
2555 - qhp->wq.rq_size_log2) - 1;
2557 - pthread_spin_unlock(&qhp->lock);
2561 - idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
2562 - wqe = (union t3_wr *) (qhp->wq.queue + idx);
2564 - err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);
2571 - qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] =
2573 - build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
2574 - Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
2575 - 0, sizeof(struct t3_receive_wr) >> 3);
2576 - PDBG("%s cookie 0x%" PRIx64
2577 - " idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
2578 - "wqe %p \n", __FUNCTION__, wr->wr_id, idx,
2579 - qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
2580 - ++(qhp->wq.rq_wptr);
2585 - pthread_spin_unlock(&qhp->lock);
2586 - if (t3_wq_db_enabled(&qhp->wq))
2587 - RING_DOORBELL(qhp->wq.doorbell, qhp->wq.qpid);
2591 -int t3a_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
2592 - struct ibv_recv_wr **bad_wr)
2595 - struct iwch_qp *qhp = to_iwch_qp(ibqp);
2597 - pthread_spin_lock(&qhp->lock);
2598 - ret = ibv_cmd_post_recv(ibqp, wr, bad_wr);
2599 - pthread_spin_unlock(&qhp->lock);
2602 diff --git a/providers/cxgb3/verbs.c b/providers/cxgb3/verbs.c
2603 deleted file mode 100644
2604 index 39a44192e..000000000
2605 --- a/providers/cxgb3/verbs.c
2609 - * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
2611 - * This software is available to you under a choice of one of two
2612 - * licenses. You may choose to be licensed under the terms of the GNU
2613 - * General Public License (GPL) Version 2, available from the file
2614 - * COPYING in the main directory of this source tree, or the
2615 - * OpenIB.org BSD license below:
2617 - * Redistribution and use in source and binary forms, with or
2618 - * without modification, are permitted provided that the following
2619 - * conditions are met:
2621 - * - Redistributions of source code must retain the above
2622 - * copyright notice, this list of conditions and the following
2625 - * - Redistributions in binary form must reproduce the above
2626 - * copyright notice, this list of conditions and the following
2627 - * disclaimer in the documentation and/or other materials
2628 - * provided with the distribution.
2630 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2631 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2632 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2633 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2634 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2635 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
2636 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
2639 -#include <config.h>
2641 -#include <stdlib.h>
2643 -#include <string.h>
2645 -#include <pthread.h>
2646 -#include <sys/mman.h>
2647 -#include <inttypes.h>
2650 -#include "iwch-abi.h"
2652 -int iwch_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
2654 - struct ibv_query_device cmd;
2655 - uint64_t raw_fw_ver;
2656 - unsigned major, minor, sub_minor;
2659 - ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd,
2664 - major = (raw_fw_ver >> 32) & 0xffff;
2665 - minor = (raw_fw_ver >> 16) & 0xffff;
2666 - sub_minor = raw_fw_ver & 0xffff;
2668 - snprintf(attr->fw_ver, sizeof attr->fw_ver,
2669 - "%d.%d.%d", major, minor, sub_minor);
2674 -int iwch_query_port(struct ibv_context *context, uint8_t port,
2675 - struct ibv_port_attr *attr)
2677 - struct ibv_query_port cmd;
2679 - return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
2682 -struct ibv_pd *iwch_alloc_pd(struct ibv_context *context)
2684 - struct ibv_alloc_pd cmd;
2685 - struct uiwch_alloc_pd_resp resp;
2686 - struct iwch_pd *pd;
2688 - pd = malloc(sizeof *pd);
2692 - if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
2693 - &resp.ibv_resp, sizeof resp)) {
2698 - return &pd->ibv_pd;
2701 -int iwch_free_pd(struct ibv_pd *pd)
2705 - ret = ibv_cmd_dealloc_pd(pd);
2713 -struct ibv_mr *iwch_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
2714 - uint64_t hca_va, int access)
2716 - struct iwch_mr *mhp;
2717 - struct ibv_reg_mr cmd;
2718 - struct uiwch_reg_mr_resp resp;
2719 - struct iwch_device *dev = to_iwch_dev(pd->context->device);
2721 - PDBG("%s addr %p length %ld hca_va %p\n", __func__, addr, length,
2724 - mhp = malloc(sizeof *mhp);
2728 - if (ibv_cmd_reg_mr(pd, addr, length, hca_va,
2729 - access, &mhp->vmr, &cmd, sizeof(cmd),
2730 - &resp.ibv_resp, sizeof resp)) {
2735 - mhp->va_fbo = hca_va;
2736 - mhp->page_size = iwch_page_shift - 12;
2737 - mhp->pbl_addr = resp.pbl_addr;
2738 - mhp->len = length;
2740 - PDBG("%s stag 0x%x va_fbo 0x%" PRIx64
2741 - " page_size %d pbl_addr 0x%x len %d\n",
2742 - __func__, mhp->vmr.ibv_mr.rkey, mhp->va_fbo,
2743 - mhp->page_size, mhp->pbl_addr, mhp->len);
2745 - pthread_spin_lock(&dev->lock);
2746 - dev->mmid2ptr[t3_mmid(mhp->vmr.ibv_mr.lkey)] = mhp;
2747 - pthread_spin_unlock(&dev->lock);
2749 - return &mhp->vmr.ibv_mr;
2752 -int iwch_dereg_mr(struct verbs_mr *vmr)
2755 - struct iwch_device *dev = to_iwch_dev(vmr->ibv_mr.pd->context->device);
2757 - ret = ibv_cmd_dereg_mr(vmr);
2761 - pthread_spin_lock(&dev->lock);
2762 - dev->mmid2ptr[t3_mmid(vmr->ibv_mr.lkey)] = NULL;
2763 - pthread_spin_unlock(&dev->lock);
2765 - free(to_iwch_mr(vmr));
2770 -struct ibv_cq *iwch_create_cq(struct ibv_context *context, int cqe,
2771 - struct ibv_comp_channel *channel, int comp_vector)
2773 - struct uiwch_create_cq cmd;
2774 - struct uiwch_create_cq_resp resp;
2775 - struct iwch_cq *chp;
2776 - struct iwch_device *dev = to_iwch_dev(context->device);
2779 - chp = calloc(1, sizeof *chp);
2784 - cmd.user_rptr_addr = (uint64_t)(unsigned long)&chp->cq.rptr;
2785 - ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
2786 - &chp->ibv_cq, &cmd.ibv_cmd, sizeof cmd,
2787 - &resp.ibv_resp, sizeof resp);
2791 - pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE);
2793 - chp->cq.cqid = resp.cqid;
2794 - chp->cq.size_log2 = resp.size_log2;
2795 - if (dev->abi_version == 0)
2796 - chp->cq.memsize = PAGE_ALIGN((1UL << chp->cq.size_log2) *
2797 - sizeof(struct t3_cqe));
2799 - chp->cq.memsize = resp.memsize;
2800 - chp->cq.queue = mmap(NULL, t3_cq_memsize(&chp->cq),
2801 - PROT_READ|PROT_WRITE, MAP_SHARED, context->cmd_fd,
2803 - if (chp->cq.queue == MAP_FAILED)
2806 - chp->cq.sw_queue = calloc(t3_cq_depth(&chp->cq), sizeof(struct t3_cqe));
2807 - if (!chp->cq.sw_queue)
2810 - PDBG("%s cqid 0x%x physaddr %" PRIx64 " va %p memsize %d\n",
2811 - __FUNCTION__, chp->cq.cqid, resp.physaddr, chp->cq.queue,
2812 - t3_cq_memsize(&chp->cq));
2814 - pthread_spin_lock(&dev->lock);
2815 - dev->cqid2ptr[chp->cq.cqid] = chp;
2816 - pthread_spin_unlock(&dev->lock);
2818 - return &chp->ibv_cq;
2820 - munmap(chp->cq.queue, t3_cq_memsize(&chp->cq));
2822 - (void)ibv_cmd_destroy_cq(&chp->ibv_cq);
2828 -int iwch_resize_cq(struct ibv_cq *ibcq, int cqe)
2832 - struct ibv_resize_cq cmd;
2833 - struct iwch_cq *chp = to_iwch_cq(ibcq);
2835 - pthread_spin_lock(&chp->lock);
2836 - ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd);
2837 - /* remap and realloc swcq here */
2838 - pthread_spin_unlock(&chp->lock);
2845 -int iwch_destroy_cq(struct ibv_cq *ibcq)
2848 - struct iwch_cq *chp = to_iwch_cq(ibcq);
2849 - void *cqva = chp->cq.queue;
2850 - unsigned size = t3_cq_memsize(&chp->cq);
2851 - struct iwch_device *dev = to_iwch_dev(ibcq->context->device);
2853 - munmap(cqva, size);
2854 - ret = ibv_cmd_destroy_cq(ibcq);
2859 - pthread_spin_lock(&dev->lock);
2860 - dev->cqid2ptr[chp->cq.cqid] = NULL;
2861 - pthread_spin_unlock(&dev->lock);
2863 - free(chp->cq.sw_queue);
2868 -struct ibv_srq *iwch_create_srq(struct ibv_pd *pd,
2869 - struct ibv_srq_init_attr *attr)
2874 -int iwch_modify_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr,
2880 -int iwch_destroy_srq(struct ibv_srq *srq)
2885 -int iwch_post_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr,
2886 - struct ibv_recv_wr **bad_wr)
2891 -struct ibv_qp *iwch_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
2893 - struct uiwch_create_qp cmd;
2894 - struct uiwch_create_qp_resp resp;
2895 - struct iwch_qp *qhp;
2896 - struct iwch_device *dev = to_iwch_dev(pd->context->device);
2900 - PDBG("%s enter qp\n", __FUNCTION__);
2901 - qhp = calloc(1, sizeof *qhp);
2905 - ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd.ibv_cmd,
2906 - sizeof cmd, &resp.ibv_resp, sizeof resp);
2910 - PDBG("%s qpid 0x%x physaddr %" PRIx64 " doorbell %" PRIx64
2911 - " size %d sq_size %d rq_size %d\n",
2912 - __FUNCTION__, resp.qpid, resp.physaddr, resp.doorbell,
2913 - 1 << resp.size_log2, 1 << resp.sq_size_log2,
2914 - 1 << resp.rq_size_log2);
2917 - qhp->wq.qpid = resp.qpid;
2918 - qhp->wq.size_log2 = resp.size_log2;
2919 - qhp->wq.sq_size_log2 = resp.sq_size_log2;
2920 - qhp->wq.rq_size_log2 = resp.rq_size_log2;
2921 - pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
2922 - dbva = mmap(NULL, iwch_page_size, PROT_WRITE, MAP_SHARED,
2923 - pd->context->cmd_fd, resp.db_key & ~(iwch_page_mask));
2924 - if (dbva == MAP_FAILED)
2927 - qhp->wq.doorbell = dbva + (resp.db_key & (iwch_page_mask));
2928 - qhp->wq.queue = mmap(NULL, t3_wq_memsize(&qhp->wq),
2929 - PROT_READ|PROT_WRITE, MAP_SHARED,
2930 - pd->context->cmd_fd, resp.key);
2931 - if (qhp->wq.queue == MAP_FAILED)
2934 - qhp->wq.rq = calloc(t3_rq_depth(&qhp->wq), sizeof (uint64_t));
2938 - qhp->wq.sq = calloc(t3_sq_depth(&qhp->wq), sizeof (struct t3_swsq));
2942 - PDBG("%s dbva %p wqva %p wq memsize %d\n", __FUNCTION__,
2943 - qhp->wq.doorbell, qhp->wq.queue, t3_wq_memsize(&qhp->wq));
2945 - qhp->sq_sig_all = attr->sq_sig_all;
2947 - pthread_spin_lock(&dev->lock);
2948 - dev->qpid2ptr[qhp->wq.qpid] = qhp;
2949 - pthread_spin_unlock(&dev->lock);
2951 - return &qhp->ibv_qp;
2955 - munmap((void *)qhp->wq.queue, t3_wq_memsize(&qhp->wq));
2957 - munmap((void *)dbva, iwch_page_size);
2959 - (void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
2966 -static void reset_qp(struct iwch_qp *qhp)
2968 - PDBG("%s enter qp %p\n", __FUNCTION__, qhp);
2970 - qhp->wq.rq_wptr = qhp->wq.rq_rptr = 0;
2971 - qhp->wq.sq_wptr = qhp->wq.sq_rptr = 0;
2972 - qhp->wq.error = 0;
2973 - qhp->wq.oldest_read = NULL;
2974 - memset(qhp->wq.queue, 0, t3_wq_memsize(&qhp->wq));
2977 -int iwch_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
2980 - struct ibv_modify_qp cmd = {};
2981 - struct iwch_qp *qhp = to_iwch_qp(ibqp);
2984 - PDBG("%s enter qp %p new state %d\n", __FUNCTION__, ibqp, attr_mask & IBV_QP_STATE ? attr->qp_state : -1);
2985 - pthread_spin_lock(&qhp->lock);
2986 - if (t3b_device(qhp->rhp) && t3_wq_in_error(&qhp->wq))
2987 - iwch_flush_qp(qhp);
2988 - ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd);
2989 - if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET)
2991 - pthread_spin_unlock(&qhp->lock);
2995 -int iwch_destroy_qp(struct ibv_qp *ibqp)
2998 - struct iwch_qp *qhp = to_iwch_qp(ibqp);
2999 - struct iwch_device *dev = to_iwch_dev(ibqp->context->device);
3000 - void *dbva, *wqva;
3003 - PDBG("%s enter qp %p\n", __FUNCTION__, ibqp);
3004 - if (t3b_device(dev)) {
3005 - pthread_spin_lock(&qhp->lock);
3006 - iwch_flush_qp(qhp);
3007 - pthread_spin_unlock(&qhp->lock);
3010 - dbva = (void *)((unsigned long)qhp->wq.doorbell & ~(iwch_page_mask));
3011 - wqva = qhp->wq.queue;
3012 - wqsize = t3_wq_memsize(&qhp->wq);
3014 - munmap(dbva, iwch_page_size);
3015 - munmap(wqva, wqsize);
3016 - ret = ibv_cmd_destroy_qp(ibqp);
3021 - pthread_spin_lock(&dev->lock);
3022 - dev->qpid2ptr[qhp->wq.qpid] = NULL;
3023 - pthread_spin_unlock(&dev->lock);
3031 -int iwch_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
3032 - int attr_mask, struct ibv_qp_init_attr *init_attr)
3037 -struct ibv_ah *iwch_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
3042 -int iwch_destroy_ah(struct ibv_ah *ah)
3047 -int iwch_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
3052 -int iwch_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
3057 -void t3b_async_event(struct ibv_context *context,
3058 - struct ibv_async_event *event)
3060 - PDBG("%s type %d obj %p\n", __FUNCTION__, event->event_type,
3061 - event->element.cq);
3063 - switch (event->event_type) {
3064 - case IBV_EVENT_CQ_ERR:
3066 - case IBV_EVENT_QP_FATAL:
3067 - case IBV_EVENT_QP_REQ_ERR:
3068 - case IBV_EVENT_QP_ACCESS_ERR:
3069 - case IBV_EVENT_PATH_MIG_ERR: {
3070 - struct iwch_qp *qhp = to_iwch_qp(event->element.qp);
3071 - pthread_spin_lock(&qhp->lock);
3072 - iwch_flush_qp(qhp);
3073 - pthread_spin_unlock(&qhp->lock);
3076 - case IBV_EVENT_SQ_DRAINED:
3077 - case IBV_EVENT_PATH_MIG:
3078 - case IBV_EVENT_COMM_EST:
3079 - case IBV_EVENT_QP_LAST_WQE_REACHED: