1From 36588f5844af4ef1e5b0d6ad002fa1adf9032653 Mon Sep 17 00:00:00 2001
2From: Potnuri Bharat Teja <bharat@chelsio.com>
3Date: Mon, 21 Oct 2019 14:01:25 +0530
4Subject: [PATCH] libcxgb3: Remove libcxgb3 from rdma-core
5
6Remove the userspace provider for iw_cxgb3 after removing it from kernel.
7
8Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
9Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
10---
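
Notes (text between the '---' separator and the first diff header is
ignored by git-am, so these annotations do not affect how the patch
applies):

The deleted cxio_wr.h packs hardware fields with Chelsio's S_/M_/V_/G_
shift-and-mask convention: S_* is the bit offset, M_* the mask, V_*(x)
packs a value into a word, G_*(x) extracts it again. A minimal,
self-contained illustration of that convention, reusing two macros
verbatim from the deleted header:

    #include <assert.h>
    #include <stdint.h>

    /* copied from the deleted cxio_wr.h */
    #define S_CQE_QPID   12
    #define M_CQE_QPID   0x7FFFF
    #define V_CQE_QPID(x) ((x) << S_CQE_QPID)
    #define G_CQE_QPID(x) (((x) >> S_CQE_QPID) & M_CQE_QPID)

    int main(void)
    {
        /* pack a QP id next to unrelated low bits, then get it back */
        uint32_t header = V_CQE_QPID(42) | 0x5;
        assert(G_CQE_QPID(header) == 42);
        return 0;
    }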
11 CMakeLists.txt | 1 -
12 MAINTAINERS | 5 -
13 README.md | 1 -
14 debian/control | 10 +-
15 debian/copyright | 3 +-
16 kernel-boot/rdma-description.rules | 1 -
17 kernel-boot/rdma-hw-modules.rules | 1 -
18 libibverbs/verbs.h | 1 -
19 providers/cxgb3/CMakeLists.txt | 6 -
20 providers/cxgb3/cq.c | 442 -----------------
21 providers/cxgb3/cxio_wr.h | 758 -----------------------------
22 providers/cxgb3/firmware_exports.h | 148 ------
23 providers/cxgb3/iwch-abi.h | 51 --
24 providers/cxgb3/iwch.c | 269 ----------
25 providers/cxgb3/iwch.h | 218 ---------
26 providers/cxgb3/qp.c | 560 ---------------------
27 providers/cxgb3/verbs.c | 476 ------------------
28 redhat/rdma-core.spec | 3 -
29 redhat/rdma.kernel-init | 4 -
30 suse/rdma-core.spec | 2 -
31 20 files changed, 4 insertions(+), 2956 deletions(-)
32 delete mode 100644 providers/cxgb3/CMakeLists.txt
33 delete mode 100644 providers/cxgb3/cq.c
34 delete mode 100644 providers/cxgb3/cxio_wr.h
35 delete mode 100644 providers/cxgb3/firmware_exports.h
36 delete mode 100644 providers/cxgb3/iwch-abi.h
37 delete mode 100644 providers/cxgb3/iwch.c
38 delete mode 100644 providers/cxgb3/iwch.h
39 delete mode 100644 providers/cxgb3/qp.c
40 delete mode 100644 providers/cxgb3/verbs.c
41
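Every rdma-core userspace provider hands a struct verbs_device_ops to
the core; this patch therefore removes not only the providers/cxgb3/
sources but also the verbs_provider_cxgb3 declaration in
libibverbs/verbs.h and the PROVIDER_DRIVER(cxgb3, iwch_dev_ops)
registration at the bottom of iwch.c (both visible in the hunks below).
A self-contained sketch of that constructor-registration idiom, using
hypothetical stand-in names (demo_device_ops, DEMO_PROVIDER_DRIVER)
rather than rdma-core's real types:

    #include <stdio.h>

    /* stand-in for verbs_device_ops; the real struct carries a PCI
     * match table and alloc/free callbacks (see the deleted iwch.c) */
    struct demo_device_ops {
        const char *name;
    };

    /* a constructor runs when the provider library is loaded and
     * announces its ops to the core */
    #define DEMO_PROVIDER_DRIVER(drv, ops)                            \
        static __attribute__((constructor)) void drv##_register(void) \
        {                                                             \
            printf("provider %s registered\n", (ops).name);           \
        }

    static const struct demo_device_ops demo_ops = { .name = "demo" };
    DEMO_PROVIDER_DRIVER(demo, demo_ops);

    int main(void) { return 0; }

When providers are built as standalone shared objects, a constructor in
this style lets the core pick them up at load time; statically linked
builds reference the verbs_provider_* symbols directly, which is why
verbs.h loses a line as well.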
42diff --git a/CMakeLists.txt b/CMakeLists.txt
43index 7abeea4fe..85485ba00 100644
44--- a/CMakeLists.txt
45+++ b/CMakeLists.txt
46@@ -615,7 +615,6 @@ add_subdirectory(librdmacm/man)
47 # Providers
48 if (HAVE_COHERENT_DMA)
49 add_subdirectory(providers/bnxt_re)
50-add_subdirectory(providers/cxgb3) # NO SPARSE
51 add_subdirectory(providers/cxgb4) # NO SPARSE
52 add_subdirectory(providers/efa)
53 add_subdirectory(providers/efa/man)
54diff --git a/README.md b/README.md
55index 451ff7fcb..a96351933 100644
56--- a/README.md
57+++ b/README.md
58@@ -15,7 +15,6 @@ under the providers/ directory. Support for the following Kernel RDMA drivers
59 is included:
60
61 - efa.ko
62- - iw_cxgb3.ko
63 - iw_cxgb4.ko
64 - hfi1.ko
65 - hns-roce.ko
66diff --git a/kernel-boot/rdma-description.rules b/kernel-boot/rdma-description.rules
67index bb33dce40..4ea59ba19 100644
68--- a/kernel-boot/rdma-description.rules
69+++ b/kernel-boot/rdma-description.rules
70@@ -22,7 +22,6 @@ DRIVERS=="ib_qib", ENV{ID_RDMA_INFINIBAND}="1"
71 DRIVERS=="hfi1", ENV{ID_RDMA_OPA}="1"
72
73 # Hardware that supports iWarp
74-DRIVERS=="cxgb3", ENV{ID_RDMA_IWARP}="1"
75 DRIVERS=="cxgb4", ENV{ID_RDMA_IWARP}="1"
76 DRIVERS=="i40e", ENV{ID_RDMA_IWARP}="1"
77 DRIVERS=="nes", ENV{ID_RDMA_IWARP}="1"
78diff --git a/kernel-boot/rdma-hw-modules.rules b/kernel-boot/rdma-hw-modules.rules
79index dde0ab8da..da4bbe363 100644
80--- a/kernel-boot/rdma-hw-modules.rules
81+++ b/kernel-boot/rdma-hw-modules.rules
82@@ -9,7 +9,6 @@ SUBSYSTEM!="net", GOTO="rdma_hw_modules_end"
83
84 ENV{ID_NET_DRIVER}=="be2net", RUN{builtin}+="kmod load ocrdma"
85 ENV{ID_NET_DRIVER}=="bnxt_en", RUN{builtin}+="kmod load bnxt_re"
86-ENV{ID_NET_DRIVER}=="cxgb3", RUN{builtin}+="kmod load iw_cxgb3"
87 ENV{ID_NET_DRIVER}=="cxgb4", RUN{builtin}+="kmod load iw_cxgb4"
88 ENV{ID_NET_DRIVER}=="hns", RUN{builtin}+="kmod load hns_roce"
89 ENV{ID_NET_DRIVER}=="i40e", RUN{builtin}+="kmod load i40iw"
90diff --git a/libibverbs/verbs.h b/libibverbs/verbs.h
91index c411722b1..12a33a99a 100644
92--- a/libibverbs/verbs.h
93+++ b/libibverbs/verbs.h
94@@ -2144,7 +2144,6 @@ struct ibv_device **ibv_get_device_list(int *num_devices);
95
96 struct verbs_devices_ops;
97 extern const struct verbs_device_ops verbs_provider_bnxt_re;
98-extern const struct verbs_device_ops verbs_provider_cxgb3;
99 extern const struct verbs_device_ops verbs_provider_cxgb4;
100 extern const struct verbs_device_ops verbs_provider_efa;
101 extern const struct verbs_device_ops verbs_provider_hfi1verbs;
102diff --git a/providers/cxgb3/CMakeLists.txt b/providers/cxgb3/CMakeLists.txt
103deleted file mode 100644
104index a578105e7..000000000
105--- a/providers/cxgb3/CMakeLists.txt
106+++ /dev/null
107@@ -1,6 +0,0 @@
108-rdma_provider(cxgb3
109- cq.c
110- iwch.c
111- qp.c
112- verbs.c
113-)
114diff --git a/providers/cxgb3/cq.c b/providers/cxgb3/cq.c
115deleted file mode 100644
116index 6cb4fe74d..000000000
117--- a/providers/cxgb3/cq.c
118+++ /dev/null
119@@ -1,442 +0,0 @@
120-/*
121- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
122- *
123- * This software is available to you under a choice of one of two
124- * licenses. You may choose to be licensed under the terms of the GNU
125- * General Public License (GPL) Version 2, available from the file
126- * COPYING in the main directory of this source tree, or the
127- * OpenIB.org BSD license below:
128- *
129- * Redistribution and use in source and binary forms, with or
130- * without modification, are permitted provided that the following
131- * conditions are met:
132- *
133- * - Redistributions of source code must retain the above
134- * copyright notice, this list of conditions and the following
135- * disclaimer.
136- *
137- * - Redistributions in binary form must reproduce the above
138- * copyright notice, this list of conditions and the following
139- * disclaimer in the documentation and/or other materials
140- * provided with the distribution.
141- *
142- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
143- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
144- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
145- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
146- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
147- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
148- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
149- * SOFTWARE.
150- */
151-#include <config.h>
152-
153-#include <stdio.h>
154-#include <pthread.h>
155-#include <sys/errno.h>
156-
157-#include <infiniband/opcode.h>
158-
159-#include "iwch.h"
160-#include "iwch-abi.h"
161-
162-int iwch_arm_cq(struct ibv_cq *ibcq, int solicited)
163-{
164- int ret;
165- struct iwch_cq *chp = to_iwch_cq(ibcq);
166-
167- pthread_spin_lock(&chp->lock);
168- ret = ibv_cmd_req_notify_cq(ibcq, solicited);
169- pthread_spin_unlock(&chp->lock);
170-
171- return ret;
172-}
173-
174-static inline void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
175-{
176- struct t3_swsq *sqp;
177- uint32_t ptr = wq->sq_rptr;
178- int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
179-
180- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
181- while (count--) {
182- if (!sqp->signaled) {
183- ptr++;
184- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
185- } else if (sqp->complete) {
186-
187- /*
188- * Insert this completed cqe into the swcq.
189- */
190- sqp->cqe.header |= htobe32(V_CQE_SWCQE(1));
191- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
192- = sqp->cqe;
193- cq->sw_wptr++;
194- sqp->signaled = 0;
195- break;
196- } else
197- break;
198- }
199-}
200-
201-static inline void create_read_req_cqe(struct t3_wq *wq,
202- struct t3_cqe *hw_cqe,
203- struct t3_cqe *read_cqe)
204-{
205- CQE_WRID_SQ_WPTR(*read_cqe) = wq->oldest_read->sq_wptr;
206- read_cqe->len = wq->oldest_read->read_len;
207- read_cqe->header = htobe32(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
208- V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
209- V_CQE_OPCODE(T3_READ_REQ) |
210- V_CQE_TYPE(1));
211-}
212-
213-/*
214- * Return a ptr to the next read wr in the SWSQ or NULL.
215- */
216-static inline void advance_oldest_read(struct t3_wq *wq)
217-{
218-
219- uint32_t rptr = wq->oldest_read - wq->sq + 1;
220- uint32_t wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
221-
222- while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
223- wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
224-
225- if (wq->oldest_read->opcode == T3_READ_REQ) {
226- return;
227- }
228- rptr++;
229- }
230- wq->oldest_read = NULL;
231-}
232-
233-static inline int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq,
234- struct t3_cqe *cqe, uint8_t *cqe_flushed,
235- uint64_t *cookie)
236-{
237- int ret = 0;
238- struct t3_cqe *hw_cqe, read_cqe;
239-
240- *cqe_flushed = 0;
241- hw_cqe = cxio_next_cqe(cq);
242- udma_from_device_barrier();
243-
244- /*
245- * Skip cqes not affiliated with a QP.
246- */
247- if (wq == NULL) {
248- ret = -1;
249- goto skip_cqe;
250- }
251-
252- /*
253- * Gotta tweak READ completions:
254- * 1) the cqe doesn't contain the sq_wptr from the wr.
255- * 2) opcode not reflected from the wr.
256- * 3) read_len not reflected from the wr.
257- * 4) cq_type is RQ_TYPE not SQ_TYPE.
258- */
259- if (CQE_OPCODE(*hw_cqe) == T3_READ_RESP) {
260-
261- /*
262- * If this is an unsolicited read response to local stag 1,
263- * then the read was generated by the kernel driver as part
264- * of peer-2-peer connection setup. So ignore the completion.
265- */
266- if (CQE_WRID_STAG(*hw_cqe) == 1) {
267- if (CQE_STATUS(*hw_cqe))
268- wq->error = 1;
269- ret = -1;
270- goto skip_cqe;
271- }
272-
273- /*
274- * Don't write to the HWCQ, so create a new read req CQE
275- * in local memory.
276- */
277- create_read_req_cqe(wq, hw_cqe, &read_cqe);
278- hw_cqe = &read_cqe;
279- advance_oldest_read(wq);
280- }
281-
282- /*
283- * Errors.
284- */
285- if (CQE_STATUS(*hw_cqe) || t3_wq_in_error(wq)) {
286- *cqe_flushed = t3_wq_in_error(wq);
287- t3_set_wq_in_error(wq);
288- goto proc_cqe;
289- }
290-
291- /*
292- * RECV completion.
293- */
294- if (RQ_TYPE(*hw_cqe)) {
295-
296- /*
297- * HW only validates 4 bits of MSN. So we must validate that
298- * the MSN in the SEND is the next expected MSN. If its not,
299- * then we complete this with TPT_ERR_MSN and mark the wq in
300- * error.
301- */
302- if ((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1))) {
303- t3_set_wq_in_error(wq);
304- hw_cqe->header |= htobe32(V_CQE_STATUS(TPT_ERR_MSN));
305- }
306- goto proc_cqe;
307- }
308-
309- /*
310- * If we get here its a send completion.
311- *
312- * Handle out of order completion. These get stuffed
313- * in the SW SQ. Then the SW SQ is walked to move any
314- * now in-order completions into the SW CQ. This handles
315- * 2 cases:
316- * 1) reaping unsignaled WRs when the first subsequent
317- * signaled WR is completed.
318- * 2) out of order read completions.
319- */
320- if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
321- struct t3_swsq *sqp;
322-
323- sqp = wq->sq +
324- Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
325- sqp->cqe = *hw_cqe;
326- sqp->complete = 1;
327- ret = -1;
328- goto flush_wq;
329- }
330-
331-proc_cqe:
332- *cqe = *hw_cqe;
333-
334- /*
335- * Reap the associated WR(s) that are freed up with this
336- * completion.
337- */
338- if (SQ_TYPE(*hw_cqe)) {
339- wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
340- *cookie = (wq->sq +
341- Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
342- wq->sq_rptr++;
343- } else {
344- *cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
345- wq->rq_rptr++;
346- }
347-
348-flush_wq:
349- /*
350- * Flush any completed cqes that are now in-order.
351- */
352- flush_completed_wrs(wq, cq);
353-
354-skip_cqe:
355- if (SW_CQE(*hw_cqe)) {
356- PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
357- __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
358- ++cq->sw_rptr;
359- } else {
360- PDBG("%s cq %p cqid 0x%x skip hw cqe sw_rptr 0x%x\n",
361- __FUNCTION__, cq, cq->cqid, cq->rptr);
362- ++cq->rptr;
363- }
364-
365- return ret;
366-}
367-
368-/*
369- * Get one cq entry from cxio and map it to openib.
370- *
371- * Returns:
372- * 0 EMPTY;
373- * 1 cqe returned
374- * -EAGAIN caller must try again
375- * any other -errno fatal error
376- */
377-static int iwch_poll_cq_one(struct iwch_device *rhp, struct iwch_cq *chp,
378- struct ibv_wc *wc)
379-{
380- struct iwch_qp *qhp = NULL;
381- struct t3_cqe cqe, *hw_cqe;
382- struct t3_wq *wq;
383- uint8_t cqe_flushed;
384- uint64_t cookie;
385- int ret = 1;
386-
387- hw_cqe = cxio_next_cqe(&chp->cq);
388- udma_from_device_barrier();
389-
390- if (!hw_cqe)
391- return 0;
392-
393- qhp = rhp->qpid2ptr[CQE_QPID(*hw_cqe)];
394- if (!qhp)
395- wq = NULL;
396- else {
397- pthread_spin_lock(&qhp->lock);
398- wq = &(qhp->wq);
399- }
400- ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie);
401- if (ret) {
402- ret = -EAGAIN;
403- goto out;
404- }
405- ret = 1;
406-
407- wc->wr_id = cookie;
408- wc->qp_num = qhp->wq.qpid;
409- wc->vendor_err = CQE_STATUS(cqe);
410- wc->wc_flags = 0;
411-
412- PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
413- "lo 0x%x cookie 0x%" PRIx64 "\n",
414- __FUNCTION__, CQE_QPID(cqe), CQE_TYPE(cqe),
415- CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
416- CQE_WRID_LOW(cqe), cookie);
417-
418- if (CQE_TYPE(cqe) == 0) {
419- if (!CQE_STATUS(cqe))
420- wc->byte_len = CQE_LEN(cqe);
421- else
422- wc->byte_len = 0;
423- wc->opcode = IBV_WC_RECV;
424- } else {
425- switch (CQE_OPCODE(cqe)) {
426- case T3_RDMA_WRITE:
427- wc->opcode = IBV_WC_RDMA_WRITE;
428- break;
429- case T3_READ_REQ:
430- wc->opcode = IBV_WC_RDMA_READ;
431- wc->byte_len = CQE_LEN(cqe);
432- break;
433- case T3_SEND:
434- case T3_SEND_WITH_SE:
435- wc->opcode = IBV_WC_SEND;
436- break;
437- case T3_BIND_MW:
438- wc->opcode = IBV_WC_BIND_MW;
439- break;
440-
441- /* these aren't supported yet */
442- case T3_SEND_WITH_INV:
443- case T3_SEND_WITH_SE_INV:
444- case T3_LOCAL_INV:
445- case T3_FAST_REGISTER:
446- default:
447- PDBG("%s Unexpected opcode %d CQID 0x%x QPID 0x%x\n",
448- __FUNCTION__, CQE_OPCODE(cqe), chp->cq.cqid,
449- CQE_QPID(cqe));
450- ret = -EINVAL;
451- goto out;
452- }
453- }
454-
455- if (cqe_flushed) {
456- wc->status = IBV_WC_WR_FLUSH_ERR;
457- } else {
458-
459- switch (CQE_STATUS(cqe)) {
460- case TPT_ERR_SUCCESS:
461- wc->status = IBV_WC_SUCCESS;
462- break;
463- case TPT_ERR_STAG:
464- wc->status = IBV_WC_LOC_ACCESS_ERR;
465- break;
466- case TPT_ERR_PDID:
467- wc->status = IBV_WC_LOC_PROT_ERR;
468- break;
469- case TPT_ERR_QPID:
470- case TPT_ERR_ACCESS:
471- wc->status = IBV_WC_LOC_ACCESS_ERR;
472- break;
473- case TPT_ERR_WRAP:
474- wc->status = IBV_WC_GENERAL_ERR;
475- break;
476- case TPT_ERR_BOUND:
477- wc->status = IBV_WC_LOC_LEN_ERR;
478- break;
479- case TPT_ERR_INVALIDATE_SHARED_MR:
480- case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
481- wc->status = IBV_WC_MW_BIND_ERR;
482- break;
483- case TPT_ERR_CRC:
484- case TPT_ERR_MARKER:
485- case TPT_ERR_PDU_LEN_ERR:
486- case TPT_ERR_OUT_OF_RQE:
487- case TPT_ERR_DDP_VERSION:
488- case TPT_ERR_RDMA_VERSION:
489- case TPT_ERR_DDP_QUEUE_NUM:
490- case TPT_ERR_MSN:
491- case TPT_ERR_TBIT:
492- case TPT_ERR_MO:
493- case TPT_ERR_MSN_RANGE:
494- case TPT_ERR_IRD_OVERFLOW:
495- case TPT_ERR_OPCODE:
496- wc->status = IBV_WC_FATAL_ERR;
497- break;
498- case TPT_ERR_SWFLUSH:
499- wc->status = IBV_WC_WR_FLUSH_ERR;
500- break;
501- default:
502- PDBG("%s Unexpected status 0x%x CQID 0x%x QPID 0x%0x\n",
503- __FUNCTION__, CQE_STATUS(cqe), chp->cq.cqid,
504- CQE_QPID(cqe));
505- ret = -EINVAL;
506- }
507- }
508-out:
509- if (wq)
510- pthread_spin_unlock(&qhp->lock);
511- return ret;
512-}
513-
514-int t3b_poll_cq(struct ibv_cq *ibcq, int num_entries, struct ibv_wc *wc)
515-{
516- struct iwch_device *rhp;
517- struct iwch_cq *chp;
518- int npolled;
519- int err = 0;
520-
521- chp = to_iwch_cq(ibcq);
522- rhp = chp->rhp;
523-
524- if (rhp->abi_version > 0 && t3_cq_in_error(&chp->cq)) {
525- t3_reset_cq_in_error(&chp->cq);
526- iwch_flush_qps(rhp);
527- }
528-
529- pthread_spin_lock(&chp->lock);
530- for (npolled = 0; npolled < num_entries; ++npolled) {
531-
532- /*
533- * Because T3 can post CQEs that are out of order,
534- * we might have to poll again after removing
535- * one of these.
536- */
537- do {
538- err = iwch_poll_cq_one(rhp, chp, wc + npolled);
539- } while (err == -EAGAIN);
540- if (err <= 0)
541- break;
542- }
543- pthread_spin_unlock(&chp->lock);
544-
545- if (err < 0)
546- return err;
547- else {
548- return npolled;
549- }
550-}
551-
552-int t3a_poll_cq(struct ibv_cq *ibcq, int num_entries, struct ibv_wc *wc)
553-{
554- int ret;
555- struct iwch_cq *chp = to_iwch_cq(ibcq);
556-
557- pthread_spin_lock(&chp->lock);
558- ret = ibv_cmd_poll_cq(ibcq, num_entries, wc);
559- pthread_spin_unlock(&chp->lock);
560- return ret;
561-}
562diff --git a/providers/cxgb3/cxio_wr.h b/providers/cxgb3/cxio_wr.h
563deleted file mode 100644
564index 042bd9414..000000000
565--- a/providers/cxgb3/cxio_wr.h
566+++ /dev/null
567@@ -1,758 +0,0 @@
568-/*
569- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
570- *
571- * This software is available to you under a choice of one of two
572- * licenses. You may choose to be licensed under the terms of the GNU
573- * General Public License (GPL) Version 2, available from the file
574- * COPYING in the main directory of this source tree, or the
575- * OpenIB.org BSD license below:
576- *
577- * Redistribution and use in source and binary forms, with or
578- * without modification, are permitted provided that the following
579- * conditions are met:
580- *
581- * - Redistributions of source code must retain the above
582- * copyright notice, this list of conditions and the following
583- * disclaimer.
584- *
585- * - Redistributions in binary form must reproduce the above
586- * copyright notice, this list of conditions and the following
587- * disclaimer in the documentation and/or other materials
588- * provided with the distribution.
589- *
590- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
591- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
592- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
593- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
594- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
595- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
596- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
597- * SOFTWARE.
598- */
599-#ifndef __CXIO_WR_H__
600-#define __CXIO_WR_H__
601-
602-#include <stddef.h>
603-#include <stdint.h>
604-#include <endian.h>
605-#include <util/udma_barrier.h>
606-#include "firmware_exports.h"
607-
608-#define T3_MAX_NUM_QP (1<<15)
609-#define T3_MAX_NUM_CQ (1<<15)
610-#define T3_MAX_NUM_PD (1<<15)
611-#define T3_MAX_NUM_STAG (1<<15)
612-#define T3_MAX_SGE 4
613-#define T3_MAX_INLINE 64
614-
615-#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
616-#define Q_FULL(rptr,wptr,size_log2) ( (((wptr)-(rptr))>>(size_log2)) && \
617- ((rptr)!=(wptr)) )
618-#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
619-#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
620-#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
621-#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
622-
623-/* FIXME: Move me to a generic PCI mmio accessor */
624-#define cpu_to_pci32(val) htole32(val)
625-
626-#define RING_DOORBELL(doorbell, QPID) { \
627- *doorbell = cpu_to_pci32(QPID); \
628-}
629-
630-#define SEQ32_GE(x,y) (!( (((uint32_t) (x)) - ((uint32_t) (y))) & 0x80000000 ))
631-
632-enum t3_wr_flags {
633- T3_COMPLETION_FLAG = 0x01,
634- T3_NOTIFY_FLAG = 0x02,
635- T3_SOLICITED_EVENT_FLAG = 0x04,
636- T3_READ_FENCE_FLAG = 0x08,
637- T3_LOCAL_FENCE_FLAG = 0x10
638-} __attribute__ ((packed));
639-
640-enum t3_wr_opcode {
641- T3_WR_BP = FW_WROPCODE_RI_BYPASS,
642- T3_WR_SEND = FW_WROPCODE_RI_SEND,
643- T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
644- T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
645- T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
646- T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
647- T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
648- T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
649- T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
650-} __attribute__ ((packed));
651-
652-enum t3_rdma_opcode {
653- T3_RDMA_WRITE, /* IETF RDMAP v1.0 ... */
654- T3_READ_REQ,
655- T3_READ_RESP,
656- T3_SEND,
657- T3_SEND_WITH_INV,
658- T3_SEND_WITH_SE,
659- T3_SEND_WITH_SE_INV,
660- T3_TERMINATE,
661- T3_RDMA_INIT, /* CHELSIO RI specific ... */
662- T3_BIND_MW,
663- T3_FAST_REGISTER,
664- T3_LOCAL_INV,
665- T3_QP_MOD,
666- T3_BYPASS
667-} __attribute__ ((packed));
668-
669-static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
670-{
671- switch (wrop) {
672- case T3_WR_BP: return T3_BYPASS;
673- case T3_WR_SEND: return T3_SEND;
674- case T3_WR_WRITE: return T3_RDMA_WRITE;
675- case T3_WR_READ: return T3_READ_REQ;
676- case T3_WR_INV_STAG: return T3_LOCAL_INV;
677- case T3_WR_BIND: return T3_BIND_MW;
678- case T3_WR_INIT: return T3_RDMA_INIT;
679- case T3_WR_QP_MOD: return T3_QP_MOD;
680- default: break;
681- }
682- return -1;
683-}
684-
685-
686-/* Work request id */
687-union t3_wrid {
688- struct {
689- uint32_t hi:32;
690- uint32_t low:32;
691- } id0;
692- uint64_t id1;
693-};
694-
695-#define WRID(wrid) (wrid.id1)
696-#define WRID_GEN(wrid) (wrid.id0.wr_gen)
697-#define WRID_IDX(wrid) (wrid.id0.wr_idx)
698-#define WRID_LO(wrid) (wrid.id0.wr_lo)
699-
700-struct fw_riwrh {
701- uint32_t op_seop_flags;
702- uint32_t gen_tid_len;
703-};
704-
705-#define S_FW_RIWR_OP 24
706-#define M_FW_RIWR_OP 0xff
707-#define V_FW_RIWR_OP(x) ((x) << S_FW_RIWR_OP)
708-#define G_FW_RIWR_OP(x) ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
709-
710-#define S_FW_RIWR_SOPEOP 22
711-#define M_FW_RIWR_SOPEOP 0x3
712-#define V_FW_RIWR_SOPEOP(x) ((x) << S_FW_RIWR_SOPEOP)
713-
714-#define S_FW_RIWR_FLAGS 8
715-#define M_FW_RIWR_FLAGS 0x3fffff
716-#define V_FW_RIWR_FLAGS(x) ((x) << S_FW_RIWR_FLAGS)
717-#define G_FW_RIWR_FLAGS(x) ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
718-
719-#define S_FW_RIWR_TID 8
720-#define V_FW_RIWR_TID(x) ((x) << S_FW_RIWR_TID)
721-
722-#define S_FW_RIWR_LEN 0
723-#define V_FW_RIWR_LEN(x) ((x) << S_FW_RIWR_LEN)
724-
725-#define S_FW_RIWR_GEN 31
726-#define V_FW_RIWR_GEN(x) ((x) << S_FW_RIWR_GEN)
727-
728-struct t3_sge {
729- uint32_t stag;
730- uint32_t len;
731- uint64_t to;
732-};
733-
734-/* If num_sgle is zero, flit 5+ contains immediate data.*/
735-struct t3_send_wr {
736- struct fw_riwrh wrh; /* 0 */
737- union t3_wrid wrid; /* 1 */
738-
739- enum t3_rdma_opcode rdmaop:8;
740- uint32_t reserved:24; /* 2 */
741- uint32_t rem_stag; /* 2 */
742- uint32_t plen; /* 3 */
743- uint32_t num_sgle;
744- struct t3_sge sgl[T3_MAX_SGE]; /* 4+ */
745-};
746-
747-struct t3_local_inv_wr {
748- struct fw_riwrh wrh; /* 0 */
749- union t3_wrid wrid; /* 1 */
750- uint32_t stag; /* 2 */
751- uint32_t reserved3;
752-};
753-
754-struct t3_rdma_write_wr {
755- struct fw_riwrh wrh; /* 0 */
756- union t3_wrid wrid; /* 1 */
757- enum t3_rdma_opcode rdmaop:8; /* 2 */
758- uint32_t reserved:24; /* 2 */
759- uint32_t stag_sink;
760- uint64_t to_sink; /* 3 */
761- uint32_t plen; /* 4 */
762- uint32_t num_sgle;
763- struct t3_sge sgl[T3_MAX_SGE]; /* 5+ */
764-};
765-
766-struct t3_rdma_read_wr {
767- struct fw_riwrh wrh; /* 0 */
768- union t3_wrid wrid; /* 1 */
769- enum t3_rdma_opcode rdmaop:8; /* 2 */
770- uint32_t reserved:24;
771- uint32_t rem_stag;
772- uint64_t rem_to; /* 3 */
773- uint32_t local_stag; /* 4 */
774- uint32_t local_len;
775- uint64_t local_to; /* 5 */
776-};
777-
778-enum t3_addr_type {
779- T3_VA_BASED_TO = 0x0,
780- T3_ZERO_BASED_TO = 0x1
781-} __attribute__ ((packed));
782-
783-enum t3_mem_perms {
784- T3_MEM_ACCESS_LOCAL_READ = 0x1,
785- T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
786- T3_MEM_ACCESS_REM_READ = 0x4,
787- T3_MEM_ACCESS_REM_WRITE = 0x8
788-} __attribute__ ((packed));
789-
790-struct t3_bind_mw_wr {
791- struct fw_riwrh wrh; /* 0 */
792- union t3_wrid wrid; /* 1 */
793- uint32_t reserved:16;
794- enum t3_addr_type type:8;
795- enum t3_mem_perms perms:8; /* 2 */
796- uint32_t mr_stag;
797- uint32_t mw_stag; /* 3 */
798- uint32_t mw_len;
799- uint64_t mw_va; /* 4 */
800- uint32_t mr_pbl_addr; /* 5 */
801- uint32_t reserved2:24;
802- uint32_t mr_pagesz:8;
803-};
804-
805-struct t3_receive_wr {
806- struct fw_riwrh wrh; /* 0 */
807- union t3_wrid wrid; /* 1 */
808- uint8_t pagesz[T3_MAX_SGE];
809- uint32_t num_sgle; /* 2 */
810- struct t3_sge sgl[T3_MAX_SGE]; /* 3+ */
811- uint32_t pbl_addr[T3_MAX_SGE];
812-};
813-
814-struct t3_bypass_wr {
815- struct fw_riwrh wrh;
816- union t3_wrid wrid; /* 1 */
817-};
818-
819-struct t3_modify_qp_wr {
820- struct fw_riwrh wrh; /* 0 */
821- union t3_wrid wrid; /* 1 */
822- uint32_t flags; /* 2 */
823- uint32_t quiesce; /* 2 */
824- uint32_t max_ird; /* 3 */
825- uint32_t max_ord; /* 3 */
826- uint64_t sge_cmd; /* 4 */
827- uint64_t ctx1; /* 5 */
828- uint64_t ctx0; /* 6 */
829-};
830-
831-enum t3_modify_qp_flags {
832- MODQP_QUIESCE = 0x01,
833- MODQP_MAX_IRD = 0x02,
834- MODQP_MAX_ORD = 0x04,
835- MODQP_WRITE_EC = 0x08,
836- MODQP_READ_EC = 0x10,
837-};
838-
839-
840-enum t3_mpa_attrs {
841- uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
842- uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
843- uP_RI_MPA_CRC_ENABLE = 0x4,
844- uP_RI_MPA_IETF_ENABLE = 0x8
845-} __attribute__ ((packed));
846-
847-enum t3_qp_caps {
848- uP_RI_QP_RDMA_READ_ENABLE = 0x01,
849- uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
850- uP_RI_QP_BIND_ENABLE = 0x04,
851- uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
852- uP_RI_QP_STAG0_ENABLE = 0x10
853-} __attribute__ ((packed));
854-
855-struct t3_rdma_init_attr {
856- uint32_t tid;
857- uint32_t qpid;
858- uint32_t pdid;
859- uint32_t scqid;
860- uint32_t rcqid;
861- uint32_t rq_addr;
862- uint32_t rq_size;
863- enum t3_mpa_attrs mpaattrs;
864- enum t3_qp_caps qpcaps;
865- uint16_t tcp_emss;
866- uint32_t ord;
867- uint32_t ird;
868- uint64_t qp_dma_addr;
869- uint32_t qp_dma_size;
870- uint8_t rqes_posted;
871-};
872-
873-struct t3_rdma_init_wr {
874- struct fw_riwrh wrh; /* 0 */
875- union t3_wrid wrid; /* 1 */
876- uint32_t qpid; /* 2 */
877- uint32_t pdid;
878- uint32_t scqid; /* 3 */
879- uint32_t rcqid;
880- uint32_t rq_addr; /* 4 */
881- uint32_t rq_size;
882- enum t3_mpa_attrs mpaattrs:8; /* 5 */
883- enum t3_qp_caps qpcaps:8;
884- uint32_t ulpdu_size:16;
885- uint32_t rqes_posted; /* bits 31-1 - reservered */
886- /* bit 0 - set if RECV posted */
887- uint32_t ord; /* 6 */
888- uint32_t ird;
889- uint64_t qp_dma_addr; /* 7 */
890- uint32_t qp_dma_size; /* 8 */
891- uint32_t rsvd;
892-};
893-
894-union t3_wr {
895- struct t3_send_wr send;
896- struct t3_rdma_write_wr write;
897- struct t3_rdma_read_wr read;
898- struct t3_receive_wr recv;
899- struct t3_local_inv_wr local_inv;
900- struct t3_bind_mw_wr bind;
901- struct t3_bypass_wr bypass;
902- struct t3_rdma_init_wr init;
903- struct t3_modify_qp_wr qp_mod;
904- uint64_t flit[16];
905-};
906-
907-#define T3_SQ_CQE_FLIT 13
908-#define T3_SQ_COOKIE_FLIT 14
909-
910-#define T3_RQ_COOKIE_FLIT 13
911-#define T3_RQ_CQE_FLIT 14
912-
913-static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
914- enum t3_wr_flags flags, uint8_t genbit,
915- uint32_t tid, uint8_t len)
916-{
917- wqe->op_seop_flags = htobe32(V_FW_RIWR_OP(op) |
918- V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
919- V_FW_RIWR_FLAGS(flags));
920- udma_to_device_barrier();
921- wqe->gen_tid_len = htobe32(V_FW_RIWR_GEN(genbit) | V_FW_RIWR_TID(tid) |
922- V_FW_RIWR_LEN(len));
923- /* 2nd gen bit... */
924- ((union t3_wr *)wqe)->flit[15] = htobe64(genbit);
925-}
926-
927-/*
928- * T3 ULP2_TX commands
929- */
930-enum t3_utx_mem_op {
931- T3_UTX_MEM_READ = 2,
932- T3_UTX_MEM_WRITE = 3
933-};
934-
935-/* T3 MC7 RDMA TPT entry format */
936-
937-enum tpt_mem_type {
938- TPT_NON_SHARED_MR = 0x0,
939- TPT_SHARED_MR = 0x1,
940- TPT_MW = 0x2,
941- TPT_MW_RELAXED_PROTECTION = 0x3
942-};
943-
944-enum tpt_addr_type {
945- TPT_ZBTO = 0,
946- TPT_VATO = 1
947-};
948-
949-enum tpt_mem_perm {
950- TPT_LOCAL_READ = 0x8,
951- TPT_LOCAL_WRITE = 0x4,
952- TPT_REMOTE_READ = 0x2,
953- TPT_REMOTE_WRITE = 0x1
954-};
955-
956-struct tpt_entry {
957- uint32_t valid_stag_pdid;
958- uint32_t flags_pagesize_qpid;
959-
960- uint32_t rsvd_pbl_addr;
961- uint32_t len;
962- uint32_t va_hi;
963- uint32_t va_low_or_fbo;
964-
965- uint32_t rsvd_bind_cnt_or_pstag;
966- uint32_t rsvd_pbl_size;
967-};
968-
969-#define S_TPT_VALID 31
970-#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
971-#define F_TPT_VALID V_TPT_VALID(1U)
972-
973-#define S_TPT_STAG_KEY 23
974-#define M_TPT_STAG_KEY 0xFF
975-#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
976-#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
977-
978-#define S_TPT_STAG_STATE 22
979-#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
980-#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)
981-
982-#define S_TPT_STAG_TYPE 20
983-#define M_TPT_STAG_TYPE 0x3
984-#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
985-#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
986-
987-#define S_TPT_PDID 0
988-#define M_TPT_PDID 0xFFFFF
989-#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
990-#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)
991-
992-#define S_TPT_PERM 28
993-#define M_TPT_PERM 0xF
994-#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
995-#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)
996-
997-#define S_TPT_REM_INV_DIS 27
998-#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
999-#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)
1000-
1001-#define S_TPT_ADDR_TYPE 26
1002-#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
1003-#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)
1004-
1005-#define S_TPT_MW_BIND_ENABLE 25
1006-#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
1007-#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)
1008-
1009-#define S_TPT_PAGE_SIZE 20
1010-#define M_TPT_PAGE_SIZE 0x1F
1011-#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
1012-#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
1013-
1014-#define S_TPT_PBL_ADDR 0
1015-#define M_TPT_PBL_ADDR 0x1FFFFFFF
1016-#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
1017-#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
1018-
1019-#define S_TPT_QPID 0
1020-#define M_TPT_QPID 0xFFFFF
1021-#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
1022-#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)
1023-
1024-#define S_TPT_PSTAG 0
1025-#define M_TPT_PSTAG 0xFFFFFF
1026-#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
1027-#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
1028-
1029-#define S_TPT_PBL_SIZE 0
1030-#define M_TPT_PBL_SIZE 0xFFFFF
1031-#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
1032-#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
1033-
1034-/*
1035- * CQE defs
1036- */
1037-struct t3_cqe {
1038- uint32_t header:32;
1039- uint32_t len:32;
1040- uint32_t wrid_hi_stag:32;
1041- uint32_t wrid_low_msn:32;
1042-};
1043-
1044-#define S_CQE_OOO 31
1045-#define M_CQE_OOO 0x1
1046-#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
1047-#define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)
1048-
1049-#define S_CQE_QPID 12
1050-#define M_CQE_QPID 0x7FFFF
1051-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
1052-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
1053-
1054-#define S_CQE_SWCQE 11
1055-#define M_CQE_SWCQE 0x1
1056-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
1057-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
1058-
1059-#define S_CQE_GENBIT 10
1060-#define M_CQE_GENBIT 0x1
1061-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
1062-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
1063-
1064-#define S_CQE_STATUS 5
1065-#define M_CQE_STATUS 0x1F
1066-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
1067-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
1068-
1069-#define S_CQE_TYPE 4
1070-#define M_CQE_TYPE 0x1
1071-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
1072-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
1073-
1074-#define S_CQE_OPCODE 0
1075-#define M_CQE_OPCODE 0xF
1076-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
1077-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
1078-
1079-#define SW_CQE(x) (G_CQE_SWCQE(be32toh((x).header)))
1080-#define CQE_OOO(x) (G_CQE_OOO(be32toh((x).header)))
1081-#define CQE_QPID(x) (G_CQE_QPID(be32toh((x).header)))
1082-#define CQE_GENBIT(x) (G_CQE_GENBIT(be32toh((x).header)))
1083-#define CQE_TYPE(x) (G_CQE_TYPE(be32toh((x).header)))
1084-#define SQ_TYPE(x) (CQE_TYPE((x)))
1085-#define RQ_TYPE(x) (!CQE_TYPE((x)))
1086-#define CQE_STATUS(x) (G_CQE_STATUS(be32toh((x).header)))
1087-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32toh((x).header)))
1088-
1089-#define CQE_LEN(x) (be32toh((x).len))
1090-
1091-#define CQE_WRID_HI(x) (be32toh((x).wrid_hi_stag))
1092-#define CQE_WRID_LOW(x) (be32toh((x).wrid_low_msn))
1093-
1094-/* used for RQ completion processing */
1095-#define CQE_WRID_STAG(x) (be32toh((x).wrid_hi_stag))
1096-#define CQE_WRID_MSN(x) (be32toh((x).wrid_low_msn))
1097-
1098-/* used for SQ completion processing */
1099-#define CQE_WRID_SQ_WPTR(x) ((x).wrid_hi_stag)
1100-#define CQE_WRID_WPTR(x) ((x).wrid_low_msn)
1101-
1102-#define TPT_ERR_SUCCESS 0x0
1103-#define TPT_ERR_STAG 0x1 /* STAG invalid: either the */
1104- /* STAG is offlimt, being 0, */
1105- /* or STAG_key mismatch */
1106-#define TPT_ERR_PDID 0x2 /* PDID mismatch */
1107-#define TPT_ERR_QPID 0x3 /* QPID mismatch */
1108-#define TPT_ERR_ACCESS 0x4 /* Invalid access right */
1109-#define TPT_ERR_WRAP 0x5 /* Wrap error */
1110-#define TPT_ERR_BOUND 0x6 /* base and bounds voilation */
1111-#define TPT_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */
1112- /* shared memory region */
1113-#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
1114- /* shared memory region */
1115-#define TPT_ERR_ECC 0x9 /* ECC error detected */
1116-#define TPT_ERR_ECC_PSTAG 0xA /* ECC error detected when */
1117- /* reading PSTAG for a MW */
1118- /* Invalidate */
1119-#define TPT_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */
1120- /* software error */
1121-#define TPT_ERR_SWFLUSH 0xC /* SW FLUSHED */
1122-#define TPT_ERR_CRC 0x10 /* CRC error */
1123-#define TPT_ERR_MARKER 0x11 /* Marker error */
1124-#define TPT_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */
1125-#define TPT_ERR_OUT_OF_RQE 0x13 /* out of RQE */
1126-#define TPT_ERR_DDP_VERSION 0x14 /* wrong DDP version */
1127-#define TPT_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */
1128-#define TPT_ERR_OPCODE 0x16 /* invalid rdma opcode */
1129-#define TPT_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
1130-#define TPT_ERR_MSN 0x18 /* MSN error */
1131-#define TPT_ERR_TBIT 0x19 /* tag bit not set correctly */
1132-#define TPT_ERR_MO 0x1A /* MO not 0 for TERMINATE */
1133- /* or READ_REQ */
1134-#define TPT_ERR_MSN_GAP 0x1B
1135-#define TPT_ERR_MSN_RANGE 0x1C
1136-#define TPT_ERR_IRD_OVERFLOW 0x1D
1137-#define TPT_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */
1138- /* software error */
1139-#define TPT_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */
1140- /* mismatch) */
1141-
1142-struct t3_swsq {
1143- uint64_t wr_id;
1144- struct t3_cqe cqe;
1145- uint32_t sq_wptr;
1146- uint32_t read_len;
1147- int opcode;
1148- int complete;
1149- int signaled;
1150-};
1151-
1152-/*
1153- * A T3 WQ implements both the SQ and RQ.
1154- */
1155-struct t3_wq {
1156- union t3_wr *queue; /* DMA Mapped work queue */
1157- uint32_t error; /* 1 once we go to ERROR */
1158- uint32_t qpid;
1159- uint32_t wptr; /* idx to next available WR slot */
1160- uint32_t size_log2; /* total wq size */
1161- struct t3_swsq *sq; /* SW SQ */
1162- struct t3_swsq *oldest_read; /* tracks oldest pending read */
1163- uint32_t sq_wptr; /* sq_wptr - sq_rptr == count of */
1164- uint32_t sq_rptr; /* pending wrs */
1165- uint32_t sq_size_log2; /* sq size */
1166- uint64_t *rq; /* SW RQ (holds consumer wr_ids) */
1167- uint32_t rq_wptr; /* rq_wptr - rq_rptr == count of */
1168- uint32_t rq_rptr; /* pending wrs */
1169- uint32_t rq_size_log2; /* rq size */
1170- volatile uint32_t *doorbell; /* mapped adapter doorbell register */
1171- int flushed;
1172-};
1173-
1174-struct t3_cq {
1175- uint32_t cqid;
1176- uint32_t rptr;
1177- uint32_t wptr;
1178- uint32_t size_log2;
1179- struct t3_cqe *queue;
1180- struct t3_cqe *sw_queue;
1181- uint32_t sw_rptr;
1182- uint32_t sw_wptr;
1183- uint32_t memsize;
1184-};
1185-
1186-static inline unsigned t3_wq_depth(struct t3_wq *wq)
1187-{
1188- return (1UL<<wq->size_log2);
1189-}
1190-
1191-static inline unsigned t3_sq_depth(struct t3_wq *wq)
1192-{
1193- return (1UL<<wq->sq_size_log2);
1194-}
1195-
1196-static inline unsigned t3_rq_depth(struct t3_wq *wq)
1197-{
1198- return (1UL<<wq->rq_size_log2);
1199-}
1200-
1201-static inline unsigned t3_cq_depth(struct t3_cq *cq)
1202-{
1203- return (1UL<<cq->size_log2);
1204-}
1205-
1206-extern unsigned long iwch_page_size;
1207-extern unsigned long iwch_page_shift;
1208-extern unsigned long iwch_page_mask;
1209-
1210-#define PAGE_ALIGN(x) (((x) + iwch_page_mask) & ~iwch_page_mask)
1211-
1212-static inline unsigned t3_wq_memsize(struct t3_wq *wq)
1213-{
1214- return PAGE_ALIGN((1UL<<wq->size_log2) * sizeof (union t3_wr));
1215-}
1216-
1217-static inline unsigned t3_cq_memsize(struct t3_cq *cq)
1218-{
1219- return cq->memsize;
1220-}
1221-
1222-static inline unsigned t3_mmid(uint32_t stag)
1223-{
1224- return (stag>>8);
1225-}
1226-
1227-struct t3_cq_status_page {
1228- uint32_t cq_err;
1229-};
1230-
1231-static inline int t3_cq_in_error(struct t3_cq *cq)
1232-{
1233- return ((struct t3_cq_status_page *)
1234- &cq->queue[1 << cq->size_log2])->cq_err;
1235-}
1236-
1237-static inline void t3_set_cq_in_error(struct t3_cq *cq)
1238-{
1239- ((struct t3_cq_status_page *)
1240- &cq->queue[1 << cq->size_log2])->cq_err = 1;
1241-}
1242-
1243-static inline void t3_reset_cq_in_error(struct t3_cq *cq)
1244-{
1245- ((struct t3_cq_status_page *)
1246- &cq->queue[1 << cq->size_log2])->cq_err = 0;
1247-}
1248-
1249-static inline int t3_wq_in_error(struct t3_wq *wq)
1250-{
1251- /*
1252- * The kernel sets bit 0 in the first WR of the WQ memory
1253- * when the QP moves out of RTS...
1254- */
1255- return (wq->queue->flit[13] & 1);
1256-}
1257-
1258-static inline void t3_set_wq_in_error(struct t3_wq *wq)
1259-{
1260- wq->queue->flit[13] |= 1;
1261-}
1262-
1263-static inline int t3_wq_db_enabled(struct t3_wq *wq)
1264-{
1265- return !(wq->queue->flit[13] & 2);
1266-}
1267-
1268-#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
1269- CQE_GENBIT(*cqe))
1270-
1271-static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
1272-{
1273- struct t3_cqe *cqe;
1274-
1275- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
1276- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
1277- return cqe;
1278- return NULL;
1279-}
1280-
1281-static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
1282-{
1283- struct t3_cqe *cqe;
1284-
1285- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
1286- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
1287- return cqe;
1288- }
1289- return NULL;
1290-}
1291-
1292-static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
1293-{
1294- struct t3_cqe *cqe;
1295-
1296- if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
1297- cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
1298- return cqe;
1299- }
1300- cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
1301- if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
1302- return cqe;
1303- return NULL;
1304-}
1305-
1306-/*
1307- * Return a ptr to the next read wr in the SWSQ or NULL.
1308- */
1309-static inline struct t3_swsq *next_read_wr(struct t3_wq *wq)
1310-{
1311- uint32_t rptr = wq->oldest_read - wq->sq + 1;
1312- int count = Q_COUNT(rptr, wq->sq_wptr);
1313- struct t3_swsq *sqp;
1314-
1315- while (count--) {
1316- sqp = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
1317-
1318- if (sqp->opcode == T3_READ_REQ)
1319- return sqp;
1320-
1321- rptr++;
1322- }
1323- return NULL;
1324-}
1325-#endif
1326diff --git a/providers/cxgb3/firmware_exports.h b/providers/cxgb3/firmware_exports.h
1327deleted file mode 100644
1328index 831140a4c..000000000
1329--- a/providers/cxgb3/firmware_exports.h
1330+++ /dev/null
1331@@ -1,148 +0,0 @@
1332-/*
1333- * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
1334- *
1335- * This software is available to you under a choice of one of two
1336- * licenses. You may choose to be licensed under the terms of the GNU
1337- * General Public License (GPL) Version 2, available from the file
1338- * COPYING in the main directory of this source tree, or the
1339- * OpenIB.org BSD license below:
1340- *
1341- * Redistribution and use in source and binary forms, with or
1342- * without modification, are permitted provided that the following
1343- * conditions are met:
1344- *
1345- * - Redistributions of source code must retain the above
1346- * copyright notice, this list of conditions and the following
1347- * disclaimer.
1348- *
1349- * - Redistributions in binary form must reproduce the above
1350- * copyright notice, this list of conditions and the following
1351- * disclaimer in the documentation and/or other materials
1352- * provided with the distribution.
1353- *
1354- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1355- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1356- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1357- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
1358- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
1359- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
1360- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1361- * SOFTWARE.
1362- */
1363-#ifndef _FIRMWARE_EXPORTS_H_
1364-#define _FIRMWARE_EXPORTS_H_
1365-
1366-/* WR OPCODES supported by the firmware.
1367- */
1368-#define FW_WROPCODE_FORWARD 0x01
1369-#define FW_WROPCODE_BYPASS 0x05
1370-
1371-#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
1372-
1373-#define FW_WROPOCDE_ULPTX_DATA_SGL 0x00
1374-#define FW_WROPCODE_ULPTX_MEM_READ 0x02
1375-#define FW_WROPCODE_ULPTX_PKT 0x04
1376-#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
1377-
1378-#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
1379-
1380-#define FW_WROPCODE_TOE_GETTCB_RPL 0x08
1381-#define FW_WROPCODE_TOE_CLOSE_CON 0x09
1382-#define FW_WROPCODE_TOE_TP_ABORT_CON_REQ 0x0A
1383-#define FW_WROPCODE_TOE_HOST_ABORT_CON_RPL 0x0F
1384-#define FW_WROPCODE_TOE_HOST_ABORT_CON_REQ 0x0B
1385-#define FW_WROPCODE_TOE_TP_ABORT_CON_RPL 0x0C
1386-#define FW_WROPCODE_TOE_TX_DATA 0x0D
1387-#define FW_WROPCODE_TOE_TX_DATA_ACK 0x0E
1388-
1389-#define FW_WROPCODE_RI_RDMA_INIT 0x10
1390-#define FW_WROPCODE_RI_RDMA_WRITE 0x11
1391-#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
1392-#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
1393-#define FW_WROPCODE_RI_SEND 0x14
1394-#define FW_WROPCODE_RI_TERMINATE 0x15
1395-#define FW_WROPCODE_RI_RDMA_READ 0x16
1396-#define FW_WROPCODE_RI_RECEIVE 0x17
1397-#define FW_WROPCODE_RI_BIND_MW 0x18
1398-#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
1399-#define FW_WROPCODE_RI_LOCAL_INV 0x1A
1400-#define FW_WROPCODE_RI_MODIFY_QP 0x1B
1401-#define FW_WROPCODE_RI_BYPASS 0x1C
1402-
1403-#define FW_WROPOCDE_RSVD 0x1E
1404-
1405-#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
1406-
1407-#define FW_WROPCODE_MNGT 0x1D
1408-#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
1409-
1410-/* Maximum size of a WR sent from the host, limited by the SGE.
1411- *
1412- * Note: WR coming from ULP or TP are only limited by CIM.
1413- */
1414-#define FW_WR_SIZE 128
1415-
1416-/* Maximum number of outstanding WRs sent from the host. Value must be
1417- * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by TOM to
1418- * limit the number of WRs per connection.
1419- */
1420-#ifndef N3
1421-# define FW_WR_NUM 16
1422-#else
1423-# define FW_WR_NUM 7
1424-#endif
1425-
1426-/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
1427- * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
1428- * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
1429- *
1430- * Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
1431- * to RESP Queue[i].
1432- */
1433-#define FW_TUNNEL_NUM 8
1434-#define FW_TUNNEL_SGEEC_START 8
1435-#define FW_TUNNEL_TID_START 65544
1436-
1437-
1438-/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
1439- * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
1440- * (or 'uP Token') FW_CTRL_TID_START.
1441- *
1442- * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
1443- */
1444-#define FW_CTRL_NUM 8
1445-#define FW_CTRL_SGEEC_START 65528
1446-#define FW_CTRL_TID_START 65536
1447-
1448-/* FW_TOE_NUM corresponds to the number of supported TOE Queues. These queues
1449- * must start at SGE Egress Context FW_TOE_SGEEC_START.
1450- *
1451- * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
1452- * TOE Queues, as the host is responsible for providing the correct TID in
1453- * every WR.
1454- *
1455- * Ingress Trafffic for TOE Queue[i] is sent to RESP Queue[i].
1456- */
1457-#define FW_TOE_NUM 8
1458-#define FW_TOE_SGEEC_START 0
1459-
1460-/*
1461- *
1462- */
1463-#define FW_RI_NUM 1
1464-#define FW_RI_SGEEC_START 65527
1465-#define FW_RI_TID_START 65552
1466-
1467-/*
1468- * The RX_PKT_TID
1469- */
1470-#define FW_RX_PKT_NUM 1
1471-#define FW_RX_PKT_TID_START 65553
1472-
1473-/* FW_WRC_NUM corresponds to the number of Work Request Context that supported
1474- * by the firmware.
1475- */
1476-#define FW_WRC_NUM (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM +\
1477- FW_RI_NUM + FW_RX_PKT_NUM)
1478-
1479-#endif /* _FIRMWARE_EXPORTS_H_ */
1480diff --git a/providers/cxgb3/iwch-abi.h b/providers/cxgb3/iwch-abi.h
1481deleted file mode 100644
1482index 047f84b7a..000000000
1483--- a/providers/cxgb3/iwch-abi.h
1484+++ /dev/null
1485@@ -1,51 +0,0 @@
1486-/*
1487- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
1488- *
1489- * This software is available to you under a choice of one of two
1490- * licenses. You may choose to be licensed under the terms of the GNU
1491- * General Public License (GPL) Version 2, available from the file
1492- * COPYING in the main directory of this source tree, or the
1493- * OpenIB.org BSD license below:
1494- *
1495- * Redistribution and use in source and binary forms, with or
1496- * without modification, are permitted provided that the following
1497- * conditions are met:
1498- *
1499- * - Redistributions of source code must retain the above
1500- * copyright notice, this list of conditions and the following
1501- * disclaimer.
1502- *
1503- * - Redistributions in binary form must reproduce the above
1504- * copyright notice, this list of conditions and the following
1505- * disclaimer in the documentation and/or other materials
1506- * provided with the distribution.
1507- *
1508- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1509- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1510- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1511- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
1512- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
1513- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
1514- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1515- * SOFTWARE.
1516- */
1517-#ifndef IWCH_ABI_H
1518-#define IWCH_ABI_H
1519-
1520-#include <stdint.h>
1521-#include <infiniband/kern-abi.h>
1522-#include <rdma/cxgb3-abi.h>
1523-#include <kernel-abi/cxgb3-abi.h>
1524-
1525-DECLARE_DRV_CMD(uiwch_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD,
1526- empty, iwch_alloc_pd_resp);
1527-DECLARE_DRV_CMD(uiwch_create_cq, IB_USER_VERBS_CMD_CREATE_CQ,
1528- iwch_create_cq_req, iwch_create_cq_resp);
1529-DECLARE_DRV_CMD(uiwch_create_qp, IB_USER_VERBS_CMD_CREATE_QP,
1530- empty, iwch_create_qp_resp);
1531-DECLARE_DRV_CMD(uiwch_alloc_ucontext, IB_USER_VERBS_CMD_GET_CONTEXT,
1532- empty, empty);
1533-DECLARE_DRV_CMD(uiwch_reg_mr, IB_USER_VERBS_CMD_REG_MR,
1534- empty, iwch_reg_user_mr_resp);
1535-
1536-#endif /* IWCH_ABI_H */
1537diff --git a/providers/cxgb3/iwch.c b/providers/cxgb3/iwch.c
1538deleted file mode 100644
1539index 6f3c8b9f1..000000000
1540--- a/providers/cxgb3/iwch.c
1541+++ /dev/null
1542@@ -1,269 +0,0 @@
1543-/*
1544- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
1545- *
1546- * This software is available to you under a choice of one of two
1547- * licenses. You may choose to be licensed under the terms of the GNU
1548- * General Public License (GPL) Version 2, available from the file
1549- * COPYING in the main directory of this source tree, or the
1550- * OpenIB.org BSD license below:
1551- *
1552- * Redistribution and use in source and binary forms, with or
1553- * without modification, are permitted provided that the following
1554- * conditions are met:
1555- *
1556- * - Redistributions of source code must retain the above
1557- * copyright notice, this list of conditions and the following
1558- * disclaimer.
1559- *
1560- * - Redistributions in binary form must reproduce the above
1561- * copyright notice, this list of conditions and the following
1562- * disclaimer in the documentation and/or other materials
1563- * provided with the distribution.
1564- *
1565- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1566- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1567- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1568- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
1569- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
1570- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
1571- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1572- * SOFTWARE.
1573- */
1574-#include <config.h>
1575-
1576-#include <stdio.h>
1577-#include <stdlib.h>
1578-#include <unistd.h>
1579-#include <errno.h>
1580-#include <sys/mman.h>
1581-#include <pthread.h>
1582-#include <string.h>
1583-
1584-#include "iwch.h"
1585-#include "iwch-abi.h"
1586-
1587-#define PCI_VENDOR_ID_CHELSIO 0x1425
1588-#define PCI_DEVICE_ID_CHELSIO_PE9000_2C 0x0020
1589-#define PCI_DEVICE_ID_CHELSIO_T302E 0x0021
1590-#define PCI_DEVICE_ID_CHELSIO_T310E 0x0022
1591-#define PCI_DEVICE_ID_CHELSIO_T320X 0x0023
1592-#define PCI_DEVICE_ID_CHELSIO_T302X 0x0024
1593-#define PCI_DEVICE_ID_CHELSIO_T320E 0x0025
1594-#define PCI_DEVICE_ID_CHELSIO_T310X 0x0026
1595-#define PCI_DEVICE_ID_CHELSIO_T3B10 0x0030
1596-#define PCI_DEVICE_ID_CHELSIO_T3B20 0x0031
1597-#define PCI_DEVICE_ID_CHELSIO_T3B02 0x0032
1598-#define PCI_DEVICE_ID_CHELSIO_T3C20 0x0035
1599-#define PCI_DEVICE_ID_CHELSIO_S320E 0x0036
1600-
1601-#define HCA(v, d, t) \
1602- VERBS_PCI_MATCH(PCI_VENDOR_ID_##v, PCI_DEVICE_ID_CHELSIO_##d, \
1603- (void *)(CHELSIO_##t))
1604-static const struct verbs_match_ent hca_table[] = {
1605- HCA(CHELSIO, PE9000_2C, T3B),
1606- HCA(CHELSIO, T302E, T3A),
1607- HCA(CHELSIO, T302X, T3A),
1608- HCA(CHELSIO, T310E, T3A),
1609- HCA(CHELSIO, T310X, T3A),
1610- HCA(CHELSIO, T320E, T3A),
1611- HCA(CHELSIO, T320X, T3A),
1612- HCA(CHELSIO, T3B10, T3B),
1613- HCA(CHELSIO, T3B20, T3B),
1614- HCA(CHELSIO, T3B02, T3B),
1615- HCA(CHELSIO, T3C20, T3B),
1616- HCA(CHELSIO, S320E, T3B),
1617- {},
1618-};
1619-
1620-static const struct verbs_context_ops iwch_ctx_common_ops = {
1621- .query_device = iwch_query_device,
1622- .query_port = iwch_query_port,
1623- .alloc_pd = iwch_alloc_pd,
1624- .dealloc_pd = iwch_free_pd,
1625- .reg_mr = iwch_reg_mr,
1626- .dereg_mr = iwch_dereg_mr,
1627- .create_cq = iwch_create_cq,
1628- .resize_cq = iwch_resize_cq,
1629- .destroy_cq = iwch_destroy_cq,
1630- .create_srq = iwch_create_srq,
1631- .modify_srq = iwch_modify_srq,
1632- .destroy_srq = iwch_destroy_srq,
1633- .create_qp = iwch_create_qp,
1634- .modify_qp = iwch_modify_qp,
1635- .destroy_qp = iwch_destroy_qp,
1636- .query_qp = iwch_query_qp,
1637- .create_ah = iwch_create_ah,
1638- .destroy_ah = iwch_destroy_ah,
1639- .attach_mcast = iwch_attach_mcast,
1640- .detach_mcast = iwch_detach_mcast,
1641- .post_srq_recv = iwch_post_srq_recv,
1642- .req_notify_cq = iwch_arm_cq,
1643-};
1644-
1645-static const struct verbs_context_ops iwch_ctx_t3a_ops = {
1646- .poll_cq = t3a_poll_cq,
1647- .post_recv = t3a_post_recv,
1648- .post_send = t3a_post_send,
1649-};
1650-
1651-static const struct verbs_context_ops iwch_ctx_t3b_ops = {
1652- .async_event = t3b_async_event,
1653- .poll_cq = t3b_poll_cq,
1654- .post_recv = t3b_post_recv,
1655- .post_send = t3b_post_send,
1656-};
1657-
1658-unsigned long iwch_page_size;
1659-unsigned long iwch_page_shift;
1660-unsigned long iwch_page_mask;
1661-
1662-static struct verbs_context *iwch_alloc_context(struct ibv_device *ibdev,
1663- int cmd_fd,
1664- void *private_data)
1665-{
1666- struct iwch_context *context;
1667- struct ibv_get_context cmd;
1668- struct uiwch_alloc_ucontext_resp resp;
1669- struct iwch_device *rhp = to_iwch_dev(ibdev);
1670-
1671- context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
1672- RDMA_DRIVER_CXGB3);
1673- if (!context)
1674- return NULL;
1675-
1676- if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof cmd,
1677- &resp.ibv_resp, sizeof resp))
1678- goto err_free;
1679-
1680- verbs_set_ops(&context->ibv_ctx, &iwch_ctx_common_ops);
1681-
1682- switch (rhp->hca_type) {
1683- case CHELSIO_T3B:
1684- PDBG("%s T3B device\n", __FUNCTION__);
1685- verbs_set_ops(&context->ibv_ctx, &iwch_ctx_t3b_ops);
1686- break;
1687- case CHELSIO_T3A:
1688- PDBG("%s T3A device\n", __FUNCTION__);
1689- verbs_set_ops(&context->ibv_ctx, &iwch_ctx_t3a_ops);
1690- break;
1691- default:
1692- PDBG("%s unknown hca type %d\n", __FUNCTION__, rhp->hca_type);
1693- goto err_free;
1694- break;
1695- }
1696-
1697- return &context->ibv_ctx;
1698-
1699-err_free:
1700- verbs_uninit_context(&context->ibv_ctx);
1701- free(context);
1702- return NULL;
1703-}
1704-
1705-static void iwch_free_context(struct ibv_context *ibctx)
1706-{
1707- struct iwch_context *context = to_iwch_ctx(ibctx);
1708-
1709- verbs_uninit_context(&context->ibv_ctx);
1710- free(context);
1711-}
1712-
1713-static void iwch_uninit_device(struct verbs_device *verbs_device)
1714-{
1715- struct iwch_device *dev = to_iwch_dev(&verbs_device->device);
1716-
1717- free(dev);
1718-}
1719-
1720-static bool iwch_device_match(struct verbs_sysfs_dev *sysfs_dev)
1721-{
1722- char value[32], *cp;
1723- unsigned int fw_maj, fw_min;
1724-
1725- /* Rely on the core code to match PCI devices */
1726- if (!sysfs_dev->match)
1727- return false;
1728-
1729- /*
1730- * Verify that the firmware major number matches. Major number
1731- * mismatches are fatal. Minor number mismatches are tolerated.
1732- */
1733- if (ibv_get_fw_ver(value, sizeof(value), sysfs_dev))
1734- return false;
1735-
1736- cp = strtok(value+1, ".");
1737- sscanf(cp, "%i", &fw_maj);
1738- cp = strtok(NULL, ".");
1739- sscanf(cp, "%i", &fw_min);
1740-
1741- if (fw_maj < FW_MAJ) {
1742- fprintf(stderr, "libcxgb3: Fatal firmware version mismatch. "
1743- "Firmware major number is %u and libcxgb3 needs %u.\n",
1744- fw_maj, FW_MAJ);
1745- fflush(stderr);
1746- return false;
1747- }
1748-
1749- DBGLOG("libcxgb3");
1750-
1751- if ((signed int)fw_min < FW_MIN) {
1752- PDBG("libcxgb3: non-fatal firmware version mismatch. "
1753- "Firmware minor number is %u and libcxgb3 needs %u.\n",
1754- fw_min, FW_MIN);
1755- fflush(stderr);
1756- }
1757-
1758- return true;
1759-}
1760-
1761-static struct verbs_device *iwch_device_alloc(struct verbs_sysfs_dev *sysfs_dev)
1762-{
1763- struct iwch_device *dev;
1764-
1765- dev = calloc(1, sizeof(*dev));
1766- if (!dev)
1767- return NULL;
1768-
1769- pthread_spin_init(&dev->lock, PTHREAD_PROCESS_PRIVATE);
1770- dev->hca_type = (uintptr_t)sysfs_dev->match->driver_data;
1771- dev->abi_version = sysfs_dev->abi_ver;
1772-
1773- iwch_page_size = sysconf(_SC_PAGESIZE);
1774- iwch_page_shift = long_log2(iwch_page_size);
1775- iwch_page_mask = iwch_page_size - 1;
1776-
1777- dev->mmid2ptr = calloc(T3_MAX_NUM_STAG, sizeof(void *));
1778- if (!dev->mmid2ptr) {
1779- goto err1;
1780- }
1781- dev->qpid2ptr = calloc(T3_MAX_NUM_QP, sizeof(void *));
1782- if (!dev->qpid2ptr) {
1783- goto err2;
1784- }
1785- dev->cqid2ptr = calloc(T3_MAX_NUM_CQ, sizeof(void *));
1786- if (!dev->cqid2ptr)
1787- goto err3;
1788-
1789- return &dev->ibv_dev;
1790-
1791-err3:
1792- free(dev->qpid2ptr);
1793-err2:
1794- free(dev->mmid2ptr);
1795-err1:
1796- free(dev);
1797- return NULL;
1798-}
1799-
1800-static const struct verbs_device_ops iwch_dev_ops = {
1801- .name = "cxgb3",
1802- .match_min_abi_version = 0,
1803- .match_max_abi_version = ABI_VERS,
1804- .match_table = hca_table,
1805- .match_device = iwch_device_match,
1806- .alloc_device = iwch_device_alloc,
1807- .uninit_device = iwch_uninit_device,
1808- .alloc_context = iwch_alloc_context,
1809- .free_context = iwch_free_context,
1810-};
1811-PROVIDER_DRIVER(cxgb3, iwch_dev_ops);
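
For reference, iwch_device_match above treats a firmware major-number mismatch as fatal but tolerates minor-number skew with only a logged warning. A minimal standalone sketch of that version parse, assuming the sysfs fw_ver string carries a leading type character followed by dot-separated major.minor fields (the value+1 offset above implies as much); fw_version_ok and the added NULL checks are illustrative, not part of the original:

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical helper mirroring iwch_device_match's parse of a
	 * fw_ver string such as "T5.0.0": skip the type byte, then read
	 * dot-separated major and minor numbers. */
	static int fw_version_ok(char *value, unsigned need_maj)
	{
		unsigned fw_maj = 0, fw_min = 0;
		char *cp = strtok(value + 1, ".");

		if (!cp || sscanf(cp, "%u", &fw_maj) != 1)
			return 0;
		cp = strtok(NULL, ".");
		if (cp)
			sscanf(cp, "%u", &fw_min);	/* minor skew is tolerated */

		return fw_maj >= need_maj;		/* major mismatch is fatal */
	}

	int main(void)
	{
		char ver[] = "T5.0.0";

		printf("firmware ok: %d\n", fw_version_ok(ver, 5));
		return 0;
	}
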
1812diff --git a/providers/cxgb3/iwch.h b/providers/cxgb3/iwch.h
1813deleted file mode 100644
1814index c7d85d3aa..000000000
1815--- a/providers/cxgb3/iwch.h
1816+++ /dev/null
1817@@ -1,218 +0,0 @@
1818-/*
1819- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
1820- *
1821- * This software is available to you under a choice of one of two
1822- * licenses. You may choose to be licensed under the terms of the GNU
1823- * General Public License (GPL) Version 2, available from the file
1824- * COPYING in the main directory of this source tree, or the
1825- * OpenIB.org BSD license below:
1826- *
1827- * Redistribution and use in source and binary forms, with or
1828- * without modification, are permitted provided that the following
1829- * conditions are met:
1830- *
1831- * - Redistributions of source code must retain the above
1832- * copyright notice, this list of conditions and the following
1833- * disclaimer.
1834- *
1835- * - Redistributions in binary form must reproduce the above
1836- * copyright notice, this list of conditions and the following
1837- * disclaimer in the documentation and/or other materials
1838- * provided with the distribution.
1839- *
1840- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
1841- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
1842- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
1843- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
1844- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
1845- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
1846- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
1847- * SOFTWARE.
1848- */
1849-#ifndef IWCH_H
1850-#define IWCH_H
1851-
1852-#include <pthread.h>
1853-#include <inttypes.h>
1854-#include <stddef.h>
1855-
1856-#include <infiniband/driver.h>
1857-#include <util/udma_barrier.h>
1858-
1859-#include "cxio_wr.h"
1860-
1861-enum iwch_hca_type {
1862- CHELSIO_T3A = 0,
1863- CHELSIO_T3B = 1,
1864-};
1865-
1866-struct iwch_mr;
1867-
1868-#define ABI_VERS 1
1869-
1870-struct iwch_device {
1871- struct verbs_device ibv_dev;
1872- enum iwch_hca_type hca_type;
1873- struct iwch_mr **mmid2ptr;
1874- struct iwch_qp **qpid2ptr;
1875- struct iwch_cq **cqid2ptr;
1876- pthread_spinlock_t lock;
1877- int abi_version;
1878-};
1879-
1880-static inline int t3b_device(struct iwch_device *dev)
1881-{
1882- return (dev->hca_type == CHELSIO_T3B);
1883-}
1884-
1885-static inline int t3a_device(struct iwch_device *dev)
1886-{
1887- return (dev->hca_type == CHELSIO_T3A);
1888-}
1889-
1890-struct iwch_context {
1891- struct verbs_context ibv_ctx;
1892-};
1893-
1894-struct iwch_pd {
1895- struct ibv_pd ibv_pd;
1896-};
1897-
1898-struct iwch_mr {
1899- struct verbs_mr vmr;
1900- uint64_t va_fbo;
1901- uint32_t page_size;
1902- uint32_t pbl_addr;
1903- uint32_t len;
1904-};
1905-
1906-struct iwch_cq {
1907- struct ibv_cq ibv_cq;
1908- struct iwch_device *rhp;
1909- struct t3_cq cq;
1910- pthread_spinlock_t lock;
1911-};
1912-
1913-struct iwch_qp {
1914- struct ibv_qp ibv_qp;
1915- struct iwch_device *rhp;
1916- struct t3_wq wq;
1917- pthread_spinlock_t lock;
1918- int sq_sig_all;
1919-};
1920-
1921-#define to_iwch_xxx(xxx, type) \
1922- container_of(ib##xxx, struct iwch_##type, ibv_##xxx)
1923-
1924-static inline struct iwch_device *to_iwch_dev(struct ibv_device *ibdev)
1925-{
1926- return container_of(ibdev, struct iwch_device, ibv_dev.device);
1927-}
1928-
1929-static inline struct iwch_context *to_iwch_ctx(struct ibv_context *ibctx)
1930-{
1931- return container_of(ibctx, struct iwch_context, ibv_ctx.context);
1932-}
1933-
1934-static inline struct iwch_pd *to_iwch_pd(struct ibv_pd *ibpd)
1935-{
1936- return to_iwch_xxx(pd, pd);
1937-}
1938-
1939-static inline struct iwch_cq *to_iwch_cq(struct ibv_cq *ibcq)
1940-{
1941- return to_iwch_xxx(cq, cq);
1942-}
1943-
1944-static inline struct iwch_qp *to_iwch_qp(struct ibv_qp *ibqp)
1945-{
1946- return to_iwch_xxx(qp, qp);
1947-}
1948-
1949-static inline struct iwch_mr *to_iwch_mr(struct verbs_mr *vmr)
1950-{
1951- return container_of(vmr, struct iwch_mr, vmr);
1952-}
1953-
1954-static inline unsigned long long_log2(unsigned long x)
1955-{
1956- unsigned long r = 0;
1957- for (x >>= 1; x > 0; x >>= 1)
1958- r++;
1959- return r;
1960-}
1961-
1962-extern int iwch_query_device(struct ibv_context *context,
1963- struct ibv_device_attr *attr);
1964-extern int iwch_query_port(struct ibv_context *context, uint8_t port,
1965- struct ibv_port_attr *attr);
1966-
1967-extern struct ibv_pd *iwch_alloc_pd(struct ibv_context *context);
1968-extern int iwch_free_pd(struct ibv_pd *pd);
1969-
1970-extern struct ibv_mr *iwch_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
1971- uint64_t hca_va, int access);
1972-extern int iwch_dereg_mr(struct verbs_mr *mr);
1973-
1974-struct ibv_cq *iwch_create_cq(struct ibv_context *context, int cqe,
1975- struct ibv_comp_channel *channel,
1976- int comp_vector);
1977-extern int iwch_resize_cq(struct ibv_cq *cq, int cqe);
1978-extern int iwch_destroy_cq(struct ibv_cq *cq);
1979-extern int t3a_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
1980-extern int t3b_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
1981-extern int iwch_arm_cq(struct ibv_cq *cq, int solicited);
1982-extern void iwch_cq_event(struct ibv_cq *cq);
1983-extern void iwch_init_cq_buf(struct iwch_cq *cq, int nent);
1984-
1985-extern struct ibv_srq *iwch_create_srq(struct ibv_pd *pd,
1986- struct ibv_srq_init_attr *attr);
1987-extern int iwch_modify_srq(struct ibv_srq *srq,
1988- struct ibv_srq_attr *attr,
1989- int mask);
1990-extern int iwch_destroy_srq(struct ibv_srq *srq);
1991-extern int iwch_post_srq_recv(struct ibv_srq *ibsrq,
1992- struct ibv_recv_wr *wr,
1993- struct ibv_recv_wr **bad_wr);
1994-
1995-extern struct ibv_qp *iwch_create_qp(struct ibv_pd *pd,
1996- struct ibv_qp_init_attr *attr);
1997-extern int iwch_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
1998- int attr_mask);
1999-extern int iwch_destroy_qp(struct ibv_qp *qp);
2000-extern int iwch_query_qp(struct ibv_qp *qp,
2001- struct ibv_qp_attr *attr,
2002- int attr_mask,
2003- struct ibv_qp_init_attr *init_attr);
2004-extern void iwch_flush_qp(struct iwch_qp *qhp);
2005-extern void iwch_flush_qps(struct iwch_device *dev);
2006-extern int t3a_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
2007- struct ibv_send_wr **bad_wr);
2008-extern int t3b_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
2009- struct ibv_send_wr **bad_wr);
2010-extern int t3a_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
2011- struct ibv_recv_wr **bad_wr);
2012-extern int t3b_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
2013- struct ibv_recv_wr **bad_wr);
2014-extern struct ibv_ah *iwch_create_ah(struct ibv_pd *pd,
2015- struct ibv_ah_attr *ah_attr);
2016-extern int iwch_destroy_ah(struct ibv_ah *ah);
2017-extern int iwch_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
2018- uint16_t lid);
2019-extern int iwch_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
2020- uint16_t lid);
2021-extern void t3b_async_event(struct ibv_context *context,
2022- struct ibv_async_event *event);
2023-#ifdef DEBUG
2024-#include <syslog.h>
2025-#define DBGLOG(s) openlog(s, LOG_NDELAY|LOG_PID, LOG_LOCAL7)
2026-#define PDBG(fmt, args...) do {syslog(LOG_DEBUG, fmt, ##args);} while (0)
2027-#else
2028-#define DBGLOG(s)
2029-#define PDBG(fmt, args...) do {} while (0)
2030-#endif
2031-
2032-#define FW_MAJ 5
2033-#define FW_MIN 0
2034-
2035-#endif /* IWCH_H */
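
The long_log2 helper declared above is a plain floor-log2; iwch.c used it to derive iwch_page_shift from sysconf(_SC_PAGESIZE), with iwch_page_mask as size - 1. A self-contained sketch of that page math, assuming (as the driver does) a power-of-two page size:

	#include <stdio.h>
	#include <unistd.h>

	/* Same loop as the header above: floor(log2(x)). */
	static unsigned long long_log2(unsigned long x)
	{
		unsigned long r = 0;

		for (x >>= 1; x > 0; x >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		unsigned long size = sysconf(_SC_PAGESIZE);
		unsigned long shift = long_log2(size);
		unsigned long mask = size - 1;

		/* On a 4096-byte page: size 4096, shift 12, mask 0xfff. */
		printf("size %lu shift %lu mask 0x%lx\n", size, shift, mask);
		return 0;
	}
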
2036diff --git a/providers/cxgb3/qp.c b/providers/cxgb3/qp.c
2037deleted file mode 100644
2038index 4a1e7397c..000000000
2039--- a/providers/cxgb3/qp.c
2040+++ /dev/null
2041@@ -1,560 +0,0 @@
2042-/*
2043- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
2044- *
2045- * This software is available to you under a choice of one of two
2046- * licenses. You may choose to be licensed under the terms of the GNU
2047- * General Public License (GPL) Version 2, available from the file
2048- * COPYING in the main directory of this source tree, or the
2049- * OpenIB.org BSD license below:
2050- *
2051- * Redistribution and use in source and binary forms, with or
2052- * without modification, are permitted provided that the following
2053- * conditions are met:
2054- *
2055- * - Redistributions of source code must retain the above
2056- * copyright notice, this list of conditions and the following
2057- * disclaimer.
2058- *
2059- * - Redistributions in binary form must reproduce the above
2060- * copyright notice, this list of conditions and the following
2061- * disclaimer in the documentation and/or other materials
2062- * provided with the distribution.
2063- *
2064- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2065- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2066- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2067- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2068- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2069- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
2070- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
2071- * SOFTWARE.
2072- */
2073-#include <config.h>
2074-
2075-#include <stdlib.h>
2076-#include <pthread.h>
2077-#include <string.h>
2078-
2079-#include "iwch.h"
2080-#include <stdio.h>
2081-
2082-#define ROUNDUP8(a) (((a) + 7) & ~7)
2083-
2084-static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ibv_send_wr *wr,
2085- uint8_t *flit_cnt)
2086-{
2087- int i;
2088-
2089- if (wr->num_sge > T3_MAX_SGE)
2090- return -1;
2091- if (wr->send_flags & IBV_SEND_SOLICITED)
2092- wqe->send.rdmaop = T3_SEND_WITH_SE;
2093- else
2094- wqe->send.rdmaop = T3_SEND;
2095- wqe->send.rem_stag = 0;
2096- wqe->send.reserved = 0;
2097- if ((wr->send_flags & IBV_SEND_INLINE) || wr->num_sge == 0) {
2098- uint8_t *datap;
2099-
2100- wqe->send.plen = 0;
2101- datap = (uint8_t *)&wqe->send.sgl[0];
2102- wqe->send.num_sgle = 0; /* indicates in-line data */
2103- for (i = 0; i < wr->num_sge; i++) {
2104- if ((wqe->send.plen + wr->sg_list[i].length) >
2105- T3_MAX_INLINE)
2106- return -1;
2107- wqe->send.plen += wr->sg_list[i].length;
2108- memcpy(datap,
2109- (void *)(unsigned long)wr->sg_list[i].addr,
2110- wr->sg_list[i].length);
2111- datap += wr->sg_list[i].length;
2112- }
2113- *flit_cnt = 4 + (ROUNDUP8(wqe->send.plen) >> 3);
2114- wqe->send.plen = htobe32(wqe->send.plen);
2115- } else {
2116- wqe->send.plen = 0;
2117- for (i = 0; i < wr->num_sge; i++) {
2118- if ((wqe->send.plen + wr->sg_list[i].length) <
2119- wqe->send.plen) {
2120- return -1;
2121- }
2122- wqe->send.plen += wr->sg_list[i].length;
2123- wqe->send.sgl[i].stag =
2124- htobe32(wr->sg_list[i].lkey);
2125- wqe->send.sgl[i].len =
2126- htobe32(wr->sg_list[i].length);
2127- wqe->send.sgl[i].to = htobe64(wr->sg_list[i].addr);
2128- }
2129- wqe->send.plen = htobe32(wqe->send.plen);
2130- wqe->send.num_sgle = htobe32(wr->num_sge);
2131- *flit_cnt = 4 + ((wr->num_sge) << 1);
2132- }
2133- return 0;
2134-}
2135-
2136-static inline int iwch_build_rdma_write(union t3_wr *wqe,
2137- struct ibv_send_wr *wr,
2138- uint8_t *flit_cnt)
2139-{
2140- int i;
2141-
2142- if (wr->num_sge > T3_MAX_SGE)
2143- return -1;
2144- wqe->write.rdmaop = T3_RDMA_WRITE;
2145- wqe->write.reserved = 0;
2146- wqe->write.stag_sink = htobe32(wr->wr.rdma.rkey);
2147- wqe->write.to_sink = htobe64(wr->wr.rdma.remote_addr);
2148-
2149- wqe->write.num_sgle = wr->num_sge;
2150-
2151- if ((wr->send_flags & IBV_SEND_INLINE) || wr->num_sge == 0) {
2152- uint8_t *datap;
2153-
2154- wqe->write.plen = 0;
2155- datap = (uint8_t *)&wqe->write.sgl[0];
2156- wqe->write.num_sgle = 0; /* indicates in-line data */
2157- for (i = 0; i < wr->num_sge; i++) {
2158- if ((wqe->write.plen + wr->sg_list[i].length) >
2159- T3_MAX_INLINE)
2160- return -1;
2161- wqe->write.plen += wr->sg_list[i].length;
2162- memcpy(datap,
2163- (void *)(unsigned long)wr->sg_list[i].addr,
2164- wr->sg_list[i].length);
2165- datap += wr->sg_list[i].length;
2166- }
2167- *flit_cnt = 5 + (ROUNDUP8(wqe->write.plen) >> 3);
2168- wqe->write.plen = htobe32(wqe->write.plen);
2169- } else {
2170- wqe->write.plen = 0;
2171- for (i = 0; i < wr->num_sge; i++) {
2172- if ((wqe->write.plen + wr->sg_list[i].length) <
2173- wqe->write.plen) {
2174- return -1;
2175- }
2176- wqe->write.plen += wr->sg_list[i].length;
2177- wqe->write.sgl[i].stag =
2178- htobe32(wr->sg_list[i].lkey);
2179- wqe->write.sgl[i].len =
2180- htobe32(wr->sg_list[i].length);
2181- wqe->write.sgl[i].to =
2182- htobe64(wr->sg_list[i].addr);
2183- }
2184- wqe->write.plen = htobe32(wqe->write.plen);
2185- wqe->write.num_sgle = htobe32(wr->num_sge);
2186- *flit_cnt = 5 + ((wr->num_sge) << 1);
2187- }
2188- return 0;
2189-}
2190-
2191-static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ibv_send_wr *wr,
2192- uint8_t *flit_cnt)
2193-{
2194- if (wr->num_sge > 1)
2195- return -1;
2196- wqe->read.rdmaop = T3_READ_REQ;
2197- wqe->read.reserved = 0;
2198- if (wr->num_sge == 1 && wr->sg_list[0].length > 0) {
2199- wqe->read.rem_stag = htobe32(wr->wr.rdma.rkey);
2200- wqe->read.rem_to = htobe64(wr->wr.rdma.remote_addr);
2201- wqe->read.local_stag = htobe32(wr->sg_list[0].lkey);
2202- wqe->read.local_len = htobe32(wr->sg_list[0].length);
2203- wqe->read.local_to = htobe64(wr->sg_list[0].addr);
2204- } else {
2205-
2206- /* build passable 0B read request */
2207- wqe->read.rem_stag = 2;
2208- wqe->read.rem_to = 2;
2209- wqe->read.local_stag = 2;
2210- wqe->read.local_len = 0;
2211- wqe->read.local_to = 2;
2212- }
2213- *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
2214- return 0;
2215-}
2216-
2217-int t3b_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
2218- struct ibv_send_wr **bad_wr)
2219-{
2220- int err = 0;
2221- uint8_t t3_wr_flit_cnt;
2222- enum t3_wr_opcode t3_wr_opcode = 0;
2223- enum t3_wr_flags t3_wr_flags;
2224- struct iwch_qp *qhp;
2225- uint32_t idx;
2226- union t3_wr *wqe;
2227- uint32_t num_wrs;
2228- struct t3_swsq *sqp;
2229-
2230- qhp = to_iwch_qp(ibqp);
2231- pthread_spin_lock(&qhp->lock);
2232- if (t3_wq_in_error(&qhp->wq)) {
2233- iwch_flush_qp(qhp);
2234- pthread_spin_unlock(&qhp->lock);
2235- return -1;
2236- }
2237- num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
2238- qhp->wq.sq_size_log2);
2239- if (num_wrs <= 0) {
2240- pthread_spin_unlock(&qhp->lock);
2241- return -1;
2242- }
2243- while (wr) {
2244- if (num_wrs == 0) {
2245- err = -1;
2246- *bad_wr = wr;
2247- break;
2248- }
2249- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
2250- wqe = (union t3_wr *) (qhp->wq.queue + idx);
2251- t3_wr_flags = 0;
2252- if (wr->send_flags & IBV_SEND_SOLICITED)
2253- t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
2254- if (wr->send_flags & IBV_SEND_FENCE)
2255- t3_wr_flags |= T3_READ_FENCE_FLAG;
2256- if ((wr->send_flags & IBV_SEND_SIGNALED) || qhp->sq_sig_all)
2257- t3_wr_flags |= T3_COMPLETION_FLAG;
2258- sqp = qhp->wq.sq +
2259- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
2260- switch (wr->opcode) {
2261- case IBV_WR_SEND:
2262- t3_wr_opcode = T3_WR_SEND;
2263- err = iwch_build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
2264- break;
2265- case IBV_WR_RDMA_WRITE:
2266- t3_wr_opcode = T3_WR_WRITE;
2267- err = iwch_build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
2268- break;
2269- case IBV_WR_RDMA_READ:
2270- t3_wr_opcode = T3_WR_READ;
2271- t3_wr_flags = 0;
2272- err = iwch_build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
2273- if (err)
2274- break;
2275- sqp->read_len = wqe->read.local_len;
2276- if (!qhp->wq.oldest_read)
2277- qhp->wq.oldest_read = sqp;
2278- break;
2279- default:
2280- PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
2281- wr->opcode);
2282- err = -1;
2283- }
2284- if (err) {
2285- *bad_wr = wr;
2286- break;
2287- }
2288- wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
2289- sqp->wr_id = wr->wr_id;
2290- sqp->opcode = wr2opcode(t3_wr_opcode);
2291- sqp->sq_wptr = qhp->wq.sq_wptr;
2292- sqp->complete = 0;
2293- sqp->signaled = (wr->send_flags & IBV_SEND_SIGNALED);
2294-
2295- build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
2296- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
2297- 0, t3_wr_flit_cnt);
2298- PDBG("%s cookie 0x%" PRIx64
2299- " wq idx 0x%x swsq idx %ld opcode %d\n",
2300- __FUNCTION__, wr->wr_id, idx,
2301- Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
2302- sqp->opcode);
2303- wr = wr->next;
2304- num_wrs--;
2305- ++(qhp->wq.wptr);
2306- ++(qhp->wq.sq_wptr);
2307- }
2308- pthread_spin_unlock(&qhp->lock);
2309- if (t3_wq_db_enabled(&qhp->wq))
2310- RING_DOORBELL(qhp->wq.doorbell, qhp->wq.qpid);
2311- return err;
2312-}
2313-
2314-int t3a_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
2315- struct ibv_send_wr **bad_wr)
2316-{
2317- int ret;
2318- struct iwch_qp *qhp = to_iwch_qp(ibqp);
2319-
2320- pthread_spin_lock(&qhp->lock);
2321- ret = ibv_cmd_post_send(ibqp, wr, bad_wr);
2322- pthread_spin_unlock(&qhp->lock);
2323- return ret;
2324-}
2325-
2326-static inline int iwch_build_rdma_recv(struct iwch_device *rhp,
2327- union t3_wr *wqe,
2328- struct ibv_recv_wr *wr)
2329-{
2330- int i;
2331- if (wr->num_sge > T3_MAX_SGE)
2332- return -1;
2333-
2334- wqe->recv.num_sgle = htobe32(wr->num_sge);
2335- for (i = 0; i < wr->num_sge; i++) {
2336- wqe->recv.sgl[i].stag = htobe32(wr->sg_list[i].lkey);
2337- wqe->recv.sgl[i].len = htobe32(wr->sg_list[i].length);
2338- wqe->recv.sgl[i].to = htobe64(wr->sg_list[i].addr);
2339- }
2340- for (; i < T3_MAX_SGE; i++) {
2341- wqe->recv.sgl[i].stag = 0;
2342- wqe->recv.sgl[i].len = 0;
2343- wqe->recv.sgl[i].to = 0;
2344- }
2345- return 0;
2346-}
2347-
2348-static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
2349-{
2350- struct t3_cqe cqe;
2351-
2352- PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
2353- wq, cq, cq->sw_rptr, cq->sw_wptr);
2354- memset(&cqe, 0, sizeof(cqe));
2355- cqe.header = V_CQE_STATUS(TPT_ERR_SWFLUSH) |
2356- V_CQE_OPCODE(T3_SEND) |
2357- V_CQE_TYPE(0) |
2358- V_CQE_SWCQE(1) |
2359- V_CQE_QPID(wq->qpid) |
2360- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr, cq->size_log2));
2361- cqe.header = htobe32(cqe.header);
2362- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
2363- cq->sw_wptr++;
2364-}
2365-
2366-static void flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
2367-{
2368- uint32_t ptr;
2369-
2370- /* flush RQ */
2371- PDBG("%s rq_rptr 0x%x rq_wptr 0x%x skip count %u\n", __FUNCTION__,
2372- wq->rq_rptr, wq->rq_wptr, count);
2373- ptr = wq->rq_rptr + count;
2374- while (ptr++ != wq->rq_wptr) {
2375- insert_recv_cqe(wq, cq);
2376- }
2377-}
2378-
2379-static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
2380- struct t3_swsq *sqp)
2381-{
2382- struct t3_cqe cqe;
2383-
2384- PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
2385- wq, cq, cq->sw_rptr, cq->sw_wptr);
2386- memset(&cqe, 0, sizeof(cqe));
2387- cqe.header = V_CQE_STATUS(TPT_ERR_SWFLUSH) |
2388- V_CQE_OPCODE(sqp->opcode) |
2389- V_CQE_TYPE(1) |
2390- V_CQE_SWCQE(1) |
2391- V_CQE_QPID(wq->qpid) |
2392- V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr, cq->size_log2));
2393- cqe.header = htobe32(cqe.header);
2394- CQE_WRID_SQ_WPTR(cqe) = sqp->sq_wptr;
2395-
2396- *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
2397- cq->sw_wptr++;
2398-}
2399-
2400-static void flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
2401-{
2402- uint32_t ptr;
2403- struct t3_swsq *sqp;
2404-
2405- ptr = wq->sq_rptr + count;
2406- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
2407- while (ptr != wq->sq_wptr) {
2408- insert_sq_cqe(wq, cq, sqp);
2409- ptr++;
2410- sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
2411- }
2412-}
2413-
2414-/*
2415- * Move all CQEs from the HWCQ into the SWCQ.
2416- */
2417-static void flush_hw_cq(struct t3_cq *cq)
2418-{
2419- struct t3_cqe *cqe, *swcqe;
2420-
2421- PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
2422- cqe = cxio_next_hw_cqe(cq);
2423- while (cqe) {
2424- PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
2425- __FUNCTION__, cq->rptr, cq->sw_wptr);
2426- swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
2427- *swcqe = *cqe;
2428- swcqe->header |= htobe32(V_CQE_SWCQE(1));
2429- cq->sw_wptr++;
2430- cq->rptr++;
2431- cqe = cxio_next_hw_cqe(cq);
2432- }
2433-}
2434-
2435-static void count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
2436-{
2437- struct t3_cqe *cqe;
2438- uint32_t ptr;
2439-
2440- *count = 0;
2441- ptr = cq->sw_rptr;
2442- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
2443- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
2444- if ((SQ_TYPE(*cqe) ||
2445- (CQE_OPCODE(*cqe) == T3_READ_RESP && CQE_WRID_STAG(*cqe) != 1)) &&
2446- (CQE_QPID(*cqe) == wq->qpid))
2447- (*count)++;
2448- ptr++;
2449- }
2450- PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
2451-}
2452-
2453-static void count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
2454-{
2455- struct t3_cqe *cqe;
2456- uint32_t ptr;
2457-
2458- *count = 0;
2459- ptr = cq->sw_rptr;
2460- while (!Q_EMPTY(ptr, cq->sw_wptr)) {
2461- cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
2462- if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
2463- (CQE_QPID(*cqe) == wq->qpid))
2464- (*count)++;
2465- ptr++;
2466- }
2467- PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
2468-}
2469-
2470-/*
2471- * Assumes qhp lock is held.
2472- */
2473-void iwch_flush_qp(struct iwch_qp *qhp)
2474-{
2475- struct iwch_cq *rchp, *schp;
2476- int count;
2477-
2478- if (qhp->wq.flushed)
2479- return;
2480-
2481- rchp = qhp->rhp->cqid2ptr[to_iwch_cq(qhp->ibv_qp.recv_cq)->cq.cqid];
2482- schp = qhp->rhp->cqid2ptr[to_iwch_cq(qhp->ibv_qp.send_cq)->cq.cqid];
2483-
2484- PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
2485- qhp->wq.flushed = 1;
2486-
2487-#ifdef notyet
2488- /* take a ref on the qhp since we must release the lock */
2489- atomic_inc(&qhp->refcnt);
2490-#endif
2491- pthread_spin_unlock(&qhp->lock);
2492-
2493-	/* locking hierarchy: cq lock first, then qp lock. */
2494- pthread_spin_lock(&rchp->lock);
2495- pthread_spin_lock(&qhp->lock);
2496- flush_hw_cq(&rchp->cq);
2497- count_rcqes(&rchp->cq, &qhp->wq, &count);
2498- flush_rq(&qhp->wq, &rchp->cq, count);
2499- pthread_spin_unlock(&qhp->lock);
2500- pthread_spin_unlock(&rchp->lock);
2501-
2502-	/* locking hierarchy: cq lock first, then qp lock. */
2503- pthread_spin_lock(&schp->lock);
2504- pthread_spin_lock(&qhp->lock);
2505- flush_hw_cq(&schp->cq);
2506- count_scqes(&schp->cq, &qhp->wq, &count);
2507- flush_sq(&qhp->wq, &schp->cq, count);
2508- pthread_spin_unlock(&qhp->lock);
2509- pthread_spin_unlock(&schp->lock);
2510-
2511-#ifdef notyet
2512- /* deref */
2513- if (atomic_dec_and_test(&qhp->refcnt))
2514- wake_up(&qhp->wait);
2515-#endif
2516- pthread_spin_lock(&qhp->lock);
2517-}
2518-
2519-void iwch_flush_qps(struct iwch_device *dev)
2520-{
2521- int i;
2522-
2523- pthread_spin_lock(&dev->lock);
2524- for (i=0; i < T3_MAX_NUM_QP; i++) {
2525- struct iwch_qp *qhp = dev->qpid2ptr[i];
2526- if (qhp) {
2527- if (!qhp->wq.flushed && t3_wq_in_error(&qhp->wq)) {
2528- pthread_spin_lock(&qhp->lock);
2529- iwch_flush_qp(qhp);
2530- pthread_spin_unlock(&qhp->lock);
2531- }
2532- }
2533- }
2534- pthread_spin_unlock(&dev->lock);
2535-
2536-}
2537-
2538-int t3b_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
2539- struct ibv_recv_wr **bad_wr)
2540-{
2541- int err = 0;
2542- struct iwch_qp *qhp;
2543- uint32_t idx;
2544- union t3_wr *wqe;
2545- uint32_t num_wrs;
2546-
2547- qhp = to_iwch_qp(ibqp);
2548- pthread_spin_lock(&qhp->lock);
2549- if (t3_wq_in_error(&qhp->wq)) {
2550- iwch_flush_qp(qhp);
2551- pthread_spin_unlock(&qhp->lock);
2552- return -1;
2553- }
2554- num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
2555- qhp->wq.rq_size_log2) - 1;
2556- if (!wr) {
2557- pthread_spin_unlock(&qhp->lock);
2558- return -1;
2559- }
2560- while (wr) {
2561- idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
2562- wqe = (union t3_wr *) (qhp->wq.queue + idx);
2563- if (num_wrs)
2564- err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);
2565- else
2566- err = -1;
2567- if (err) {
2568- *bad_wr = wr;
2569- break;
2570- }
2571- qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] =
2572- wr->wr_id;
2573- build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
2574- Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
2575- 0, sizeof(struct t3_receive_wr) >> 3);
2576- PDBG("%s cookie 0x%" PRIx64
2577-	     " idx 0x%x rq_wptr 0x%x rq_rptr 0x%x "
2578-	     "wqe %p\n", __FUNCTION__, wr->wr_id, idx,
2579- qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
2580- ++(qhp->wq.rq_wptr);
2581- ++(qhp->wq.wptr);
2582- wr = wr->next;
2583- num_wrs--;
2584- }
2585- pthread_spin_unlock(&qhp->lock);
2586- if (t3_wq_db_enabled(&qhp->wq))
2587- RING_DOORBELL(qhp->wq.doorbell, qhp->wq.qpid);
2588- return err;
2589-}
2590-
2591-int t3a_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
2592- struct ibv_recv_wr **bad_wr)
2593-{
2594- int ret;
2595- struct iwch_qp *qhp = to_iwch_qp(ibqp);
2596-
2597- pthread_spin_lock(&qhp->lock);
2598- ret = ibv_cmd_post_recv(ibqp, wr, bad_wr);
2599- pthread_spin_unlock(&qhp->lock);
2600- return ret;
2601-}
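
Both post paths above drive power-of-two rings through free-running 32-bit pointers: Q_PTR2IDX masks a pointer down to a slot index, Q_FREECNT derives the free-slot count from the pointer difference, and Q_GENBIT yields a generation bit that flips on every wrap so consumers can spot stale entries. The macros live in the removed cxio_wr.h; the definitions below are a sketch of that idiom as this file uses it, not a verbatim copy:

	#include <stdio.h>
	#include <stdint.h>

	/* Assumed forms of the cxio_wr.h ring helpers used above. */
	#define Q_PTR2IDX(ptr, log2)		((ptr) & ((1u << (log2)) - 1))
	#define Q_GENBIT(ptr, log2)		(((ptr) >> (log2)) & 1u)
	#define Q_FREECNT(rptr, wptr, log2)	((1u << (log2)) - ((wptr) - (rptr)))

	int main(void)
	{
		uint32_t rptr = 0, wptr;
		const uint32_t log2 = 3;	/* an 8-slot ring */

		/* Walk the write pointer past a wrap: the index recycles,
		 * the generation bit flips, the free count hits zero. */
		for (wptr = 0; wptr <= 8; wptr++)
			printf("wptr %u -> idx %u gen %u free %u\n",
			       wptr, Q_PTR2IDX(wptr, log2),
			       Q_GENBIT(wptr, log2),
			       Q_FREECNT(rptr, wptr, log2));
		return 0;
	}
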
2602diff --git a/providers/cxgb3/verbs.c b/providers/cxgb3/verbs.c
2603deleted file mode 100644
2604index 39a44192e..000000000
2605--- a/providers/cxgb3/verbs.c
2606+++ /dev/null
2607@@ -1,476 +0,0 @@
2608-/*
2609- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
2610- *
2611- * This software is available to you under a choice of one of two
2612- * licenses. You may choose to be licensed under the terms of the GNU
2613- * General Public License (GPL) Version 2, available from the file
2614- * COPYING in the main directory of this source tree, or the
2615- * OpenIB.org BSD license below:
2616- *
2617- * Redistribution and use in source and binary forms, with or
2618- * without modification, are permitted provided that the following
2619- * conditions are met:
2620- *
2621- * - Redistributions of source code must retain the above
2622- * copyright notice, this list of conditions and the following
2623- * disclaimer.
2624- *
2625- * - Redistributions in binary form must reproduce the above
2626- * copyright notice, this list of conditions and the following
2627- * disclaimer in the documentation and/or other materials
2628- * provided with the distribution.
2629- *
2630- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
2631- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
2632- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
2633- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
2634- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
2635- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
2636- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
2637- * SOFTWARE.
2638- */
2639-#include <config.h>
2640-
2641-#include <stdlib.h>
2642-#include <stdio.h>
2643-#include <string.h>
2644-#include <errno.h>
2645-#include <pthread.h>
2646-#include <sys/mman.h>
2647-#include <inttypes.h>
2648-
2649-#include "iwch.h"
2650-#include "iwch-abi.h"
2651-
2652-int iwch_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
2653-{
2654- struct ibv_query_device cmd;
2655- uint64_t raw_fw_ver;
2656- unsigned major, minor, sub_minor;
2657- int ret;
2658-
2659- ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd,
2660- sizeof cmd);
2661- if (ret)
2662- return ret;
2663-
2664- major = (raw_fw_ver >> 32) & 0xffff;
2665- minor = (raw_fw_ver >> 16) & 0xffff;
2666- sub_minor = raw_fw_ver & 0xffff;
2667-
2668- snprintf(attr->fw_ver, sizeof attr->fw_ver,
2669- "%d.%d.%d", major, minor, sub_minor);
2670-
2671- return 0;
2672-}
2673-
2674-int iwch_query_port(struct ibv_context *context, uint8_t port,
2675- struct ibv_port_attr *attr)
2676-{
2677- struct ibv_query_port cmd;
2678-
2679- return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
2680-}
2681-
2682-struct ibv_pd *iwch_alloc_pd(struct ibv_context *context)
2683-{
2684- struct ibv_alloc_pd cmd;
2685- struct uiwch_alloc_pd_resp resp;
2686- struct iwch_pd *pd;
2687-
2688- pd = malloc(sizeof *pd);
2689- if (!pd)
2690- return NULL;
2691-
2692- if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
2693- &resp.ibv_resp, sizeof resp)) {
2694- free(pd);
2695- return NULL;
2696- }
2697-
2698- return &pd->ibv_pd;
2699-}
2700-
2701-int iwch_free_pd(struct ibv_pd *pd)
2702-{
2703- int ret;
2704-
2705- ret = ibv_cmd_dealloc_pd(pd);
2706- if (ret)
2707- return ret;
2708-
2709- free(pd);
2710- return 0;
2711-}
2712-
2713-struct ibv_mr *iwch_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
2714- uint64_t hca_va, int access)
2715-{
2716- struct iwch_mr *mhp;
2717- struct ibv_reg_mr cmd;
2718- struct uiwch_reg_mr_resp resp;
2719- struct iwch_device *dev = to_iwch_dev(pd->context->device);
2720-
2721- PDBG("%s addr %p length %ld hca_va %p\n", __func__, addr, length,
2722- hca_va);
2723-
2724- mhp = malloc(sizeof *mhp);
2725- if (!mhp)
2726- return NULL;
2727-
2728- if (ibv_cmd_reg_mr(pd, addr, length, hca_va,
2729- access, &mhp->vmr, &cmd, sizeof(cmd),
2730- &resp.ibv_resp, sizeof resp)) {
2731- free(mhp);
2732- return NULL;
2733- }
2734-
2735- mhp->va_fbo = hca_va;
2736- mhp->page_size = iwch_page_shift - 12;
2737- mhp->pbl_addr = resp.pbl_addr;
2738- mhp->len = length;
2739-
2740- PDBG("%s stag 0x%x va_fbo 0x%" PRIx64
2741- " page_size %d pbl_addr 0x%x len %d\n",
2742- __func__, mhp->vmr.ibv_mr.rkey, mhp->va_fbo,
2743- mhp->page_size, mhp->pbl_addr, mhp->len);
2744-
2745- pthread_spin_lock(&dev->lock);
2746- dev->mmid2ptr[t3_mmid(mhp->vmr.ibv_mr.lkey)] = mhp;
2747- pthread_spin_unlock(&dev->lock);
2748-
2749- return &mhp->vmr.ibv_mr;
2750-}
2751-
2752-int iwch_dereg_mr(struct verbs_mr *vmr)
2753-{
2754- int ret;
2755- struct iwch_device *dev = to_iwch_dev(vmr->ibv_mr.pd->context->device);
2756-
2757- ret = ibv_cmd_dereg_mr(vmr);
2758- if (ret)
2759- return ret;
2760-
2761- pthread_spin_lock(&dev->lock);
2762- dev->mmid2ptr[t3_mmid(vmr->ibv_mr.lkey)] = NULL;
2763- pthread_spin_unlock(&dev->lock);
2764-
2765- free(to_iwch_mr(vmr));
2766-
2767- return 0;
2768-}
2769-
2770-struct ibv_cq *iwch_create_cq(struct ibv_context *context, int cqe,
2771- struct ibv_comp_channel *channel, int comp_vector)
2772-{
2773- struct uiwch_create_cq cmd;
2774- struct uiwch_create_cq_resp resp;
2775- struct iwch_cq *chp;
2776- struct iwch_device *dev = to_iwch_dev(context->device);
2777- int ret;
2778-
2779- chp = calloc(1, sizeof *chp);
2780- if (!chp) {
2781- return NULL;
2782- }
2783-
2784- cmd.user_rptr_addr = (uint64_t)(unsigned long)&chp->cq.rptr;
2785- ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
2786- &chp->ibv_cq, &cmd.ibv_cmd, sizeof cmd,
2787- &resp.ibv_resp, sizeof resp);
2788- if (ret)
2789- goto err1;
2790-
2791- pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE);
2792- chp->rhp = dev;
2793- chp->cq.cqid = resp.cqid;
2794- chp->cq.size_log2 = resp.size_log2;
2795- if (dev->abi_version == 0)
2796- chp->cq.memsize = PAGE_ALIGN((1UL << chp->cq.size_log2) *
2797- sizeof(struct t3_cqe));
2798- else
2799- chp->cq.memsize = resp.memsize;
2800- chp->cq.queue = mmap(NULL, t3_cq_memsize(&chp->cq),
2801- PROT_READ|PROT_WRITE, MAP_SHARED, context->cmd_fd,
2802- resp.key);
2803- if (chp->cq.queue == MAP_FAILED)
2804- goto err2;
2805-
2806- chp->cq.sw_queue = calloc(t3_cq_depth(&chp->cq), sizeof(struct t3_cqe));
2807- if (!chp->cq.sw_queue)
2808- goto err3;
2809-
2810- PDBG("%s cqid 0x%x physaddr %" PRIx64 " va %p memsize %d\n",
2811- __FUNCTION__, chp->cq.cqid, resp.physaddr, chp->cq.queue,
2812- t3_cq_memsize(&chp->cq));
2813-
2814- pthread_spin_lock(&dev->lock);
2815- dev->cqid2ptr[chp->cq.cqid] = chp;
2816- pthread_spin_unlock(&dev->lock);
2817-
2818- return &chp->ibv_cq;
2819-err3:
2820- munmap(chp->cq.queue, t3_cq_memsize(&chp->cq));
2821-err2:
2822- (void)ibv_cmd_destroy_cq(&chp->ibv_cq);
2823-err1:
2824- free(chp);
2825- return NULL;
2826-}
2827-
2828-int iwch_resize_cq(struct ibv_cq *ibcq, int cqe)
2829-{
2830-#ifdef notyet
2831- int ret;
2832- struct ibv_resize_cq cmd;
2833- struct iwch_cq *chp = to_iwch_cq(ibcq);
2834-
2835- pthread_spin_lock(&chp->lock);
2836- ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd);
2837- /* remap and realloc swcq here */
2838- pthread_spin_unlock(&chp->lock);
2839- return ret;
2840-#else
2841- return -ENOSYS;
2842-#endif
2843-}
2844-
2845-int iwch_destroy_cq(struct ibv_cq *ibcq)
2846-{
2847- int ret;
2848- struct iwch_cq *chp = to_iwch_cq(ibcq);
2849- void *cqva = chp->cq.queue;
2850- unsigned size = t3_cq_memsize(&chp->cq);
2851- struct iwch_device *dev = to_iwch_dev(ibcq->context->device);
2852-
2853- munmap(cqva, size);
2854- ret = ibv_cmd_destroy_cq(ibcq);
2855- if (ret) {
2856- return ret;
2857- }
2858-
2859- pthread_spin_lock(&dev->lock);
2860- dev->cqid2ptr[chp->cq.cqid] = NULL;
2861- pthread_spin_unlock(&dev->lock);
2862-
2863- free(chp->cq.sw_queue);
2864- free(chp);
2865- return 0;
2866-}
2867-
2868-struct ibv_srq *iwch_create_srq(struct ibv_pd *pd,
2869- struct ibv_srq_init_attr *attr)
2870-{
2871- return NULL;
2872-}
2873-
2874-int iwch_modify_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr,
2875- int attr_mask)
2876-{
2877- return -ENOSYS;
2878-}
2879-
2880-int iwch_destroy_srq(struct ibv_srq *srq)
2881-{
2882- return -ENOSYS;
2883-}
2884-
2885-int iwch_post_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr,
2886- struct ibv_recv_wr **bad_wr)
2887-{
2888- return -ENOSYS;
2889-}
2890-
2891-struct ibv_qp *iwch_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
2892-{
2893- struct uiwch_create_qp cmd;
2894- struct uiwch_create_qp_resp resp;
2895- struct iwch_qp *qhp;
2896- struct iwch_device *dev = to_iwch_dev(pd->context->device);
2897- int ret;
2898- void *dbva;
2899-
2900- PDBG("%s enter qp\n", __FUNCTION__);
2901- qhp = calloc(1, sizeof *qhp);
2902- if (!qhp)
2903- goto err1;
2904-
2905- ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd.ibv_cmd,
2906- sizeof cmd, &resp.ibv_resp, sizeof resp);
2907- if (ret)
2908- goto err2;
2909-
2910- PDBG("%s qpid 0x%x physaddr %" PRIx64 " doorbell %" PRIx64
2911- " size %d sq_size %d rq_size %d\n",
2912- __FUNCTION__, resp.qpid, resp.physaddr, resp.doorbell,
2913- 1 << resp.size_log2, 1 << resp.sq_size_log2,
2914- 1 << resp.rq_size_log2);
2915-
2916- qhp->rhp = dev;
2917- qhp->wq.qpid = resp.qpid;
2918- qhp->wq.size_log2 = resp.size_log2;
2919- qhp->wq.sq_size_log2 = resp.sq_size_log2;
2920- qhp->wq.rq_size_log2 = resp.rq_size_log2;
2921- pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
2922- dbva = mmap(NULL, iwch_page_size, PROT_WRITE, MAP_SHARED,
2923- pd->context->cmd_fd, resp.db_key & ~(iwch_page_mask));
2924- if (dbva == MAP_FAILED)
2925- goto err3;
2926-
2927- qhp->wq.doorbell = dbva + (resp.db_key & (iwch_page_mask));
2928- qhp->wq.queue = mmap(NULL, t3_wq_memsize(&qhp->wq),
2929- PROT_READ|PROT_WRITE, MAP_SHARED,
2930- pd->context->cmd_fd, resp.key);
2931- if (qhp->wq.queue == MAP_FAILED)
2932- goto err4;
2933-
2934- qhp->wq.rq = calloc(t3_rq_depth(&qhp->wq), sizeof (uint64_t));
2935- if (!qhp->wq.rq)
2936- goto err5;
2937-
2938- qhp->wq.sq = calloc(t3_sq_depth(&qhp->wq), sizeof (struct t3_swsq));
2939- if (!qhp->wq.sq)
2940- goto err6;
2941-
2942- PDBG("%s dbva %p wqva %p wq memsize %d\n", __FUNCTION__,
2943- qhp->wq.doorbell, qhp->wq.queue, t3_wq_memsize(&qhp->wq));
2944-
2945- qhp->sq_sig_all = attr->sq_sig_all;
2946-
2947- pthread_spin_lock(&dev->lock);
2948- dev->qpid2ptr[qhp->wq.qpid] = qhp;
2949- pthread_spin_unlock(&dev->lock);
2950-
2951- return &qhp->ibv_qp;
2952-err6:
2953- free(qhp->wq.rq);
2954-err5:
2955- munmap((void *)qhp->wq.queue, t3_wq_memsize(&qhp->wq));
2956-err4:
2957- munmap((void *)dbva, iwch_page_size);
2958-err3:
2959- (void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
2960-err2:
2961- free(qhp);
2962-err1:
2963- return NULL;
2964-}
2965-
2966-static void reset_qp(struct iwch_qp *qhp)
2967-{
2968- PDBG("%s enter qp %p\n", __FUNCTION__, qhp);
2969- qhp->wq.wptr = 0;
2970- qhp->wq.rq_wptr = qhp->wq.rq_rptr = 0;
2971- qhp->wq.sq_wptr = qhp->wq.sq_rptr = 0;
2972- qhp->wq.error = 0;
2973- qhp->wq.oldest_read = NULL;
2974- memset(qhp->wq.queue, 0, t3_wq_memsize(&qhp->wq));
2975-}
2976-
2977-int iwch_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
2978- int attr_mask)
2979-{
2980- struct ibv_modify_qp cmd = {};
2981- struct iwch_qp *qhp = to_iwch_qp(ibqp);
2982- int ret;
2983-
2984- PDBG("%s enter qp %p new state %d\n", __FUNCTION__, ibqp, attr_mask & IBV_QP_STATE ? attr->qp_state : -1);
2985- pthread_spin_lock(&qhp->lock);
2986- if (t3b_device(qhp->rhp) && t3_wq_in_error(&qhp->wq))
2987- iwch_flush_qp(qhp);
2988- ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd);
2989- if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET)
2990- reset_qp(qhp);
2991- pthread_spin_unlock(&qhp->lock);
2992- return ret;
2993-}
2994-
2995-int iwch_destroy_qp(struct ibv_qp *ibqp)
2996-{
2997- int ret;
2998- struct iwch_qp *qhp = to_iwch_qp(ibqp);
2999- struct iwch_device *dev = to_iwch_dev(ibqp->context->device);
3000- void *dbva, *wqva;
3001- unsigned wqsize;
3002-
3003- PDBG("%s enter qp %p\n", __FUNCTION__, ibqp);
3004- if (t3b_device(dev)) {
3005- pthread_spin_lock(&qhp->lock);
3006- iwch_flush_qp(qhp);
3007- pthread_spin_unlock(&qhp->lock);
3008- }
3009-
3010- dbva = (void *)((unsigned long)qhp->wq.doorbell & ~(iwch_page_mask));
3011- wqva = qhp->wq.queue;
3012- wqsize = t3_wq_memsize(&qhp->wq);
3013-
3014- munmap(dbva, iwch_page_size);
3015- munmap(wqva, wqsize);
3016- ret = ibv_cmd_destroy_qp(ibqp);
3017- if (ret) {
3018- return ret;
3019- }
3020-
3021- pthread_spin_lock(&dev->lock);
3022- dev->qpid2ptr[qhp->wq.qpid] = NULL;
3023- pthread_spin_unlock(&dev->lock);
3024-
3025- free(qhp->wq.rq);
3026- free(qhp->wq.sq);
3027- free(qhp);
3028- return 0;
3029-}
3030-
3031-int iwch_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
3032- int attr_mask, struct ibv_qp_init_attr *init_attr)
3033-{
3034- return -ENOSYS;
3035-}
3036-
3037-struct ibv_ah *iwch_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
3038-{
3039- return NULL;
3040-}
3041-
3042-int iwch_destroy_ah(struct ibv_ah *ah)
3043-{
3044- return -ENOSYS;
3045-}
3046-
3047-int iwch_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
3048-{
3049- return -ENOSYS;
3050-}
3051-
3052-int iwch_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
3053-{
3054- return -ENOSYS;
3055-}
3056-
3057-void t3b_async_event(struct ibv_context *context,
3058- struct ibv_async_event *event)
3059-{
3060- PDBG("%s type %d obj %p\n", __FUNCTION__, event->event_type,
3061- event->element.cq);
3062-
3063- switch (event->event_type) {
3064- case IBV_EVENT_CQ_ERR:
3065- break;
3066- case IBV_EVENT_QP_FATAL:
3067- case IBV_EVENT_QP_REQ_ERR:
3068- case IBV_EVENT_QP_ACCESS_ERR:
3069- case IBV_EVENT_PATH_MIG_ERR: {
3070- struct iwch_qp *qhp = to_iwch_qp(event->element.qp);
3071- pthread_spin_lock(&qhp->lock);
3072- iwch_flush_qp(qhp);
3073- pthread_spin_unlock(&qhp->lock);
3074- break;
3075- }
3076- case IBV_EVENT_SQ_DRAINED:
3077- case IBV_EVENT_PATH_MIG:
3078- case IBV_EVENT_COMM_EST:
3079- case IBV_EVENT_QP_LAST_WQE_REACHED:
3080- default:
3081- break;
3082- }
3083-}
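
One pattern worth noting across the create/destroy paths above: every CQ, QP, and MR is recorded in a per-device id-to-pointer table under dev->lock at creation and cleared at destruction, which is what lets iwch_flush_qps walk every active QP and lets iwch_flush_qp find the send/recv CQs by hardware id. A compact sketch of that bookkeeping; my_dev, my_qp, and MAX_QP are hypothetical stand-ins for the iwch structures and T3_MAX_NUM_QP:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_QP 1024			/* stand-in for T3_MAX_NUM_QP */

	struct my_qp {
		unsigned qpid;
	};

	struct my_dev {
		pthread_spinlock_t lock;
		struct my_qp **qpid2ptr;	/* calloc'd at device alloc time */
	};

	/* Create path: publish the object under the device lock. */
	static void dev_insert_qp(struct my_dev *dev, struct my_qp *qp)
	{
		pthread_spin_lock(&dev->lock);
		dev->qpid2ptr[qp->qpid] = qp;
		pthread_spin_unlock(&dev->lock);
	}

	/* Flush/event path: translate a hardware qpid back to the object. */
	static struct my_qp *dev_lookup_qp(struct my_dev *dev, unsigned qpid)
	{
		struct my_qp *qp = NULL;

		pthread_spin_lock(&dev->lock);
		if (qpid < MAX_QP)
			qp = dev->qpid2ptr[qpid];
		pthread_spin_unlock(&dev->lock);
		return qp;
	}

	int main(void)
	{
		struct my_dev dev;
		struct my_qp qp = { .qpid = 7 };

		pthread_spin_init(&dev.lock, PTHREAD_PROCESS_PRIVATE);
		dev.qpid2ptr = calloc(MAX_QP, sizeof(*dev.qpid2ptr));
		if (!dev.qpid2ptr)
			return 1;

		dev_insert_qp(&dev, &qp);
		printf("lookup(7) -> qpid %u\n", dev_lookup_qp(&dev, 7)->qpid);
		free(dev.qpid2ptr);
		return 0;
	}
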