git.pld-linux.org Git - packages/rdma-core.git/commitdiff
- updated to 28.0
author: Jakub Bogusz <qboosh@pld-linux.org>
Thu, 13 Feb 2020 16:27:26 +0000 (17:27 +0100)
committer: Jakub Bogusz <qboosh@pld-linux.org>
Thu, 13 Feb 2020 16:27:26 +0000 (17:27 +0100)
- added kernel-abi,cxgb3,nes,providers-update patches (readd cxgb3 and nes drivers retired by vendor, usable with kernels < 5.5)

rdma-core-cxgb3.patch [new file with mode: 0644]
rdma-core-kernel-abi.patch [new file with mode: 0644]
rdma-core-nes.patch [new file with mode: 0644]
rdma-core-providers-update.patch [new file with mode: 0644]

diff --git a/rdma-core-cxgb3.patch b/rdma-core-cxgb3.patch
new file mode 100644 (file)
index 0000000..4ca3b82
--- /dev/null
@@ -0,0 +1,3098 @@
+From 36588f5844af4ef1e5b0d6ad002fa1adf9032653 Mon Sep 17 00:00:00 2001
+From: Potnuri Bharat Teja <bharat@chelsio.com>
+Date: Mon, 21 Oct 2019 14:01:25 +0530
+Subject: [PATCH] libcxgb3: Remove libcxgb3 from rdma-core
+
+Remove the userspace provider for iw_cxgb3 after removing it from kernel.
+
+Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+---
+ CMakeLists.txt                     |   1 -
+ MAINTAINERS                        |   5 -
+ README.md                          |   1 -
+ debian/control                     |  10 +-
+ debian/copyright                   |   3 +-
+ kernel-boot/rdma-description.rules |   1 -
+ kernel-boot/rdma-hw-modules.rules  |   1 -
+ libibverbs/verbs.h                 |   1 -
+ providers/cxgb3/CMakeLists.txt     |   6 -
+ providers/cxgb3/cq.c               | 442 -----------------
+ providers/cxgb3/cxio_wr.h          | 758 -----------------------------
+ providers/cxgb3/firmware_exports.h | 148 ------
+ providers/cxgb3/iwch-abi.h         |  51 --
+ providers/cxgb3/iwch.c             | 269 ----------
+ providers/cxgb3/iwch.h             | 218 ---------
+ providers/cxgb3/qp.c               | 560 ---------------------
+ providers/cxgb3/verbs.c            | 476 ------------------
+ redhat/rdma-core.spec              |   3 -
+ redhat/rdma.kernel-init            |   4 -
+ suse/rdma-core.spec                |   2 -
+ 20 files changed, 4 insertions(+), 2956 deletions(-)
+ delete mode 100644 providers/cxgb3/CMakeLists.txt
+ delete mode 100644 providers/cxgb3/cq.c
+ delete mode 100644 providers/cxgb3/cxio_wr.h
+ delete mode 100644 providers/cxgb3/firmware_exports.h
+ delete mode 100644 providers/cxgb3/iwch-abi.h
+ delete mode 100644 providers/cxgb3/iwch.c
+ delete mode 100644 providers/cxgb3/iwch.h
+ delete mode 100644 providers/cxgb3/qp.c
+ delete mode 100644 providers/cxgb3/verbs.c
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 7abeea4fe..85485ba00 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -615,7 +615,6 @@ add_subdirectory(librdmacm/man)
+ # Providers
+ if (HAVE_COHERENT_DMA)
+ add_subdirectory(providers/bnxt_re)
+-add_subdirectory(providers/cxgb3) # NO SPARSE
+ add_subdirectory(providers/cxgb4) # NO SPARSE
+ add_subdirectory(providers/efa)
+ add_subdirectory(providers/efa/man)
+diff --git a/README.md b/README.md
+index 451ff7fcb..a96351933 100644
+--- a/README.md
++++ b/README.md
+@@ -15,7 +15,6 @@ under the providers/ directory. Support for the following Kernel RDMA drivers
+ is included:
+  - efa.ko
+- - iw_cxgb3.ko
+  - iw_cxgb4.ko
+  - hfi1.ko
+  - hns-roce.ko
+diff --git a/kernel-boot/rdma-description.rules b/kernel-boot/rdma-description.rules
+index bb33dce40..4ea59ba19 100644
+--- a/kernel-boot/rdma-description.rules
++++ b/kernel-boot/rdma-description.rules
+@@ -22,7 +22,6 @@ DRIVERS=="ib_qib", ENV{ID_RDMA_INFINIBAND}="1"
+ DRIVERS=="hfi1", ENV{ID_RDMA_OPA}="1"
+ # Hardware that supports iWarp
+-DRIVERS=="cxgb3", ENV{ID_RDMA_IWARP}="1"
+ DRIVERS=="cxgb4", ENV{ID_RDMA_IWARP}="1"
+ DRIVERS=="i40e", ENV{ID_RDMA_IWARP}="1"
+ DRIVERS=="nes", ENV{ID_RDMA_IWARP}="1"
+diff --git a/kernel-boot/rdma-hw-modules.rules b/kernel-boot/rdma-hw-modules.rules
+index dde0ab8da..da4bbe363 100644
+--- a/kernel-boot/rdma-hw-modules.rules
++++ b/kernel-boot/rdma-hw-modules.rules
+@@ -8,7 +8,6 @@ SUBSYSTEM!="net", GOTO="rdma_hw_modules_end"
+ # RDMA.
+ ENV{ID_NET_DRIVER}=="be2net", RUN{builtin}+="kmod load ocrdma"
+ ENV{ID_NET_DRIVER}=="bnxt_en", RUN{builtin}+="kmod load bnxt_re"
+-ENV{ID_NET_DRIVER}=="cxgb3", RUN{builtin}+="kmod load iw_cxgb3"
+ ENV{ID_NET_DRIVER}=="cxgb4", RUN{builtin}+="kmod load iw_cxgb4"
+ ENV{ID_NET_DRIVER}=="hns", RUN{builtin}+="kmod load hns_roce"
+ ENV{ID_NET_DRIVER}=="i40e", RUN{builtin}+="kmod load i40iw"
+diff --git a/libibverbs/verbs.h b/libibverbs/verbs.h
+index c411722b1..12a33a99a 100644
+--- a/libibverbs/verbs.h
++++ b/libibverbs/verbs.h
+@@ -2144,7 +2144,6 @@ struct ibv_device **ibv_get_device_list(int *num_devices);
+ struct verbs_devices_ops;
+ extern const struct verbs_device_ops verbs_provider_bnxt_re;
+-extern const struct verbs_device_ops verbs_provider_cxgb3;
+ extern const struct verbs_device_ops verbs_provider_cxgb4;
+ extern const struct verbs_device_ops verbs_provider_efa;
+ extern const struct verbs_device_ops verbs_provider_hfi1verbs;
+diff --git a/providers/cxgb3/CMakeLists.txt b/providers/cxgb3/CMakeLists.txt
+deleted file mode 100644
+index a578105e7..000000000
+--- a/providers/cxgb3/CMakeLists.txt
++++ /dev/null
+@@ -1,6 +0,0 @@
+-rdma_provider(cxgb3
+-  cq.c
+-  iwch.c
+-  qp.c
+-  verbs.c
+-)
+diff --git a/providers/cxgb3/cq.c b/providers/cxgb3/cq.c
+deleted file mode 100644
+index 6cb4fe74d..000000000
+--- a/providers/cxgb3/cq.c
++++ /dev/null
+@@ -1,442 +0,0 @@
+-/*
+- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-#include <config.h>
+-
+-#include <stdio.h>
+-#include <pthread.h>
+-#include <sys/errno.h>
+-
+-#include <infiniband/opcode.h>
+-
+-#include "iwch.h"
+-#include "iwch-abi.h"
+-
+-int iwch_arm_cq(struct ibv_cq *ibcq, int solicited)
+-{
+-      int ret;
+-      struct iwch_cq *chp = to_iwch_cq(ibcq);
+-
+-      pthread_spin_lock(&chp->lock);
+-      ret = ibv_cmd_req_notify_cq(ibcq, solicited);
+-      pthread_spin_unlock(&chp->lock);
+-
+-      return ret;
+-}
+-
+-static inline void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
+-{
+-      struct t3_swsq *sqp;
+-      uint32_t ptr = wq->sq_rptr;
+-      int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
+-      
+-      sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
+-      while (count--) {
+-              if (!sqp->signaled) {
+-                      ptr++;
+-                      sqp = wq->sq + Q_PTR2IDX(ptr,  wq->sq_size_log2);
+-              } else if (sqp->complete) {
+-
+-                      /* 
+-                       * Insert this completed cqe into the swcq.
+-                       */
+-                      sqp->cqe.header |= htobe32(V_CQE_SWCQE(1));
+-                      *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) 
+-                              = sqp->cqe;
+-                      cq->sw_wptr++;
+-                      sqp->signaled = 0;
+-                      break;
+-              } else
+-                      break;
+-      }
+-}
+-
+-static inline void create_read_req_cqe(struct t3_wq *wq,
+-                                     struct t3_cqe *hw_cqe,
+-                                     struct t3_cqe *read_cqe)
+-{
+-      CQE_WRID_SQ_WPTR(*read_cqe) = wq->oldest_read->sq_wptr;
+-      read_cqe->len = wq->oldest_read->read_len;
+-      read_cqe->header = htobe32(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
+-                               V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
+-                               V_CQE_OPCODE(T3_READ_REQ) |
+-                               V_CQE_TYPE(1));
+-}
+-
+-/*
+- * Return a ptr to the next read wr in the SWSQ or NULL.
+- */
+-static inline void advance_oldest_read(struct t3_wq *wq)
+-{
+-
+-      uint32_t rptr = wq->oldest_read - wq->sq + 1;
+-      uint32_t wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
+-
+-      while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
+-              wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
+-
+-              if (wq->oldest_read->opcode == T3_READ_REQ) {
+-                      return;
+-              }
+-              rptr++;
+-      }
+-      wq->oldest_read = NULL;
+-}
+-
+-static inline int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq,
+-                 struct t3_cqe *cqe, uint8_t *cqe_flushed,
+-                 uint64_t *cookie)
+-{
+-      int ret = 0;
+-      struct t3_cqe *hw_cqe, read_cqe;
+-
+-      *cqe_flushed = 0;
+-      hw_cqe = cxio_next_cqe(cq);
+-      udma_from_device_barrier();
+-
+-      /* 
+-       * Skip cqes not affiliated with a QP.
+-       */
+-      if (wq == NULL) {
+-              ret = -1;
+-              goto skip_cqe;
+-      }
+-
+-      /*
+-       * Gotta tweak READ completions:
+-       *      1) the cqe doesn't contain the sq_wptr from the wr.
+-       *      2) opcode not reflected from the wr.
+-       *      3) read_len not reflected from the wr.
+-       *      4) cq_type is RQ_TYPE not SQ_TYPE.
+-       */
+-      if (CQE_OPCODE(*hw_cqe) == T3_READ_RESP) {
+-
+-                /*
+-               * If this is an unsolicited read response to local stag 1, 
+-               * then the read was generated by the kernel driver as part 
+-               * of peer-2-peer connection setup.  So ignore the completion.
+-               */
+-              if (CQE_WRID_STAG(*hw_cqe) == 1) {
+-                      if (CQE_STATUS(*hw_cqe))
+-                              wq->error = 1;
+-                      ret = -1;
+-                      goto skip_cqe;
+-              }
+-              
+-              /* 
+-               * Don't write to the HWCQ, so create a new read req CQE 
+-               * in local memory.
+-               */
+-              create_read_req_cqe(wq, hw_cqe, &read_cqe);
+-              hw_cqe = &read_cqe;
+-              advance_oldest_read(wq);
+-      }
+-
+-      /* 
+-       * Errors.
+-       */
+-      if (CQE_STATUS(*hw_cqe) || t3_wq_in_error(wq)) {
+-              *cqe_flushed = t3_wq_in_error(wq);
+-              t3_set_wq_in_error(wq);
+-              goto proc_cqe;
+-      }
+-
+-      /*
+-       * RECV completion.
+-       */
+-      if (RQ_TYPE(*hw_cqe)) {
+-
+-              /* 
+-               * HW only validates 4 bits of MSN.  So we must validate that
+-               * the MSN in the SEND is the next expected MSN.  If its not,
+-               * then we complete this with TPT_ERR_MSN and mark the wq in 
+-               * error.
+-               */
+-              if ((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1))) {
+-                      t3_set_wq_in_error(wq);
+-                      hw_cqe->header |= htobe32(V_CQE_STATUS(TPT_ERR_MSN));
+-              }
+-              goto proc_cqe;
+-      }
+-
+-      /* 
+-       * If we get here its a send completion.
+-       *
+-       * Handle out of order completion. These get stuffed
+-       * in the SW SQ. Then the SW SQ is walked to move any
+-       * now in-order completions into the SW CQ.  This handles
+-       * 2 cases:
+-       *      1) reaping unsignaled WRs when the first subsequent
+-       *         signaled WR is completed.
+-       *      2) out of order read completions.
+-       */
+-      if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
+-              struct t3_swsq *sqp;
+-
+-              sqp = wq->sq + 
+-                    Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
+-              sqp->cqe = *hw_cqe;
+-              sqp->complete = 1;
+-              ret = -1;
+-              goto flush_wq;
+-      }
+-
+-proc_cqe:
+-      *cqe = *hw_cqe;
+-
+-      /*
+-       * Reap the associated WR(s) that are freed up with this
+-       * completion.
+-       */
+-      if (SQ_TYPE(*hw_cqe)) {
+-              wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
+-              *cookie = (wq->sq + 
+-                         Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
+-              wq->sq_rptr++;
+-      } else {
+-              *cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
+-              wq->rq_rptr++;
+-      }
+-
+-flush_wq:
+-      /*
+-       * Flush any completed cqes that are now in-order.
+-       */
+-      flush_completed_wrs(wq, cq);
+-
+-skip_cqe:
+-      if (SW_CQE(*hw_cqe)) {
+-              PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n", 
+-                   __FUNCTION__, cq, cq->cqid, cq->sw_rptr);
+-              ++cq->sw_rptr;
+-      } else {
+-              PDBG("%s cq %p cqid 0x%x skip hw cqe sw_rptr 0x%x\n", 
+-                   __FUNCTION__, cq, cq->cqid, cq->rptr);
+-              ++cq->rptr;
+-      }
+-
+-      return ret;
+-}
+-
+-/*
+- * Get one cq entry from cxio and map it to openib.
+- *
+- * Returns:
+- *    0                       EMPTY;
+- *    1                       cqe returned
+- *    -EAGAIN                 caller must try again
+- *    any other -errno        fatal error
+- */
+-static int iwch_poll_cq_one(struct iwch_device *rhp, struct iwch_cq *chp,
+-                   struct ibv_wc *wc)
+-{
+-      struct iwch_qp *qhp = NULL;
+-      struct t3_cqe cqe, *hw_cqe;
+-      struct t3_wq *wq;
+-      uint8_t cqe_flushed;
+-      uint64_t cookie;
+-      int ret = 1;
+-
+-      hw_cqe = cxio_next_cqe(&chp->cq);
+-      udma_from_device_barrier();
+-
+-      if (!hw_cqe)
+-              return 0;
+-
+-      qhp = rhp->qpid2ptr[CQE_QPID(*hw_cqe)];
+-      if (!qhp)
+-              wq = NULL;
+-      else {
+-              pthread_spin_lock(&qhp->lock);
+-              wq = &(qhp->wq);
+-      }
+-      ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie);
+-      if (ret) {
+-              ret = -EAGAIN;
+-              goto out;
+-      }
+-      ret = 1;
+-
+-      wc->wr_id = cookie;
+-      wc->qp_num = qhp->wq.qpid;
+-      wc->vendor_err = CQE_STATUS(cqe);
+-      wc->wc_flags = 0;
+-
+-      PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
+-           "lo 0x%x cookie 0x%" PRIx64 "\n", 
+-           __FUNCTION__, CQE_QPID(cqe), CQE_TYPE(cqe),
+-           CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
+-           CQE_WRID_LOW(cqe), cookie);
+-
+-      if (CQE_TYPE(cqe) == 0) {
+-              if (!CQE_STATUS(cqe))
+-                      wc->byte_len = CQE_LEN(cqe);
+-              else
+-                      wc->byte_len = 0;
+-              wc->opcode = IBV_WC_RECV;
+-      } else {
+-              switch (CQE_OPCODE(cqe)) {
+-              case T3_RDMA_WRITE:
+-                      wc->opcode = IBV_WC_RDMA_WRITE;
+-                      break;
+-              case T3_READ_REQ:
+-                      wc->opcode = IBV_WC_RDMA_READ;
+-                      wc->byte_len = CQE_LEN(cqe);
+-                      break;
+-              case T3_SEND:
+-              case T3_SEND_WITH_SE:
+-                      wc->opcode = IBV_WC_SEND;
+-                      break;
+-              case T3_BIND_MW:
+-                      wc->opcode = IBV_WC_BIND_MW;
+-                      break;
+-
+-              /* these aren't supported yet */
+-              case T3_SEND_WITH_INV:
+-              case T3_SEND_WITH_SE_INV:
+-              case T3_LOCAL_INV:
+-              case T3_FAST_REGISTER:
+-              default:
+-                      PDBG("%s Unexpected opcode %d CQID 0x%x QPID 0x%x\n", 
+-                           __FUNCTION__, CQE_OPCODE(cqe), chp->cq.cqid, 
+-                           CQE_QPID(cqe));
+-                      ret = -EINVAL;
+-                      goto out;
+-              }
+-      }
+-
+-      if (cqe_flushed) {
+-              wc->status = IBV_WC_WR_FLUSH_ERR;
+-      } else {
+-              
+-              switch (CQE_STATUS(cqe)) {
+-              case TPT_ERR_SUCCESS:
+-                      wc->status = IBV_WC_SUCCESS;
+-                      break;
+-              case TPT_ERR_STAG:
+-                      wc->status = IBV_WC_LOC_ACCESS_ERR;
+-                      break;
+-              case TPT_ERR_PDID:
+-                      wc->status = IBV_WC_LOC_PROT_ERR;
+-                      break;
+-              case TPT_ERR_QPID:
+-              case TPT_ERR_ACCESS:
+-                      wc->status = IBV_WC_LOC_ACCESS_ERR;
+-                      break;
+-              case TPT_ERR_WRAP:
+-                      wc->status = IBV_WC_GENERAL_ERR;
+-                      break;
+-              case TPT_ERR_BOUND:
+-                      wc->status = IBV_WC_LOC_LEN_ERR;
+-                      break;
+-              case TPT_ERR_INVALIDATE_SHARED_MR:
+-              case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+-                      wc->status = IBV_WC_MW_BIND_ERR;
+-                      break;
+-              case TPT_ERR_CRC:
+-              case TPT_ERR_MARKER:
+-              case TPT_ERR_PDU_LEN_ERR:
+-              case TPT_ERR_OUT_OF_RQE:
+-              case TPT_ERR_DDP_VERSION:
+-              case TPT_ERR_RDMA_VERSION:
+-              case TPT_ERR_DDP_QUEUE_NUM:
+-              case TPT_ERR_MSN:
+-              case TPT_ERR_TBIT:
+-              case TPT_ERR_MO:
+-              case TPT_ERR_MSN_RANGE:
+-              case TPT_ERR_IRD_OVERFLOW:
+-              case TPT_ERR_OPCODE:
+-                      wc->status = IBV_WC_FATAL_ERR;
+-                      break;
+-              case TPT_ERR_SWFLUSH:
+-                      wc->status = IBV_WC_WR_FLUSH_ERR;
+-                      break;
+-              default:
+-                      PDBG("%s Unexpected status 0x%x CQID 0x%x QPID 0x%0x\n",
+-                           __FUNCTION__, CQE_STATUS(cqe), chp->cq.cqid, 
+-                           CQE_QPID(cqe));
+-                      ret = -EINVAL;
+-              }
+-      }
+-out:
+-      if (wq)
+-              pthread_spin_unlock(&qhp->lock);
+-      return ret;
+-}
+-
+-int t3b_poll_cq(struct ibv_cq *ibcq, int num_entries, struct ibv_wc *wc)
+-{
+-      struct iwch_device *rhp;
+-      struct iwch_cq *chp;
+-      int npolled;
+-      int err = 0;
+-
+-      chp = to_iwch_cq(ibcq);
+-      rhp = chp->rhp;
+-
+-      if (rhp->abi_version > 0 && t3_cq_in_error(&chp->cq)) {
+-              t3_reset_cq_in_error(&chp->cq);
+-              iwch_flush_qps(rhp);
+-      }
+-
+-      pthread_spin_lock(&chp->lock);
+-      for (npolled = 0; npolled < num_entries; ++npolled) {
+-
+-              /*
+-               * Because T3 can post CQEs that are out of order,
+-               * we might have to poll again after removing
+-               * one of these.  
+-               */
+-              do {
+-                      err = iwch_poll_cq_one(rhp, chp, wc + npolled);
+-              } while (err == -EAGAIN);
+-              if (err <= 0)
+-                      break;
+-      }
+-      pthread_spin_unlock(&chp->lock);
+-
+-      if (err < 0)
+-              return err;
+-      else {
+-              return npolled;
+-      }
+-}
+-
+-int t3a_poll_cq(struct ibv_cq *ibcq, int num_entries, struct ibv_wc *wc)
+-{
+-      int ret;
+-      struct iwch_cq *chp = to_iwch_cq(ibcq);
+-      
+-      pthread_spin_lock(&chp->lock);
+-      ret = ibv_cmd_poll_cq(ibcq, num_entries, wc);
+-      pthread_spin_unlock(&chp->lock);
+-      return ret;
+-}
+diff --git a/providers/cxgb3/cxio_wr.h b/providers/cxgb3/cxio_wr.h
+deleted file mode 100644
+index 042bd9414..000000000
+--- a/providers/cxgb3/cxio_wr.h
++++ /dev/null
+@@ -1,758 +0,0 @@
+-/*
+- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-#ifndef __CXIO_WR_H__
+-#define __CXIO_WR_H__
+-
+-#include <stddef.h>
+-#include <stdint.h>
+-#include <endian.h>
+-#include <util/udma_barrier.h>
+-#include "firmware_exports.h"
+-
+-#define T3_MAX_NUM_QP (1<<15)
+-#define T3_MAX_NUM_CQ (1<<15)
+-#define T3_MAX_NUM_PD (1<<15)
+-#define T3_MAX_NUM_STAG (1<<15)
+-#define T3_MAX_SGE      4
+-#define T3_MAX_INLINE  64
+-
+-#define Q_EMPTY(rptr,wptr) ((rptr)==(wptr))
+-#define Q_FULL(rptr,wptr,size_log2)  ( (((wptr)-(rptr))>>(size_log2)) && \
+-                                     ((rptr)!=(wptr)) )
+-#define Q_GENBIT(ptr,size_log2) (!(((ptr)>>size_log2)&0x1))
+-#define Q_FREECNT(rptr,wptr,size_log2) ((1UL<<size_log2)-((wptr)-(rptr)))
+-#define Q_COUNT(rptr,wptr) ((wptr)-(rptr))
+-#define Q_PTR2IDX(ptr,size_log2) (ptr & ((1UL<<size_log2)-1))
+-
+-/* FIXME: Move me to a generic PCI mmio accessor */
+-#define cpu_to_pci32(val) htole32(val)
+-
+-#define RING_DOORBELL(doorbell, QPID) { \
+-      *doorbell = cpu_to_pci32(QPID); \
+-}
+-
+-#define SEQ32_GE(x,y) (!( (((uint32_t) (x)) - ((uint32_t) (y))) & 0x80000000 ))
+-
+-enum t3_wr_flags {
+-      T3_COMPLETION_FLAG = 0x01,
+-      T3_NOTIFY_FLAG = 0x02,
+-      T3_SOLICITED_EVENT_FLAG = 0x04,
+-      T3_READ_FENCE_FLAG = 0x08,
+-      T3_LOCAL_FENCE_FLAG = 0x10
+-} __attribute__ ((packed));
+-
+-enum t3_wr_opcode {
+-      T3_WR_BP = FW_WROPCODE_RI_BYPASS,
+-      T3_WR_SEND = FW_WROPCODE_RI_SEND,
+-      T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
+-      T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
+-      T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
+-      T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
+-      T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
+-      T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
+-      T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
+-} __attribute__ ((packed));
+-
+-enum t3_rdma_opcode {
+-      T3_RDMA_WRITE,          /* IETF RDMAP v1.0 ... */
+-      T3_READ_REQ,
+-      T3_READ_RESP,
+-      T3_SEND,
+-      T3_SEND_WITH_INV,
+-      T3_SEND_WITH_SE,
+-      T3_SEND_WITH_SE_INV,
+-      T3_TERMINATE,
+-      T3_RDMA_INIT,           /* CHELSIO RI specific ... */
+-      T3_BIND_MW,
+-      T3_FAST_REGISTER,
+-      T3_LOCAL_INV,
+-      T3_QP_MOD,
+-      T3_BYPASS
+-} __attribute__ ((packed));
+-
+-static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
+-{
+-      switch (wrop) {
+-              case T3_WR_BP: return T3_BYPASS;
+-              case T3_WR_SEND: return T3_SEND;
+-              case T3_WR_WRITE: return T3_RDMA_WRITE;
+-              case T3_WR_READ: return T3_READ_REQ;
+-              case T3_WR_INV_STAG: return T3_LOCAL_INV;
+-              case T3_WR_BIND: return T3_BIND_MW;
+-              case T3_WR_INIT: return T3_RDMA_INIT;
+-              case T3_WR_QP_MOD: return T3_QP_MOD;
+-              default: break;
+-      }
+-      return -1;
+-}
+-
+-
+-/* Work request id */
+-union t3_wrid {
+-      struct {
+-              uint32_t hi:32;
+-              uint32_t low:32;
+-      } id0;
+-      uint64_t id1;
+-};
+-
+-#define WRID(wrid)            (wrid.id1)
+-#define WRID_GEN(wrid)                (wrid.id0.wr_gen)
+-#define WRID_IDX(wrid)                (wrid.id0.wr_idx)
+-#define WRID_LO(wrid)         (wrid.id0.wr_lo)
+-
+-struct fw_riwrh {
+-      uint32_t op_seop_flags;
+-      uint32_t gen_tid_len;
+-};
+-
+-#define S_FW_RIWR_OP          24
+-#define M_FW_RIWR_OP          0xff
+-#define V_FW_RIWR_OP(x)               ((x) << S_FW_RIWR_OP)
+-#define G_FW_RIWR_OP(x)       ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)
+-
+-#define S_FW_RIWR_SOPEOP      22
+-#define M_FW_RIWR_SOPEOP      0x3
+-#define V_FW_RIWR_SOPEOP(x)   ((x) << S_FW_RIWR_SOPEOP)
+-
+-#define S_FW_RIWR_FLAGS               8
+-#define M_FW_RIWR_FLAGS               0x3fffff
+-#define V_FW_RIWR_FLAGS(x)    ((x) << S_FW_RIWR_FLAGS)
+-#define G_FW_RIWR_FLAGS(x)    ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)
+-
+-#define S_FW_RIWR_TID         8
+-#define V_FW_RIWR_TID(x)      ((x) << S_FW_RIWR_TID)
+-
+-#define S_FW_RIWR_LEN         0
+-#define V_FW_RIWR_LEN(x)      ((x) << S_FW_RIWR_LEN)
+-
+-#define S_FW_RIWR_GEN           31
+-#define V_FW_RIWR_GEN(x)        ((x)  << S_FW_RIWR_GEN)
+-
+-struct t3_sge {
+-      uint32_t stag;
+-      uint32_t len;
+-      uint64_t to;
+-};
+-
+-/* If num_sgle is zero, flit 5+ contains immediate data.*/
+-struct t3_send_wr {
+-      struct fw_riwrh wrh;    /* 0 */
+-      union t3_wrid wrid;     /* 1 */
+-
+-      enum t3_rdma_opcode rdmaop:8;
+-      uint32_t reserved:24;   /* 2 */
+-      uint32_t rem_stag;      /* 2 */
+-      uint32_t plen;          /* 3 */
+-      uint32_t num_sgle;
+-      struct t3_sge sgl[T3_MAX_SGE];  /* 4+ */
+-};
+-
+-struct t3_local_inv_wr {
+-      struct fw_riwrh wrh;    /* 0 */
+-      union t3_wrid wrid;     /* 1 */
+-      uint32_t stag;          /* 2 */
+-      uint32_t reserved3;
+-};
+-
+-struct t3_rdma_write_wr {
+-      struct fw_riwrh wrh;    /* 0 */
+-      union t3_wrid wrid;     /* 1 */
+-      enum t3_rdma_opcode rdmaop:8;   /* 2 */
+-      uint32_t reserved:24;   /* 2 */
+-      uint32_t stag_sink;
+-      uint64_t to_sink;       /* 3 */
+-      uint32_t plen;          /* 4 */
+-      uint32_t num_sgle;
+-      struct t3_sge sgl[T3_MAX_SGE];  /* 5+ */
+-};
+-
+-struct t3_rdma_read_wr {
+-      struct fw_riwrh wrh;    /* 0 */
+-      union t3_wrid wrid;     /* 1 */
+-      enum t3_rdma_opcode rdmaop:8;   /* 2 */
+-      uint32_t reserved:24;
+-      uint32_t rem_stag;
+-      uint64_t rem_to;        /* 3 */
+-      uint32_t local_stag;    /* 4 */
+-      uint32_t local_len;
+-      uint64_t local_to;      /* 5 */
+-};
+-
+-enum t3_addr_type {
+-      T3_VA_BASED_TO = 0x0,
+-      T3_ZERO_BASED_TO = 0x1
+-} __attribute__ ((packed));
+-
+-enum t3_mem_perms {
+-      T3_MEM_ACCESS_LOCAL_READ = 0x1,
+-      T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
+-      T3_MEM_ACCESS_REM_READ = 0x4,
+-      T3_MEM_ACCESS_REM_WRITE = 0x8
+-} __attribute__ ((packed));
+-
+-struct t3_bind_mw_wr {
+-      struct fw_riwrh wrh;    /* 0 */
+-      union t3_wrid wrid;     /* 1 */
+-      uint32_t reserved:16;
+-      enum t3_addr_type type:8;
+-      enum t3_mem_perms perms:8;      /* 2 */
+-      uint32_t mr_stag;
+-      uint32_t mw_stag;       /* 3 */
+-      uint32_t mw_len;
+-      uint64_t mw_va;         /* 4 */
+-      uint32_t mr_pbl_addr;   /* 5 */
+-      uint32_t reserved2:24;
+-      uint32_t mr_pagesz:8;
+-};
+-
+-struct t3_receive_wr {
+-      struct fw_riwrh wrh;    /* 0 */
+-      union t3_wrid wrid;     /* 1 */
+-      uint8_t pagesz[T3_MAX_SGE];
+-      uint32_t num_sgle;              /* 2 */
+-      struct t3_sge sgl[T3_MAX_SGE];  /* 3+ */
+-      uint32_t pbl_addr[T3_MAX_SGE];
+-};
+-
+-struct t3_bypass_wr {
+-      struct fw_riwrh wrh;
+-      union t3_wrid wrid;     /* 1 */
+-};
+-
+-struct t3_modify_qp_wr {
+-      struct fw_riwrh wrh;    /* 0 */
+-      union t3_wrid wrid;     /* 1 */
+-      uint32_t flags;         /* 2 */
+-      uint32_t quiesce;       /* 2 */
+-      uint32_t max_ird;       /* 3 */
+-      uint32_t max_ord;       /* 3 */
+-      uint64_t sge_cmd;       /* 4 */
+-      uint64_t ctx1;          /* 5 */
+-      uint64_t ctx0;          /* 6 */
+-};
+-
+-enum t3_modify_qp_flags {
+-      MODQP_QUIESCE  = 0x01,
+-      MODQP_MAX_IRD  = 0x02,
+-      MODQP_MAX_ORD  = 0x04,
+-      MODQP_WRITE_EC = 0x08,
+-      MODQP_READ_EC  = 0x10,
+-};
+-      
+-
+-enum t3_mpa_attrs {
+-      uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
+-      uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
+-      uP_RI_MPA_CRC_ENABLE = 0x4,
+-      uP_RI_MPA_IETF_ENABLE = 0x8
+-} __attribute__ ((packed));
+-
+-enum t3_qp_caps {
+-      uP_RI_QP_RDMA_READ_ENABLE = 0x01,
+-      uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
+-      uP_RI_QP_BIND_ENABLE = 0x04,
+-      uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
+-      uP_RI_QP_STAG0_ENABLE = 0x10
+-} __attribute__ ((packed));
+-
+-struct t3_rdma_init_attr {
+-      uint32_t tid;
+-      uint32_t qpid;
+-      uint32_t pdid;
+-      uint32_t scqid;
+-      uint32_t rcqid;
+-      uint32_t rq_addr;
+-      uint32_t rq_size;
+-      enum t3_mpa_attrs mpaattrs;
+-      enum t3_qp_caps qpcaps;
+-      uint16_t tcp_emss;
+-      uint32_t ord;
+-      uint32_t ird;
+-      uint64_t qp_dma_addr;
+-      uint32_t qp_dma_size;
+-      uint8_t rqes_posted;
+-};
+-
+-struct t3_rdma_init_wr {
+-      struct fw_riwrh wrh;    /* 0 */
+-      union t3_wrid wrid;     /* 1 */
+-      uint32_t qpid;          /* 2 */
+-      uint32_t pdid;
+-      uint32_t scqid;         /* 3 */
+-      uint32_t rcqid;
+-      uint32_t rq_addr;       /* 4 */
+-      uint32_t rq_size;
+-      enum t3_mpa_attrs mpaattrs:8;   /* 5 */
+-      enum t3_qp_caps qpcaps:8;
+-      uint32_t ulpdu_size:16;
+-      uint32_t rqes_posted;   /* bits 31-1 - reservered */
+-                              /* bit     0 - set if RECV posted */
+-      uint32_t ord;           /* 6 */
+-      uint32_t ird;
+-      uint64_t qp_dma_addr;   /* 7 */
+-      uint32_t qp_dma_size;   /* 8 */
+-      uint32_t rsvd;
+-};
+-
+-union t3_wr {
+-      struct t3_send_wr send;
+-      struct t3_rdma_write_wr write;
+-      struct t3_rdma_read_wr read;
+-      struct t3_receive_wr recv;
+-      struct t3_local_inv_wr local_inv;
+-      struct t3_bind_mw_wr bind;
+-      struct t3_bypass_wr bypass;
+-      struct t3_rdma_init_wr init;
+-      struct t3_modify_qp_wr qp_mod;
+-      uint64_t flit[16];
+-};
+-
+-#define T3_SQ_CQE_FLIT          13
+-#define T3_SQ_COOKIE_FLIT 14
+-
+-#define T3_RQ_COOKIE_FLIT 13
+-#define T3_RQ_CQE_FLIT          14
+-
+-static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
+-                                enum t3_wr_flags flags, uint8_t genbit, 
+-                                uint32_t tid, uint8_t len)
+-{
+-      wqe->op_seop_flags = htobe32(V_FW_RIWR_OP(op) |
+-                                 V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
+-                                 V_FW_RIWR_FLAGS(flags));
+-      udma_to_device_barrier();
+-      wqe->gen_tid_len = htobe32(V_FW_RIWR_GEN(genbit) | V_FW_RIWR_TID(tid) |
+-                               V_FW_RIWR_LEN(len));
+-      /* 2nd gen bit... */
+-        ((union t3_wr *)wqe)->flit[15] = htobe64(genbit);
+-}
+-
+-/*
+- * T3 ULP2_TX commands
+- */
+-enum t3_utx_mem_op {
+-      T3_UTX_MEM_READ = 2,
+-      T3_UTX_MEM_WRITE = 3
+-};
+-
+-/* T3 MC7 RDMA TPT entry format */
+-
+-enum tpt_mem_type {
+-      TPT_NON_SHARED_MR = 0x0,
+-      TPT_SHARED_MR = 0x1,
+-      TPT_MW = 0x2,
+-      TPT_MW_RELAXED_PROTECTION = 0x3
+-};
+-
+-enum tpt_addr_type {
+-      TPT_ZBTO = 0,
+-      TPT_VATO = 1
+-};
+-
+-enum tpt_mem_perm {
+-      TPT_LOCAL_READ = 0x8,
+-      TPT_LOCAL_WRITE = 0x4,
+-      TPT_REMOTE_READ = 0x2,
+-      TPT_REMOTE_WRITE = 0x1
+-};
+-
+-struct tpt_entry {
+-      uint32_t valid_stag_pdid;
+-      uint32_t flags_pagesize_qpid;
+-
+-      uint32_t rsvd_pbl_addr;
+-      uint32_t len;
+-      uint32_t va_hi;
+-      uint32_t va_low_or_fbo;
+-
+-      uint32_t rsvd_bind_cnt_or_pstag;
+-      uint32_t rsvd_pbl_size;
+-};
+-
+-#define S_TPT_VALID           31
+-#define V_TPT_VALID(x)                ((x) << S_TPT_VALID)
+-#define F_TPT_VALID           V_TPT_VALID(1U)
+-
+-#define S_TPT_STAG_KEY                23
+-#define M_TPT_STAG_KEY                0xFF
+-#define V_TPT_STAG_KEY(x)     ((x) << S_TPT_STAG_KEY)
+-#define G_TPT_STAG_KEY(x)     (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)
+-
+-#define S_TPT_STAG_STATE      22
+-#define V_TPT_STAG_STATE(x)   ((x) << S_TPT_STAG_STATE)
+-#define F_TPT_STAG_STATE      V_TPT_STAG_STATE(1U)
+-
+-#define S_TPT_STAG_TYPE               20
+-#define M_TPT_STAG_TYPE               0x3
+-#define V_TPT_STAG_TYPE(x)    ((x) << S_TPT_STAG_TYPE)
+-#define G_TPT_STAG_TYPE(x)    (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)
+-
+-#define S_TPT_PDID            0
+-#define M_TPT_PDID            0xFFFFF
+-#define V_TPT_PDID(x)         ((x) << S_TPT_PDID)
+-#define G_TPT_PDID(x)         (((x) >> S_TPT_PDID) & M_TPT_PDID)
+-
+-#define S_TPT_PERM            28
+-#define M_TPT_PERM            0xF
+-#define V_TPT_PERM(x)         ((x) << S_TPT_PERM)
+-#define G_TPT_PERM(x)         (((x) >> S_TPT_PERM) & M_TPT_PERM)
+-
+-#define S_TPT_REM_INV_DIS     27
+-#define V_TPT_REM_INV_DIS(x)  ((x) << S_TPT_REM_INV_DIS)
+-#define F_TPT_REM_INV_DIS     V_TPT_REM_INV_DIS(1U)
+-
+-#define S_TPT_ADDR_TYPE               26
+-#define V_TPT_ADDR_TYPE(x)    ((x) << S_TPT_ADDR_TYPE)
+-#define F_TPT_ADDR_TYPE               V_TPT_ADDR_TYPE(1U)
+-
+-#define S_TPT_MW_BIND_ENABLE  25
+-#define V_TPT_MW_BIND_ENABLE(x)       ((x) << S_TPT_MW_BIND_ENABLE)
+-#define F_TPT_MW_BIND_ENABLE    V_TPT_MW_BIND_ENABLE(1U)
+-
+-#define S_TPT_PAGE_SIZE               20
+-#define M_TPT_PAGE_SIZE               0x1F
+-#define V_TPT_PAGE_SIZE(x)    ((x) << S_TPT_PAGE_SIZE)
+-#define G_TPT_PAGE_SIZE(x)    (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)
+-
+-#define S_TPT_PBL_ADDR                0
+-#define M_TPT_PBL_ADDR                0x1FFFFFFF
+-#define V_TPT_PBL_ADDR(x)     ((x) << S_TPT_PBL_ADDR)
+-#define G_TPT_PBL_ADDR(x)       (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)
+-
+-#define S_TPT_QPID            0
+-#define M_TPT_QPID            0xFFFFF
+-#define V_TPT_QPID(x)         ((x) << S_TPT_QPID)
+-#define G_TPT_QPID(x)         (((x) >> S_TPT_QPID) & M_TPT_QPID)
+-
+-#define S_TPT_PSTAG           0
+-#define M_TPT_PSTAG           0xFFFFFF
+-#define V_TPT_PSTAG(x)                ((x) << S_TPT_PSTAG)
+-#define G_TPT_PSTAG(x)                (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)
+-
+-#define S_TPT_PBL_SIZE                0
+-#define M_TPT_PBL_SIZE                0xFFFFF
+-#define V_TPT_PBL_SIZE(x)     ((x) << S_TPT_PBL_SIZE)
+-#define G_TPT_PBL_SIZE(x)     (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
+-
+-/*
+- * CQE defs
+- */
+-struct t3_cqe {
+-      uint32_t header:32;
+-      uint32_t len:32;
+-      uint32_t wrid_hi_stag:32;
+-      uint32_t wrid_low_msn:32;
+-};
+-
+-#define S_CQE_OOO       31
+-#define M_CQE_OOO       0x1
+-#define G_CQE_OOO(x)    ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
+-#define V_CEQ_OOO(x)    ((x)<<S_CQE_OOO)
+-
+-#define S_CQE_QPID        12
+-#define M_CQE_QPID        0x7FFFF
+-#define G_CQE_QPID(x)     ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
+-#define V_CQE_QPID(x)           ((x)<<S_CQE_QPID)
+-
+-#define S_CQE_SWCQE       11
+-#define M_CQE_SWCQE       0x1
+-#define G_CQE_SWCQE(x)    ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
+-#define V_CQE_SWCQE(x)          ((x)<<S_CQE_SWCQE)
+-
+-#define S_CQE_GENBIT      10
+-#define M_CQE_GENBIT      0x1
+-#define G_CQE_GENBIT(x)   (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
+-#define V_CQE_GENBIT(x)         ((x)<<S_CQE_GENBIT)
+-
+-#define S_CQE_STATUS      5
+-#define M_CQE_STATUS      0x1F
+-#define G_CQE_STATUS(x)   ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
+-#define V_CQE_STATUS(x)   ((x)<<S_CQE_STATUS)
+-
+-#define S_CQE_TYPE        4
+-#define M_CQE_TYPE        0x1
+-#define G_CQE_TYPE(x)     ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
+-#define V_CQE_TYPE(x)     ((x)<<S_CQE_TYPE)
+-
+-#define S_CQE_OPCODE      0
+-#define M_CQE_OPCODE      0xF
+-#define G_CQE_OPCODE(x)   ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
+-#define V_CQE_OPCODE(x)   ((x)<<S_CQE_OPCODE)
+-
+-#define SW_CQE(x)         (G_CQE_SWCQE(be32toh((x).header)))
+-#define CQE_OOO(x)        (G_CQE_OOO(be32toh((x).header)))
+-#define CQE_QPID(x)       (G_CQE_QPID(be32toh((x).header)))
+-#define CQE_GENBIT(x)     (G_CQE_GENBIT(be32toh((x).header)))
+-#define CQE_TYPE(x)       (G_CQE_TYPE(be32toh((x).header)))
+-#define SQ_TYPE(x)      (CQE_TYPE((x)))
+-#define RQ_TYPE(x)      (!CQE_TYPE((x)))
+-#define CQE_STATUS(x)     (G_CQE_STATUS(be32toh((x).header)))
+-#define CQE_OPCODE(x)     (G_CQE_OPCODE(be32toh((x).header)))
+-
+-#define CQE_LEN(x)        (be32toh((x).len))
+-
+-#define CQE_WRID_HI(x)    (be32toh((x).wrid_hi_stag))
+-#define CQE_WRID_LOW(x)   (be32toh((x).wrid_low_msn))
+-
+-/* used for RQ completion processing */
+-#define CQE_WRID_STAG(x)  (be32toh((x).wrid_hi_stag))
+-#define CQE_WRID_MSN(x)   (be32toh((x).wrid_low_msn))
+-
+-/* used for SQ completion processing */
+-#define CQE_WRID_SQ_WPTR(x)   ((x).wrid_hi_stag)
+-#define CQE_WRID_WPTR(x)      ((x).wrid_low_msn)
+-
+-#define TPT_ERR_SUCCESS                     0x0
+-#define TPT_ERR_STAG                        0x1        /* STAG invalid: either the */
+-                                               /* STAG is offlimt, being 0, */
+-                                               /* or STAG_key mismatch */
+-#define TPT_ERR_PDID                        0x2        /* PDID mismatch */
+-#define TPT_ERR_QPID                        0x3        /* QPID mismatch */
+-#define TPT_ERR_ACCESS                      0x4        /* Invalid access right */
+-#define TPT_ERR_WRAP                        0x5        /* Wrap error */
+-#define TPT_ERR_BOUND                       0x6        /* base and bounds voilation */
+-#define TPT_ERR_INVALIDATE_SHARED_MR        0x7        /* attempt to invalidate a  */
+-                                               /* shared memory region */
+-#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8        /* attempt to invalidate a  */
+-                                               /* shared memory region */
+-#define TPT_ERR_ECC                         0x9        /* ECC error detected */
+-#define TPT_ERR_ECC_PSTAG                   0xA        /* ECC error detected when  */
+-                                               /* reading PSTAG for a MW  */
+-                                               /* Invalidate */
+-#define TPT_ERR_PBL_ADDR_BOUND              0xB        /* pbl addr out of bounds:  */
+-                                               /* software error */
+-#define TPT_ERR_SWFLUSH                           0xC  /* SW FLUSHED */
+-#define TPT_ERR_CRC                         0x10 /* CRC error */
+-#define TPT_ERR_MARKER                      0x11 /* Marker error */
+-#define TPT_ERR_PDU_LEN_ERR                 0x12 /* invalid PDU length */
+-#define TPT_ERR_OUT_OF_RQE                  0x13 /* out of RQE */
+-#define TPT_ERR_DDP_VERSION                 0x14 /* wrong DDP version */
+-#define TPT_ERR_RDMA_VERSION                0x15 /* wrong RDMA version */
+-#define TPT_ERR_OPCODE                      0x16 /* invalid rdma opcode */
+-#define TPT_ERR_DDP_QUEUE_NUM               0x17 /* invalid ddp queue number */
+-#define TPT_ERR_MSN                         0x18 /* MSN error */
+-#define TPT_ERR_TBIT                        0x19 /* tag bit not set correctly */
+-#define TPT_ERR_MO                          0x1A /* MO not 0 for TERMINATE  */
+-                                               /* or READ_REQ */
+-#define TPT_ERR_MSN_GAP                     0x1B
+-#define TPT_ERR_MSN_RANGE                   0x1C
+-#define TPT_ERR_IRD_OVERFLOW                0x1D
+-#define TPT_ERR_RQE_ADDR_BOUND              0x1E /* RQE addr out of bounds:  */
+-                                               /* software error */
+-#define TPT_ERR_INTERNAL_ERR                0x1F /* internal error (opcode  */
+-                                               /* mismatch) */
+-
+-struct t3_swsq {
+-      uint64_t                wr_id;
+-      struct t3_cqe           cqe;
+-      uint32_t                sq_wptr;
+-      uint32_t                read_len;
+-      int                     opcode;
+-      int                     complete;
+-      int                     signaled;       
+-};
+-
+-/*
+- * A T3 WQ implements both the SQ and RQ.
+- */
+-struct t3_wq {
+-      union t3_wr *queue;             /* DMA Mapped work queue */
+-      uint32_t error;                 /* 1 once we go to ERROR */
+-      uint32_t qpid;
+-      uint32_t wptr;                  /* idx to next available WR slot */
+-      uint32_t size_log2;             /* total wq size */
+-      struct t3_swsq *sq;             /* SW SQ */
+-      struct t3_swsq *oldest_read;    /* tracks oldest pending read */
+-      uint32_t sq_wptr;               /* sq_wptr - sq_rptr == count of */
+-      uint32_t sq_rptr;               /* pending wrs */
+-      uint32_t sq_size_log2;          /* sq size */
+-      uint64_t *rq;                   /* SW RQ (holds consumer wr_ids) */
+-      uint32_t rq_wptr;               /* rq_wptr - rq_rptr == count of */
+-      uint32_t rq_rptr;               /* pending wrs */
+-      uint32_t rq_size_log2;          /* rq size */
+-      volatile uint32_t *doorbell;    /* mapped adapter doorbell register */
+-      int flushed;
+-};
+-
+-struct t3_cq {
+-      uint32_t cqid;
+-      uint32_t rptr;
+-      uint32_t wptr;
+-      uint32_t size_log2;
+-      struct t3_cqe *queue;
+-      struct t3_cqe *sw_queue;
+-      uint32_t sw_rptr;
+-      uint32_t sw_wptr;
+-      uint32_t memsize;
+-};
+-
+-static inline unsigned t3_wq_depth(struct t3_wq *wq)
+-{
+-      return (1UL<<wq->size_log2);
+-}
+-
+-static inline unsigned t3_sq_depth(struct t3_wq *wq)
+-{
+-      return (1UL<<wq->sq_size_log2);
+-}
+-
+-static inline unsigned t3_rq_depth(struct t3_wq *wq)
+-{
+-      return (1UL<<wq->rq_size_log2);
+-}
+-
+-static inline unsigned t3_cq_depth(struct t3_cq *cq)
+-{
+-      return (1UL<<cq->size_log2);
+-}
+-
+-extern unsigned long iwch_page_size;
+-extern unsigned long iwch_page_shift;
+-extern unsigned long iwch_page_mask;
+-
+-#define PAGE_ALIGN(x) (((x) + iwch_page_mask) & ~iwch_page_mask)
+-
+-static inline unsigned t3_wq_memsize(struct t3_wq *wq)
+-{
+-      return PAGE_ALIGN((1UL<<wq->size_log2) * sizeof (union t3_wr));
+-}
+-
+-static inline unsigned t3_cq_memsize(struct t3_cq *cq)
+-{
+-      return cq->memsize;
+-}
+-
+-static inline unsigned t3_mmid(uint32_t stag)
+-{
+-      return (stag>>8);
+-}
+-
+-struct t3_cq_status_page {
+-      uint32_t cq_err;
+-};
+-
+-static inline int t3_cq_in_error(struct t3_cq *cq)
+-{
+-      return ((struct t3_cq_status_page *)
+-             &cq->queue[1 << cq->size_log2])->cq_err;
+-}
+-
+-static inline void t3_set_cq_in_error(struct t3_cq *cq)
+-{
+-      ((struct t3_cq_status_page *)
+-              &cq->queue[1 << cq->size_log2])->cq_err = 1;
+-}
+-
+-static inline void t3_reset_cq_in_error(struct t3_cq *cq)
+-{
+-      ((struct t3_cq_status_page *)
+-              &cq->queue[1 << cq->size_log2])->cq_err = 0;
+-}
+-
+-static inline int t3_wq_in_error(struct t3_wq *wq)
+-{
+-      /*
+-       * The kernel sets bit 0 in the first WR of the WQ memory
+-       * when the QP moves out of RTS...
+-       */
+-        return (wq->queue->flit[13] & 1);
+-}
+-
+-static inline void t3_set_wq_in_error(struct t3_wq *wq)
+-{
+-        wq->queue->flit[13] |= 1;
+-}
+-
+-static inline int t3_wq_db_enabled(struct t3_wq *wq)
+-{
+-      return !(wq->queue->flit[13] & 2);
+-}
+-
+-#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
+-                                       CQE_GENBIT(*cqe))
+-
+-static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
+-{
+-      struct t3_cqe *cqe;
+-
+-      cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
+-      if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
+-              return cqe;
+-      return NULL;
+-}
+-
+-static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
+-{
+-      struct t3_cqe *cqe;
+-
+-      if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
+-              cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
+-              return cqe;
+-      }
+-      return NULL;
+-}
+-
+-static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
+-{
+-      struct t3_cqe *cqe;
+-
+-      if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
+-              cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
+-              return cqe;
+-      }
+-      cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
+-      if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
+-              return cqe;
+-      return NULL;
+-}
+-
+-/*
+- * Return a ptr to the next read wr in the SWSQ or NULL.
+- */
+-static inline struct t3_swsq *next_read_wr(struct t3_wq *wq)
+-{
+-      uint32_t rptr = wq->oldest_read - wq->sq + 1;
+-      int count = Q_COUNT(rptr, wq->sq_wptr);
+-      struct t3_swsq *sqp;
+-
+-      while (count--) {
+-              sqp = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
+-
+-              if (sqp->opcode == T3_READ_REQ)
+-                      return sqp;
+-
+-              rptr++;
+-      }
+-      return NULL;
+-}
+-#endif
+diff --git a/providers/cxgb3/firmware_exports.h b/providers/cxgb3/firmware_exports.h
+deleted file mode 100644
+index 831140a4c..000000000
+--- a/providers/cxgb3/firmware_exports.h
++++ /dev/null
+@@ -1,148 +0,0 @@
+-/*
+- * Copyright (c) 2004-2007 Chelsio, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-#ifndef _FIRMWARE_EXPORTS_H_
+-#define _FIRMWARE_EXPORTS_H_
+-
+-/* WR OPCODES supported by the firmware.
+- */
+-#define       FW_WROPCODE_FORWARD                     0x01
+-#define FW_WROPCODE_BYPASS                    0x05
+-
+-#define FW_WROPCODE_TUNNEL_TX_PKT             0x03
+-
+-#define FW_WROPOCDE_ULPTX_DATA_SGL            0x00
+-#define FW_WROPCODE_ULPTX_MEM_READ            0x02
+-#define FW_WROPCODE_ULPTX_PKT                 0x04
+-#define FW_WROPCODE_ULPTX_INVALIDATE          0x06
+-
+-#define FW_WROPCODE_TUNNEL_RX_PKT             0x07
+-
+-#define FW_WROPCODE_TOE_GETTCB_RPL            0x08
+-#define FW_WROPCODE_TOE_CLOSE_CON             0x09
+-#define FW_WROPCODE_TOE_TP_ABORT_CON_REQ      0x0A
+-#define FW_WROPCODE_TOE_HOST_ABORT_CON_RPL    0x0F
+-#define FW_WROPCODE_TOE_HOST_ABORT_CON_REQ    0x0B
+-#define FW_WROPCODE_TOE_TP_ABORT_CON_RPL      0x0C
+-#define FW_WROPCODE_TOE_TX_DATA                       0x0D
+-#define FW_WROPCODE_TOE_TX_DATA_ACK           0x0E
+-
+-#define FW_WROPCODE_RI_RDMA_INIT              0x10
+-#define FW_WROPCODE_RI_RDMA_WRITE             0x11
+-#define FW_WROPCODE_RI_RDMA_READ_REQ          0x12
+-#define FW_WROPCODE_RI_RDMA_READ_RESP         0x13
+-#define FW_WROPCODE_RI_SEND                   0x14
+-#define FW_WROPCODE_RI_TERMINATE              0x15
+-#define FW_WROPCODE_RI_RDMA_READ              0x16
+-#define FW_WROPCODE_RI_RECEIVE                        0x17
+-#define FW_WROPCODE_RI_BIND_MW                        0x18
+-#define FW_WROPCODE_RI_FASTREGISTER_MR                0x19
+-#define FW_WROPCODE_RI_LOCAL_INV              0x1A
+-#define FW_WROPCODE_RI_MODIFY_QP              0x1B
+-#define FW_WROPCODE_RI_BYPASS                 0x1C
+-
+-#define FW_WROPOCDE_RSVD                      0x1E
+-
+-#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR      0x1F
+-
+-#define FW_WROPCODE_MNGT                      0x1D
+-#define FW_MNGTOPCODE_PKTSCHED_SET            0x00
+-
+-/* Maximum size of a WR sent from the host, limited by the SGE. 
+- *
+- * Note: WR coming from ULP or TP are only limited by CIM. 
+- */
+-#define FW_WR_SIZE                    128
+-
+-/* Maximum number of outstanding WRs sent from the host. Value must be
+- * programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by TOM to 
+- * limit the number of WRs per connection.
+- */
+-#ifndef N3
+-# define FW_WR_NUM                    16
+-#else
+-# define FW_WR_NUM                    7
+-#endif
+-
+-/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
+- * queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
+- * start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
+- *
+- * Ingress Traffic (e.g. DMA completion credit)  for TUNNEL Queue[i] is sent 
+- * to RESP Queue[i].
+- */
+-#define FW_TUNNEL_NUM                   8
+-#define FW_TUNNEL_SGEEC_START           8
+-#define FW_TUNNEL_TID_START             65544
+-
+-
+-/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
+- * must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
+- * (or 'uP Token') FW_CTRL_TID_START.
+- *
+- * Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
+- */ 
+-#define FW_CTRL_NUM                   8
+-#define FW_CTRL_SGEEC_START           65528
+-#define FW_CTRL_TID_START             65536
+-
+-/* FW_TOE_NUM corresponds to the number of supported TOE Queues. These queues 
+- * must start at SGE Egress Context FW_TOE_SGEEC_START. 
+- * 
+- * Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for 
+- * TOE Queues, as the host is responsible for providing the correct TID in
+- * every WR.
+- *
+- * Ingress Trafffic for TOE Queue[i] is sent to RESP Queue[i].
+- */
+-#define FW_TOE_NUM                    8
+-#define FW_TOE_SGEEC_START            0
+-
+-/*
+- * 
+- */
+-#define FW_RI_NUM                     1
+-#define FW_RI_SGEEC_START             65527
+-#define FW_RI_TID_START                       65552
+-
+-/*
+- * The RX_PKT_TID 
+- */
+-#define FW_RX_PKT_NUM                 1
+-#define FW_RX_PKT_TID_START           65553
+-
+-/* FW_WRC_NUM corresponds to the number of Work Request Context that supported
+- * by the firmware.
+- */
+-#define FW_WRC_NUM                    (65536 + FW_TUNNEL_NUM + FW_CTRL_NUM +\
+-                                       FW_RI_NUM + FW_RX_PKT_NUM)
+-
+-#endif /* _FIRMWARE_EXPORTS_H_ */
+diff --git a/providers/cxgb3/iwch-abi.h b/providers/cxgb3/iwch-abi.h
+deleted file mode 100644
+index 047f84b7a..000000000
+--- a/providers/cxgb3/iwch-abi.h
++++ /dev/null
+@@ -1,51 +0,0 @@
+-/*
+- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-#ifndef IWCH_ABI_H
+-#define IWCH_ABI_H
+-
+-#include <stdint.h>
+-#include <infiniband/kern-abi.h>
+-#include <rdma/cxgb3-abi.h>
+-#include <kernel-abi/cxgb3-abi.h>
+-
+-DECLARE_DRV_CMD(uiwch_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD,
+-              empty, iwch_alloc_pd_resp);
+-DECLARE_DRV_CMD(uiwch_create_cq, IB_USER_VERBS_CMD_CREATE_CQ,
+-              iwch_create_cq_req, iwch_create_cq_resp);
+-DECLARE_DRV_CMD(uiwch_create_qp, IB_USER_VERBS_CMD_CREATE_QP,
+-              empty, iwch_create_qp_resp);
+-DECLARE_DRV_CMD(uiwch_alloc_ucontext, IB_USER_VERBS_CMD_GET_CONTEXT,
+-              empty, empty);
+-DECLARE_DRV_CMD(uiwch_reg_mr, IB_USER_VERBS_CMD_REG_MR,
+-              empty, iwch_reg_user_mr_resp);
+-
+-#endif                                /* IWCH_ABI_H */
+diff --git a/providers/cxgb3/iwch.c b/providers/cxgb3/iwch.c
+deleted file mode 100644
+index 6f3c8b9f1..000000000
+--- a/providers/cxgb3/iwch.c
++++ /dev/null
+@@ -1,269 +0,0 @@
+-/*
+- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-#include <config.h>
+-
+-#include <stdio.h>
+-#include <stdlib.h>
+-#include <unistd.h>
+-#include <errno.h>
+-#include <sys/mman.h>
+-#include <pthread.h>
+-#include <string.h>
+-
+-#include "iwch.h"
+-#include "iwch-abi.h"
+-
+-#define PCI_VENDOR_ID_CHELSIO         0x1425
+-#define PCI_DEVICE_ID_CHELSIO_PE9000_2C       0x0020
+-#define PCI_DEVICE_ID_CHELSIO_T302E   0x0021
+-#define PCI_DEVICE_ID_CHELSIO_T310E   0x0022
+-#define PCI_DEVICE_ID_CHELSIO_T320X   0x0023
+-#define PCI_DEVICE_ID_CHELSIO_T302X   0x0024
+-#define PCI_DEVICE_ID_CHELSIO_T320E   0x0025
+-#define PCI_DEVICE_ID_CHELSIO_T310X   0x0026
+-#define PCI_DEVICE_ID_CHELSIO_T3B10   0x0030
+-#define PCI_DEVICE_ID_CHELSIO_T3B20   0x0031
+-#define PCI_DEVICE_ID_CHELSIO_T3B02   0x0032
+-#define PCI_DEVICE_ID_CHELSIO_T3C20   0x0035
+-#define PCI_DEVICE_ID_CHELSIO_S320E   0x0036
+-
+-#define HCA(v, d, t)                                                           \
+-      VERBS_PCI_MATCH(PCI_VENDOR_ID_##v, PCI_DEVICE_ID_CHELSIO_##d,          \
+-                      (void *)(CHELSIO_##t))
+-static const struct verbs_match_ent hca_table[] = {
+-      HCA(CHELSIO, PE9000_2C, T3B),
+-      HCA(CHELSIO, T302E, T3A),
+-      HCA(CHELSIO, T302X, T3A),
+-      HCA(CHELSIO, T310E, T3A),
+-      HCA(CHELSIO, T310X, T3A),
+-      HCA(CHELSIO, T320E, T3A),
+-      HCA(CHELSIO, T320X, T3A),
+-      HCA(CHELSIO, T3B10, T3B),
+-      HCA(CHELSIO, T3B20, T3B),
+-      HCA(CHELSIO, T3B02, T3B),
+-      HCA(CHELSIO, T3C20, T3B),
+-      HCA(CHELSIO, S320E, T3B),
+-      {},
+-};
+-
+-static const struct verbs_context_ops iwch_ctx_common_ops = {
+-      .query_device = iwch_query_device,
+-      .query_port = iwch_query_port,
+-      .alloc_pd = iwch_alloc_pd,
+-      .dealloc_pd = iwch_free_pd,
+-      .reg_mr = iwch_reg_mr,
+-      .dereg_mr = iwch_dereg_mr,
+-      .create_cq = iwch_create_cq,
+-      .resize_cq = iwch_resize_cq,
+-      .destroy_cq = iwch_destroy_cq,
+-      .create_srq = iwch_create_srq,
+-      .modify_srq = iwch_modify_srq,
+-      .destroy_srq = iwch_destroy_srq,
+-      .create_qp = iwch_create_qp,
+-      .modify_qp = iwch_modify_qp,
+-      .destroy_qp = iwch_destroy_qp,
+-      .query_qp = iwch_query_qp,
+-      .create_ah = iwch_create_ah,
+-      .destroy_ah = iwch_destroy_ah,
+-      .attach_mcast = iwch_attach_mcast,
+-      .detach_mcast = iwch_detach_mcast,
+-      .post_srq_recv = iwch_post_srq_recv,
+-      .req_notify_cq = iwch_arm_cq,
+-};
+-
+-static const struct verbs_context_ops iwch_ctx_t3a_ops = {
+-      .poll_cq = t3a_poll_cq,
+-      .post_recv = t3a_post_recv,
+-      .post_send = t3a_post_send,
+-};
+-
+-static const struct verbs_context_ops iwch_ctx_t3b_ops = {
+-      .async_event = t3b_async_event,
+-      .poll_cq = t3b_poll_cq,
+-      .post_recv = t3b_post_recv,
+-      .post_send = t3b_post_send,
+-};
+-
+-unsigned long iwch_page_size;
+-unsigned long iwch_page_shift;
+-unsigned long iwch_page_mask;
+-
+-static struct verbs_context *iwch_alloc_context(struct ibv_device *ibdev,
+-                                              int cmd_fd,
+-                                              void *private_data)
+-{
+-      struct iwch_context *context;
+-      struct ibv_get_context cmd;
+-      struct uiwch_alloc_ucontext_resp resp;
+-      struct iwch_device *rhp = to_iwch_dev(ibdev);
+-
+-      context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
+-                                             RDMA_DRIVER_CXGB3);
+-      if (!context)
+-              return NULL;
+-
+-      if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof cmd,
+-                              &resp.ibv_resp, sizeof resp))
+-              goto err_free;
+-
+-      verbs_set_ops(&context->ibv_ctx, &iwch_ctx_common_ops);
+-
+-      switch (rhp->hca_type) {
+-      case CHELSIO_T3B:
+-              PDBG("%s T3B device\n", __FUNCTION__);
+-              verbs_set_ops(&context->ibv_ctx, &iwch_ctx_t3b_ops);
+-              break;
+-      case CHELSIO_T3A:
+-              PDBG("%s T3A device\n", __FUNCTION__);
+-              verbs_set_ops(&context->ibv_ctx, &iwch_ctx_t3a_ops);
+-              break;
+-      default:
+-              PDBG("%s unknown hca type %d\n", __FUNCTION__, rhp->hca_type);
+-              goto err_free;
+-              break;
+-      }       
+-
+-      return &context->ibv_ctx;
+-
+-err_free:
+-      verbs_uninit_context(&context->ibv_ctx);
+-      free(context);
+-      return NULL;
+-}
+-
+-static void iwch_free_context(struct ibv_context *ibctx)
+-{
+-      struct iwch_context *context = to_iwch_ctx(ibctx);
+-
+-      verbs_uninit_context(&context->ibv_ctx);
+-      free(context);
+-}
+-
+-static void iwch_uninit_device(struct verbs_device *verbs_device)
+-{
+-      struct iwch_device *dev = to_iwch_dev(&verbs_device->device);
+-
+-      free(dev);
+-}
+-
+-static bool iwch_device_match(struct verbs_sysfs_dev *sysfs_dev)
+-{
+-      char value[32], *cp;
+-      unsigned int fw_maj, fw_min;
+-
+-      /* Rely on the core code to match PCI devices */
+-      if (!sysfs_dev->match)
+-              return false;
+-
+-      /* 
+-       * Verify that the firmware major number matches.  Major number
+-       * mismatches are fatal.  Minor number mismatches are tolerated.
+-       */
+-      if (ibv_get_fw_ver(value, sizeof(value), sysfs_dev))
+-              return false;
+-
+-      cp = strtok(value+1, ".");
+-      sscanf(cp, "%i", &fw_maj);
+-      cp = strtok(NULL, ".");
+-      sscanf(cp, "%i", &fw_min);
+-
+-      if (fw_maj < FW_MAJ) {
+-              fprintf(stderr, "libcxgb3: Fatal firmware version mismatch.  "
+-                      "Firmware major number is %u and libcxgb3 needs %u.\n",
+-                      fw_maj, FW_MAJ);        
+-              fflush(stderr);
+-              return false;
+-      }
+-
+-      DBGLOG("libcxgb3");
+-
+-      if ((signed int)fw_min < FW_MIN) {
+-              PDBG("libcxgb3: non-fatal firmware version mismatch.  "
+-                      "Firmware minor number is %u and libcxgb3 needs %u.\n",
+-                      fw_min, FW_MIN);
+-              fflush(stderr);
+-      }
+-
+-      return true;
+-}
+-
+-static struct verbs_device *iwch_device_alloc(struct verbs_sysfs_dev *sysfs_dev)
+-{
+-      struct iwch_device *dev;
+-
+-      dev = calloc(1, sizeof(*dev));
+-      if (!dev)
+-              return NULL;
+-
+-      pthread_spin_init(&dev->lock, PTHREAD_PROCESS_PRIVATE);
+-      dev->hca_type = (uintptr_t)sysfs_dev->match->driver_data;
+-      dev->abi_version = sysfs_dev->abi_ver;
+-
+-      iwch_page_size = sysconf(_SC_PAGESIZE);
+-      iwch_page_shift = long_log2(iwch_page_size);
+-      iwch_page_mask = iwch_page_size - 1;
+-
+-      dev->mmid2ptr = calloc(T3_MAX_NUM_STAG, sizeof(void *));
+-      if (!dev->mmid2ptr) {
+-              goto err1;
+-      }
+-      dev->qpid2ptr = calloc(T3_MAX_NUM_QP, sizeof(void *)); 
+-      if (!dev->qpid2ptr) {
+-              goto err2;
+-      }
+-      dev->cqid2ptr = calloc(T3_MAX_NUM_CQ, sizeof(void *));
+-      if (!dev->cqid2ptr) 
+-              goto err3;
+-
+-      return &dev->ibv_dev;
+-
+-err3:
+-      free(dev->qpid2ptr);
+-err2:
+-      free(dev->mmid2ptr);
+-err1:
+-      free(dev);
+-      return NULL;
+-}
+-
+-static const struct verbs_device_ops iwch_dev_ops = {
+-      .name = "cxgb3",
+-      .match_min_abi_version = 0,
+-      .match_max_abi_version = ABI_VERS,
+-      .match_table = hca_table,
+-      .match_device = iwch_device_match,
+-      .alloc_device = iwch_device_alloc,
+-      .uninit_device = iwch_uninit_device,
+-      .alloc_context = iwch_alloc_context,
+-      .free_context = iwch_free_context,
+-};
+-PROVIDER_DRIVER(cxgb3, iwch_dev_ops);
+diff --git a/providers/cxgb3/iwch.h b/providers/cxgb3/iwch.h
+deleted file mode 100644
+index c7d85d3aa..000000000
+--- a/providers/cxgb3/iwch.h
++++ /dev/null
+@@ -1,218 +0,0 @@
+-/*
+- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-#ifndef IWCH_H
+-#define IWCH_H
+-
+-#include <pthread.h>
+-#include <inttypes.h>
+-#include <stddef.h>
+-
+-#include <infiniband/driver.h>
+-#include <util/udma_barrier.h>
+-
+-#include "cxio_wr.h"
+-
+-enum iwch_hca_type {
+-      CHELSIO_T3A = 0,
+-      CHELSIO_T3B = 1,
+-};
+-
+-struct iwch_mr;
+-
+-#define ABI_VERS 1
+-
+-struct iwch_device {
+-      struct verbs_device ibv_dev;
+-      enum iwch_hca_type hca_type;
+-      struct iwch_mr **mmid2ptr;
+-      struct iwch_qp **qpid2ptr;
+-      struct iwch_cq **cqid2ptr;
+-      pthread_spinlock_t lock;
+-      int abi_version;
+-};
+-
+-static inline int t3b_device(struct iwch_device *dev)
+-{
+-      return (dev->hca_type == CHELSIO_T3B);
+-}
+-
+-static inline int t3a_device(struct iwch_device *dev)
+-{
+-      return (dev->hca_type == CHELSIO_T3A);
+-}
+-
+-struct iwch_context {
+-      struct verbs_context ibv_ctx;
+-};
+-
+-struct iwch_pd {
+-      struct ibv_pd ibv_pd;
+-};
+-
+-struct iwch_mr {
+-      struct verbs_mr vmr;
+-      uint64_t va_fbo;
+-      uint32_t page_size;
+-      uint32_t pbl_addr;
+-      uint32_t len;
+-};
+-
+-struct iwch_cq {
+-      struct ibv_cq ibv_cq;
+-      struct iwch_device *rhp;
+-      struct t3_cq cq;
+-      pthread_spinlock_t lock;
+-};
+-
+-struct iwch_qp {
+-      struct ibv_qp ibv_qp;
+-      struct iwch_device *rhp;
+-      struct t3_wq wq;
+-      pthread_spinlock_t lock;
+-      int sq_sig_all;
+-};
+-
+-#define to_iwch_xxx(xxx, type)                                                 \
+-      container_of(ib##xxx, struct iwch_##type, ibv_##xxx)
+-
+-static inline struct iwch_device *to_iwch_dev(struct ibv_device *ibdev)
+-{
+-      return container_of(ibdev, struct iwch_device, ibv_dev.device);
+-}
+-
+-static inline struct iwch_context *to_iwch_ctx(struct ibv_context *ibctx)
+-{
+-      return container_of(ibctx, struct iwch_context, ibv_ctx.context);
+-}
+-
+-static inline struct iwch_pd *to_iwch_pd(struct ibv_pd *ibpd)
+-{
+-      return to_iwch_xxx(pd, pd);
+-}
+-
+-static inline struct iwch_cq *to_iwch_cq(struct ibv_cq *ibcq)
+-{
+-      return to_iwch_xxx(cq, cq);
+-}
+-
+-static inline struct iwch_qp *to_iwch_qp(struct ibv_qp *ibqp)
+-{
+-      return to_iwch_xxx(qp, qp);
+-}
+-
+-static inline struct iwch_mr *to_iwch_mr(struct verbs_mr *vmr)
+-{
+-      return container_of(vmr, struct iwch_mr, vmr);
+-}
+-
+-static inline unsigned long long_log2(unsigned long x)
+-{
+-        unsigned long r = 0;
+-        for (x >>= 1; x > 0; x >>= 1)
+-                r++;
+-        return r;
+-}
+-
+-extern int iwch_query_device(struct ibv_context *context,
+-                           struct ibv_device_attr *attr);
+-extern int iwch_query_port(struct ibv_context *context, uint8_t port,
+-                         struct ibv_port_attr *attr);
+-
+-extern struct ibv_pd *iwch_alloc_pd(struct ibv_context *context);
+-extern int iwch_free_pd(struct ibv_pd *pd);
+-
+-extern struct ibv_mr *iwch_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
+-                                uint64_t hca_va, int access);
+-extern int iwch_dereg_mr(struct verbs_mr *mr);
+-
+-struct ibv_cq *iwch_create_cq(struct ibv_context *context, int cqe,
+-                            struct ibv_comp_channel *channel,
+-                            int comp_vector);
+-extern int iwch_resize_cq(struct ibv_cq *cq, int cqe);
+-extern int iwch_destroy_cq(struct ibv_cq *cq);
+-extern int t3a_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
+-extern int t3b_poll_cq(struct ibv_cq *cq, int ne, struct ibv_wc *wc);
+-extern int iwch_arm_cq(struct ibv_cq *cq, int solicited);
+-extern void iwch_cq_event(struct ibv_cq *cq);
+-extern void iwch_init_cq_buf(struct iwch_cq *cq, int nent);
+-
+-extern struct ibv_srq *iwch_create_srq(struct ibv_pd *pd,
+-                                     struct ibv_srq_init_attr *attr);
+-extern int iwch_modify_srq(struct ibv_srq *srq,
+-                         struct ibv_srq_attr *attr,
+-                         int mask);
+-extern int iwch_destroy_srq(struct ibv_srq *srq);
+-extern int iwch_post_srq_recv(struct ibv_srq *ibsrq,
+-                            struct ibv_recv_wr *wr,
+-                            struct ibv_recv_wr **bad_wr);
+-
+-extern struct ibv_qp *iwch_create_qp(struct ibv_pd *pd,
+-                                   struct ibv_qp_init_attr *attr);
+-extern int iwch_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+-                        int attr_mask);
+-extern int iwch_destroy_qp(struct ibv_qp *qp);
+-extern int iwch_query_qp(struct ibv_qp *qp,
+-                       struct ibv_qp_attr *attr,
+-                       int attr_mask,
+-                       struct ibv_qp_init_attr *init_attr);
+-extern void iwch_flush_qp(struct iwch_qp *qhp);
+-extern void iwch_flush_qps(struct iwch_device *dev);
+-extern int t3a_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
+-                        struct ibv_send_wr **bad_wr);
+-extern int t3b_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
+-                        struct ibv_send_wr **bad_wr);
+-extern int t3a_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
+-                        struct ibv_recv_wr **bad_wr);
+-extern int t3b_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
+-                        struct ibv_recv_wr **bad_wr);
+-extern struct ibv_ah *iwch_create_ah(struct ibv_pd *pd,
+-                           struct ibv_ah_attr *ah_attr);
+-extern int iwch_destroy_ah(struct ibv_ah *ah);
+-extern int iwch_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
+-                           uint16_t lid);
+-extern int iwch_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
+-                           uint16_t lid);
+-extern void t3b_async_event(struct ibv_context *context,
+-                          struct ibv_async_event *event);
+-#ifdef DEBUG
+-#include <syslog.h>
+-#define DBGLOG(s) openlog(s, LOG_NDELAY|LOG_PID, LOG_LOCAL7)
+-#define PDBG(fmt, args...) do {syslog(LOG_DEBUG, fmt, ##args);} while (0)
+-#else
+-#define DBGLOG(s) 
+-#define PDBG(fmt, args...) do {} while (0)
+-#endif
+-
+-#define FW_MAJ 5
+-#define FW_MIN 0
+-
+-#endif                                /* IWCH_H */
+diff --git a/providers/cxgb3/qp.c b/providers/cxgb3/qp.c
+deleted file mode 100644
+index 4a1e7397c..000000000
+--- a/providers/cxgb3/qp.c
++++ /dev/null
+@@ -1,560 +0,0 @@
+-/*
+- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-#include <config.h>
+-
+-#include <stdlib.h>
+-#include <pthread.h>
+-#include <string.h>
+-
+-#include "iwch.h"
+-#include <stdio.h>
+-
+-#define ROUNDUP8(a) (((a) + 7) & ~7)
+-
+-static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ibv_send_wr *wr,
+-                                     uint8_t *flit_cnt)
+-{
+-      int i;
+-
+-      if (wr->num_sge > T3_MAX_SGE)
+-              return -1;
+-      if (wr->send_flags & IBV_SEND_SOLICITED)
+-              wqe->send.rdmaop = T3_SEND_WITH_SE;
+-      else
+-              wqe->send.rdmaop = T3_SEND;
+-      wqe->send.rem_stag = 0;
+-      wqe->send.reserved = 0;
+-      if ((wr->send_flags & IBV_SEND_INLINE) || wr->num_sge == 0) {
+-              uint8_t *datap;
+-
+-              wqe->send.plen = 0;
+-              datap = (uint8_t *)&wqe->send.sgl[0];
+-              wqe->send.num_sgle = 0; /* indicates in-line data */
+-              for (i = 0; i < wr->num_sge; i++) {
+-                      if ((wqe->send.plen + wr->sg_list[i].length) > 
+-                          T3_MAX_INLINE)
+-                              return -1;
+-                      wqe->send.plen += wr->sg_list[i].length;
+-                      memcpy(datap, 
+-                             (void *)(unsigned long)wr->sg_list[i].addr, 
+-                             wr->sg_list[i].length);
+-                      datap += wr->sg_list[i].length;
+-              }
+-              *flit_cnt = 4 + (ROUNDUP8(wqe->send.plen) >> 3);
+-              wqe->send.plen = htobe32(wqe->send.plen);
+-      } else {
+-              wqe->send.plen = 0;
+-              for (i = 0; i < wr->num_sge; i++) {
+-                      if ((wqe->send.plen + wr->sg_list[i].length) < 
+-                          wqe->send.plen) {
+-                              return -1;
+-                      }
+-                      wqe->send.plen += wr->sg_list[i].length;
+-                      wqe->send.sgl[i].stag =
+-                          htobe32(wr->sg_list[i].lkey);
+-                      wqe->send.sgl[i].len =
+-                          htobe32(wr->sg_list[i].length);
+-                      wqe->send.sgl[i].to = htobe64(wr->sg_list[i].addr);
+-              }
+-              wqe->send.plen = htobe32(wqe->send.plen);
+-              wqe->send.num_sgle = htobe32(wr->num_sge);
+-              *flit_cnt = 4 + ((wr->num_sge) << 1);
+-      }
+-      return 0;
+-}
+-
+-static inline int iwch_build_rdma_write(union t3_wr *wqe, 
+-                                      struct ibv_send_wr *wr,
+-                                      uint8_t *flit_cnt)
+-{
+-      int i;
+-
+-      if (wr->num_sge > T3_MAX_SGE)
+-              return -1;
+-      wqe->write.rdmaop = T3_RDMA_WRITE;
+-      wqe->write.reserved = 0;
+-      wqe->write.stag_sink = htobe32(wr->wr.rdma.rkey);
+-      wqe->write.to_sink = htobe64(wr->wr.rdma.remote_addr);
+-
+-      wqe->write.num_sgle = wr->num_sge;
+-
+-      if ((wr->send_flags & IBV_SEND_INLINE) || wr->num_sge == 0) {
+-              uint8_t *datap;
+-
+-              wqe->write.plen = 0;
+-              datap = (uint8_t *)&wqe->write.sgl[0];
+-              wqe->write.num_sgle = 0;        /* indicates in-line data */
+-              for (i = 0; i < wr->num_sge; i++) {
+-                      if ((wqe->write.plen + wr->sg_list[i].length) >
+-                          T3_MAX_INLINE)
+-                              return -1;
+-                      wqe->write.plen += wr->sg_list[i].length;
+-                      memcpy(datap, 
+-                             (void *)(unsigned long)wr->sg_list[i].addr, 
+-                             wr->sg_list[i].length);
+-                      datap += wr->sg_list[i].length;
+-              }
+-              *flit_cnt = 5 + (ROUNDUP8(wqe->write.plen) >> 3);
+-              wqe->write.plen = htobe32(wqe->write.plen);
+-      } else {
+-              wqe->write.plen = 0;
+-              for (i = 0; i < wr->num_sge; i++) {
+-                      if ((wqe->write.plen + wr->sg_list[i].length) < 
+-                          wqe->write.plen) {
+-                              return -1;
+-                      }
+-                      wqe->write.plen += wr->sg_list[i].length;
+-                      wqe->write.sgl[i].stag =
+-                          htobe32(wr->sg_list[i].lkey);
+-                      wqe->write.sgl[i].len =
+-                          htobe32(wr->sg_list[i].length);
+-                      wqe->write.sgl[i].to =
+-                          htobe64(wr->sg_list[i].addr);
+-              }
+-              wqe->write.plen = htobe32(wqe->write.plen);
+-              wqe->write.num_sgle = htobe32(wr->num_sge);
+-              *flit_cnt = 5 + ((wr->num_sge) << 1);
+-      }
+-      return 0;
+-}
+-
+-static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ibv_send_wr *wr,
+-                                     uint8_t *flit_cnt)
+-{
+-      if (wr->num_sge > 1)
+-              return -1;
+-      wqe->read.rdmaop = T3_READ_REQ;
+-      wqe->read.reserved = 0;
+-      if (wr->num_sge == 1 && wr->sg_list[0].length > 0) {
+-              wqe->read.rem_stag = htobe32(wr->wr.rdma.rkey);
+-              wqe->read.rem_to = htobe64(wr->wr.rdma.remote_addr);
+-              wqe->read.local_stag = htobe32(wr->sg_list[0].lkey);
+-              wqe->read.local_len = htobe32(wr->sg_list[0].length);
+-              wqe->read.local_to = htobe64(wr->sg_list[0].addr);
+-      } else {
+-
+-              /* build passable 0B read request */
+-              wqe->read.rem_stag = 2;
+-              wqe->read.rem_to = 2;
+-              wqe->read.local_stag = 2;
+-              wqe->read.local_len = 0;
+-              wqe->read.local_to = 2;
+-      }
+-      *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
+-      return 0;
+-}
+-
+-int t3b_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr, 
+-                struct ibv_send_wr **bad_wr)
+-{
+-      int err = 0;
+-      uint8_t t3_wr_flit_cnt;
+-      enum t3_wr_opcode t3_wr_opcode = 0;
+-      enum t3_wr_flags t3_wr_flags;
+-      struct iwch_qp *qhp;
+-      uint32_t idx;
+-      union t3_wr *wqe;
+-      uint32_t num_wrs;
+-      struct t3_swsq *sqp;
+-
+-      qhp = to_iwch_qp(ibqp);
+-      pthread_spin_lock(&qhp->lock);
+-      if (t3_wq_in_error(&qhp->wq)) {
+-              iwch_flush_qp(qhp);
+-              pthread_spin_unlock(&qhp->lock);
+-              return -1;
+-      }
+-      num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr, 
+-                qhp->wq.sq_size_log2);
+-      if (num_wrs <= 0) {
+-              pthread_spin_unlock(&qhp->lock);
+-              return -1;
+-      }
+-      while (wr) {
+-              if (num_wrs == 0) {
+-                      err = -1;
+-                      *bad_wr = wr;
+-                      break;
+-              }
+-              idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
+-              wqe = (union t3_wr *) (qhp->wq.queue + idx);
+-              t3_wr_flags = 0;
+-              if (wr->send_flags & IBV_SEND_SOLICITED)
+-                      t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
+-              if (wr->send_flags & IBV_SEND_FENCE)
+-                      t3_wr_flags |= T3_READ_FENCE_FLAG;
+-              if ((wr->send_flags & IBV_SEND_SIGNALED) || qhp->sq_sig_all)
+-                      t3_wr_flags |= T3_COMPLETION_FLAG;
+-              sqp = qhp->wq.sq + 
+-                    Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
+-              switch (wr->opcode) {
+-              case IBV_WR_SEND:
+-                      t3_wr_opcode = T3_WR_SEND;
+-                      err = iwch_build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
+-                      break;
+-              case IBV_WR_RDMA_WRITE:
+-                      t3_wr_opcode = T3_WR_WRITE;
+-                      err = iwch_build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
+-                      break;
+-              case IBV_WR_RDMA_READ:
+-                      t3_wr_opcode = T3_WR_READ;
+-                      t3_wr_flags = 0;
+-                      err = iwch_build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
+-                      if (err)
+-                              break;
+-                      sqp->read_len = wqe->read.local_len;
+-                      if (!qhp->wq.oldest_read)
+-                              qhp->wq.oldest_read = sqp;
+-                      break;
+-              default:
+-                      PDBG("%s post of type=%d TBD!\n", __FUNCTION__, 
+-                           wr->opcode);
+-                      err = -1;
+-              }
+-              if (err) {
+-                      *bad_wr = wr;
+-                      break;
+-              }
+-              wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
+-              sqp->wr_id = wr->wr_id;
+-              sqp->opcode = wr2opcode(t3_wr_opcode);
+-              sqp->sq_wptr = qhp->wq.sq_wptr;
+-              sqp->complete = 0;
+-              sqp->signaled = (wr->send_flags & IBV_SEND_SIGNALED);
+-
+-              build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
+-                             Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
+-                             0, t3_wr_flit_cnt);
+-              PDBG("%s cookie 0x%" PRIx64 
+-                   " wq idx 0x%x swsq idx %ld opcode %d\n", 
+-                   __FUNCTION__, wr->wr_id, idx, 
+-                   Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
+-                   sqp->opcode);
+-              wr = wr->next;
+-              num_wrs--;
+-              ++(qhp->wq.wptr);
+-              ++(qhp->wq.sq_wptr);
+-      }
+-      pthread_spin_unlock(&qhp->lock);
+-      if (t3_wq_db_enabled(&qhp->wq))
+-              RING_DOORBELL(qhp->wq.doorbell, qhp->wq.qpid);
+-      return err;
+-}
+-
+-int t3a_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr, 
+-                 struct ibv_send_wr **bad_wr)
+-{
+-      int ret;
+-      struct iwch_qp *qhp = to_iwch_qp(ibqp);
+-
+-      pthread_spin_lock(&qhp->lock);
+-      ret = ibv_cmd_post_send(ibqp, wr, bad_wr);
+-      pthread_spin_unlock(&qhp->lock);
+-      return ret;
+-}
+-
+-static inline int iwch_build_rdma_recv(struct iwch_device *rhp,
+-                                     union t3_wr *wqe, 
+-                                     struct ibv_recv_wr *wr)
+-{
+-      int i;
+-      if (wr->num_sge > T3_MAX_SGE)
+-              return -1;
+-
+-      wqe->recv.num_sgle = htobe32(wr->num_sge);
+-      for (i = 0; i < wr->num_sge; i++) {
+-              wqe->recv.sgl[i].stag = htobe32(wr->sg_list[i].lkey);
+-              wqe->recv.sgl[i].len = htobe32(wr->sg_list[i].length);
+-              wqe->recv.sgl[i].to = htobe64(wr->sg_list[i].addr);
+-      }
+-      for (; i < T3_MAX_SGE; i++) {
+-              wqe->recv.sgl[i].stag = 0;
+-              wqe->recv.sgl[i].len = 0;
+-              wqe->recv.sgl[i].to = 0;
+-      }
+-      return 0;
+-}
+-
+-static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
+-{
+-      struct t3_cqe cqe;
+-
+-      PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__, 
+-           wq, cq, cq->sw_rptr, cq->sw_wptr);
+-      memset(&cqe, 0, sizeof(cqe));
+-      cqe.header = V_CQE_STATUS(TPT_ERR_SWFLUSH) | 
+-                   V_CQE_OPCODE(T3_SEND) | 
+-                   V_CQE_TYPE(0) |
+-                   V_CQE_SWCQE(1) |
+-                   V_CQE_QPID(wq->qpid) | 
+-                   V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr, cq->size_log2));
+-      cqe.header = htobe32(cqe.header);
+-      *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
+-      cq->sw_wptr++;
+-}
+-
+-static void flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
+-{
+-      uint32_t ptr;
+-
+-      /* flush RQ */
+-      PDBG("%s rq_rptr 0x%x rq_wptr 0x%x skip count %u\n", __FUNCTION__, 
+-           wq->rq_rptr, wq->rq_wptr, count);
+-      ptr = wq->rq_rptr + count;
+-      while (ptr++ != wq->rq_wptr) {
+-              insert_recv_cqe(wq, cq);
+-      }
+-}
+-
+-static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, 
+-                        struct t3_swsq *sqp)
+-{
+-      struct t3_cqe cqe;
+-
+-      PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__, 
+-           wq, cq, cq->sw_rptr, cq->sw_wptr);
+-      memset(&cqe, 0, sizeof(cqe));
+-      cqe.header = V_CQE_STATUS(TPT_ERR_SWFLUSH) | 
+-                   V_CQE_OPCODE(sqp->opcode) |
+-                   V_CQE_TYPE(1) |
+-                   V_CQE_SWCQE(1) |
+-                   V_CQE_QPID(wq->qpid) | 
+-                   V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr, cq->size_log2));
+-      cqe.header = htobe32(cqe.header);
+-      CQE_WRID_SQ_WPTR(cqe) = sqp->sq_wptr;
+-
+-      *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
+-      cq->sw_wptr++;
+-}
+-
+-static void flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
+-{
+-      uint32_t ptr;
+-      struct t3_swsq *sqp;
+-
+-      ptr = wq->sq_rptr + count;
+-      sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
+-      while (ptr != wq->sq_wptr) {
+-              insert_sq_cqe(wq, cq, sqp);
+-              ptr++;
+-              sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
+-      }
+-}
+-
+-/* 
+- * Move all CQEs from the HWCQ into the SWCQ.
+- */
+-static void flush_hw_cq(struct t3_cq *cq)
+-{
+-      struct t3_cqe *cqe, *swcqe;
+-
+-      PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
+-      cqe = cxio_next_hw_cqe(cq);
+-      while (cqe) {
+-              PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n", 
+-                   __FUNCTION__, cq->rptr, cq->sw_wptr);
+-              swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
+-              *swcqe = *cqe;
+-              swcqe->header |= htobe32(V_CQE_SWCQE(1));
+-              cq->sw_wptr++;
+-              cq->rptr++;
+-              cqe = cxio_next_hw_cqe(cq);
+-      }
+-}
+-
+-static void count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
+-{
+-      struct t3_cqe *cqe;
+-      uint32_t ptr;
+-
+-      *count = 0;
+-      ptr = cq->sw_rptr;
+-      while (!Q_EMPTY(ptr, cq->sw_wptr)) {
+-              cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
+-              if ((SQ_TYPE(*cqe) || 
+-                   (CQE_OPCODE(*cqe) == T3_READ_RESP && CQE_WRID_STAG(*cqe) != 1)) &&
+-                  (CQE_QPID(*cqe) == wq->qpid))
+-                      (*count)++;
+-              ptr++;
+-      }       
+-      PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+-}
+-
+-static void count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
+-{
+-      struct t3_cqe *cqe;
+-      uint32_t ptr;
+-
+-      *count = 0;
+-      ptr = cq->sw_rptr;
+-      while (!Q_EMPTY(ptr, cq->sw_wptr)) {
+-              cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
+-              if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) && 
+-                  (CQE_QPID(*cqe) == wq->qpid))
+-                      (*count)++;
+-              ptr++;
+-      }       
+-      PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
+-}
+-
+-/*
+- * Assumes qhp lock is held.
+- */
+-void iwch_flush_qp(struct iwch_qp *qhp)
+-{
+-      struct iwch_cq *rchp, *schp;
+-      int count;
+-
+-      if (qhp->wq.flushed)
+-              return;
+-
+-      rchp = qhp->rhp->cqid2ptr[to_iwch_cq(qhp->ibv_qp.recv_cq)->cq.cqid];
+-      schp = qhp->rhp->cqid2ptr[to_iwch_cq(qhp->ibv_qp.send_cq)->cq.cqid];
+-      
+-      PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
+-      qhp->wq.flushed = 1;
+-
+-#ifdef notyet
+-      /* take a ref on the qhp since we must release the lock */
+-      atomic_inc(&qhp->refcnt);
+-#endif
+-      pthread_spin_unlock(&qhp->lock);
+-
+-      /* locking heirarchy: cq lock first, then qp lock. */
+-      pthread_spin_lock(&rchp->lock);
+-      pthread_spin_lock(&qhp->lock);
+-      flush_hw_cq(&rchp->cq);
+-      count_rcqes(&rchp->cq, &qhp->wq, &count);
+-      flush_rq(&qhp->wq, &rchp->cq, count);
+-      pthread_spin_unlock(&qhp->lock);
+-      pthread_spin_unlock(&rchp->lock);
+-
+-      /* locking heirarchy: cq lock first, then qp lock. */
+-      pthread_spin_lock(&schp->lock);
+-      pthread_spin_lock(&qhp->lock);
+-      flush_hw_cq(&schp->cq);
+-      count_scqes(&schp->cq, &qhp->wq, &count);
+-      flush_sq(&qhp->wq, &schp->cq, count);
+-      pthread_spin_unlock(&qhp->lock);
+-      pthread_spin_unlock(&schp->lock);
+-
+-#ifdef notyet
+-      /* deref */
+-      if (atomic_dec_and_test(&qhp->refcnt))
+-                wake_up(&qhp->wait);
+-#endif
+-      pthread_spin_lock(&qhp->lock);
+-}
+-
+-void iwch_flush_qps(struct iwch_device *dev)
+-{
+-      int i;
+-
+-      pthread_spin_lock(&dev->lock);
+-      for (i=0; i < T3_MAX_NUM_QP; i++) {
+-              struct iwch_qp *qhp = dev->qpid2ptr[i];
+-              if (qhp) {
+-                      if (!qhp->wq.flushed && t3_wq_in_error(&qhp->wq)) {
+-                              pthread_spin_lock(&qhp->lock);
+-                              iwch_flush_qp(qhp);
+-                              pthread_spin_unlock(&qhp->lock);
+-                      }
+-              }
+-      }
+-      pthread_spin_unlock(&dev->lock);
+-
+-}
+-
+-int t3b_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
+-                 struct ibv_recv_wr **bad_wr)
+-{
+-      int err = 0;
+-      struct iwch_qp *qhp;
+-      uint32_t idx;
+-      union t3_wr *wqe;
+-      uint32_t num_wrs;
+-
+-      qhp = to_iwch_qp(ibqp);
+-      pthread_spin_lock(&qhp->lock);
+-      if (t3_wq_in_error(&qhp->wq)) {
+-              iwch_flush_qp(qhp);
+-              pthread_spin_unlock(&qhp->lock);
+-              return -1;
+-      }
+-      num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr, 
+-                          qhp->wq.rq_size_log2) - 1;
+-      if (!wr) {
+-              pthread_spin_unlock(&qhp->lock);
+-              return -1;
+-      }
+-      while (wr) {
+-              idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
+-              wqe = (union t3_wr *) (qhp->wq.queue + idx);
+-              if (num_wrs)
+-                      err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);
+-              else
+-                      err = -1;
+-              if (err) {
+-                      *bad_wr = wr;
+-                      break;
+-              }
+-              qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] = 
+-                      wr->wr_id;
+-              build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
+-                             Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
+-                             0, sizeof(struct t3_receive_wr) >> 3);
+-              PDBG("%s cookie 0x%" PRIx64 
+-                   " idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
+-                   "wqe %p \n", __FUNCTION__, wr->wr_id, idx, 
+-                   qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
+-              ++(qhp->wq.rq_wptr);
+-              ++(qhp->wq.wptr);
+-              wr = wr->next;
+-              num_wrs--;
+-      }
+-      pthread_spin_unlock(&qhp->lock);
+-      if (t3_wq_db_enabled(&qhp->wq))
+-              RING_DOORBELL(qhp->wq.doorbell, qhp->wq.qpid);
+-      return err;
+-}
+-
+-int t3a_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
+-                 struct ibv_recv_wr **bad_wr)
+-{
+-      int ret;
+-      struct iwch_qp *qhp = to_iwch_qp(ibqp);
+-
+-      pthread_spin_lock(&qhp->lock);
+-      ret = ibv_cmd_post_recv(ibqp, wr, bad_wr);
+-      pthread_spin_unlock(&qhp->lock);
+-      return ret;
+-}
+diff --git a/providers/cxgb3/verbs.c b/providers/cxgb3/verbs.c
+deleted file mode 100644
+index 39a44192e..000000000
+--- a/providers/cxgb3/verbs.c
++++ /dev/null
+@@ -1,476 +0,0 @@
+-/*
+- * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-#include <config.h>
+-
+-#include <stdlib.h>
+-#include <stdio.h>
+-#include <string.h>
+-#include <errno.h>
+-#include <pthread.h>
+-#include <sys/mman.h>
+-#include <inttypes.h>
+-
+-#include "iwch.h"
+-#include "iwch-abi.h"
+-
+-int iwch_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
+-{
+-      struct ibv_query_device cmd;
+-      uint64_t raw_fw_ver;
+-      unsigned major, minor, sub_minor;
+-      int ret;
+-
+-      ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd, 
+-                                 sizeof cmd);
+-      if (ret)
+-              return ret;
+-
+-      major = (raw_fw_ver >> 32) & 0xffff;
+-      minor = (raw_fw_ver >> 16) & 0xffff;
+-      sub_minor = raw_fw_ver & 0xffff;
+-
+-      snprintf(attr->fw_ver, sizeof attr->fw_ver,
+-               "%d.%d.%d", major, minor, sub_minor);
+-
+-      return 0;
+-}
+-
+-int iwch_query_port(struct ibv_context *context, uint8_t port,
+-                  struct ibv_port_attr *attr)
+-{
+-      struct ibv_query_port cmd;
+-
+-      return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
+-}
+-
+-struct ibv_pd *iwch_alloc_pd(struct ibv_context *context)
+-{
+-      struct ibv_alloc_pd cmd;
+-      struct uiwch_alloc_pd_resp resp;
+-      struct iwch_pd *pd;
+-
+-      pd = malloc(sizeof *pd);
+-      if (!pd)
+-              return NULL;
+-
+-      if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
+-                           &resp.ibv_resp, sizeof resp)) {
+-              free(pd);
+-              return NULL;
+-      }
+-
+-      return &pd->ibv_pd;
+-}
+-
+-int iwch_free_pd(struct ibv_pd *pd)
+-{
+-      int ret;
+-
+-      ret = ibv_cmd_dealloc_pd(pd);
+-      if (ret)
+-              return ret;
+-
+-      free(pd);
+-      return 0;
+-}
+-
+-struct ibv_mr *iwch_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
+-                         uint64_t hca_va, int access)
+-{
+-      struct iwch_mr *mhp;
+-      struct ibv_reg_mr cmd;
+-      struct uiwch_reg_mr_resp resp;
+-      struct iwch_device *dev = to_iwch_dev(pd->context->device);
+-
+-      PDBG("%s addr %p length %ld hca_va %p\n", __func__, addr, length,
+-           hca_va);
+-
+-      mhp = malloc(sizeof *mhp);
+-      if (!mhp)
+-              return NULL;
+-
+-      if (ibv_cmd_reg_mr(pd, addr, length, hca_va,
+-                         access, &mhp->vmr, &cmd, sizeof(cmd),
+-                         &resp.ibv_resp, sizeof resp)) {
+-              free(mhp);
+-              return NULL;
+-      }
+-
+-      mhp->va_fbo = hca_va;
+-      mhp->page_size = iwch_page_shift - 12;
+-      mhp->pbl_addr = resp.pbl_addr;
+-      mhp->len = length;
+-
+-      PDBG("%s stag 0x%x va_fbo 0x%" PRIx64
+-             " page_size %d pbl_addr 0x%x len %d\n",
+-           __func__, mhp->vmr.ibv_mr.rkey, mhp->va_fbo,
+-           mhp->page_size, mhp->pbl_addr, mhp->len);
+-
+-      pthread_spin_lock(&dev->lock);
+-      dev->mmid2ptr[t3_mmid(mhp->vmr.ibv_mr.lkey)] = mhp;
+-      pthread_spin_unlock(&dev->lock);
+-      
+-      return &mhp->vmr.ibv_mr;
+-}
+-
+-int iwch_dereg_mr(struct verbs_mr *vmr)
+-{
+-      int ret;
+-      struct iwch_device *dev = to_iwch_dev(vmr->ibv_mr.pd->context->device);
+-
+-      ret = ibv_cmd_dereg_mr(vmr);
+-      if (ret)
+-              return ret;
+-
+-      pthread_spin_lock(&dev->lock);
+-      dev->mmid2ptr[t3_mmid(vmr->ibv_mr.lkey)] = NULL;
+-      pthread_spin_unlock(&dev->lock);
+-
+-      free(to_iwch_mr(vmr));
+-      
+-      return 0;
+-}
+-
+-struct ibv_cq *iwch_create_cq(struct ibv_context *context, int cqe,
+-                            struct ibv_comp_channel *channel, int comp_vector)
+-{
+-      struct uiwch_create_cq cmd;
+-      struct uiwch_create_cq_resp resp;
+-      struct iwch_cq *chp;
+-      struct iwch_device *dev = to_iwch_dev(context->device);
+-      int ret;
+-
+-      chp = calloc(1, sizeof *chp);
+-      if (!chp) {
+-              return NULL;
+-      }
+-
+-      cmd.user_rptr_addr = (uint64_t)(unsigned long)&chp->cq.rptr;
+-      ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
+-                              &chp->ibv_cq, &cmd.ibv_cmd, sizeof cmd,
+-                              &resp.ibv_resp, sizeof resp);
+-      if (ret)
+-              goto err1;
+-
+-      pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE);
+-      chp->rhp = dev;
+-      chp->cq.cqid = resp.cqid;
+-      chp->cq.size_log2 = resp.size_log2;
+-      if (dev->abi_version == 0)
+-              chp->cq.memsize = PAGE_ALIGN((1UL << chp->cq.size_log2) *
+-                                           sizeof(struct t3_cqe));
+-      else
+-              chp->cq.memsize = resp.memsize;
+-      chp->cq.queue = mmap(NULL, t3_cq_memsize(&chp->cq),
+-                           PROT_READ|PROT_WRITE, MAP_SHARED, context->cmd_fd,
+-                           resp.key);
+-      if (chp->cq.queue == MAP_FAILED)
+-              goto err2;
+-
+-      chp->cq.sw_queue = calloc(t3_cq_depth(&chp->cq), sizeof(struct t3_cqe));
+-      if (!chp->cq.sw_queue)
+-              goto err3;
+-
+-      PDBG("%s cqid 0x%x physaddr %" PRIx64 " va %p memsize %d\n", 
+-             __FUNCTION__, chp->cq.cqid, resp.physaddr, chp->cq.queue, 
+-             t3_cq_memsize(&chp->cq));
+-      
+-      pthread_spin_lock(&dev->lock);
+-      dev->cqid2ptr[chp->cq.cqid] = chp;
+-      pthread_spin_unlock(&dev->lock);
+-
+-      return &chp->ibv_cq;
+-err3:
+-      munmap(chp->cq.queue, t3_cq_memsize(&chp->cq));
+-err2:
+-      (void)ibv_cmd_destroy_cq(&chp->ibv_cq);
+-err1:
+-      free(chp);
+-      return NULL;
+-}
+-
+-int iwch_resize_cq(struct ibv_cq *ibcq, int cqe)
+-{
+-#ifdef notyet
+-      int ret;
+-      struct ibv_resize_cq cmd;
+-      struct iwch_cq *chp = to_iwch_cq(ibcq);
+-
+-      pthread_spin_lock(&chp->lock);
+-      ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd);
+-      /* remap and realloc swcq here */
+-      pthread_spin_unlock(&chp->lock);
+-      return ret;
+-#else
+-      return -ENOSYS;
+-#endif
+-}
+-
+-int iwch_destroy_cq(struct ibv_cq *ibcq)
+-{
+-      int ret;
+-      struct iwch_cq *chp = to_iwch_cq(ibcq);
+-      void *cqva = chp->cq.queue;
+-      unsigned size = t3_cq_memsize(&chp->cq);
+-      struct iwch_device *dev = to_iwch_dev(ibcq->context->device);
+-
+-      munmap(cqva, size);
+-      ret = ibv_cmd_destroy_cq(ibcq);
+-      if (ret) {
+-              return ret;
+-      }
+-
+-      pthread_spin_lock(&dev->lock);
+-      dev->cqid2ptr[chp->cq.cqid] = NULL;
+-      pthread_spin_unlock(&dev->lock);
+-
+-      free(chp->cq.sw_queue);
+-      free(chp);
+-      return 0;
+-}
+-
+-struct ibv_srq *iwch_create_srq(struct ibv_pd *pd,
+-                              struct ibv_srq_init_attr *attr)
+-{
+-      return NULL;
+-}
+-
+-int iwch_modify_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr, 
+-                  int attr_mask)
+-{
+-      return -ENOSYS;
+-}
+-
+-int iwch_destroy_srq(struct ibv_srq *srq)
+-{
+-      return -ENOSYS;
+-}
+-
+-int iwch_post_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr, 
+-                     struct ibv_recv_wr **bad_wr)
+-{
+-      return -ENOSYS;
+-}
+-
+-struct ibv_qp *iwch_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
+-{
+-      struct uiwch_create_qp cmd;
+-      struct uiwch_create_qp_resp resp;
+-      struct iwch_qp *qhp;
+-      struct iwch_device *dev = to_iwch_dev(pd->context->device);
+-      int ret;
+-      void *dbva;
+-
+-      PDBG("%s enter qp\n", __FUNCTION__);
+-      qhp = calloc(1, sizeof *qhp);
+-      if (!qhp)
+-              goto err1;
+-
+-      ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd.ibv_cmd, 
+-                              sizeof cmd, &resp.ibv_resp, sizeof resp);
+-      if (ret)
+-              goto err2;
+-
+-      PDBG("%s qpid 0x%x physaddr %" PRIx64 " doorbell %" PRIx64 
+-             " size %d sq_size %d rq_size %d\n",
+-              __FUNCTION__, resp.qpid, resp.physaddr, resp.doorbell,
+-              1 << resp.size_log2, 1 << resp.sq_size_log2, 
+-              1 << resp.rq_size_log2);
+-
+-      qhp->rhp = dev;
+-      qhp->wq.qpid = resp.qpid;
+-      qhp->wq.size_log2 = resp.size_log2;
+-      qhp->wq.sq_size_log2 = resp.sq_size_log2;
+-      qhp->wq.rq_size_log2 = resp.rq_size_log2;
+-      pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
+-      dbva = mmap(NULL, iwch_page_size, PROT_WRITE, MAP_SHARED, 
+-                  pd->context->cmd_fd, resp.db_key & ~(iwch_page_mask));
+-      if (dbva == MAP_FAILED)
+-              goto err3;
+-
+-      qhp->wq.doorbell = dbva + (resp.db_key & (iwch_page_mask));
+-      qhp->wq.queue = mmap(NULL, t3_wq_memsize(&qhp->wq),
+-                          PROT_READ|PROT_WRITE, MAP_SHARED, 
+-                          pd->context->cmd_fd, resp.key);
+-      if (qhp->wq.queue == MAP_FAILED)
+-              goto err4;
+-
+-      qhp->wq.rq = calloc(t3_rq_depth(&qhp->wq), sizeof (uint64_t));
+-      if (!qhp->wq.rq) 
+-              goto err5;
+-
+-      qhp->wq.sq = calloc(t3_sq_depth(&qhp->wq), sizeof (struct t3_swsq));
+-      if (!qhp->wq.sq) 
+-              goto err6;
+-
+-      PDBG("%s dbva %p wqva %p wq memsize %d\n", __FUNCTION__, 
+-           qhp->wq.doorbell, qhp->wq.queue, t3_wq_memsize(&qhp->wq));
+-
+-      qhp->sq_sig_all = attr->sq_sig_all;
+-
+-      pthread_spin_lock(&dev->lock);
+-      dev->qpid2ptr[qhp->wq.qpid] = qhp;
+-      pthread_spin_unlock(&dev->lock);
+-
+-      return &qhp->ibv_qp;
+-err6:
+-      free(qhp->wq.rq);
+-err5:
+-      munmap((void *)qhp->wq.queue, t3_wq_memsize(&qhp->wq));
+-err4:
+-      munmap((void *)dbva, iwch_page_size);
+-err3:
+-      (void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
+-err2:
+-      free(qhp);
+-err1:
+-      return NULL;
+-}
+-
+-static void reset_qp(struct iwch_qp *qhp)
+-{
+-      PDBG("%s enter qp %p\n", __FUNCTION__, qhp);
+-      qhp->wq.wptr = 0;
+-      qhp->wq.rq_wptr = qhp->wq.rq_rptr = 0;
+-      qhp->wq.sq_wptr = qhp->wq.sq_rptr = 0;
+-      qhp->wq.error = 0;
+-      qhp->wq.oldest_read = NULL;
+-      memset(qhp->wq.queue, 0, t3_wq_memsize(&qhp->wq));
+-}
+-
+-int iwch_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
+-                 int attr_mask)
+-{
+-      struct ibv_modify_qp cmd = {};
+-      struct iwch_qp *qhp = to_iwch_qp(ibqp);
+-      int ret;
+-
+-      PDBG("%s enter qp %p new state %d\n", __FUNCTION__, ibqp, attr_mask & IBV_QP_STATE ? attr->qp_state : -1);
+-      pthread_spin_lock(&qhp->lock);
+-      if (t3b_device(qhp->rhp) && t3_wq_in_error(&qhp->wq))
+-              iwch_flush_qp(qhp);
+-      ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd);
+-      if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET)
+-              reset_qp(qhp);
+-      pthread_spin_unlock(&qhp->lock);
+-      return ret;
+-}
+-
+-int iwch_destroy_qp(struct ibv_qp *ibqp)
+-{
+-      int ret;
+-      struct iwch_qp *qhp = to_iwch_qp(ibqp);
+-      struct iwch_device *dev = to_iwch_dev(ibqp->context->device);
+-      void *dbva, *wqva;
+-      unsigned wqsize;
+-
+-      PDBG("%s enter qp %p\n", __FUNCTION__, ibqp);
+-      if (t3b_device(dev)) {
+-              pthread_spin_lock(&qhp->lock);
+-              iwch_flush_qp(qhp);
+-              pthread_spin_unlock(&qhp->lock);
+-      }
+-
+-      dbva = (void *)((unsigned long)qhp->wq.doorbell & ~(iwch_page_mask));
+-      wqva = qhp->wq.queue;
+-      wqsize = t3_wq_memsize(&qhp->wq);
+-
+-      munmap(dbva, iwch_page_size);
+-      munmap(wqva, wqsize);
+-      ret = ibv_cmd_destroy_qp(ibqp);
+-      if (ret) {
+-              return ret;
+-      }
+-
+-      pthread_spin_lock(&dev->lock);
+-      dev->qpid2ptr[qhp->wq.qpid] = NULL;
+-      pthread_spin_unlock(&dev->lock);
+-
+-      free(qhp->wq.rq);
+-      free(qhp->wq.sq);
+-      free(qhp);
+-      return 0;
+-}
+-
+-int iwch_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+-                int attr_mask, struct ibv_qp_init_attr *init_attr)
+-{
+-      return -ENOSYS;
+-}
+-
+-struct ibv_ah *iwch_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
+-{
+-      return NULL;
+-}
+-
+-int iwch_destroy_ah(struct ibv_ah *ah)
+-{
+-      return -ENOSYS;
+-}
+-
+-int iwch_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
+-{
+-      return -ENOSYS;
+-}
+-
+-int iwch_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
+-{
+-      return -ENOSYS;
+-}
+-
+-void t3b_async_event(struct ibv_context *context,
+-                   struct ibv_async_event *event)
+-{
+-      PDBG("%s type %d obj %p\n", __FUNCTION__, event->event_type, 
+-           event->element.cq);
+-
+-      switch (event->event_type) {
+-      case IBV_EVENT_CQ_ERR:
+-              break;
+-      case IBV_EVENT_QP_FATAL:
+-      case IBV_EVENT_QP_REQ_ERR:
+-      case IBV_EVENT_QP_ACCESS_ERR:
+-      case IBV_EVENT_PATH_MIG_ERR: {
+-              struct iwch_qp *qhp = to_iwch_qp(event->element.qp);
+-              pthread_spin_lock(&qhp->lock);
+-              iwch_flush_qp(qhp);
+-              pthread_spin_unlock(&qhp->lock);
+-              break;
+-      }
+-      case IBV_EVENT_SQ_DRAINED:
+-      case IBV_EVENT_PATH_MIG:
+-      case IBV_EVENT_COMM_EST:
+-      case IBV_EVENT_QP_LAST_WQE_REACHED:
+-      default:
+-              break;
+-      }
+-}
+diff --git a/redhat/rdma.kernel-init b/redhat/rdma.kernel-init
+index 6f50e72fc..c7444a1c8 100644
+--- a/redhat/rdma.kernel-init
++++ b/redhat/rdma.kernel-init
+@@ -125,10 +125,6 @@ load_hardware_modules()
+       load_modules mlx5_ib
+       RC+=$?
+     fi
+-    if is_loaded cxgb3 -a ! is_loaded iw_cxgb3; then
+-      load_modules iw_cxgb3
+-      RC+=$?
+-    fi
+     if is_loaded cxgb4 -a ! is_loaded iw_cxgb4; then
+       load_modules iw_cxgb4
+       RC+=$?
diff --git a/rdma-core-kernel-abi.patch b/rdma-core-kernel-abi.patch
new file mode 100644 (file)
index 0000000..5f8917a
--- /dev/null
@@ -0,0 +1,267 @@
+From c21a3cf5d9e4cef0904b4d47f1cb43be9efdbf90 Mon Sep 17 00:00:00 2001
+From: Michal Kalderon <michal.kalderon@marvell.com>
+Date: Thu, 24 Oct 2019 14:23:03 +0300
+Subject: [PATCH] Update kernel headers
+
+To commit b4bc76609722 ("RDMA/qedr: Add iWARP doorbell recovery support")
+
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+---
+ kernel-headers/CMakeLists.txt              |   5 +-
+ kernel-headers/rdma/cxgb3-abi.h            |  82 ---------------
+ kernel-headers/rdma/ib_user_ioctl_verbs.h  |  22 ++++
+ kernel-headers/rdma/nes-abi.h              | 115 ---------------------
+ kernel-headers/rdma/qedr-abi.h             |  25 +++++
+ kernel-headers/rdma/rdma_user_ioctl_cmds.h |  22 ----
+ kernel-headers/rdma/rvt-abi.h              |  66 ++++++++++++
+ kernel-headers/rdma/vmw_pvrdma-abi.h       |   5 +
+ 8 files changed, 119 insertions(+), 223 deletions(-)
+ delete mode 100644 kernel-headers/rdma/cxgb3-abi.h
+ delete mode 100644 kernel-headers/rdma/nes-abi.h
+ create mode 100644 kernel-headers/rdma/rvt-abi.h
+
+diff --git a/kernel-headers/CMakeLists.txt b/kernel-headers/CMakeLists.txt
+index 50bc77e6a..2d0766dd1 100644
+--- a/kernel-headers/CMakeLists.txt
++++ b/kernel-headers/CMakeLists.txt
+@@ -1,6 +1,5 @@
+ publish_internal_headers(rdma
+   rdma/bnxt_re-abi.h
+-  rdma/cxgb3-abi.h
+   rdma/cxgb4-abi.h
+   rdma/efa-abi.h
+   rdma/hns-abi.h
+@@ -15,7 +14,6 @@ publish_internal_headers(rdma
+   rdma/mlx5_user_ioctl_cmds.h
+   rdma/mlx5_user_ioctl_verbs.h
+   rdma/mthca-abi.h
+-  rdma/nes-abi.h
+   rdma/ocrdma-abi.h
+   rdma/qedr-abi.h
+   rdma/rdma_netlink.h
+@@ -59,7 +58,6 @@ endfunction()
+ # Transform the kernel ABIs used by the providers
+ rdma_kernel_provider_abi(
+   rdma/bnxt_re-abi.h
+-  rdma/cxgb3-abi.h
+   rdma/cxgb4-abi.h
+   rdma/efa-abi.h
+   rdma/hns-abi.h
+@@ -68,7 +66,6 @@ rdma_kernel_provider_abi(
+   rdma/mlx4-abi.h
+   rdma/mlx5-abi.h
+   rdma/mthca-abi.h
+-  rdma/nes-abi.h
+   rdma/ocrdma-abi.h
+   rdma/qedr-abi.h
+   rdma/rdma_user_rxe.h
+diff --git a/kernel-headers/rdma/cxgb3-abi.h b/kernel-headers/rdma/cxgb3-abi.h
+deleted file mode 100644
+index 85aed672f..000000000
+--- a/kernel-headers/rdma/cxgb3-abi.h
++++ /dev/null
+@@ -1,82 +0,0 @@
+-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+-/*
+- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-#ifndef CXGB3_ABI_USER_H
+-#define CXGB3_ABI_USER_H
+-
+-#include <linux/types.h>
+-
+-#define IWCH_UVERBS_ABI_VERSION       1
+-
+-/*
+- * Make sure that all structs defined in this file remain laid out so
+- * that they pack the same way on 32-bit and 64-bit architectures (to
+- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+- * In particular do not use pointer types -- pass pointers in __aligned_u64
+- * instead.
+- */
+-struct iwch_create_cq_req {
+-      __aligned_u64 user_rptr_addr;
+-};
+-
+-struct iwch_create_cq_resp_v0 {
+-      __aligned_u64 key;
+-      __u32 cqid;
+-      __u32 size_log2;
+-};
+-
+-struct iwch_create_cq_resp {
+-      __aligned_u64 key;
+-      __u32 cqid;
+-      __u32 size_log2;
+-      __u32 memsize;
+-      __u32 reserved;
+-};
+-
+-struct iwch_create_qp_resp {
+-      __aligned_u64 key;
+-      __aligned_u64 db_key;
+-      __u32 qpid;
+-      __u32 size_log2;
+-      __u32 sq_size_log2;
+-      __u32 rq_size_log2;
+-};
+-
+-struct iwch_reg_user_mr_resp {
+-      __u32 pbl_addr;
+-};
+-
+-struct iwch_alloc_pd_resp {
+-      __u32 pdid;
+-};
+-
+-#endif /* CXGB3_ABI_USER_H */
+diff --git a/kernel-headers/rdma/nes-abi.h b/kernel-headers/rdma/nes-abi.h
+deleted file mode 100644
+index f80495baa..000000000
+--- a/kernel-headers/rdma/nes-abi.h
++++ /dev/null
+@@ -1,115 +0,0 @@
+-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+-/*
+- * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
+- * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- *
+- */
+-
+-#ifndef NES_ABI_USER_H
+-#define NES_ABI_USER_H
+-
+-#include <linux/types.h>
+-
+-#define NES_ABI_USERSPACE_VER 2
+-#define NES_ABI_KERNEL_VER    2
+-
+-/*
+- * Make sure that all structs defined in this file remain laid out so
+- * that they pack the same way on 32-bit and 64-bit architectures (to
+- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+- * In particular do not use pointer types -- pass pointers in __u64
+- * instead.
+- */
+-
+-struct nes_alloc_ucontext_req {
+-      __u32 reserved32;
+-      __u8  userspace_ver;
+-      __u8  reserved8[3];
+-};
+-
+-struct nes_alloc_ucontext_resp {
+-      __u32 max_pds; /* maximum pds allowed for this user process */
+-      __u32 max_qps; /* maximum qps allowed for this user process */
+-      __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+-      __u8  virtwq;  /* flag to indicate if virtual WQ are to be used or not */
+-      __u8  kernel_ver;
+-      __u8  reserved[2];
+-};
+-
+-struct nes_alloc_pd_resp {
+-      __u32 pd_id;
+-      __u32 mmap_db_index;
+-};
+-
+-struct nes_create_cq_req {
+-      __aligned_u64 user_cq_buffer;
+-      __u32 mcrqf;
+-      __u8 reserved[4];
+-};
+-
+-struct nes_create_qp_req {
+-      __aligned_u64 user_wqe_buffers;
+-      __aligned_u64 user_qp_buffer;
+-};
+-
+-enum iwnes_memreg_type {
+-      IWNES_MEMREG_TYPE_MEM = 0x0000,
+-      IWNES_MEMREG_TYPE_QP = 0x0001,
+-      IWNES_MEMREG_TYPE_CQ = 0x0002,
+-      IWNES_MEMREG_TYPE_MW = 0x0003,
+-      IWNES_MEMREG_TYPE_FMR = 0x0004,
+-      IWNES_MEMREG_TYPE_FMEM = 0x0005,
+-};
+-
+-struct nes_mem_reg_req {
+-      __u32 reg_type; /* indicates if id is memory, QP or CQ */
+-      __u32 reserved;
+-};
+-
+-struct nes_create_cq_resp {
+-      __u32 cq_id;
+-      __u32 cq_size;
+-      __u32 mmap_db_index;
+-      __u32 reserved;
+-};
+-
+-struct nes_create_qp_resp {
+-      __u32 qp_id;
+-      __u32 actual_sq_size;
+-      __u32 actual_rq_size;
+-      __u32 mmap_sq_db_index;
+-      __u32 mmap_rq_db_index;
+-      __u32 nes_drv_opt;
+-};
+-
+-#endif        /* NES_ABI_USER_H */
diff --git a/rdma-core-nes.patch b/rdma-core-nes.patch
new file mode 100644 (file)
index 0000000..961c662
--- /dev/null
@@ -0,0 +1,2318 @@
+From 4daf5c91c1296683924cb9668c3d879da072756b Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@mellanox.com>
+Date: Thu, 24 Oct 2019 12:50:22 -0300
+Subject: [PATCH] libnes: Remove libnes from rdma-core
+
+Remove the userspace provider for nes after removing it from kernel.
+
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+---
+ CMakeLists.txt                     |    1 -
+ MAINTAINERS                        |    5 -
+ README.md                          |    1 -
+ debian/control                     |    4 -
+ debian/copyright                   |    5 -
+ kernel-boot/rdma-description.rules |    1 -
+ kernel-boot/rdma-hw-modules.rules  |    1 -
+ libibverbs/verbs.h                 |    1 -
+ providers/nes/CMakeLists.txt       |    4 -
+ providers/nes/nes-abi.h            |   52 -
+ providers/nes/nes_umain.c          |  220 ----
+ providers/nes/nes_umain.h          |  383 -------
+ providers/nes/nes_uverbs.c         | 1535 ----------------------------
+ redhat/rdma-core.spec              |    3 -
+ suse/rdma-core.spec                |    2 -
+ 15 files changed, 2218 deletions(-)
+ delete mode 100644 providers/nes/CMakeLists.txt
+ delete mode 100644 providers/nes/nes-abi.h
+ delete mode 100644 providers/nes/nes_umain.c
+ delete mode 100644 providers/nes/nes_umain.h
+ delete mode 100644 providers/nes/nes_uverbs.c
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 85485ba00..85eb25936 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -625,7 +625,6 @@ add_subdirectory(providers/mlx4/man)
+ add_subdirectory(providers/mlx5)
+ add_subdirectory(providers/mlx5/man)
+ add_subdirectory(providers/mthca)
+-add_subdirectory(providers/nes) # NO SPARSE
+ add_subdirectory(providers/ocrdma)
+ add_subdirectory(providers/qedr)
+ add_subdirectory(providers/vmw_pvrdma)
+diff --git a/debian/copyright b/debian/copyright
+index db4951993..6f86d1c86 100644
+--- a/debian/copyright
++++ b/debian/copyright
+@@ -197,11 +197,6 @@ Copyright: 2004-2005, Topspin Communications.
+            2005, Mellanox Technologies Ltd.
+ License: BSD-MIT or GPL-2
+-Files: providers/nes/*
+-Copyright: 2006-2010, Intel Corporation.
+-           2006, Open Grid Computing, Inc.
+-License: BSD-MIT or GPL-2
+-
+ Files: providers/ocrdma/*
+ Copyright: 2008-2013, Emulex.
+ License: BSD-2-clause or GPL-2
+diff --git a/kernel-boot/rdma-description.rules b/kernel-boot/rdma-description.rules
+index 4ea59ba19..48a7cede9 100644
+--- a/kernel-boot/rdma-description.rules
++++ b/kernel-boot/rdma-description.rules
+@@ -24,7 +24,6 @@ DRIVERS=="hfi1", ENV{ID_RDMA_OPA}="1"
+ # Hardware that supports iWarp
+ DRIVERS=="cxgb4", ENV{ID_RDMA_IWARP}="1"
+ DRIVERS=="i40e", ENV{ID_RDMA_IWARP}="1"
+-DRIVERS=="nes", ENV{ID_RDMA_IWARP}="1"
+ # Hardware that supports RoCE
+ DRIVERS=="be2net", ENV{ID_RDMA_ROCE}="1"
+diff --git a/kernel-boot/rdma-hw-modules.rules b/kernel-boot/rdma-hw-modules.rules
+index da4bbe363..bee416dbe 100644
+--- a/kernel-boot/rdma-hw-modules.rules
++++ b/kernel-boot/rdma-hw-modules.rules
+@@ -33,6 +33,5 @@ ENV{ID_NET_DRIVER}=="enic", RUN{builtin}+="kmod load usnic_verbs"
+ # ipathverbs
+ # mthca
+ # vmw_pvrdma
+-# nes
+ LABEL="rdma_hw_modules_end"
+diff --git a/libibverbs/verbs.h b/libibverbs/verbs.h
+index 12a33a99a..13e7c63e7 100644
+--- a/libibverbs/verbs.h
++++ b/libibverbs/verbs.h
+@@ -2153,7 +2153,6 @@ extern const struct verbs_device_ops verbs_provider_ipathverbs;
+ extern const struct verbs_device_ops verbs_provider_mlx4;
+ extern const struct verbs_device_ops verbs_provider_mlx5;
+ extern const struct verbs_device_ops verbs_provider_mthca;
+-extern const struct verbs_device_ops verbs_provider_nes;
+ extern const struct verbs_device_ops verbs_provider_ocrdma;
+ extern const struct verbs_device_ops verbs_provider_qedr;
+ extern const struct verbs_device_ops verbs_provider_rxe;
+diff --git a/providers/nes/CMakeLists.txt b/providers/nes/CMakeLists.txt
+deleted file mode 100644
+index 0c7fa8fad..000000000
+--- a/providers/nes/CMakeLists.txt
++++ /dev/null
+@@ -1,4 +0,0 @@
+-rdma_provider(nes
+-  nes_umain.c
+-  nes_uverbs.c
+-)
+diff --git a/providers/nes/nes-abi.h b/providers/nes/nes-abi.h
+deleted file mode 100644
+index 0a531230b..000000000
+--- a/providers/nes/nes-abi.h
++++ /dev/null
+@@ -1,52 +0,0 @@
+-/*
+- * Copyright (c) 2006 - 2010 Intel Corporation.  All rights reserved.
+- * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * gpl-2.0.txt in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-
+-#ifndef nes_ABI_H
+-#define nes_ABI_H
+-
+-#include <infiniband/kern-abi.h>
+-#include <rdma/nes-abi.h>
+-#include <kernel-abi/nes-abi.h>
+-
+-DECLARE_DRV_CMD(nes_ualloc_pd, IB_USER_VERBS_CMD_ALLOC_PD,
+-              empty, nes_alloc_pd_resp);
+-DECLARE_DRV_CMD(nes_ucreate_cq, IB_USER_VERBS_CMD_CREATE_CQ,
+-              nes_create_cq_req, nes_create_cq_resp);
+-DECLARE_DRV_CMD(nes_ucreate_qp, IB_USER_VERBS_CMD_CREATE_QP,
+-              nes_create_qp_req, nes_create_qp_resp);
+-DECLARE_DRV_CMD(nes_get_context, IB_USER_VERBS_CMD_GET_CONTEXT,
+-              nes_alloc_ucontext_req, nes_alloc_ucontext_resp);
+-DECLARE_DRV_CMD(nes_ureg_mr, IB_USER_VERBS_CMD_REG_MR,
+-              nes_mem_reg_req, empty);
+-
+-#endif                        /* nes_ABI_H */
+diff --git a/providers/nes/nes_umain.c b/providers/nes/nes_umain.c
+deleted file mode 100644
+index 07aa7ddd1..000000000
+--- a/providers/nes/nes_umain.c
++++ /dev/null
+@@ -1,220 +0,0 @@
+-/*
+- * Copyright (c) 2006 - 2010 Intel Corporation.  All rights reserved.
+- * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * gpl-2.0.txt in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-
+-#include <config.h>
+-
+-#include <stdio.h>
+-#include <stdlib.h>
+-#include <string.h>
+-#include <unistd.h>
+-#include <errno.h>
+-#include <sys/mman.h>
+-#include <pthread.h>
+-
+-#include "nes_umain.h"
+-#include "nes-abi.h"
+-
+-unsigned int nes_debug_level = 0;
+-long int page_size;
+-
+-#include <sys/types.h>
+-#include <sys/stat.h>
+-#include <fcntl.h>
+-
+-#ifndef PCI_VENDOR_ID_NETEFFECT
+-#define PCI_VENDOR_ID_NETEFFECT               0x1678
+-#endif
+-
+-#define HCA(v, d) VERBS_PCI_MATCH(PCI_VENDOR_ID_##v, d, NULL)
+-static const struct verbs_match_ent hca_table[] = {
+-      VERBS_DRIVER_ID(RDMA_DRIVER_NES),
+-      HCA(NETEFFECT, 0x0100),
+-      HCA(NETEFFECT, 0x0110),
+-      {},
+-};
+-
+-static const struct verbs_context_ops nes_uctx_ops = {
+-      .query_device = nes_uquery_device,
+-      .query_port = nes_uquery_port,
+-      .alloc_pd = nes_ualloc_pd,
+-      .dealloc_pd = nes_ufree_pd,
+-      .reg_mr = nes_ureg_mr,
+-      .dereg_mr = nes_udereg_mr,
+-      .create_cq = nes_ucreate_cq,
+-      .poll_cq = nes_upoll_cq,
+-      .req_notify_cq = nes_uarm_cq,
+-      .cq_event = nes_cq_event,
+-      .resize_cq = nes_uresize_cq,
+-      .destroy_cq = nes_udestroy_cq,
+-      .create_qp = nes_ucreate_qp,
+-      .query_qp = nes_uquery_qp,
+-      .modify_qp = nes_umodify_qp,
+-      .destroy_qp = nes_udestroy_qp,
+-      .post_send = nes_upost_send,
+-      .post_recv = nes_upost_recv,
+-      .create_ah = nes_ucreate_ah,
+-      .destroy_ah = nes_udestroy_ah,
+-      .attach_mcast = nes_uattach_mcast,
+-      .detach_mcast = nes_udetach_mcast,
+-      .async_event = nes_async_event
+-};
+-
+-static const struct verbs_context_ops nes_uctx_no_db_ops = {
+-      .poll_cq = nes_upoll_cq_no_db_read,
+-};
+-
+-
+-/**
+- * nes_ualloc_context
+- */
+-static struct verbs_context *nes_ualloc_context(struct ibv_device *ibdev,
+-                                              int cmd_fd,
+-                                              void *private_data)
+-{
+-      struct ibv_pd *ibv_pd;
+-      struct nes_uvcontext *nesvctx;
+-      struct nes_get_context cmd;
+-      struct nes_get_context_resp resp;
+-      char value[16];
+-      uint32_t nes_drv_opt = 0;
+-
+-      page_size = sysconf(_SC_PAGESIZE);
+-
+-      nesvctx = verbs_init_and_alloc_context(ibdev, cmd_fd, nesvctx, ibv_ctx,
+-                                             RDMA_DRIVER_NES);
+-      if (!nesvctx)
+-              return NULL;
+-
+-      cmd.userspace_ver = NES_ABI_USERSPACE_VER;
+-
+-      if (ibv_cmd_get_context(&nesvctx->ibv_ctx, (struct ibv_get_context *)&cmd, sizeof cmd,
+-                              &resp.ibv_resp, sizeof(resp)))
+-              goto err_free;
+-
+-      if (resp.kernel_ver != NES_ABI_KERNEL_VER) {
+-              fprintf(stderr, PFX "%s: Invalid kernel driver version detected. Detected %d, should be %d\n",
+-                      __FUNCTION__, resp.kernel_ver, NES_ABI_KERNEL_VER);
+-              goto err_free;
+-      }
+-
+-      if (ibv_read_sysfs_file("/sys/module/iw_nes", "parameters/nes_drv_opt",
+-                      value, sizeof(value)) > 0) {
+-              sscanf(value, "%d", &nes_drv_opt);
+-      } else if (ibv_read_sysfs_file("/sys/module/iw_nes", "nes_drv_opt",
+-                              value, sizeof(value)) > 0) {
+-                      sscanf(value, "%d", &nes_drv_opt);
+-      }
+-
+-      verbs_set_ops(&nesvctx->ibv_ctx, &nes_uctx_ops);
+-      if (nes_drv_opt & NES_DRV_OPT_NO_DB_READ)
+-              verbs_set_ops(&nesvctx->ibv_ctx, &nes_uctx_no_db_ops);
+-
+-      nesvctx->max_pds = resp.max_pds;
+-      nesvctx->max_qps = resp.max_qps;
+-      nesvctx->wq_size = resp.wq_size;
+-      nesvctx->virtwq = resp.virtwq;
+-      nesvctx->mcrqf = 0;
+-
+-      /* Get a doorbell region for the CQs */
+-      ibv_pd = nes_ualloc_pd(&nesvctx->ibv_ctx.context);
+-      if (!ibv_pd)
+-              goto err_free;
+-      ibv_pd->context = &nesvctx->ibv_ctx.context;
+-      nesvctx->nesupd = to_nes_upd(ibv_pd);
+-
+-      return &nesvctx->ibv_ctx;
+-
+-err_free:
+-      fprintf(stderr, PFX "%s: Failed to allocate context for device.\n", __FUNCTION__);
+-      verbs_uninit_context(&nesvctx->ibv_ctx);
+-      free(nesvctx);
+-
+-      return NULL;
+-}
+-
+-
+-/**
+- * nes_ufree_context
+- */
+-static void nes_ufree_context(struct ibv_context *ibctx)
+-{
+-      struct nes_uvcontext *nesvctx = to_nes_uctx(ibctx);
+-      nes_ufree_pd(&nesvctx->nesupd->ibv_pd);
+-
+-      verbs_uninit_context(&nesvctx->ibv_ctx);
+-      free(nesvctx);
+-}
+-
+-static void nes_uninit_device(struct verbs_device *verbs_device)
+-{
+-      struct nes_udevice *dev = to_nes_udev(&verbs_device->device);
+-
+-      free(dev);
+-}
+-
+-static struct verbs_device *
+-nes_device_alloc(struct verbs_sysfs_dev *sysfs_dev)
+-{
+-      struct nes_udevice *dev;
+-      char value[16];
+-
+-      if (ibv_read_sysfs_file("/sys/module/iw_nes", "parameters/debug_level",
+-                      value, sizeof(value)) > 0) {
+-              sscanf(value, "%u", &nes_debug_level);
+-      } else if (ibv_read_sysfs_file("/sys/module/iw_nes", "debug_level",
+-                              value, sizeof(value)) > 0) {
+-                      sscanf(value, "%u", &nes_debug_level);
+-      }
+-
+-      dev = calloc(1, sizeof(*dev));
+-      if (!dev)
+-              return NULL;
+-
+-      dev->page_size = sysconf(_SC_PAGESIZE);
+-
+-      nes_debug(NES_DBG_INIT, "libnes initialized\n");
+-
+-      return &dev->ibv_dev;
+-}
+-
+-static const struct verbs_device_ops nes_udev_ops = {
+-      .name = "nes",
+-      .match_min_abi_version = 0,
+-      .match_max_abi_version = INT_MAX,
+-      .match_table = hca_table,
+-      .alloc_device = nes_device_alloc,
+-      .uninit_device = nes_uninit_device,
+-      .alloc_context = nes_ualloc_context,
+-      .free_context = nes_ufree_context,
+-};
+-PROVIDER_DRIVER(nes, nes_udev_ops);
+diff --git a/providers/nes/nes_umain.h b/providers/nes/nes_umain.h
+deleted file mode 100644
+index 1070ce429..000000000
+--- a/providers/nes/nes_umain.h
++++ /dev/null
+@@ -1,383 +0,0 @@
+-/*
+- * Copyright (c) 2006 - 2010 Intel Corporation.  All rights reserved.
+- * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * gpl-2.0.txt in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-
+-#ifndef nes_umain_H
+-#define nes_umain_H
+-
+-#include <inttypes.h>
+-#include <stddef.h>
+-#include <endian.h>
+-#include <util/compiler.h>
+-
+-#include <infiniband/driver.h>
+-#include <util/udma_barrier.h>
+-
+-#define PFX   "libnes: "
+-
+-#define  NES_QP_MMAP          1
+-#define  NES_QP_VMAP          2
+-
+-#define NES_DRV_OPT_NO_INLINE_DATA    0x00000080
+-#define NES_DRV_OPT_NO_DB_READ                0x00001000
+-
+-#define NES_DEBUG
+-/* debug levels */
+-/* must match kernel */
+-#define NES_DBG_HW          0x00000001
+-#define NES_DBG_INIT        0x00000002
+-#define NES_DBG_ISR         0x00000004
+-#define NES_DBG_PHY         0x00000008
+-#define NES_DBG_NETDEV      0x00000010
+-#define NES_DBG_CM          0x00000020
+-#define NES_DBG_CM1         0x00000040
+-#define NES_DBG_NIC_RX      0x00000080
+-#define NES_DBG_NIC_TX      0x00000100
+-#define NES_DBG_CQP         0x00000200
+-#define NES_DBG_MMAP        0x00000400
+-#define NES_DBG_MR          0x00000800
+-#define NES_DBG_PD          0x00001000
+-#define NES_DBG_CQ          0x00002000
+-#define NES_DBG_QP          0x00004000
+-#define NES_DBG_MOD_QP      0x00008000
+-#define NES_DBG_AEQ         0x00010000
+-#define NES_DBG_IW_RX       0x00020000
+-#define NES_DBG_IW_TX       0x00040000
+-#define NES_DBG_SHUTDOWN    0x00080000
+-#define NES_DBG_UD          0x00100000
+-#define NES_DBG_RSVD1       0x10000000
+-#define NES_DBG_RSVD2       0x20000000
+-#define NES_DBG_RSVD3       0x40000000
+-#define NES_DBG_RSVD4       0x80000000
+-#define NES_DBG_ALL         0xffffffff
+-
+-extern unsigned int nes_debug_level;
+-#ifdef NES_DEBUG
+-#define nes_debug(level, fmt, args...) \
+-      if (level & nes_debug_level) \
+-              fprintf(stderr, PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args)
+-#else
+-#define nes_debug(level, fmt, args...)
+-#endif
+-
+-enum nes_cqe_opcode_bits {
+-      NES_CQE_STAG_VALID = (1<<6),
+-      NES_CQE_ERROR = (1<<7),
+-      NES_CQE_SQ = (1<<8),
+-      NES_CQE_SE = (1<<9),
+-      NES_CQE_PSH = (1<<29),
+-      NES_CQE_FIN = (1<<30),
+-      NES_CQE_VALID = (1<<31),
+-};
+-
+-enum nes_cqe_word_idx {
+-      NES_CQE_PAYLOAD_LENGTH_IDX = 0,
+-      NES_CQE_COMP_COMP_CTX_LOW_IDX = 2,
+-      NES_CQE_COMP_COMP_CTX_HIGH_IDX = 3,
+-      NES_CQE_INV_STAG_IDX = 4,
+-      NES_CQE_QP_ID_IDX = 5,
+-      NES_CQE_ERROR_CODE_IDX = 6,
+-      NES_CQE_OPCODE_IDX = 7,
+-};
+-
+-enum nes_cqe_allocate_bits {
+-      NES_CQE_ALLOC_INC_SELECT = (1<<28),
+-      NES_CQE_ALLOC_NOTIFY_NEXT = (1<<29),
+-      NES_CQE_ALLOC_NOTIFY_SE = (1<<30),
+-      NES_CQE_ALLOC_RESET = (1<<31),
+-};
+-
+-enum nes_iwarp_sq_wqe_word_idx {
+-      NES_IWARP_SQ_WQE_MISC_IDX = 0,
+-      NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX = 1,
+-      NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX = 2,
+-      NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX = 3,
+-      NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
+-      NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
+-      NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX = 7,
+-      NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX = 8,
+-      NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX = 9,
+-      NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX = 10,
+-      NES_IWARP_SQ_WQE_RDMA_STAG_IDX = 11,
+-      NES_IWARP_SQ_WQE_IMM_DATA_START_IDX = 12,
+-      NES_IWARP_SQ_WQE_FRAG0_LOW_IDX = 16,
+-      NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX = 17,
+-      NES_IWARP_SQ_WQE_LENGTH0_IDX = 18,
+-      NES_IWARP_SQ_WQE_STAG0_IDX = 19,
+-      NES_IWARP_SQ_WQE_FRAG1_LOW_IDX = 20,
+-      NES_IWARP_SQ_WQE_FRAG1_HIGH_IDX = 21,
+-      NES_IWARP_SQ_WQE_LENGTH1_IDX = 22,
+-      NES_IWARP_SQ_WQE_STAG1_IDX = 23,
+-      NES_IWARP_SQ_WQE_FRAG2_LOW_IDX = 24,
+-      NES_IWARP_SQ_WQE_FRAG2_HIGH_IDX = 25,
+-      NES_IWARP_SQ_WQE_LENGTH2_IDX = 26,
+-      NES_IWARP_SQ_WQE_STAG2_IDX = 27,
+-      NES_IWARP_SQ_WQE_FRAG3_LOW_IDX = 28,
+-      NES_IWARP_SQ_WQE_FRAG3_HIGH_IDX = 29,
+-      NES_IWARP_SQ_WQE_LENGTH3_IDX = 30,
+-      NES_IWARP_SQ_WQE_STAG3_IDX = 31,
+-};
+-
+-enum nes_iwarp_rq_wqe_word_idx {
+-      NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
+-      NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
+-      NES_IWARP_RQ_WQE_COMP_CTX_HIGH_IDX = 3,
+-      NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
+-      NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
+-      NES_IWARP_RQ_WQE_FRAG0_LOW_IDX = 8,
+-      NES_IWARP_RQ_WQE_FRAG0_HIGH_IDX = 9,
+-      NES_IWARP_RQ_WQE_LENGTH0_IDX = 10,
+-      NES_IWARP_RQ_WQE_STAG0_IDX = 11,
+-      NES_IWARP_RQ_WQE_FRAG1_LOW_IDX = 12,
+-      NES_IWARP_RQ_WQE_FRAG1_HIGH_IDX = 13,
+-      NES_IWARP_RQ_WQE_LENGTH1_IDX = 14,
+-      NES_IWARP_RQ_WQE_STAG1_IDX = 15,
+-      NES_IWARP_RQ_WQE_FRAG2_LOW_IDX = 16,
+-      NES_IWARP_RQ_WQE_FRAG2_HIGH_IDX = 17,
+-      NES_IWARP_RQ_WQE_LENGTH2_IDX = 18,
+-      NES_IWARP_RQ_WQE_STAG2_IDX = 19,
+-      NES_IWARP_RQ_WQE_FRAG3_LOW_IDX = 20,
+-      NES_IWARP_RQ_WQE_FRAG3_HIGH_IDX = 21,
+-      NES_IWARP_RQ_WQE_LENGTH3_IDX = 22,
+-      NES_IWARP_RQ_WQE_STAG3_IDX = 23,
+-};
+-
+-enum nes_iwarp_sq_opcodes {
+-      NES_IWARP_SQ_WQE_STREAMING = (1<<23),
+-      NES_IWARP_SQ_WQE_IMM_DATA = (1<<28),
+-      NES_IWARP_SQ_WQE_READ_FENCE = (1<<29),
+-      NES_IWARP_SQ_WQE_LOCAL_FENCE = (1<<30),
+-      NES_IWARP_SQ_WQE_SIGNALED_COMPL = (1<<31),
+-};
+-
+-enum nes_iwarp_sq_wqe_bits {
+-      NES_IWARP_SQ_OP_RDMAW = 0,
+-      NES_IWARP_SQ_OP_RDMAR = 1,
+-      NES_IWARP_SQ_OP_SEND = 3,
+-      NES_IWARP_SQ_OP_SENDINV = 4,
+-      NES_IWARP_SQ_OP_SENDSE = 5,
+-      NES_IWARP_SQ_OP_SENDSEINV = 6,
+-      NES_IWARP_SQ_OP_BIND = 8,
+-      NES_IWARP_SQ_OP_FAST_REG = 9,
+-      NES_IWARP_SQ_OP_LOCINV = 10,
+-      NES_IWARP_SQ_OP_RDMAR_LOCINV = 11,
+-      NES_IWARP_SQ_OP_NOP = 12,
+-};
+-
+-enum nes_nic_cqe_word_idx {
+-      NES_NIC_CQE_ACCQP_ID_IDX = 0,
+-      NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2,
+-      NES_NIC_CQE_MISC_IDX = 3,
+-};
+-
+-#define NES_NIC_CQE_ERRV_SHIFT 16
+-enum nes_nic_ev_bits {
+-      NES_NIC_ERRV_BITS_MODE = (1<<0),
+-      NES_NIC_ERRV_BITS_IPV4_CSUM_ERR = (1<<1),
+-      NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR = (1<<2),
+-      NES_NIC_ERRV_BITS_WQE_OVERRUN = (1<<3),
+-      NES_NIC_ERRV_BITS_IPH_ERR = (1<<4),
+-};
+-
+-enum nes_nic_cqe_bits {
+-      NES_NIC_CQE_ERRV_MASK = (0xff<<NES_NIC_CQE_ERRV_SHIFT),
+-      NES_NIC_CQE_SQ = (1<<24),
+-      NES_NIC_CQE_ACCQP_PORT = (1<<28),
+-      NES_NIC_CQE_ACCQP_VALID = (1<<29),
+-      NES_NIC_CQE_TAG_VALID = (1<<30),
+-      NES_NIC_CQE_VALID = (1<<31),
+-};
+-struct nes_hw_nic_cqe {
+-      uint32_t cqe_words[4];
+-};
+-
+-enum nes_iwarp_cqe_major_code {
+-      NES_IWARP_CQE_MAJOR_FLUSH = 1,
+-      NES_IWARP_CQE_MAJOR_DRV = 0x8000
+-};
+-
+-enum nes_iwarp_cqe_minor_code {
+-      NES_IWARP_CQE_MINOR_FLUSH = 1
+-};
+-
+-struct nes_hw_qp_wqe {
+-      uint32_t wqe_words[32];
+-};
+-
+-struct nes_hw_cqe {
+-      uint32_t cqe_words[8];
+-};
+-
+-struct nes_user_doorbell {
+-      uint32_t wqe_alloc;
+-      uint32_t reserved[3];
+-      uint32_t cqe_alloc;
+-};
+-
+-struct nes_udevice {
+-      struct verbs_device ibv_dev;
+-      int page_size;
+-};
+-
+-struct nes_upd {
+-      struct ibv_pd ibv_pd;
+-      struct nes_user_doorbell volatile *udoorbell;
+-      uint32_t pd_id;
+-      uint32_t db_index;
+-};
+-
+-struct nes_uvcontext {
+-      struct verbs_context ibv_ctx;
+-      struct nes_upd *nesupd;
+-      uint32_t max_pds; /* maximum pds allowed for this user process */
+-      uint32_t max_qps; /* maximum qps allowed for this user process */
+-      uint32_t wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+-      uint32_t mcrqf;
+-      uint8_t virtwq ; /*  flag if to use virt wqs or not */
+-      uint8_t reserved[3];
+-};
+-
+-struct nes_uqp;
+-
+-struct nes_ucq {
+-      struct ibv_cq ibv_cq;
+-      struct nes_hw_cqe volatile *cqes;
+-      struct verbs_mr vmr;
+-      pthread_spinlock_t lock;
+-      uint32_t cq_id;
+-      uint16_t size;
+-      uint16_t head;
+-      uint16_t polled_completions;
+-      uint8_t is_armed;
+-      uint8_t skip_arm;
+-      int arm_sol;
+-      int skip_sol;
+-      int comp_vector;
+-      struct nes_uqp *udqp;
+-};
+-
+-struct nes_uqp {
+-      struct ibv_qp ibv_qp;
+-      struct nes_hw_qp_wqe volatile *sq_vbase;
+-      struct nes_hw_qp_wqe volatile *rq_vbase;
+-      uint32_t qp_id;
+-      struct nes_ucq *send_cq;
+-      struct nes_ucq *recv_cq;
+-      struct  verbs_mr vmr;
+-      uint32_t nes_drv_opt;
+-      pthread_spinlock_t lock;
+-      uint16_t sq_db_index;
+-      uint16_t sq_head;
+-      uint16_t sq_tail;
+-      uint16_t sq_size;
+-      uint16_t sq_sig_all;
+-      uint16_t rq_db_index;
+-      uint16_t rq_head;
+-      uint16_t rq_tail;
+-      uint16_t rq_size;
+-      uint16_t rdma0_msg;
+-      uint16_t mapping;
+-      uint16_t qperr;
+-      uint16_t rsvd;
+-      uint32_t pending_rcvs;
+-      struct ibv_recv_wr *pend_rx_wr;
+-      int nes_ud_sksq_fd;
+-      void *sksq_shared_ctxt;
+-      uint64_t send_wr_id[512]; /* IMA send wr_id ring content */
+-      uint64_t recv_wr_id[512]; /* IMA receive wr_id ring content */
+-};
+-
+-#define to_nes_uxxx(xxx, type)                                                 \
+-      container_of(ib##xxx, struct nes_u##type, ibv_##xxx)
+-
+-static inline struct nes_udevice *to_nes_udev(struct ibv_device *ibdev)
+-{
+-      return container_of(ibdev, struct nes_udevice, ibv_dev.device);
+-}
+-
+-static inline struct nes_uvcontext *to_nes_uctx(struct ibv_context *ibctx)
+-{
+-      return container_of(ibctx, struct nes_uvcontext, ibv_ctx.context);
+-}
+-
+-static inline struct nes_upd *to_nes_upd(struct ibv_pd *ibpd)
+-{
+-      return to_nes_uxxx(pd, pd);
+-}
+-
+-static inline struct nes_ucq *to_nes_ucq(struct ibv_cq *ibcq)
+-{
+-      return to_nes_uxxx(cq, cq);
+-}
+-
+-static inline struct nes_uqp *to_nes_uqp(struct ibv_qp *ibqp)
+-{
+-      return to_nes_uxxx(qp, qp);
+-}
+-
+-
+-/* nes_uverbs.c */
+-int nes_uquery_device(struct ibv_context *, struct ibv_device_attr *);
+-int nes_uquery_port(struct ibv_context *, uint8_t, struct ibv_port_attr *);
+-struct ibv_pd *nes_ualloc_pd(struct ibv_context *);
+-int nes_ufree_pd(struct ibv_pd *);
+-struct ibv_mr *nes_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
+-                         uint64_t hca_va, int access);
+-int nes_udereg_mr(struct verbs_mr *vmr);
+-struct ibv_cq *nes_ucreate_cq(struct ibv_context *, int, struct ibv_comp_channel *, int);
+-int nes_uresize_cq(struct ibv_cq *, int);
+-int nes_udestroy_cq(struct ibv_cq *);
+-int nes_upoll_cq(struct ibv_cq *, int, struct ibv_wc *);
+-int nes_upoll_cq_no_db_read(struct ibv_cq *, int, struct ibv_wc *);
+-int nes_uarm_cq(struct ibv_cq *, int);
+-void nes_cq_event(struct ibv_cq *);
+-struct ibv_srq *nes_ucreate_srq(struct ibv_pd *, struct ibv_srq_init_attr *);
+-int nes_umodify_srq(struct ibv_srq *, struct ibv_srq_attr *, int);
+-int nes_udestroy_srq(struct ibv_srq *);
+-int nes_upost_srq_recv(struct ibv_srq *, struct ibv_recv_wr *, struct ibv_recv_wr **);
+-struct ibv_qp *nes_ucreate_qp(struct ibv_pd *, struct ibv_qp_init_attr *);
+-int nes_uquery_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+-                int, struct ibv_qp_init_attr *init_attr);
+-int nes_umodify_qp(struct ibv_qp *, struct ibv_qp_attr *, int);
+-int nes_udestroy_qp(struct ibv_qp *);
+-int nes_upost_send(struct ibv_qp *, struct ibv_send_wr *, struct ibv_send_wr **);
+-int nes_upost_recv(struct ibv_qp *, struct ibv_recv_wr *, struct ibv_recv_wr **);
+-struct ibv_ah *nes_ucreate_ah(struct ibv_pd *, struct ibv_ah_attr *);
+-int nes_udestroy_ah(struct ibv_ah *);
+-int nes_uattach_mcast(struct ibv_qp *, const union ibv_gid *, uint16_t);
+-int nes_udetach_mcast(struct ibv_qp *, const union ibv_gid *, uint16_t);
+-void nes_async_event(struct ibv_context *context,
+-                   struct ibv_async_event *event);
+-
+-extern long int page_size;
+-
+-#endif                                /* nes_umain_H */
+diff --git a/providers/nes/nes_uverbs.c b/providers/nes/nes_uverbs.c
+deleted file mode 100644
+index 2b78468b4..000000000
+--- a/providers/nes/nes_uverbs.c
++++ /dev/null
+@@ -1,1535 +0,0 @@
+-/*
+- * Copyright (c) 2006 - 2010 Intel Corporation.  All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses.  You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * gpl-2.0.txt in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- *     Redistribution and use in source and binary forms, with or
+- *     without modification, are permitted provided that the following
+- *     conditions are met:
+- *
+- *      - Redistributions of source code must retain the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer.
+- *
+- *      - Redistributions in binary form must reproduce the above
+- *        copyright notice, this list of conditions and the following
+- *        disclaimer in the documentation and/or other materials
+- *        provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- */
+-
+-#include <config.h>
+-
+-#include <endian.h>
+-#include <stdlib.h>
+-#include <stdio.h>
+-#include <string.h>
+-#include <unistd.h>
+-#include <signal.h>
+-#include <errno.h>
+-#include <pthread.h>
+-#include <malloc.h>
+-#include <sys/mman.h>
+-#include <linux/if_ether.h>
+-#include <sys/stat.h>
+-#include <fcntl.h>
+-
+-#include "nes_umain.h"
+-#include "nes-abi.h"
+-
+-#define STATIC static
+-#define INLINE inline
+-
+-#define NES_WC_WITH_VLAN   1 << 3
+-#define NES_UD_RX_BATCH_SZ 64
+-#define NES_UD_MAX_SG_LIST_SZ 1
+-
+-struct nes_ud_send_wr {
+-      uint32_t               wr_cnt;
+-      uint32_t               qpn;
+-      uint32_t               flags;
+-      uint32_t               resv[1];
+-      struct ibv_sge         sg_list[64];
+-};
+-
+-struct nes_ud_recv_wr {
+-      uint32_t               wr_cnt;
+-      uint32_t               qpn;
+-      uint32_t               resv[2];
+-      struct ibv_sge         sg_list[64];
+-};
+-
+-/**
+- * nes_uquery_device
+- */
+-int nes_uquery_device(struct ibv_context *context, struct ibv_device_attr *attr)
+-{
+-      struct ibv_query_device cmd;
+-      uint64_t nes_fw_ver;
+-      int ret;
+-      unsigned int minor, major;
+-
+-      ret = ibv_cmd_query_device(context, attr, &nes_fw_ver,
+-                                      &cmd, sizeof cmd);
+-      if (ret)
+-              return ret;
+-
+-      major = (nes_fw_ver >> 16) & 0xffff;
+-      minor = nes_fw_ver & 0xffff;
+-
+-      snprintf(attr->fw_ver, sizeof attr->fw_ver,
+-              "%d.%d", major, minor);
+-
+-      return 0;
+-}
+-
+-
+-/**
+- * nes_uquery_port
+- */
+-int nes_uquery_port(struct ibv_context *context, uint8_t port,
+-              struct ibv_port_attr *attr)
+-{
+-      struct ibv_query_port cmd;
+-
+-      return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
+-}
+-
+-
+-/**
+- * nes_ualloc_pd
+- */
+-struct ibv_pd *nes_ualloc_pd(struct ibv_context *context)
+-{
+-      struct ibv_alloc_pd cmd;
+-      struct nes_ualloc_pd_resp resp;
+-      struct nes_upd *nesupd;
+-
+-      nesupd = malloc(sizeof *nesupd);
+-      if (!nesupd)
+-              return NULL;
+-
+-      if (ibv_cmd_alloc_pd(context, &nesupd->ibv_pd, &cmd, sizeof cmd,
+-                      &resp.ibv_resp, sizeof resp)) {
+-              free(nesupd);
+-              return NULL;
+-      }
+-      nesupd->pd_id = resp.pd_id;
+-      nesupd->db_index = resp.mmap_db_index;
+-
+-      nesupd->udoorbell = mmap(NULL, page_size, PROT_WRITE | PROT_READ, MAP_SHARED,
+-                      context->cmd_fd, nesupd->db_index * page_size);
+-
+-      if (nesupd->udoorbell == MAP_FAILED) {
+-              free(nesupd);
+-              return NULL;
+-      }
+-
+-      return &nesupd->ibv_pd;
+-}
+-
+-
+-/**
+- * nes_ufree_pd
+- */
+-int nes_ufree_pd(struct ibv_pd *pd)
+-{
+-      int ret;
+-      struct nes_upd *nesupd;
+-
+-      nesupd = to_nes_upd(pd);
+-
+-      ret = ibv_cmd_dealloc_pd(pd);
+-      if (ret)
+-              return ret;
+-
+-      munmap((void *)nesupd->udoorbell, page_size);
+-      free(nesupd);
+-
+-      return 0;
+-}
+-
+-
+-/**
+- * nes_ureg_mr
+- */
+-struct ibv_mr *nes_ureg_mr(struct ibv_pd *pd, void *addr, size_t length,
+-                         uint64_t hca_va, int access)
+-{
+-      struct verbs_mr *vmr;
+-      struct nes_ureg_mr cmd;
+-      struct ib_uverbs_reg_mr_resp resp;
+-
+-      vmr = malloc(sizeof(*vmr));
+-      if (!vmr)
+-              return NULL;
+-
+-      cmd.reg_type = IWNES_MEMREG_TYPE_MEM;
+-      if (ibv_cmd_reg_mr(pd, addr, length, hca_va, access, vmr, &cmd.ibv_cmd,
+-                         sizeof(cmd), &resp, sizeof(resp))) {
+-              free(vmr);
+-
+-              return NULL;
+-      }
+-
+-      return &vmr->ibv_mr;
+-}
+-
+-
+-/**
+- * nes_udereg_mr
+- */
+-int nes_udereg_mr(struct verbs_mr *vmr)
+-{
+-      int ret;
+-
+-      ret = ibv_cmd_dereg_mr(vmr);
+-      if (ret)
+-              return ret;
+-
+-      free(vmr);
+-      return 0;
+-}
+-
+-/**
+- * nes_ucreate_cq
+- */
+-struct ibv_cq *nes_ucreate_cq(struct ibv_context *context, int cqe,
+-              struct ibv_comp_channel *channel, int comp_vector)
+-{
+-      struct nes_ucq *nesucq;
+-      struct nes_ureg_mr reg_mr_cmd;
+-      struct ib_uverbs_reg_mr_resp reg_mr_resp;
+-      struct nes_ucreate_cq cmd;
+-      struct nes_ucreate_cq_resp resp;
+-      int ret;
+-      struct nes_uvcontext *nesvctx = to_nes_uctx(context);
+-
+-      nesucq = malloc(sizeof *nesucq);
+-      if (!nesucq) {
+-              return NULL;
+-      }
+-      memset(nesucq, 0, sizeof(*nesucq));
+-
+-      if (pthread_spin_init(&nesucq->lock, PTHREAD_PROCESS_PRIVATE)) {
+-              free(nesucq);
+-              return NULL;
+-      }
+-
+-      if (cqe < 4)    /* a reasonable minimum */
+-              cqe = 4;
+-      nesucq->size = cqe + 1;
+-      nesucq->comp_vector = comp_vector;
+-
+-      nesucq->cqes = memalign(page_size, nesucq->size*sizeof(struct nes_hw_cqe));
+-      if (!nesucq->cqes)
+-              goto err;
+-
+-      /* Register the memory for the CQ */
+-      reg_mr_cmd.reg_type = IWNES_MEMREG_TYPE_CQ;
+-
+-      ret = ibv_cmd_reg_mr(&nesvctx->nesupd->ibv_pd, (void *)nesucq->cqes,
+-                      (nesucq->size*sizeof(struct nes_hw_cqe)),
+-                      (uintptr_t)nesucq->cqes, IBV_ACCESS_LOCAL_WRITE,
+-                      &nesucq->vmr, &reg_mr_cmd.ibv_cmd, sizeof(reg_mr_cmd),
+-                      &reg_mr_resp, sizeof(reg_mr_resp));
+-      if (ret) {
+-              /* fprintf(stderr, "ibv_cmd_reg_mr failed (ret = %d).\n", ret); */
+-              free((struct nes_hw_cqe *)nesucq->cqes);
+-              goto err;
+-      }
+-
+-      /* Create the CQ */
+-      memset(&cmd, 0, sizeof(cmd));
+-      cmd.user_cq_buffer = (__u64)((uintptr_t)nesucq->cqes);
+-      cmd.mcrqf = nesvctx->mcrqf;
+-
+-      ret = ibv_cmd_create_cq(context, nesucq->size-1, channel, comp_vector,
+-                      &nesucq->ibv_cq, &cmd.ibv_cmd, sizeof cmd,
+-                      &resp.ibv_resp, sizeof resp);
+-      if (ret)
+-              goto err;
+-
+-      nesucq->cq_id = (uint16_t)resp.cq_id;
+-
+-      /* Zero out the CQ */
+-      memset((struct nes_hw_cqe *)nesucq->cqes, 0, nesucq->size*sizeof(struct nes_hw_cqe));
+-
+-      return &nesucq->ibv_cq;
+-
+-err:
+-      /* fprintf(stderr, PFX "%s: Error Creating CQ.\n", __FUNCTION__); */
+-      pthread_spin_destroy(&nesucq->lock);
+-      free(nesucq);
+-
+-      return NULL;
+-}
+-
+-
+-/**
+- * nes_uresize_cq
+- */
+-int nes_uresize_cq(struct ibv_cq *cq, int cqe)
+-{
+-      /* fprintf(stderr, PFX "%s\n", __FUNCTION__); */
+-      return -ENOSYS;
+-}
+-
+-/**
+- * nes_udestroy_cq
+- */
+-int nes_udestroy_cq(struct ibv_cq *cq)
+-{
+-      struct nes_ucq *nesucq = to_nes_ucq(cq);
+-      int ret;
+-
+-      ret = ibv_cmd_destroy_cq(cq);
+-      if (ret)
+-              return ret;
+-
+-      ret = ibv_cmd_dereg_mr(&nesucq->vmr);
+-      if (ret)
+-              fprintf(stderr, PFX "%s: Failed to deregister CQ Memory Region.\n", __FUNCTION__);
+-
+-      /* Free CQ the memory */
+-      free((struct nes_hw_cqe *)nesucq->cqes);
+-      pthread_spin_destroy(&nesucq->lock);
+-      free(nesucq);
+-
+-      return 0;
+-}
+-
+-#define  NES_CQ_BUF_OV_ERR 0x3
+-
+-static inline
+-int nes_ima_upoll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *entry)
+-{
+-      struct nes_ucq *nesucq = to_nes_ucq(cq);
+-      struct nes_uvcontext *nesvctx = to_nes_uctx(cq->context);
+-      uint32_t cqe_misc;
+-      int cqe_count = 0;
+-      uint32_t head;
+-      uint32_t cq_size;
+-
+-      volatile struct nes_hw_nic_cqe *cqe = NULL;
+-      volatile struct nes_hw_nic_cqe *cqes;
+-
+-      struct nes_uqp *nesuqp = nesucq->udqp;
+-      uint32_t vlan_tag = 0;
+-
+-      cqes = (volatile struct nes_hw_nic_cqe *)nesucq->cqes;
+-      head = nesucq->head;
+-      cq_size = nesucq->size;
+-
+-      if (!nesuqp || !nesvctx)
+-              exit(0);
+-      if (nesuqp->ibv_qp.state == IBV_QPS_ERR) {
+-              while (cqe_count < num_entries) {
+-                      memset(entry, 0, sizeof *entry);
+-
+-              if (nesuqp->recv_cq == nesucq) {
+-                      if (nesuqp->rq_tail != nesuqp->rq_head) {
+-                              /* Working on a RQ Completion*/
+-                              entry->wr_id =
+-                                      nesuqp->recv_wr_id[nesuqp->rq_tail];
+-                              if (++nesuqp->rq_tail >= nesuqp->rq_size)
+-                                      nesuqp->rq_tail = 0;
+-                      } else
+-                              return cqe_count;
+-              } else
+-              if (nesuqp->send_cq == nesucq) {
+-                      if (nesuqp->sq_tail != nesuqp->sq_head) {
+-                              entry->wr_id =
+-                                      nesuqp->send_wr_id[nesuqp->sq_tail];
+-                              /* Working on a SQ Completion*/
+-                              if (++nesuqp->sq_tail >= nesuqp->sq_size)
+-                                      nesuqp->sq_tail = 0;
+-                      } else
+-                              return cqe_count;
+-              }
+-              entry->status = IBV_WC_WR_FLUSH_ERR;
+-              entry++;
+-              cqe_count++;
+-              }
+-              return cqe_count;
+-      }
+-
+-      while (cqe_count < num_entries) {
+-              const enum ibv_wc_opcode INVAL_OP = -1;
+-
+-              entry->opcode = INVAL_OP;
+-              cqe = &cqes[head];
+-              cqe_misc =
+-                      le32toh(cqe->cqe_words[NES_NIC_CQE_MISC_IDX]);
+-              if (cqe_misc & NES_NIC_CQE_VALID) {
+-                      memset(entry, 0, sizeof *entry);
+-                      entry->opcode = INVAL_OP;
+-                      cqe->cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
+-                      entry->status = (cqe_misc & NES_NIC_CQE_ERRV_MASK) >>
+-                                              NES_NIC_CQE_ERRV_SHIFT;
+-                      entry->qp_num = nesuqp->qp_id;
+-                      entry->src_qp = nesuqp->qp_id;
+-                      if (cqe_misc & NES_NIC_CQE_SQ) {
+-                              entry->opcode = IBV_WC_SEND;
+-
+-                              entry->wr_id =
+-                                      nesuqp->send_wr_id[nesuqp->sq_tail];
+-
+-                              /* Working on a SQ Completion*/
+-                              if (++nesuqp->sq_tail >= nesuqp->sq_size)
+-                                      nesuqp->sq_tail = 0;
+-                      } else {
+-                              /* no CRC counting at all - all packets
+-                              go to higher layer as they are received -
+-                              the fastest path */
+-
+-                              entry->byte_len = cqe_misc & 0xffff;
+-                              entry->opcode = IBV_WC_RECV;
+-
+-                              entry->wr_id =
+-                                      nesuqp->recv_wr_id[nesuqp->rq_tail];
+-                              if (cqe_misc & NES_NIC_CQE_TAG_VALID) {
+-                                      vlan_tag = le32toh(
+-                              cqe->cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX])
+-                                                                      >> 16;
+-                                      entry->sl = (vlan_tag >> 12) & 0x0f;
+-                                      entry->pkey_index = vlan_tag & 0x0fff;
+-                                      entry->wc_flags |= NES_WC_WITH_VLAN;
+-                              }
+-
+-
+-                              /* Working on a RQ Completion*/
+-                              if (++nesuqp->rq_tail >= nesuqp->rq_size)
+-                                      nesuqp->rq_tail = 0;
+-                              if (entry->status == NES_CQ_BUF_OV_ERR)
+-                                      entry->status = IBV_WC_LOC_LEN_ERR;
+-                      }
+-
+-                      if (++head >= cq_size)
+-                              head = 0;
+-
+-                      if (entry->opcode != INVAL_OP) {
+-                              /* it is possible that no entry will be
+-                                available */
+-                              cqe_count++;
+-                              entry++;
+-                      }
+-
+-                      nesvctx->nesupd->udoorbell->cqe_alloc =
+-                              htole32(nesucq->cq_id | (1 << 16));
+-              } else {
+-                      break;
+-              }
+-      }
+-      nesucq->head = head;
+-      return cqe_count;
+-}
+-
+-/**
+- * nes_upoll_cq
+- */
+-int nes_upoll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *entry)
+-{
+-      uint64_t wrid;
+-      struct nes_ucq *nesucq;
+-      struct nes_uvcontext *nesvctx = NULL;
+-      struct nes_uqp *nesuqp;
+-      int cqe_count=0;
+-      uint32_t head;
+-      uint32_t cq_size;
+-      uint32_t wqe_index;
+-      uint32_t wq_tail = 0;
+-      struct nes_hw_cqe cqe;
+-      uint64_t u64temp;
+-      int move_cq_head = 1;
+-      uint32_t err_code;
+-
+-      nesucq = to_nes_ucq(cq);
+-      nesvctx = to_nes_uctx(cq->context);
+-
+-      if (nesucq->cq_id < 64)
+-              return nes_ima_upoll_cq(cq, num_entries, entry);
+-
+-      pthread_spin_lock(&nesucq->lock);
+-
+-      head = nesucq->head;
+-      cq_size = nesucq->size;
+-
+-      while (cqe_count<num_entries) {
+-              if ((le32toh(nesucq->cqes[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) == 0)
+-                      break;
+-
+-              /* Make sure we read CQ entry contents *after* we've checked the valid bit. */
+-              udma_from_device_barrier();
+-
+-              cqe = (volatile struct nes_hw_cqe)nesucq->cqes[head];
+-
+-              /* parse CQE, get completion context from WQE (either rq or sq */
+-              wqe_index = le32toh(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]) & 511;
+-              u64temp = ((uint64_t) (le32toh(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]))) |
+-                              (((uint64_t) (le32toh(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32);
+-
+-              if (likely(u64temp)) {
+-                      nesuqp = (struct nes_uqp *)(uintptr_t)(u64temp & (~1023));
+-                      memset(entry, 0, sizeof *entry);
+-                      if (likely(le32toh(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]) == 0)) {
+-                              entry->status = IBV_WC_SUCCESS;
+-                      } else {
+-                              err_code = le32toh(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
+-                              if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
+-                                      entry->status = err_code & 0x0000ffff;
+-                              } else {
+-                                      entry->status = IBV_WC_WR_FLUSH_ERR;
+-                                      if (le32toh(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
+-                                              if (wqe_index == 0 && nesuqp->rdma0_msg) {
+-                                                      nesuqp->sq_tail = (wqe_index+1)&(nesuqp->sq_size - 1);
+-                                                      move_cq_head = 0;
+-                                                      wq_tail = nesuqp->sq_tail;
+-                                                      nesuqp->rdma0_msg = 0;
+-                                                      goto nes_upoll_cq_update;
+-                                              }
+-                                      }
+-                              }
+-                      }
+-                      entry->qp_num = nesuqp->qp_id;
+-                      entry->src_qp = nesuqp->qp_id;
+-                      nesuqp->rdma0_msg = 0;
+-
+-                      if (le32toh(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
+-                              /* Working on a SQ Completion*/
+-                              wrid = ((uint64_t) le32toh(nesuqp->sq_vbase[wqe_index].wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])) |
+-                                      (((uint64_t) le32toh(nesuqp->sq_vbase[wqe_index].wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+-                              entry->byte_len = le32toh(nesuqp->sq_vbase[wqe_index].wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
+-
+-                              switch (le32toh(nesuqp->sq_vbase[wqe_index].
+-                                              wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
+-                                      case NES_IWARP_SQ_OP_RDMAW:
+-                                              /* fprintf(stderr, PFX "%s: Operation = RDMA WRITE.\n",
+-                                                              __FUNCTION__ ); */
+-                                              entry->opcode = IBV_WC_RDMA_WRITE;
+-                                              break;
+-                                      case NES_IWARP_SQ_OP_RDMAR:
+-                                              /* fprintf(stderr, PFX "%s: Operation = RDMA READ.\n",
+-                                                              __FUNCTION__ ); */
+-                                              entry->opcode = IBV_WC_RDMA_READ;
+-                                              entry->byte_len = le32toh(nesuqp->sq_vbase[wqe_index].
+-                                                              wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
+-                                              break;
+-                                      case NES_IWARP_SQ_OP_SENDINV:
+-                                      case NES_IWARP_SQ_OP_SENDSEINV:
+-                                      case NES_IWARP_SQ_OP_SEND:
+-                                      case NES_IWARP_SQ_OP_SENDSE:
+-                                              /* fprintf(stderr, PFX "%s: Operation = Send.\n",
+-                                                              __FUNCTION__ ); */
+-                                              entry->opcode = IBV_WC_SEND;
+-                                              break;
+-                              }
+-
+-                              nesuqp->sq_tail = (wqe_index+1)&(nesuqp->sq_size - 1);
+-                              if ((entry->status != IBV_WC_SUCCESS) && (nesuqp->sq_tail != nesuqp->sq_head)) {
+-                                      move_cq_head = 0;
+-                                      wq_tail = nesuqp->sq_tail;
+-                              }
+-                      } else {
+-                              /* Working on a RQ Completion*/
+-                              entry->byte_len = le32toh(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
+-                              wrid = ((uint64_t) le32toh(nesuqp->rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX])) |
+-                                      (((uint64_t) le32toh(nesuqp->rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+-                              entry->opcode = IBV_WC_RECV;
+-
+-                              nesuqp->rq_tail = (wqe_index+1)&(nesuqp->rq_size - 1);
+-                              if ((entry->status != IBV_WC_SUCCESS) && (nesuqp->rq_tail != nesuqp->rq_head)) {
+-                                      move_cq_head = 0;
+-                                      wq_tail = nesuqp->rq_tail;
+-                              }
+-                      }
+-
+-                      entry->wr_id = wrid;
+-                      entry++;
+-                      cqe_count++;
+-              }
+-nes_upoll_cq_update:
+-              if (move_cq_head) {
+-                      nesucq->cqes[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
+-                      if (++head >= cq_size)
+-                              head = 0;
+-                      nesucq->polled_completions++;
+-
+-                      if ((nesucq->polled_completions > (cq_size/2)) ||
+-                                      (nesucq->polled_completions == 255)) {
+-                              if (nesvctx == NULL)
+-                                      nesvctx = to_nes_uctx(cq->context);
+-                              nesvctx->nesupd->udoorbell->cqe_alloc = htole32(nesucq->cq_id |
+-                                              (nesucq->polled_completions << 16));
+-                              nesucq->polled_completions = 0;
+-                      }
+-              } else {
+-                      /* Update the wqe index and set status to flush */
+-                      wqe_index = le32toh(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+-                      wqe_index = (wqe_index & (~511)) | wq_tail;
+-                      nesucq->cqes[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 
+-                              htole32(wqe_index);
+-                      nesucq->cqes[head].cqe_words[NES_CQE_ERROR_CODE_IDX] = 
+-                              htole32((NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH);
+-                      move_cq_head = 1; /* ready for next pass */
+-              }
+-      }
+-
+-      if (nesucq->polled_completions) {
+-              if (nesvctx == NULL)
+-                      nesvctx = to_nes_uctx(cq->context);
+-              nesvctx->nesupd->udoorbell->cqe_alloc = htole32(nesucq->cq_id |
+-                              (nesucq->polled_completions << 16));
+-              nesucq->polled_completions = 0;
+-      }
+-      nesucq->head = head;
+-
+-      pthread_spin_unlock(&nesucq->lock);
+-
+-      return cqe_count;
+-}
+-
+-
+-/**
+- * nes_upoll_cq_no_db_read
+- */
+-int nes_upoll_cq_no_db_read(struct ibv_cq *cq, int num_entries, struct ibv_wc *entry)
+-{
+-      uint64_t wrid;
+-      struct nes_ucq *nesucq;
+-      struct nes_uvcontext *nesvctx = NULL;
+-      struct nes_uqp *nesuqp;
+-      int cqe_count=0;
+-      uint32_t head;
+-      uint32_t cq_size;
+-      uint32_t wqe_index;
+-      uint32_t wq_tail = 0;
+-      struct nes_hw_cqe cqe;
+-      uint64_t u64temp;
+-      int move_cq_head = 1;
+-      uint32_t err_code;
+-
+-      nesucq = to_nes_ucq(cq);
+-      nesvctx = to_nes_uctx(cq->context);
+-
+-      if (nesucq->cq_id < 64)
+-              return nes_ima_upoll_cq(cq, num_entries, entry);
+-
+-      pthread_spin_lock(&nesucq->lock);
+-
+-      head = nesucq->head;
+-      cq_size = nesucq->size;
+-
+-      while (cqe_count<num_entries) {
+-              if ((le32toh(nesucq->cqes[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) == 0)
+-                      break;
+-
+-              /* Make sure we read CQ entry contents *after* we've checked the valid bit. */
+-              udma_from_device_barrier();
+-
+-              cqe = (volatile struct nes_hw_cqe)nesucq->cqes[head];
+-
+-              /* parse CQE, get completion context from WQE (either rq or sq */
+-              wqe_index = le32toh(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]) & 511;
+-              u64temp = ((uint64_t) (le32toh(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]))) |
+-                              (((uint64_t) (le32toh(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32);
+-
+-              if (likely(u64temp)) {
+-                      nesuqp = (struct nes_uqp *)(uintptr_t)(u64temp & (~1023));
+-                      memset(entry, 0, sizeof *entry);
+-                      if (likely(le32toh(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]) == 0)) {
+-                              entry->status = IBV_WC_SUCCESS;
+-                      } else {
+-                              err_code = le32toh(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
+-                              if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16))
+-                                      entry->status = err_code & 0x0000ffff;
+-                              else
+-                                      entry->status = IBV_WC_WR_FLUSH_ERR;
+-                      }
+-                      entry->qp_num = nesuqp->qp_id;
+-                      entry->src_qp = nesuqp->qp_id;
+-
+-                      if (le32toh(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
+-                              /* Working on a SQ Completion*/
+-                              wrid = ((uint64_t) le32toh(nesuqp->sq_vbase[wqe_index].wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])) |
+-                                      (((uint64_t) le32toh(nesuqp->sq_vbase[wqe_index].wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+-                              entry->byte_len = le32toh(nesuqp->sq_vbase[wqe_index].wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
+-
+-                              switch (le32toh(nesuqp->sq_vbase[wqe_index].
+-                                              wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
+-                                      case NES_IWARP_SQ_OP_RDMAW:
+-                                              /* fprintf(stderr, PFX "%s: Operation = RDMA WRITE.\n",
+-                                                              __FUNCTION__ ); */
+-                                              entry->opcode = IBV_WC_RDMA_WRITE;
+-                                              break;
+-                                      case NES_IWARP_SQ_OP_RDMAR:
+-                                              /* fprintf(stderr, PFX "%s: Operation = RDMA READ.\n",
+-                                                              __FUNCTION__ ); */
+-                                              entry->opcode = IBV_WC_RDMA_READ;
+-                                              entry->byte_len = le32toh(nesuqp->sq_vbase[wqe_index].
+-                                                              wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
+-                                              break;
+-                                      case NES_IWARP_SQ_OP_SENDINV:
+-                                      case NES_IWARP_SQ_OP_SENDSEINV:
+-                                      case NES_IWARP_SQ_OP_SEND:
+-                                      case NES_IWARP_SQ_OP_SENDSE:
+-                                              /* fprintf(stderr, PFX "%s: Operation = Send.\n",
+-                                                              __FUNCTION__ ); */
+-                                              entry->opcode = IBV_WC_SEND;
+-                                              break;
+-                              }
+-
+-                              nesuqp->sq_tail = (wqe_index+1)&(nesuqp->sq_size - 1);
+-                              if ((entry->status != IBV_WC_SUCCESS) && (nesuqp->sq_tail != nesuqp->sq_head)) {
+-                                      move_cq_head = 0;
+-                                      wq_tail = nesuqp->sq_tail;
+-                              }
+-                      } else {
+-                              /* Working on a RQ Completion*/
+-                              entry->byte_len = le32toh(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
+-                              wrid = ((uint64_t) le32toh(nesuqp->rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX])) |
+-                                      (((uint64_t) le32toh(nesuqp->rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+-                              entry->opcode = IBV_WC_RECV;
+-
+-                              nesuqp->rq_tail = (wqe_index+1)&(nesuqp->rq_size - 1);
+-                              if ((entry->status != IBV_WC_SUCCESS) && (nesuqp->rq_tail != nesuqp->rq_head)) {
+-                                      move_cq_head = 0;
+-                                      wq_tail = nesuqp->rq_tail;
+-                              }
+-                      }
+-
+-                      entry->wr_id = wrid;
+-                      entry++;
+-                      cqe_count++;
+-              }
+-
+-              if (move_cq_head) {
+-                      nesucq->cqes[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
+-                      if (++head >= cq_size)
+-                              head = 0;
+-                      nesucq->polled_completions++;
+-
+-                      if ((nesucq->polled_completions > (cq_size/2)) ||
+-                                      (nesucq->polled_completions == 255)) {
+-                              if (nesvctx == NULL)
+-                                      nesvctx = to_nes_uctx(cq->context);
+-                              nesvctx->nesupd->udoorbell->cqe_alloc = htole32(nesucq->cq_id |
+-                                              (nesucq->polled_completions << 16));
+-                              nesucq->polled_completions = 0;
+-                      }
+-              } else {
+-                      /* Update the wqe index and set status to flush */
+-                      wqe_index = le32toh(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+-                      wqe_index = (wqe_index & (~511)) | wq_tail;
+-                      nesucq->cqes[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
+-                              htole32(wqe_index);
+-                      nesucq->cqes[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
+-                              htole32((NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH);
+-                      move_cq_head = 1; /* ready for next pass */
+-              }
+-      }
+-
+-      if (nesucq->polled_completions) {
+-              if (nesvctx == NULL)
+-                      nesvctx = to_nes_uctx(cq->context);
+-              nesvctx->nesupd->udoorbell->cqe_alloc = htole32(nesucq->cq_id |
+-                              (nesucq->polled_completions << 16));
+-              nesucq->polled_completions = 0;
+-      }
+-      nesucq->head = head;
+-
+-      pthread_spin_unlock(&nesucq->lock);
+-
+-      return cqe_count;
+-}
+-
+-/**
+- * nes_arm_cq
+- */
+-static void nes_arm_cq(struct nes_ucq *nesucq, struct nes_uvcontext *nesvctx, int sol)
+-{
+-      uint32_t cq_arm;
+-
+-      cq_arm = nesucq->cq_id;
+-
+-      if (sol)
+-              cq_arm |= NES_CQE_ALLOC_NOTIFY_SE;
+-      else
+-              cq_arm |= NES_CQE_ALLOC_NOTIFY_NEXT;
+-
+-      nesvctx->nesupd->udoorbell->cqe_alloc = htole32(cq_arm);
+-      nesucq->is_armed = 1;
+-      nesucq->arm_sol = sol;
+-      nesucq->skip_arm = 0;
+-      nesucq->skip_sol = 1;
+-}
+-
+-/**
+- * nes_uarm_cq
+- */
+-int nes_uarm_cq(struct ibv_cq *cq, int solicited)
+-{
+-      struct nes_ucq *nesucq;
+-      struct nes_uvcontext *nesvctx;
+-
+-      nesucq = to_nes_ucq(cq);
+-      nesvctx = to_nes_uctx(cq->context);
+-
+-      pthread_spin_lock(&nesucq->lock);
+-
+-      if (nesucq->is_armed) {
+-      /* don't arm again unless... */
+-              if ((nesucq->arm_sol) && (!solicited)) {
+-                      /* solicited changed from notify SE to notify next */
+-                      nes_arm_cq(nesucq, nesvctx, solicited);
+-              } else {
+-                      nesucq->skip_arm = 1;
+-                      nesucq->skip_sol &= solicited;
+-              }
+-      } else {
+-              nes_arm_cq(nesucq, nesvctx, solicited);
+-      }
+-
+-      pthread_spin_unlock(&nesucq->lock);
+-
+-      return 0;
+-}
+-
+-
+-/**
+- * nes_cq_event
+- */
+-void nes_cq_event(struct ibv_cq *cq)
+-{
+-      struct nes_ucq *nesucq;
+-
+-      nesucq = to_nes_ucq(cq);
+-
+-      pthread_spin_lock(&nesucq->lock);
+-
+-      if (nesucq->skip_arm) {
+-              struct nes_uvcontext *nesvctx;
+-              nesvctx = to_nes_uctx(cq->context);
+-              nes_arm_cq(nesucq, nesvctx, nesucq->skip_sol);
+-      } else {
+-              nesucq->is_armed = 0;
+-      }
+-
+-      pthread_spin_unlock(&nesucq->lock);
+-}
+-
+-
+-/**
+- * nes_ucreate_srq
+- */
+-struct ibv_srq *nes_ucreate_srq(struct ibv_pd *pd, struct ibv_srq_init_attr *attr)
+-{
+-      /* fprintf(stderr, PFX "%s\n", __FUNCTION__); */
+-      return (void *)-ENOSYS;
+-}
+-
+-
+-/**
+- * nes_umodify_srq
+- */
+-int nes_umodify_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr, int attr_mask)
+-{
+-      /* fprintf(stderr, PFX "%s\n", __FUNCTION__); */
+-      return -ENOSYS;
+-}
+-
+-
+-/**
+- * nes_udestroy_srq
+- */
+-int nes_udestroy_srq(struct ibv_srq *srq)
+-{
+-      /* fprintf(stderr, PFX "%s\n", __FUNCTION__); */
+-      return -ENOSYS;
+-}
+-
+-
+-/**
+- * nes_upost_srq_recv
+- */
+-int nes_upost_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr,
+-              struct ibv_recv_wr **bad_wr)
+-{
+-      /* fprintf(stderr, PFX "%s\n", __FUNCTION__); */
+-      return -ENOSYS;
+-}
+-
+-
+-/**
+- * nes_mmapped_qp
+- * will not invoke registration of memory reqion and will allow
+- * the kernel module to allocate big chunk of contigous memory
+- * for sq and rq... returns 1 if succeeds, 0 if fails..
+- */
+-static int nes_mmapped_qp(struct nes_uqp *nesuqp, struct ibv_pd *pd, struct ibv_qp_init_attr *attr,
+-              struct nes_ucreate_qp_resp *resp)
+-{
+-
+-      unsigned long mmap_offset;
+-      struct nes_ucreate_qp cmd;
+-      struct nes_uvcontext *nesvctx = to_nes_uctx(pd->context);
+-      int ret;
+-
+-      memset (&cmd, 0, sizeof(cmd) );
+-      cmd.user_qp_buffer = (__u64) ((uintptr_t) nesuqp);
+-
+-      /* fprintf(stderr, PFX "%s entering==>\n",__FUNCTION__); */
+-      ret = ibv_cmd_create_qp(pd, &nesuqp->ibv_qp, attr, &cmd.ibv_cmd, sizeof cmd,
+-              &resp->ibv_resp, sizeof (struct nes_ucreate_qp_resp) );
+-      if (ret)
+-              return 0;
+-      nesuqp->send_cq = to_nes_ucq(attr->send_cq);
+-      nesuqp->recv_cq = to_nes_ucq(attr->recv_cq);
+-      nesuqp->sq_db_index = resp->mmap_sq_db_index;
+-      nesuqp->rq_db_index = resp->mmap_rq_db_index;
+-      nesuqp->sq_size = resp->actual_sq_size;
+-      nesuqp->rq_size = resp->actual_rq_size;
+-
+-      /* Map the SQ/RQ buffers */
+-      mmap_offset = nesvctx->max_pds*page_size;
+-      mmap_offset += (((sizeof(struct nes_hw_qp_wqe) * nesvctx->wq_size) + page_size-1) &
+-                      (~(page_size-1)))*nesuqp->sq_db_index;
+-
+-      nesuqp->sq_vbase = mmap(NULL, (nesuqp->sq_size+nesuqp->rq_size) *
+-                      sizeof(struct nes_hw_qp_wqe), PROT_WRITE | PROT_READ,
+-                      MAP_SHARED, pd->context->cmd_fd, mmap_offset);
+-
+-
+-      if (nesuqp->sq_vbase == MAP_FAILED) {
+-              return 0;
+-      }
+-      nesuqp->rq_vbase = (struct nes_hw_qp_wqe *)(((char *)nesuqp->sq_vbase) +
+-                      (nesuqp->sq_size*sizeof(struct nes_hw_qp_wqe)));
+-      *((unsigned int *)nesuqp->sq_vbase) = 0;
+-      nesuqp->mapping = NES_QP_MMAP;
+-
+-      return 1;
+-}
+-
+-
+-/**
+- * nes_vmapped_qp
+- * invoke registration of memory reqion. This method is used
+- * when kernel can not allocate qp memory (contigous physical).
+- *
+- * returns 1 if succeeds, 0 if fails..
+- */
+-static int nes_vmapped_qp(struct nes_uqp *nesuqp, struct ibv_pd *pd, struct ibv_qp_init_attr *attr,
+-                        struct nes_ucreate_qp_resp *resp, int sqdepth, int rqdepth)
+-{
+-      struct nes_ucreate_qp cmd;
+-      struct nes_ureg_mr reg_mr_cmd;
+-      struct ib_uverbs_reg_mr_resp reg_mr_resp;
+-      int totalqpsize;
+-      int ret;
+-
+-      // fprintf(stderr, PFX "%s\n", __FUNCTION__);
+-      totalqpsize = (sqdepth + rqdepth) * sizeof (struct nes_hw_qp_wqe) ;
+-      nesuqp->sq_vbase = memalign(page_size, totalqpsize);
+-      if (!nesuqp->sq_vbase) {
+-      //      fprintf(stderr, PFX "CREATE_QP could not allocate mem of size %d\n", totalqpsize);
+-              return 0;
+-      }
+-      nesuqp->rq_vbase = (struct nes_hw_qp_wqe *) (((char *) nesuqp->sq_vbase) +
+-                         (nesuqp->sq_size * sizeof(struct nes_hw_qp_wqe)));
+-
+-      reg_mr_cmd.reg_type = IWNES_MEMREG_TYPE_QP;
+-
+-      //fprintf(stderr, PFX "qp_rq_vbase = %p qp_sq_vbase=%p reg_mr = %p\n",
+-      //              nesuqp->rq_vbase, nesuqp->sq_vbase, &nesuqp->mr);
+-
+-        ret = ibv_cmd_reg_mr(pd, (void *)nesuqp->sq_vbase,totalqpsize,
+-                           (uintptr_t)nesuqp->sq_vbase,
+-                           IBV_ACCESS_LOCAL_WRITE, &nesuqp->vmr,
+-                           &reg_mr_cmd.ibv_cmd, sizeof(reg_mr_cmd),
+-                           &reg_mr_resp, sizeof(reg_mr_resp));
+-        if (ret) {
+-                // fprintf(stderr, PFX "%s ibv_cmd_reg_mr failed (ret = %d).\n", __FUNCTION__, ret);
+-              free((void *) nesuqp->sq_vbase);
+-              return 0;
+-        }
+-      // So now the memory has been registered..
+-      memset (&cmd, 0, sizeof(cmd) );
+-      cmd.user_wqe_buffers = (__u64) ((uintptr_t) nesuqp->sq_vbase);
+-      cmd.user_qp_buffer = (__u64) ((uintptr_t) nesuqp);
+-      ret = ibv_cmd_create_qp(pd, &nesuqp->ibv_qp, attr, &cmd.ibv_cmd, sizeof cmd,
+-                              &resp->ibv_resp, sizeof (struct nes_ucreate_qp_resp) );
+-      if (ret) {
+-              ibv_cmd_dereg_mr(&nesuqp->vmr);
+-              free((void *)nesuqp->sq_vbase);
+-              return 0;
+-      }
+-      *((unsigned int *)nesuqp->rq_vbase) = 0;
+-      nesuqp->send_cq = to_nes_ucq(attr->send_cq);
+-      nesuqp->recv_cq = to_nes_ucq(attr->recv_cq);
+-      nesuqp->sq_db_index = resp->mmap_sq_db_index;
+-      nesuqp->rq_db_index = resp->mmap_rq_db_index;
+-      nesuqp->sq_size = resp->actual_sq_size;
+-      nesuqp->rq_size = resp->actual_rq_size;
+-      nesuqp->mapping = NES_QP_VMAP;
+-      return 1;
+-}
+-
+-
+-/**
+- * nes_qp_get_qdepth
+- * This routine will return the size of qdepth to be set for one
+- * of the qp (sq or rq)
+- */
+-static int nes_qp_get_qdepth(uint32_t qdepth, uint32_t maxsges)
+-{
+-      int     retdepth;
+-
+-      /* Do sanity check on the parameters */
+-      /* Should the following be 510 or 511 */
+-      if ((qdepth > 510) || (maxsges > 4) )
+-              return 0;
+-
+-      /* Do we need to do the following of */
+-      /* we can just return the actual value.. needed for alignment */
+-      if (qdepth < 32)
+-              retdepth = 32;
+-      else if (qdepth < 128)
+-              retdepth = 128;
+-      else retdepth = 512;
+-
+-      return retdepth;
+-}
+-
+-
+-/**
+- * nes_ucreate_qp
+- */
+-struct ibv_qp *nes_ucreate_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
+-{
+-      struct nes_ucreate_qp_resp resp;
+-      struct nes_uvcontext *nesvctx = to_nes_uctx(pd->context);
+-      struct nes_uqp *nesuqp;
+-      int     sqdepth, rqdepth;
+-      int      status = 1;
+-
+-      /* fprintf(stderr, PFX "%s\n", __FUNCTION__); */
+-
+-      /* Sanity check QP size before proceeding */
+-      sqdepth = nes_qp_get_qdepth(attr->cap.max_send_wr, attr->cap.max_send_sge);
+-      if (!sqdepth) {
+-              fprintf(stderr, PFX "%s Bad sq attr parameters max_send_wr=%d max_send_sge=%d\n",
+-                      __FUNCTION__, attr->cap.max_send_wr,attr->cap.max_send_sge);
+-              return NULL;
+-      }
+-
+-      rqdepth = nes_qp_get_qdepth(attr->cap.max_recv_wr, attr->cap.max_recv_sge);
+-      if (!rqdepth) {
+-              fprintf(stderr, PFX "%s Bad rq attr parameters max_recv_wr=%d max_recv_sge=%d\n",
+-                      __FUNCTION__, attr->cap.max_recv_wr,attr->cap.max_recv_sge);
+-              return NULL;
+-      }
+-
+-      nesuqp = memalign(1024, sizeof(*nesuqp));
+-      if (!nesuqp)
+-              return NULL;
+-      memset(nesuqp, 0, sizeof(*nesuqp));
+-
+-      if (pthread_spin_init(&nesuqp->lock, PTHREAD_PROCESS_PRIVATE)) {
+-              free(nesuqp);
+-              return NULL;
+-      }
+-
+-      /* Initially setting it up so we will know how much memory to allocate for mapping */
+-      /* also setting it up in attr.. If we do not want to modify the attr struct, we */
+-      /* can save the original values and restore them before return. */
+-      nesuqp->sq_size = attr->cap.max_send_wr = sqdepth;
+-      nesuqp->rq_size = attr->cap.max_recv_wr = rqdepth;
+-
+-      nesuqp->sq_sig_all = attr->sq_sig_all;
+-      if (nesvctx->virtwq) {
+-              status = nes_vmapped_qp(nesuqp,pd, attr,&resp,sqdepth,rqdepth);
+-      }else {
+-              status = nes_mmapped_qp(nesuqp,pd,attr, &resp);
+-      }
+-
+-      if (!status) {
+-              pthread_spin_destroy(&nesuqp->lock);
+-              free(nesuqp);
+-              return NULL;
+-      }
+-
+-
+-      /* The following are the common parameters no matter how the */
+-      /* sq and rq memory was mapped.. */
+-
+-      /* Account for LSMM, in theory, could get overrun if app preposts to SQ */
+-      nesuqp->sq_head = 1;
+-      nesuqp->sq_tail = 1;
+-      nesuqp->qp_id = resp.qp_id;
+-      nesuqp->nes_drv_opt = resp.nes_drv_opt;
+-      nesuqp->ibv_qp.qp_num = resp.qp_id;
+-      nesuqp->rdma0_msg = 1;
+-
+-      return &nesuqp->ibv_qp;
+-}
+-
+-
+-/**
+- * nes_uquery_qp
+- */
+-int nes_uquery_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
+-                int attr_mask, struct ibv_qp_init_attr *init_attr)
+-{
+-      struct ibv_query_qp cmd;
+-
+-      /* fprintf(stderr, PFX "nes_uquery_qp: calling ibv_cmd_query_qp\n"); */
+-
+-      return ibv_cmd_query_qp(qp, attr, attr_mask, init_attr, &cmd, sizeof(cmd));
+-}
+-
+-
+-/**
+- * nes_umodify_qp
+- */
+-int nes_umodify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
+-{
+-      struct ibv_modify_qp cmd = {};
+-      return ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof cmd);
+-}
+-
+-
+-/**
+- * nes_clean_cq
+- */
+-static void nes_clean_cq(struct nes_uqp *nesuqp, struct nes_ucq *nesucq)
+-{
+-      uint32_t cq_head;
+-      uint32_t lo;
+-      uint32_t hi;
+-      uint64_t u64temp;
+-
+-      pthread_spin_lock(&nesucq->lock);
+-
+-      cq_head = nesucq->head;
+-      while (le32toh(nesucq->cqes[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
+-              udma_from_device_barrier();
+-              lo = le32toh(nesucq->cqes[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+-              hi = le32toh(nesucq->cqes[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
+-              u64temp = (((uint64_t)hi) << 32) | ((uint64_t)lo);
+-              u64temp &= (~1023);
+-              if (u64temp == (uint64_t)(uintptr_t)nesuqp) {
+-                      /* Zero the context value so cqe will be ignored */
+-                      nesucq->cqes[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
+-                      nesucq->cqes[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
+-              }
+-
+-              if (++cq_head >= nesucq->size)
+-                      cq_head = 0;
+-      }
+-
+-      pthread_spin_unlock(&nesucq->lock);
+-}
+-
+-
+-/**
+- * nes_udestroy_qp
+- */
+-int nes_udestroy_qp(struct ibv_qp *qp)
+-{
+-      struct nes_uqp *nesuqp = to_nes_uqp(qp);
+-      int ret = 0;
+-
+-      // fprintf(stderr, PFX "%s addr&mr= %p  \n", __FUNCTION__, &nesuqp->mr );
+-
+-      if (nesuqp->mapping == NES_QP_VMAP) {
+-              ret = ibv_cmd_dereg_mr(&nesuqp->vmr);
+-              if (ret)
+-                      fprintf(stderr, PFX "%s dereg_mr FAILED\n", __FUNCTION__);
+-              free((void *)nesuqp->sq_vbase);
+-      }
+-
+-      if (nesuqp->mapping == NES_QP_MMAP) {
+-              munmap((void *)nesuqp->sq_vbase, (nesuqp->sq_size+nesuqp->rq_size) *
+-                      sizeof(struct nes_hw_qp_wqe));
+-      }
+-
+-      ret = ibv_cmd_destroy_qp(qp);
+-      if (ret) {
+-              fprintf(stderr, PFX "%s FAILED\n", __FUNCTION__);
+-              return ret;
+-      }
+-
+-      pthread_spin_destroy(&nesuqp->lock);
+-
+-      /* Clean any pending completions from the cq(s) */
+-      if (nesuqp->send_cq)
+-              nes_clean_cq(nesuqp, nesuqp->send_cq);
+-
+-      if ((nesuqp->recv_cq) && (nesuqp->recv_cq != nesuqp->send_cq))
+-              nes_clean_cq(nesuqp, nesuqp->recv_cq);
+-      free(nesuqp);
+-
+-      return 0;
+-}
+-
+-/**
+- * nes_upost_send
+- */
+-int nes_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
+-              struct ibv_send_wr **bad_wr)
+-{
+-      uint64_t u64temp;
+-      struct nes_uqp *nesuqp = to_nes_uqp(ib_qp);
+-      struct nes_upd *nesupd = to_nes_upd(ib_qp->pd);
+-      struct nes_hw_qp_wqe volatile *wqe;
+-      uint32_t head;
+-      uint32_t qsize = nesuqp->sq_size;
+-      uint32_t counter;
+-      uint32_t err = 0;
+-      uint32_t wqe_count = 0;
+-      uint32_t outstanding_wqes;
+-      uint32_t total_payload_length = 0;
+-      int sge_index;
+-
+-      pthread_spin_lock(&nesuqp->lock);
+-      udma_to_device_barrier();
+-
+-      head = nesuqp->sq_head;
+-      while (ib_wr) {
+-              if (unlikely(nesuqp->qperr)) {
+-                      err = -EINVAL;
+-                      break;
+-              }
+-
+-              /* Check for SQ overflow */
+-              outstanding_wqes = head + (2 * qsize) - nesuqp->sq_tail;
+-              outstanding_wqes &= qsize - 1;
+-              if (unlikely(outstanding_wqes == (qsize - 1))) {
+-                      err = -EINVAL;
+-                      break;
+-              }
+-              if (unlikely(ib_wr->num_sge > 4)) {
+-                      err = -EINVAL;
+-                      break;
+-              }
+-
+-              wqe = (struct nes_hw_qp_wqe *)&nesuqp->sq_vbase[head];
+-              /* fprintf(stderr, PFX "%s: QP%u: processing sq wqe at %p, head = %u.\n",
+-                              __FUNCTION__, nesuqp->qp_id, wqe, head);  */
+-              u64temp = (uint64_t) ib_wr->wr_id;
+-              wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX] = htole32((uint32_t)u64temp);
+-              wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX] = htole32((uint32_t)(u64temp>>32));
+-              u64temp = (uint64_t)((uintptr_t)nesuqp);
+-              wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX] = htole32((uint32_t)u64temp);
+-              wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX] = htole32((uint32_t)(u64temp>>32));
+-              udma_ordering_write_barrier();
+-              wqe->wqe_words[NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX] |= htole32(head);
+-
+-              switch (ib_wr->opcode) {
+-              case IBV_WR_SEND:
+-              case IBV_WR_SEND_WITH_IMM:
+-                      /* fprintf(stderr, PFX "%s: QP%u: processing sq wqe%u. Opcode = %s\n",
+-                                      __FUNCTION__, nesuqp->qp_id, head, "Send"); */
+-                      if (ib_wr->send_flags & IBV_SEND_SOLICITED) {
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = htole32(NES_IWARP_SQ_OP_SENDSE);
+-                      } else {
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = htole32(NES_IWARP_SQ_OP_SEND);
+-                      }
+-
+-                      if (ib_wr->send_flags & IBV_SEND_FENCE) {
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] |= htole32(NES_IWARP_SQ_WQE_READ_FENCE);
+-                      }
+-
+-                      /* if (ib_wr->send_flags & IBV_SEND_INLINE) {
+-                              fprintf(stderr, PFX "%s: Send SEND_INLINE, length=%d\n",
+-                                              __FUNCTION__, ib_wr->sg_list[0].length);
+-                      } */
+-                      if ((ib_wr->send_flags & IBV_SEND_INLINE) && (ib_wr->sg_list[0].length <= 64) &&
+-                              ((nesuqp->nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+-                              (ib_wr->num_sge == 1)) {
+-                              memcpy((void *)&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+-                                              (void *)(intptr_t)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = htole32(ib_wr->sg_list[0].length);
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] |= htole32(NES_IWARP_SQ_WQE_IMM_DATA);
+-                      } else {
+-                              total_payload_length = 0;
+-                              for (sge_index=0; sge_index < ib_wr->num_sge; sge_index++) {
+-                                      wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX+(sge_index*4)] =
+-                                                      htole32((uint32_t)ib_wr->sg_list[sge_index].addr);
+-                                      wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX+(sge_index*4)] =
+-                                                      htole32((uint32_t)(ib_wr->sg_list[sge_index].addr>>32));
+-                                      wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX+(sge_index*4)] =
+-                                                      htole32(ib_wr->sg_list[sge_index].length);
+-                                      wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX+(sge_index*4)] =
+-                                                      htole32(ib_wr->sg_list[sge_index].lkey);
+-                                      total_payload_length += ib_wr->sg_list[sge_index].length;
+-                              }
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
+-                                              htole32(total_payload_length);
+-                      }
+-
+-                      break;
+-              case IBV_WR_RDMA_WRITE:
+-              case IBV_WR_RDMA_WRITE_WITH_IMM:
+-                      /* fprintf(stderr, PFX "%s:QP%u: processing sq wqe%u. Opcode = %s\n",
+-                                      __FUNCTION__, nesuqp->qp_id, head, "Write"); */
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = htole32(NES_IWARP_SQ_OP_RDMAW);
+-
+-                      if (ib_wr->send_flags & IBV_SEND_FENCE) {
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] |= htole32(NES_IWARP_SQ_WQE_READ_FENCE);
+-                      }
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_STAG_IDX] = htole32(ib_wr->wr.rdma.rkey);
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX] = htole32(
+-                                      (uint32_t)ib_wr->wr.rdma.remote_addr);
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX] = htole32(
+-                                      (uint32_t)(ib_wr->wr.rdma.remote_addr>>32));
+-
+-                      /* if (ib_wr->send_flags & IBV_SEND_INLINE) {
+-                              fprintf(stderr, PFX "%s: Write SEND_INLINE, length=%d\n",
+-                                              __FUNCTION__, ib_wr->sg_list[0].length);
+-                      } */
+-                      if ((ib_wr->send_flags & IBV_SEND_INLINE) && (ib_wr->sg_list[0].length <= 64) &&
+-                              ((nesuqp->nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+-                              (ib_wr->num_sge == 1)) {
+-                              memcpy((void *)&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+-                                              (void *)(intptr_t)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = htole32(ib_wr->sg_list[0].length);
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] |= htole32(NES_IWARP_SQ_WQE_IMM_DATA);
+-                      } else {
+-                              total_payload_length = 0;
+-                              for (sge_index=0; sge_index < ib_wr->num_sge; sge_index++) {
+-                                      wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX+(sge_index*4)] = htole32(
+-                                                      (uint32_t)ib_wr->sg_list[sge_index].addr);
+-                                      wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX+(sge_index*4)] = htole32(
+-                                                      (uint32_t)(ib_wr->sg_list[sge_index].addr>>32));
+-                                      wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX+(sge_index*4)] = htole32(
+-                                                      ib_wr->sg_list[sge_index].length);
+-                                      wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX+(sge_index*4)] = htole32(
+-                                                      ib_wr->sg_list[sge_index].lkey);
+-                                      total_payload_length += ib_wr->sg_list[sge_index].length;
+-                              }
+-                              wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = htole32(total_payload_length);
+-                      }
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
+-                                      wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
+-                      break;
+-              case IBV_WR_RDMA_READ:
+-                      /* fprintf(stderr, PFX "%s:QP%u:processing sq wqe%u. Opcode = %s\n",
+-                                      __FUNCTION__, nesuqp->qp_id, head, "Read"); */
+-                      /* IWarp only supports 1 sge for RDMA reads */
+-                      if (ib_wr->num_sge > 1) {
+-                              err = -EINVAL;
+-                              break;
+-                      }
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = htole32(NES_IWARP_SQ_OP_RDMAR);
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX] = htole32((uint32_t)ib_wr->wr.rdma.remote_addr);
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX] = htole32((uint32_t)(ib_wr->wr.rdma.remote_addr>>32));
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_STAG_IDX] = htole32(ib_wr->wr.rdma.rkey);
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] = htole32(ib_wr->sg_list->length);
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = htole32((uint32_t)ib_wr->sg_list->addr);
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = htole32((uint32_t)(ib_wr->sg_list->addr>>32));
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = htole32(ib_wr->sg_list->lkey);
+-                      break;
+-              default:
+-                      /* error */
+-                      err = -EINVAL;
+-                      break;
+-              }
+-
+-                      if ((ib_wr->send_flags & IBV_SEND_SIGNALED) || nesuqp->sq_sig_all) {
+-                      /* fprintf(stderr, PFX "%s:sq wqe%u is signalled\n", __FUNCTION__, head); */
+-                      wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] |= htole32(NES_IWARP_SQ_WQE_SIGNALED_COMPL);
+-              }
+-              ib_wr = ib_wr->next;
+-              head++;
+-              wqe_count++;
+-              if (head >= qsize)
+-                      head = 0;
+-      }
+-
+-      nesuqp->sq_head = head;
+-      udma_to_device_barrier();
+-      while (wqe_count) {
+-              counter = (wqe_count<(uint32_t)255) ? wqe_count : 255;
+-              wqe_count -= counter;
+-              nesupd->udoorbell->wqe_alloc =  htole32((counter<<24) | 0x00800000 | nesuqp->qp_id);
+-      }
+-
+-      if (err)
+-              *bad_wr = ib_wr;
+-
+-      pthread_spin_unlock(&nesuqp->lock);
+-
+-      return err;
+-}
+-
+-/**
+- * nes_upost_recv
+- */
+-int nes_upost_recv(struct ibv_qp *ib_qp, struct ibv_recv_wr *ib_wr,
+-              struct ibv_recv_wr **bad_wr)
+-{
+-      uint64_t u64temp;
+-      struct nes_uqp *nesuqp = to_nes_uqp(ib_qp);
+-      struct nes_upd *nesupd = to_nes_upd(ib_qp->pd);
+-      struct nes_hw_qp_wqe *wqe;
+-      uint32_t head;
+-      uint32_t qsize = nesuqp->rq_size;
+-      uint32_t counter;
+-      uint32_t err = 0;
+-      uint32_t wqe_count = 0;
+-      uint32_t outstanding_wqes;
+-      uint32_t total_payload_length;
+-      int sge_index;
+-
+-      if (unlikely(ib_wr->num_sge > 4)) {
+-              *bad_wr = ib_wr;
+-              return -EINVAL;
+-      }
+-
+-      pthread_spin_lock(&nesuqp->lock);
+-      udma_to_device_barrier();
+-
+-      head = nesuqp->rq_head;
+-      while (ib_wr) {
+-              if (unlikely(nesuqp->qperr)) {
+-                      err = -EINVAL;
+-                      break;
+-              }
+-
+-              /* Check for RQ overflow */
+-              outstanding_wqes = head + (2 * qsize) - nesuqp->rq_tail;
+-              outstanding_wqes &= qsize - 1;
+-              if (unlikely(outstanding_wqes == (qsize - 1))) {
+-                      err = -EINVAL;
+-                      break;
+-              }
+-
+-              wqe = (struct nes_hw_qp_wqe *)&nesuqp->rq_vbase[head];
+-              u64temp = ib_wr->wr_id;
+-              wqe->wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX] =
+-                              htole32((uint32_t)u64temp);
+-              wqe->wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX] =
+-                              htole32((uint32_t)(u64temp >> 32));
+-              u64temp = (uint64_t)((uintptr_t)nesuqp);
+-              wqe->wqe_words[NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX] =
+-                              htole32((uint32_t)u64temp);
+-              wqe->wqe_words[NES_IWARP_RQ_WQE_COMP_CTX_HIGH_IDX] =
+-                              htole32((uint32_t)(u64temp >> 32));
+-              udma_ordering_write_barrier();
+-              wqe->wqe_words[NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX] |= htole32(head);
+-
+-              total_payload_length = 0;
+-              for (sge_index=0; sge_index < ib_wr->num_sge; sge_index++) {
+-                      wqe->wqe_words[NES_IWARP_RQ_WQE_FRAG0_LOW_IDX+(sge_index*4)] =
+-                                      htole32((uint32_t)ib_wr->sg_list[sge_index].addr);
+-                      wqe->wqe_words[NES_IWARP_RQ_WQE_FRAG0_HIGH_IDX+(sge_index*4)] =
+-                                      htole32((uint32_t)(ib_wr->sg_list[sge_index].addr>>32));
+-                      wqe->wqe_words[NES_IWARP_RQ_WQE_LENGTH0_IDX+(sge_index*4)] =
+-                                      htole32(ib_wr->sg_list[sge_index].length);
+-                      wqe->wqe_words[NES_IWARP_RQ_WQE_STAG0_IDX+(sge_index*4)] =
+-                                      htole32(ib_wr->sg_list[sge_index].lkey);
+-                      total_payload_length += ib_wr->sg_list[sge_index].length;
+-              }
+-              wqe->wqe_words[NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX] = htole32(total_payload_length);
+-
+-              ib_wr = ib_wr->next;
+-              head++;
+-              wqe_count++;
+-              if (head >= qsize)
+-                      head = 0;
+-      }
+-
+-      nesuqp->rq_head = head;
+-      udma_to_device_barrier();
+-      while (wqe_count) {
+-              counter = (wqe_count<(uint32_t)255) ? wqe_count : 255;
+-              wqe_count -= counter;
+-              nesupd->udoorbell->wqe_alloc = htole32((counter << 24) | nesuqp->qp_id);
+-      }
+-
+-      if (err)
+-              *bad_wr = ib_wr;
+-
+-      pthread_spin_unlock(&nesuqp->lock);
+-
+-      return err;
+-}
+-
+-
+-/**
+- * nes_ucreate_ah
+- */
+-struct ibv_ah *nes_ucreate_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
+-{
+-      /* fprintf(stderr, PFX "%s\n", __FUNCTION__); */
+-      return (void *)-ENOSYS;
+-}
+-
+-
+-/**
+- * nes_udestroy_ah
+- */
+-int nes_udestroy_ah(struct ibv_ah *ah)
+-{
+-      /* fprintf(stderr, PFX "%s\n", __FUNCTION__); */
+-      return -ENOSYS;
+-}
+-
+-
+-/**
+- * nes_uattach_mcast
+- */
+-int nes_uattach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
+-{
+-      int ret = 0;
+-      ret =  ibv_cmd_attach_mcast(qp, gid, lid);
+-      nes_debug(NES_DBG_UD, "%s ret=%d\n", __func__, ret);
+-      return ret;
+-}
+-
+-
+-/**
+- * nes_udetach_mcast
+- */
+-int nes_udetach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
+-{
+-      int ret = 0;
+-      ret = ibv_cmd_detach_mcast(qp, gid, lid);
+-      nes_debug(NES_DBG_UD, "%s ret=%d\n", __func__, ret);
+-      return ret;
+-}
+-
+-/**
+- * nes_async_event
+- */
+-void nes_async_event(struct ibv_context *context,
+-                   struct ibv_async_event *event)
+-{
+-      struct nes_uqp *nesuqp;
+-
+-      switch (event->event_type) {
+-      case IBV_EVENT_QP_FATAL:
+-      case IBV_EVENT_QP_ACCESS_ERR:
+-              /* Do not let application queue anything else to the qp */
+-              nesuqp = to_nes_uqp(event->element.qp);
+-              nesuqp->qperr = 1;
+-              break;
+-
+-      default:
+-              break;
+-      }
+-}
diff --git a/rdma-core-providers-update.patch b/rdma-core-providers-update.patch
new file mode 100644 (file)
index 0000000..d1952d1
--- /dev/null
@@ -0,0 +1,103 @@
+--- rdma-core-28.0/providers/cxgb3/iwch.c.orig 2020-02-12 21:21:08.971189494 +0100
++++ rdma-core-28.0/providers/cxgb3/iwch.c      2020-02-12 21:28:50.972019955 +0100
+@@ -75,6 +75,14 @@
+       {},
+ };
++static void iwch_free_context(struct ibv_context *ibctx)
++{
++      struct iwch_context *context = to_iwch_ctx(ibctx);
++
++      verbs_uninit_context(&context->ibv_ctx);
++      free(context);
++}
++
+ static const struct verbs_context_ops iwch_ctx_common_ops = {
+       .query_device = iwch_query_device,
+       .query_port = iwch_query_port,
+@@ -98,6 +106,7 @@
+       .detach_mcast = iwch_detach_mcast,
+       .post_srq_recv = iwch_post_srq_recv,
+       .req_notify_cq = iwch_arm_cq,
++      .free_context = iwch_free_context,
+ };
+ static const struct verbs_context_ops iwch_ctx_t3a_ops = {
+@@ -160,14 +169,6 @@
+       return NULL;
+ }
+-static void iwch_free_context(struct ibv_context *ibctx)
+-{
+-      struct iwch_context *context = to_iwch_ctx(ibctx);
+-
+-      verbs_uninit_context(&context->ibv_ctx);
+-      free(context);
+-}
+-
+ static void iwch_uninit_device(struct verbs_device *verbs_device)
+ {
+       struct iwch_device *dev = to_iwch_dev(&verbs_device->device);
+@@ -264,6 +265,5 @@
+       .alloc_device = iwch_device_alloc,
+       .uninit_device = iwch_uninit_device,
+       .alloc_context = iwch_alloc_context,
+-      .free_context = iwch_free_context,
+ };
+ PROVIDER_DRIVER(cxgb3, iwch_dev_ops);
+--- rdma-core-28.0/providers/nes/nes_umain.c.orig      2020-02-12 22:09:28.778813223 +0100
++++ rdma-core-28.0/providers/nes/nes_umain.c   2020-02-13 16:19:10.874608034 +0100
+@@ -63,6 +63,18 @@
+       {},
+ };
++/**
++ * nes_ufree_context
++ */
++static void nes_ufree_context(struct ibv_context *ibctx)
++{
++      struct nes_uvcontext *nesvctx = to_nes_uctx(ibctx);
++      nes_ufree_pd(&nesvctx->nesupd->ibv_pd);
++
++      verbs_uninit_context(&nesvctx->ibv_ctx);
++      free(nesvctx);
++}
++
+ static const struct verbs_context_ops nes_uctx_ops = {
+       .query_device = nes_uquery_device,
+       .query_port = nes_uquery_port,
+@@ -87,6 +99,7 @@
+       .attach_mcast = nes_uattach_mcast,
+       .detach_mcast = nes_udetach_mcast,
+-      .async_event = nes_async_event
++      .async_event = nes_async_event,
++      .free_context = nes_ufree_context,
+ };
+ static const struct verbs_context_ops nes_uctx_no_db_ops = {
+@@ -163,18 +176,6 @@
+ }
+-/**
+- * nes_ufree_context
+- */
+-static void nes_ufree_context(struct ibv_context *ibctx)
+-{
+-      struct nes_uvcontext *nesvctx = to_nes_uctx(ibctx);
+-      nes_ufree_pd(&nesvctx->nesupd->ibv_pd);
+-
+-      verbs_uninit_context(&nesvctx->ibv_ctx);
+-      free(nesvctx);
+-}
+-
+ static void nes_uninit_device(struct verbs_device *verbs_device)
+ {
+       struct nes_udevice *dev = to_nes_udev(&verbs_device->device);
+@@ -215,6 +216,5 @@
+       .alloc_device = nes_device_alloc,
+       .uninit_device = nes_uninit_device,
+       .alloc_context = nes_ualloc_context,
+-      .free_context = nes_ufree_context,
+ };
+ PROVIDER_DRIVER(nes, nes_udev_ops);
This page took 0.334841 seconds and 4 git commands to generate.