author		Navdeep Parhar <np@FreeBSD.org>	2020-10-05 16:39:38 +0000
committer	Navdeep Parhar <np@FreeBSD.org>	2020-10-05 16:39:38 +0000
commit		f4d391bd542adf0d4c21248d88c2b1e439955e40 (patch)
tree		1022572822e27448d8adba075e4b5ad4377630cf
parent		ac4df2c87767061e240bdc9d549399199f377130 (diff)
MFS r366438:

cxgbe(4): set up the firmware flowc for the tid before send_abort_rpl.

Approved by:	re@ (gjb@)
Sponsored by:	Chelsio Communications
Notes:
    svn path=/releng/12.2/; revision=366451
-rw-r--r--	sys/dev/cxgbe/tom/t4_listen.c	67
1 file changed, 41 insertions(+), 26 deletions(-)
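For readers who prefer the end state over the diff, the sketch below condenses the new control flow of send_reset_synqe() as it stands after this change. It is reconstructed from the hunks that follow; the CTR tracing and the allocation-failure panic are elided, so treat it as an illustration rather than the exact committed code. The key point is that the flowc work request for the tid is sent by the new helper send_flowc_wr_synqe() before the abort request is queued.

/*
 * Condensed view of the new abort path (reconstructed from the diff below;
 * tracing and the alloc_wrqe() failure handling are omitted for brevity).
 */
static void
send_reset_synqe(struct toedev *tod, struct synq_entry *synqe)
{
	struct adapter *sc = tod->tod_softc;
	struct wrqe *wr;
	struct cpl_abort_req *req;

	INP_WLOCK_ASSERT(synqe->lctx->inp);

	if (synqe->flags & TPF_ABORT_SHUTDOWN)
		return;		/* abort already in progress */
	synqe->flags |= TPF_ABORT_SHUTDOWN;

	/* The firmware must see a flowc for the tid before the abort. */
	if (!(synqe->flags & TPF_FLOWC_WR_SENT))
		send_flowc_wr_synqe(sc, synqe);

	/* The abort request now goes out in its own work request. */
	wr = alloc_wrqe(sizeof(*req), &sc->sge.ofld_txq[synqe->params.txq_idx]);
	req = wrtod(wr);
	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, synqe->tid);
	req->rsvd0 = 0;		/* don't have a snd_nxt */
	req->rsvd1 = 1;		/* no data sent yet */
	req->cmd = CPL_ABORT_SEND_RST;
	t4_l2t_send(sc, wr, &sc->l2t->l2tab[synqe->params.l2t_idx]);
}

Previously a single wrqe carried both the flowc and the abort_req. Splitting the flowc into its own helper also lets do_abort_req_synqe() (last hunk) send it when the peer initiates the abort, so the firmware has flow state for the tid before any abort-related CPL is sent.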
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index c5fcbf86ac27..1a51bcb806da 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -341,48 +341,32 @@ release_lctx(struct adapter *sc, struct listen_ctx *lctx)
}
static void
-send_reset_synqe(struct toedev *tod, struct synq_entry *synqe)
+send_flowc_wr_synqe(struct adapter *sc, struct synq_entry *synqe)
{
- struct adapter *sc = tod->tod_softc;
struct mbuf *m = synqe->syn;
struct ifnet *ifp = m->m_pkthdr.rcvif;
struct vi_info *vi = ifp->if_softc;
struct port_info *pi = vi->pi;
- struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx];
struct wrqe *wr;
struct fw_flowc_wr *flowc;
- struct cpl_abort_req *req;
- int flowclen;
struct sge_wrq *ofld_txq;
struct sge_ofld_rxq *ofld_rxq;
const int nparams = 6;
+ const int flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
const u_int pfvf = sc->pf << S_FW_VIID_PFN;
INP_WLOCK_ASSERT(synqe->lctx->inp);
-
- CTR5(KTR_CXGBE, "%s: synqe %p (0x%x), tid %d%s",
- __func__, synqe, synqe->flags, synqe->tid,
- synqe->flags & TPF_ABORT_SHUTDOWN ?
- " (abort already in progress)" : "");
- if (synqe->flags & TPF_ABORT_SHUTDOWN)
- return; /* abort already in progress */
- synqe->flags |= TPF_ABORT_SHUTDOWN;
+ MPASS((synqe->flags & TPF_FLOWC_WR_SENT) == 0);
ofld_txq = &sc->sge.ofld_txq[synqe->params.txq_idx];
ofld_rxq = &sc->sge.ofld_rxq[synqe->params.rxq_idx];
- /* The wrqe will have two WRs - a flowc followed by an abort_req */
- flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
-
- wr = alloc_wrqe(roundup2(flowclen, EQ_ESIZE) + sizeof(*req), ofld_txq);
+ wr = alloc_wrqe(roundup2(flowclen, 16), ofld_txq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
}
flowc = wrtod(wr);
- req = (void *)((caddr_t)flowc + roundup2(flowclen, EQ_ESIZE));
-
- /* First the flowc ... */
memset(flowc, 0, wr->wr_len);
flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
V_FW_FLOWC_WR_NPARAMS(nparams));
@@ -396,19 +380,47 @@ send_reset_synqe(struct toedev *tod, struct synq_entry *synqe)
flowc->mnemval[2].val = htobe32(pi->tx_chan);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(ofld_rxq->iq.abs_id);
- flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
- flowc->mnemval[4].val = htobe32(512);
- flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
- flowc->mnemval[5].val = htobe32(512);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+ flowc->mnemval[4].val = htobe32(512);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
+ flowc->mnemval[5].val = htobe32(512);
+
synqe->flags |= TPF_FLOWC_WR_SENT;
+ t4_wrq_tx(sc, wr);
+}
- /* ... then ABORT request */
+static void
+send_reset_synqe(struct toedev *tod, struct synq_entry *synqe)
+{
+ struct adapter *sc = tod->tod_softc;
+ struct wrqe *wr;
+ struct cpl_abort_req *req;
+
+ INP_WLOCK_ASSERT(synqe->lctx->inp);
+
+ CTR5(KTR_CXGBE, "%s: synqe %p (0x%x), tid %d%s",
+ __func__, synqe, synqe->flags, synqe->tid,
+ synqe->flags & TPF_ABORT_SHUTDOWN ?
+ " (abort already in progress)" : "");
+ if (synqe->flags & TPF_ABORT_SHUTDOWN)
+ return; /* abort already in progress */
+ synqe->flags |= TPF_ABORT_SHUTDOWN;
+
+ if (!(synqe->flags & TPF_FLOWC_WR_SENT))
+ send_flowc_wr_synqe(sc, synqe);
+
+ wr = alloc_wrqe(sizeof(*req), &sc->sge.ofld_txq[synqe->params.txq_idx]);
+ if (wr == NULL) {
+ /* XXX */
+ panic("%s: allocation failure.", __func__);
+ }
+ req = wrtod(wr);
INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, synqe->tid);
req->rsvd0 = 0; /* don't have a snd_nxt */
req->rsvd1 = 1; /* no data sent yet */
req->cmd = CPL_ABORT_SEND_RST;
- t4_l2t_send(sc, wr, e);
+ t4_l2t_send(sc, wr, &sc->l2t->l2tab[synqe->params.l2t_idx]);
}
static int
@@ -889,6 +901,9 @@ do_abort_req_synqe(struct sge_iq *iq, const struct rss_header *rss,
ofld_txq = &sc->sge.ofld_txq[synqe->params.txq_idx];
+ if (!(synqe->flags & TPF_FLOWC_WR_SENT))
+ send_flowc_wr_synqe(sc, synqe);
+
/*
* If we'd initiated an abort earlier the reply to it is responsible for
* cleaning up resources. Otherwise we tear everything down right here