Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/sge.c')
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/sge.c | 218
1 file changed, 170 insertions(+), 48 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index d22d728d4e5c..fab4c84a1da4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -203,6 +203,9 @@ enum {
RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
};
+static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
+#define MIN_NAPI_WORK 1
+
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
{
return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
@@ -521,9 +524,23 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
val = PIDX(q->pend_cred / 8);
if (!is_t4(adap->params.chip))
val |= DBTYPE(1);
+ val |= DBPRIO(1);
wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
- QID(q->cntxt_id) | val);
+
+ /* If we're on T4, use the old doorbell mechanism; otherwise
+ * use the new BAR2 mechanism.
+ */
+ if (is_t4(adap->params.chip)) {
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ val | QID(q->cntxt_id));
+ } else {
+ writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL);
+
+ /* This Write Memory Barrier will force the write to
+ * the User Doorbell area to be flushed.
+ */
+ wmb();
+ }
q->pend_cred &= 7;
}
}
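
A note on the credit arithmetic above: ring_fl_db() posts free-list credits to the hardware eight descriptors at a time (PIDX(q->pend_cred / 8)) and carries the remainder over in q->pend_cred &= 7. A minimal standalone sketch of that accounting, with the quantum of 8 inferred from the /8 and &= 7 pair:

#include <stdio.h>

/* Sketch of the free-list credit quantum used by ring_fl_db(): credits
 * are handed to hardware 8 descriptors at a time and the remainder is
 * carried over in pend_cred for the next doorbell.
 */
static unsigned int post_fl_credits(unsigned int *pend_cred)
{
	unsigned int bundles = *pend_cred / 8;	/* PIDX(q->pend_cred / 8) */

	*pend_cred &= 7;			/* q->pend_cred &= 7 */
	return bundles;
}

int main(void)
{
	unsigned int pend_cred = 29;
	unsigned int bundles = post_fl_credits(&pend_cred);

	/* 29 credits -> 3 bundles of 8 posted, 5 carried over */
	printf("posted %u bundles, %u credits pending\n", bundles, pend_cred);
	return 0;
}
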
@@ -833,13 +850,14 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*end = 0;
}
-/* This function copies 64 byte coalesced work request to
- * memory mapped BAR2 space(user space writes).
- * For coalesced WR SGE, fetches data from the FIFO instead of from Host.
+/* This function copies a tx_desc struct to memory mapped BAR2 space (user
+ * space writes). For a coalesced WR, the SGE fetches the data from its FIFO
+ * instead of from Host memory.
*/
-static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
+static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc)
{
- int count = 8;
+ int count = sizeof(*desc) / sizeof(u64);
+ u64 *src = (u64 *)desc;
while (count) {
writeq(*src, dst);
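
The hunk ends inside the copy loop. Judging from how count is initialized, the body presumably advances both pointers and counts down until the whole descriptor has gone out through writeq(); a sketch of the complete pattern (everything after the first writeq() is inferred, not shown in this hunk):

/* Sketch: push one tx_desc through the BAR2 window as
 * sizeof(*desc) / sizeof(u64) individual 64-bit MMIO writes.
 */
static void cxgb_pio_copy(u64 __iomem *dst, struct tx_desc *desc)
{
	int count = sizeof(*desc) / sizeof(u64);
	u64 *src = (u64 *)desc;

	while (count) {
		writeq(*src, dst);	/* 64-bit posted write into BAR2 */
		src++;
		dst++;
		count--;
	}
}
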
@@ -859,30 +877,63 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
- unsigned int *wr, index;
- unsigned long flags;
-
wmb(); /* write descriptors before telling HW */
- spin_lock_irqsave(&q->db_lock, flags);
- if (!q->db_disabled) {
- if (is_t4(adap->params.chip)) {
+
+ if (is_t4(adap->params.chip)) {
+ u32 val = PIDX(n);
+ unsigned long flags;
+
+ /* For T4 we need to participate in the Doorbell Recovery
+ * mechanism.
+ */
+ spin_lock_irqsave(&q->db_lock, flags);
+ if (!q->db_disabled)
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(n));
+ QID(q->cntxt_id) | val);
+ else
+ q->db_pidx_inc += n;
+ q->db_pidx = q->pidx;
+ spin_unlock_irqrestore(&q->db_lock, flags);
+ } else {
+ u32 val = PIDX_T5(n);
+
+ /* T4 and later chips share the same PIDX field offset within
+ * the doorbell, but T5 and later shrank the field in order to
+ * gain a bit for Doorbell Priority. The field was absurdly
+ * large in the first place (14 bits) so we just use the T5
+ * and later limits and warn if the PIDX Increment is too large.
+ */
+ WARN_ON(val & DBPRIO(1));
+
+ /* For T5 and later we use the Write-Combine mapped BAR2 User
+ * Doorbell mechanism. If we're only writing a single TX
+ * Descriptor and TX Write Combining hasn't been disabled, we
+ * can use the Write Combining Gather Buffer; otherwise we use
+ * the simple doorbell.
+ */
+ if (n == 1) {
+ int index = (q->pidx
+ ? (q->pidx - 1)
+ : (q->size - 1));
+
+ cxgb_pio_copy(adap->bar2 + q->udb + SGE_UDB_WCDOORBELL,
+ q->desc + index);
} else {
- if (n == 1) {
- index = q->pidx ? (q->pidx - 1) : (q->size - 1);
- wr = (unsigned int *)&q->desc[index];
- cxgb_pio_copy((u64 __iomem *)
- (adap->bar2 + q->udb + 64),
- (u64 *)wr);
- } else
- writel(n, adap->bar2 + q->udb + 8);
- wmb();
+ writel(val, adap->bar2 + q->udb + SGE_UDB_KDOORBELL);
}
- } else
- q->db_pidx_inc += n;
- q->db_pidx = q->pidx;
- spin_unlock_irqrestore(&q->db_lock, flags);
+
+ /* This Write Memory Barrier will force the write to the User
+ * Doorbell area to be flushed. This is needed to prevent
+ * writes on different CPUs for the same queue from hitting
+ * the adapter out of order. This is required when some Work
+ * Requests take the Write Combine Gather Buffer path (user
+ * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
+ * take the traditional path where we simply increment the
+ * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
+ * hardware DMA read the actual Work Request.
+ */
+ wmb();
+ }
}
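
When n == 1, the new code copies the descriptor that was just written, whose slot is the producer index minus one, wrapping to the last slot when q->pidx has already wrapped to 0. A standalone sketch of that ring-buffer arithmetic:

#include <assert.h>

/* Index of the most recently written descriptor in a ring of size
 * entries, given the producer index pidx (the next slot to fill).
 */
static int last_written(int pidx, int size)
{
	return pidx ? pidx - 1 : size - 1;
}

int main(void)
{
	assert(last_written(5, 1024) == 4);	/* ordinary case */
	assert(last_written(0, 1024) == 1023);	/* producer just wrapped */
	return 0;
}
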
/**
@@ -1916,16 +1967,40 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
unsigned int params;
struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
int work_done = process_responses(q, budget);
+ u32 val;
if (likely(work_done < budget)) {
+ int timer_index;
+
napi_complete(napi);
- params = q->next_intr_params;
- q->next_intr_params = q->intr_params;
+ timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
+
+ if (q->adaptive_rx) {
+ if (work_done > max(timer_pkt_quota[timer_index],
+ MIN_NAPI_WORK))
+ timer_index = (timer_index + 1);
+ else
+ timer_index = timer_index - 1;
+
+ timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
+ q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
+ V_QINTR_CNT_EN;
+ params = q->next_intr_params;
+ } else {
+ params = q->next_intr_params;
+ q->next_intr_params = q->intr_params;
+ }
} else
params = QINTR_TIMER_IDX(7);
- t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
- INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
+ val = CIDXINC(work_done) | SEINTARM(params);
+ if (is_t4(q->adap->params.chip)) {
+ t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS),
+ val | INGRESSQID((u32)q->cntxt_id));
+ } else {
+ writel(val, q->adap->bar2 + q->udb + SGE_UDB_GTS);
+ wmb();
+ }
return work_done;
}
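
With q->adaptive_rx set, each NAPI pass nudges the holdoff timer index up when the poll handled more packets than that index's quota (and at least MIN_NAPI_WORK), and back down otherwise, clamping to the valid SGE timer range. A standalone simulation of that feedback step, assuming SGE_TIMERREGS is 6 to match the six entries of timer_pkt_quota[]:

#include <stdio.h>

#define MIN_NAPI_WORK	1
#define SGE_TIMERREGS	6	/* assumed: matches timer_pkt_quota[] */

static const int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};

static int max_int(int a, int b) { return a > b ? a : b; }

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Sketch of the adaptive step in napi_rx_handler(): busier polls push
 * the interrupt holdoff timer index up (longer coalescing), quieter
 * polls pull it back down.
 */
static int next_timer_index(int timer_index, int work_done)
{
	if (work_done > max_int(timer_pkt_quota[timer_index], MIN_NAPI_WORK))
		timer_index++;
	else
		timer_index--;
	return clamp_int(timer_index, 0, SGE_TIMERREGS - 1);
}

int main(void)
{
	int idx = 0, polls[] = {16, 16, 16, 0, 0};

	/* three busy polls ratchet the index up, two idle ones back off */
	for (int i = 0; i < 5; i++) {
		idx = next_timer_index(idx, polls[i]);
		printf("work_done=%2d -> timer_index=%d\n", polls[i], idx);
	}
	return 0;
}
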
@@ -1949,6 +2024,7 @@ static unsigned int process_intrq(struct adapter *adap)
unsigned int credits;
const struct rsp_ctrl *rc;
struct sge_rspq *q = &adap->sge.intrq;
+ u32 val;
spin_lock(&adap->sge.intrq_lock);
for (credits = 0; ; credits++) {
@@ -1967,8 +2043,14 @@ static unsigned int process_intrq(struct adapter *adap)
rspq_next(q);
}
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
- INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
+ val = CIDXINC(credits) | SEINTARM(q->intr_params);
+ if (is_t4(adap->params.chip)) {
+ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
+ val | INGRESSQID(q->cntxt_id));
+ } else {
+ writel(val, adap->bar2 + q->udb + SGE_UDB_GTS);
+ wmb();
+ }
spin_unlock(&adap->sge.intrq_lock);
return credits;
}
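
napi_rx_handler() and process_intrq() now end with the same T4-versus-T5 GTS update. A hypothetical helper, not part of this patch, could factor out that dispatch; all identifiers below are the ones already used at the two call sites:

static void cxgb4_write_gts(struct adapter *adap, struct sge_rspq *q, u32 val)
{
	if (is_t4(adap->params.chip)) {
		/* T4: Going To Sleep via the PF register window */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     val | INGRESSQID((u32)q->cntxt_id));
	} else {
		/* T5+: GTS via the queue's BAR2 User Doorbell region */
		writel(val, adap->bar2 + q->udb + SGE_UDB_GTS);
		wmb();	/* flush the User Doorbell write */
	}
}
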
@@ -2149,6 +2231,51 @@ static void sge_tx_timer_cb(unsigned long data)
mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
}
+/**
+ * udb_address - return the BAR2 User Doorbell address for a Queue
+ * @adap: the adapter
+ * @cntxt_id: the Queue Context ID
+ * @qpp: Queues Per Page (for all PFs)
+ *
+ * Returns the BAR2 address of the user Doorbell associated with the
+ * indicated Queue Context ID. Note that this is only applicable
+ * for T5 and later.
+ */
+static u64 udb_address(struct adapter *adap, unsigned int cntxt_id,
+ unsigned int qpp)
+{
+ u64 udb;
+ unsigned int s_qpp;
+ unsigned short udb_density;
+ unsigned long qpshift;
+ int page;
+
+ BUG_ON(is_t4(adap->params.chip));
+
+ s_qpp = (QUEUESPERPAGEPF0 +
+ (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn);
+ udb_density = 1 << ((qpp >> s_qpp) & QUEUESPERPAGEPF0_MASK);
+ qpshift = PAGE_SHIFT - ilog2(udb_density);
+ udb = (u64)cntxt_id << qpshift;
+ udb &= PAGE_MASK;
+ page = udb / PAGE_SIZE;
+ udb += (cntxt_id - (page * udb_density)) * SGE_UDB_SIZE;
+
+ return udb;
+}
+
+static u64 udb_address_eq(struct adapter *adap, unsigned int cntxt_id)
+{
+ return udb_address(adap, cntxt_id,
+ t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
+}
+
+static u64 udb_address_iq(struct adapter *adap, unsigned int cntxt_id)
+{
+ return udb_address(adap, cntxt_id,
+ t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
+}
+
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
struct net_device *dev, int intr_idx,
struct sge_fl *fl, rspq_handler_t hnd)
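
The arithmetic in udb_address() splits each BAR2 page among udb_density doorbell regions of SGE_UDB_SIZE bytes. A standalone worked example, assuming 4 KB pages and SGE_UDB_SIZE == 128 (the literal the old init_txq() code in the last hunk used):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12		/* assumed 4 KB pages */
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define SGE_UDB_SIZE	128		/* matches the literal in old init_txq() */

/* Userspace restatement of udb_address(): qpshift spreads Queue Context
 * IDs across pages, then the within-page slot is the ID's remainder
 * times the per-queue doorbell stride.
 */
static uint64_t udb_address(unsigned int cntxt_id, unsigned int udb_density)
{
	unsigned int qpshift = PAGE_SHIFT;
	uint64_t udb;
	unsigned int page;

	/* qpshift = PAGE_SHIFT - ilog2(udb_density) */
	for (unsigned int d = udb_density; d > 1; d >>= 1)
		qpshift--;

	udb = ((uint64_t)cntxt_id << qpshift) & PAGE_MASK;
	page = udb / PAGE_SIZE;
	return udb + (cntxt_id - page * udb_density) * SGE_UDB_SIZE;
}

int main(void)
{
	/* density 1: each queue gets its own page, offset 0 */
	printf("0x%llx\n", (unsigned long long)udb_address(5, 1));	/* 0x5000 */
	/* density 2: queues 6 and 7 share page 3, SGE_UDB_SIZE apart */
	printf("0x%llx\n", (unsigned long long)udb_address(6, 2));	/* 0x3000 */
	printf("0x%llx\n", (unsigned long long)udb_address(7, 2));	/* 0x3080 */
	return 0;
}
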
@@ -2214,6 +2341,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->next_intr_params = iq->intr_params;
iq->cntxt_id = ntohs(c.iqid);
iq->abs_id = ntohs(c.physiqid);
+ if (!is_t4(adap->params.chip))
+ iq->udb = udb_address_iq(adap, iq->cntxt_id);
iq->size--; /* subtract status entry */
iq->netdev = dev;
iq->handler = hnd;
@@ -2229,6 +2358,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->pidx = fl->cidx = 0;
fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
+
+ /* Note, we must initialize the Free List User Doorbell
+ * address before refilling the Free List!
+ */
+ if (!is_t4(adap->params.chip))
+ fl->udb = udb_address_eq(adap, fl->cntxt_id);
refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
}
return 0;
@@ -2254,21 +2389,8 @@ err:
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
q->cntxt_id = id;
- if (!is_t4(adap->params.chip)) {
- unsigned int s_qpp;
- unsigned short udb_density;
- unsigned long qpshift;
- int page;
-
- s_qpp = QUEUESPERPAGEPF1 * adap->fn;
- udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
- SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
- qpshift = PAGE_SHIFT - ilog2(udb_density);
- q->udb = q->cntxt_id << qpshift;
- q->udb &= PAGE_MASK;
- page = q->udb / PAGE_SIZE;
- q->udb += (q->cntxt_id - (page * udb_density)) * 128;
- }
+ if (!is_t4(adap->params.chip))
+ q->udb = udb_address_eq(adap, q->cntxt_id);
q->in_use = 0;
q->cidx = q->pidx = 0;
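
This refactor replaces the open-coded computation with one call to udb_address_eq(), which also swaps the bare 128 for SGE_UDB_SIZE. The removed block computed s_qpp as QUEUESPERPAGEPF1 * adap->fn, while udb_address() uses QUEUESPERPAGEPF0 + (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * adap->fn; the two agree whenever QUEUESPERPAGEPF0 is 0, which this sketch assumes (the field values below are illustrative, not taken from the driver headers):

#include <assert.h>

#define QUEUESPERPAGEPF0 0	/* assumption for this sketch */
#define QUEUESPERPAGEPF1 4	/* assumption for this sketch */

int main(void)
{
	/* Old init_txq() shift vs. new udb_address() shift, per PF */
	for (unsigned int fn = 0; fn < 8; fn++)
		assert(QUEUESPERPAGEPF1 * fn ==
		       QUEUESPERPAGEPF0 +
		       (QUEUESPERPAGEPF1 - QUEUESPERPAGEPF0) * fn);
	return 0;
}
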