Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  1048
1 file changed, 880 insertions, 168 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d1c093dcb054..74fbf9ea7bd8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -41,6 +41,7 @@
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -63,7 +64,6 @@
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
-
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
@@ -290,6 +290,8 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
* General service functions
****************************************************************************/
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
+
static void __storm_memset_dma_mapping(struct bnx2x *bp,
u32 addr, dma_addr_t mapping)
{
@@ -523,6 +525,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
* as long as this code is called both from syscall context and
* from ndo_set_rx_mode() flow that may be called from BH.
*/
+
spin_lock_bh(&bp->dmae_lock);
/* reset completion */
@@ -551,7 +554,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
}
unlock:
+
spin_unlock_bh(&bp->dmae_lock);
+
return rc;
}
@@ -646,119 +651,98 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
+enum storms {
+ XSTORM,
+ TSTORM,
+ CSTORM,
+ USTORM,
+ MAX_STORMS
+};
+
+#define STORMS_NUM 4
+#define REGS_IN_ENTRY 4
+
+static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
+ enum storms storm,
+ int entry)
+{
+ switch (storm) {
+ case XSTORM:
+ return XSTORM_ASSERT_LIST_OFFSET(entry);
+ case TSTORM:
+ return TSTORM_ASSERT_LIST_OFFSET(entry);
+ case CSTORM:
+ return CSTORM_ASSERT_LIST_OFFSET(entry);
+ case USTORM:
+ return USTORM_ASSERT_LIST_OFFSET(entry);
+ case MAX_STORMS:
+ default:
+ BNX2X_ERR("unknown storm\n");
+ }
+ return -EINVAL;
+}
+
static int bnx2x_mc_assert(struct bnx2x *bp)
{
char last_idx;
- int i, rc = 0;
- u32 row0, row1, row2, row3;
-
- /* XSTORM */
- last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_INDEX_OFFSET);
- if (last_idx)
- BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
- /* print the asserts */
- for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
- row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_OFFSET(i));
- row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_OFFSET(i) + 4);
- row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_OFFSET(i) + 8);
- row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
- XSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
- if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, row3, row2, row1, row0);
- rc++;
- } else {
- break;
- }
- }
-
- /* TSTORM */
- last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_INDEX_OFFSET);
- if (last_idx)
- BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
- /* print the asserts */
- for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
- row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_OFFSET(i));
- row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_OFFSET(i) + 4);
- row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_OFFSET(i) + 8);
- row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
- if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, row3, row2, row1, row0);
- rc++;
- } else {
- break;
- }
- }
+ int i, j, rc = 0;
+ enum storms storm;
+ u32 regs[REGS_IN_ENTRY];
+ u32 bar_storm_intmem[STORMS_NUM] = {
+ BAR_XSTRORM_INTMEM,
+ BAR_TSTRORM_INTMEM,
+ BAR_CSTRORM_INTMEM,
+ BAR_USTRORM_INTMEM
+ };
+ u32 storm_assert_list_index[STORMS_NUM] = {
+ XSTORM_ASSERT_LIST_INDEX_OFFSET,
+ TSTORM_ASSERT_LIST_INDEX_OFFSET,
+ CSTORM_ASSERT_LIST_INDEX_OFFSET,
+ USTORM_ASSERT_LIST_INDEX_OFFSET
+ };
+ char *storms_string[STORMS_NUM] = {
+ "XSTORM",
+ "TSTORM",
+ "CSTORM",
+ "USTORM"
+ };
- /* CSTORM */
- last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_INDEX_OFFSET);
- if (last_idx)
- BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
- /* print the asserts */
- for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
- row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_OFFSET(i));
- row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_OFFSET(i) + 4);
- row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_OFFSET(i) + 8);
- row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_ASSERT_LIST_OFFSET(i) + 12);
-
- if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, row3, row2, row1, row0);
- rc++;
- } else {
- break;
+ for (storm = XSTORM; storm < MAX_STORMS; storm++) {
+ last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
+ storm_assert_list_index[storm]);
+ if (last_idx)
+ BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
+ storms_string[storm], last_idx);
+
+ /* print the asserts */
+ for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+ /* read a single assert entry */
+ for (j = 0; j < REGS_IN_ENTRY; j++)
+ regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
+ bnx2x_get_assert_list_entry(bp,
+ storm,
+ i) +
+ sizeof(u32) * j);
+
+ /* log entry if it contains a valid assert */
+ if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+ BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ storms_string[storm], i, regs[3],
+ regs[2], regs[1], regs[0]);
+ rc++;
+ } else {
+ break;
+ }
}
}
- /* USTORM */
- last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_INDEX_OFFSET);
- if (last_idx)
- BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
-
- /* print the asserts */
- for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
-
- row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_OFFSET(i));
- row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_OFFSET(i) + 4);
- row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_OFFSET(i) + 8);
- row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
- USTORM_ASSERT_LIST_OFFSET(i) + 12);
-
- if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i, row3, row2, row1, row0);
- rc++;
- } else {
- break;
- }
- }
+ BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
+ CHIP_IS_E1(bp) ? "everest1" :
+ CHIP_IS_E1H(bp) ? "everest1h" :
+ CHIP_IS_E2(bp) ? "everest2" : "everest3",
+ BCM_5710_FW_MAJOR_VERSION,
+ BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION);
return rc;
}
@@ -983,6 +967,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
u32 *sb_data_p;
struct bnx2x_fp_txdata txdata;
+ if (!bp->fp)
+ break;
+
+ if (!fp->rx_cons_sb)
+ continue;
+
/* Rx */
BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
@@ -995,7 +985,14 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
/* Tx */
for_each_cos_in_tx_queue(fp, cos)
{
+ if (!fp->txdata_ptr[cos])
+ break;
+
txdata = *fp->txdata_ptr[cos];
+
+ if (!txdata.tx_cons_sb)
+ continue;
+
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -1097,6 +1094,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
for_each_valid_rx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ if (!bp->fp)
+ break;
+
+ if (!fp->rx_cons_sb)
+ continue;
+
start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
for (j = start; j != end; j = RX_BD(j + 1)) {
@@ -1130,9 +1133,19 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
/* Tx */
for_each_valid_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ if (!bp->fp)
+ break;
+
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
+ if (!fp->txdata_ptr[cos])
+ break;
+
+ if (!txdata->tx_cons_sb)
+ continue;
+
start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
for (j = start; j != end; j = TX_BD(j + 1)) {
@@ -2071,8 +2084,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
else
value = 0;
- DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
-
return value;
}
@@ -2894,6 +2905,57 @@ static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
}
}
+static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
+{
+ struct bnx2x_func_switch_update_params *switch_update_params;
+ struct bnx2x_func_state_params func_params;
+
+ memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
+ switch_update_params = &func_params.params.switch_update;
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+ if (IS_MF_UFP(bp)) {
+ int func = BP_ABS_FUNC(bp);
+ u32 val;
+
+ /* Re-learn the S-tag from shmem */
+ val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+ FUNC_MF_CFG_E1HOV_TAG_MASK;
+ if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+ bp->mf_ov = val;
+ } else {
+ BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
+ goto fail;
+ }
+
+ /* Configure new S-tag in LLH */
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
+ bp->mf_ov);
+
+ /* Send Ramrod to update FW of change */
+ __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
+ &switch_update_params->changes);
+ switch_update_params->vlan = bp->mf_ov;
+
+ if (bnx2x_func_state_change(bp, &func_params) < 0) {
+ BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
+ bp->mf_ov);
+ goto fail;
+ }
+
+ DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+
+ return;
+ }
+
+ /* not supported by SW yet */
+fail:
+ bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
+}
+
static void bnx2x_pmf_update(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@@ -3286,7 +3348,8 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
{
int port = BP_PORT(bp);
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+ if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
/* Tx queue should be only re-enabled */
netif_tx_wake_all_queues(bp->dev);
@@ -3641,14 +3704,30 @@ out:
ethver, iscsiver, fcoever);
}
-static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
+static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
{
- DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
+ u32 cmd_ok, cmd_fail;
- if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+ /* sanity */
+ if (event & DRV_STATUS_DCC_EVENT_MASK &&
+ event & DRV_STATUS_OEM_EVENT_MASK) {
+ BNX2X_ERR("Received simultaneous events %08x\n", event);
+ return;
+ }
- /*
- * This is the only place besides the function initialization
+ if (event & DRV_STATUS_DCC_EVENT_MASK) {
+ cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
+ cmd_ok = DRV_MSG_CODE_DCC_OK;
+ } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
+ cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
+ cmd_ok = DRV_MSG_CODE_OEM_OK;
+ }
+
+ DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
+
+ if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
+ DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
+ /* This is the only place besides the function initialization
* where the bp->flags can change so it is done without any
* locks
*/
@@ -3663,18 +3742,22 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
bnx2x_e1h_enable(bp);
}
- dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+ event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
+ DRV_STATUS_OEM_DISABLE_ENABLE_PF);
}
- if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+
+ if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
+ DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
bnx2x_config_mf_bw(bp);
- dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+ event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
+ DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
}
/* Report results to MCP */
- if (dcc_event)
- bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
+ if (event)
+ bnx2x_fw_command(bp, cmd_fail, 0);
else
- bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
+ bnx2x_fw_command(bp, cmd_ok, 0);
}
/* must be called under the spq lock */
@@ -4156,9 +4239,12 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
func_mf_config[BP_ABS_FUNC(bp)].config);
val = SHMEM_RD(bp,
func_mb[BP_FW_MB_IDX(bp)].drv_status);
- if (val & DRV_STATUS_DCC_EVENT_MASK)
- bnx2x_dcc_event(bp,
- (val & DRV_STATUS_DCC_EVENT_MASK));
+
+ if (val & (DRV_STATUS_DCC_EVENT_MASK |
+ DRV_STATUS_OEM_EVENT_MASK))
+ bnx2x_oem_event(bp,
+ (val & (DRV_STATUS_DCC_EVENT_MASK |
+ DRV_STATUS_OEM_EVENT_MASK)));
if (val & DRV_STATUS_SET_MF_BW)
bnx2x_set_mf_bw(bp);
@@ -4184,6 +4270,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
val & DRV_STATUS_AFEX_EVENT_MASK);
if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
bnx2x_handle_eee_event(bp);
+
+ if (val & DRV_STATUS_OEM_UPDATE_SVID)
+ bnx2x_handle_update_svid_cmd(bp);
+
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
@@ -4678,7 +4768,7 @@ static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
for (i = 0; sig; i++) {
cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- res |= true; /* Each bit is real error! */
+ res = true; /* Each bit is real error! */
if (print) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
@@ -4757,21 +4847,21 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
_print_next_block((*par_num)++,
"MCP ROM");
*global = true;
- res |= true;
+ res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
if (print)
_print_next_block((*par_num)++,
"MCP UMP RX");
*global = true;
- res |= true;
+ res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
if (print)
_print_next_block((*par_num)++,
"MCP UMP TX");
*global = true;
- res |= true;
+ res = true;
break;
case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
if (print)
@@ -4803,7 +4893,7 @@ static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
for (i = 0; sig; i++) {
cur_bit = (0x1UL << i);
if (sig & cur_bit) {
- res |= true; /* Each bit is real error! */
+ res = true; /* Each bit is real error! */
if (print) {
switch (cur_bit) {
case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
@@ -5452,6 +5542,14 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
goto next_spqe;
+
+ case EVENT_RING_OPCODE_SET_TIMESYNC:
+ DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
+ "got set_timesync ramrod completion\n");
+ if (f_obj->complete_cmd(bp, f_obj,
+ BNX2X_F_CMD_SET_TIMESYNC))
+ break;
+ goto next_spqe;
}
switch (opcode | bp->state) {
@@ -6102,7 +6200,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
}
/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
- if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
+ if (rx_mode != BNX2X_RX_MODE_NONE) {
__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
@@ -7662,7 +7760,11 @@ static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
/* Function parameters */
- switch_update_params->suspend = suspend;
+ __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
+ &switch_update_params->changes);
+ if (suspend)
+ __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
+ &switch_update_params->changes);
rc = bnx2x_func_state_change(bp, &func_params);
@@ -7907,8 +8009,11 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
if (IS_MF(bp)) {
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
- REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
+ if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
+ bp->mf_ov);
+ }
}
bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
@@ -8300,13 +8405,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
- if (is_zero_ether_addr(bp->dev->dev_addr) &&
- (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
- DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
- "Ignoring Zero MAC for STORAGE SD mode\n");
- return 0;
- }
-
if (IS_PF(bp)) {
unsigned long ramrod_flags = 0;
@@ -9025,7 +9123,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
struct bnx2x_func_state_params func_params = {NULL};
DP(NETIF_MSG_IFDOWN,
- "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+ "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
func_params.f_obj = &bp->func_obj;
__set_bit(RAMROD_DRV_CLR_ONLY,
@@ -9044,6 +9142,48 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
return 0;
}
+static void bnx2x_disable_ptp(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+
+ /* Disable sending PTP packets to host */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+ NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+ /* Reset PTP event detection rules */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+ /* Disable the PTP feature */
+ REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+ NIG_REG_P0_PTP_EN, 0x0);
+}
+
+/* Called during unload, to stop PTP-related stuff */
+void bnx2x_stop_ptp(struct bnx2x *bp)
+{
+ /* Cancel PTP work queue. Should be done after the Tx queues are
+ * drained to prevent additional scheduling.
+ */
+ cancel_work_sync(&bp->ptp_task);
+
+ if (bp->ptp_tx_skb) {
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
+ }
+
+ /* Disable PTP in HW */
+ bnx2x_disable_ptp(bp);
+
+ DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
+}
+
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
{
int port = BP_PORT(bp);
@@ -9162,6 +9302,13 @@ unload_error:
#endif
}
+ /* stop_ptp should be after the Tx queues are drained to prevent
+ * scheduling to the cancelled PTP work queue. It should also be after
+ * function stop ramrod is sent, since as part of this ramrod FW access
+ * PTP registers.
+ */
+ bnx2x_stop_ptp(bp);
+
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
/* Delete all NAPI objects */
@@ -11283,15 +11430,14 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
dev_info.port_hw_config[port].
fcoe_wwn_node_name_lower);
} else if (!IS_MF_SD(bp)) {
- /*
- * Read the WWN info only if the FCoE feature is enabled for
+ /* Read the WWN info only if the FCoE feature is enabled for
* this function.
*/
- if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
+ if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
+ bnx2x_get_ext_wwn_info(bp, func);
+ } else {
+ if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
bnx2x_get_ext_wwn_info(bp, func);
-
- } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
- bnx2x_get_ext_wwn_info(bp, func);
}
BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
@@ -11329,7 +11475,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
* In non SD mode features configuration comes from struct
* func_ext_config.
*/
- if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
+ if (!IS_MF_SD(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
@@ -11448,7 +11594,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
- if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
+ if (!is_valid_ether_addr(bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
"bad Ethernet MAC address configuration: %pM\n"
"change it manually before bringing up the appropriate network interface\n",
@@ -11478,11 +11624,27 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
return cfg;
}
+static void validate_set_si_mode(struct bnx2x *bp)
+{
+ u8 func = BP_ABS_FUNC(bp);
+ u32 val;
+
+ val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+
+ /* check for legal mac (upper bytes) */
+ if (val != 0xffff) {
+ bp->mf_mode = MULTI_FUNCTION_SI;
+ bp->mf_config[BP_VN(bp)] =
+ MF_CFG_RD(bp, func_mf_config[func].config);
+ } else
+ BNX2X_DEV_INFO("illegal MAC address for SI\n");
+}
+
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
int vn;
- u32 val = 0;
+ u32 val = 0, val2 = 0;
int rc = 0;
bnx2x_get_common_hwinfo(bp);
@@ -11562,6 +11724,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = 0;
bp->mf_mode = 0;
+ bp->mf_sub_mode = 0;
vn = BP_VN(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
@@ -11591,15 +11754,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
switch (val) {
case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
- val = MF_CFG_RD(bp, func_mf_config[func].
- mac_upper);
- /* check for legal mac (upper bytes)*/
- if (val != 0xffff) {
- bp->mf_mode = MULTI_FUNCTION_SI;
- bp->mf_config[vn] = MF_CFG_RD(bp,
- func_mf_config[func].config);
- } else
- BNX2X_DEV_INFO("illegal MAC address for SI\n");
+ validate_set_si_mode(bp);
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
if ((!CHIP_IS_E1x(bp)) &&
@@ -11627,9 +11782,33 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
+ bp->mf_mode = MULTI_FUNCTION_SD;
+ bp->mf_sub_mode = SUB_MF_MODE_UFP;
+ bp->mf_config[vn] =
+ MF_CFG_RD(bp,
+ func_mf_config[func].config);
+ break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
bp->mf_config[vn] = 0;
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
+ val2 = SHMEM_RD(bp,
+ dev_info.shared_hw_config.config_3);
+ val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
+ switch (val2) {
+ case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
+ validate_set_si_mode(bp);
+ bp->mf_sub_mode =
+ SUB_MF_MODE_NPAR1_DOT_5;
+ break;
+ default:
+ /* Unknown configuration */
+ bp->mf_config[vn] = 0;
+ BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
+ val);
+ }
+ break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
@@ -11650,6 +11829,11 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
func, bp->mf_ov, bp->mf_ov);
+ } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
+ dev_err(&bp->pdev->dev,
+ "Unexpected - no valid MF OV for func %d in UFP mode\n",
+ func);
+ bp->path_has_ovlan = true;
} else {
dev_err(&bp->pdev->dev,
"No valid MF OV for func %d, aborting\n",
@@ -11898,9 +12082,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
bp->disable_tpa = disable_tpa;
- bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
+ bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
/* Reduce memory usage in kdump environment by disabling TPA */
- bp->disable_tpa |= reset_devices;
+ bp->disable_tpa |= is_kdump_kernel();
/* Set TPA flags */
if (bp->disable_tpa) {
@@ -11918,7 +12102,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
- bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
+ bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
if (IS_VF(bp))
bp->rx_ring_size = MAX_RX_AVAIL;
@@ -11976,6 +12160,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->dump_preset_idx = 1;
+ if (CHIP_IS_E3B0(bp))
+ bp->flags |= PTP_SUPPORTED;
+
return rc;
}
@@ -12235,7 +12422,7 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
bp->rx_mode = rx_mode;
/* handle ISCSI SD mode */
- if (IS_MF_ISCSI_SD(bp))
+ if (IS_MF_ISCSI_ONLY(bp))
bp->rx_mode = BNX2X_RX_MODE_NONE;
/* Schedule the rx_mode command */
@@ -12308,13 +12495,17 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct bnx2x *bp = netdev_priv(dev);
struct mii_ioctl_data *mdio = if_mii(ifr);
- DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
- mdio->phy_id, mdio->reg_num, mdio->val_in);
-
if (!netif_running(dev))
return -EAGAIN;
- return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bnx2x_hwtstamp_ioctl(bp, ifr);
+ default:
+ DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+ mdio->phy_id, mdio->reg_num, mdio->val_in);
+ return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+ }
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -12338,7 +12529,7 @@ static int bnx2x_validate_addr(struct net_device *dev)
if (IS_VF(bp))
bnx2x_sample_bulletin(bp);
- if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
BNX2X_ERR("Non-valid Ethernet address\n");
return -EADDRNOTAVAIL;
}
@@ -12958,6 +13149,191 @@ static int set_is_vf(int chip_id)
}
}
+/* nig_tsgen registers relative address */
+#define tsgen_ctrl 0x0
+#define tsgen_freecount 0x10
+#define tsgen_synctime_t0 0x20
+#define tsgen_offset_t0 0x28
+#define tsgen_drift_t0 0x30
+#define tsgen_synctime_t1 0x58
+#define tsgen_offset_t1 0x60
+#define tsgen_drift_t1 0x68
+
+/* FW workaround for setting drift */
+static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
+ int best_val, int best_period)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_set_timesync_params *set_timesync_params =
+ &func_params.params.set_timesync;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+ /* Function parameters */
+ set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
+ set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+ set_timesync_params->add_sub_drift_adjust_value =
+ drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
+ set_timesync_params->drift_adjust_value = best_val;
+ set_timesync_params->drift_adjust_period = best_period;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ int rc;
+ int drift_dir = 1;
+ int val, period, period1, period2, dif, dif1, dif2;
+ int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
+
+ DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
+
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP adjfreq called while the interface is down\n");
+ return -EFAULT;
+ }
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ drift_dir = 0;
+ }
+
+ if (ppb == 0) {
+ best_val = 1;
+ best_period = 0x1FFFFFF;
+ } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
+ best_val = 31;
+ best_period = 1;
+ } else {
+ /* Changed not to allow val = 8, 16, 24 as these values
+ * are not supported in workaround.
+ */
+ for (val = 0; val <= 31; val++) {
+ if ((val & 0x7) == 0)
+ continue;
+ period1 = val * 1000000 / ppb;
+ period2 = period1 + 1;
+ if (period1 != 0)
+ dif1 = ppb - (val * 1000000 / period1);
+ else
+ dif1 = BNX2X_MAX_PHC_DRIFT;
+ if (dif1 < 0)
+ dif1 = -dif1;
+ dif2 = ppb - (val * 1000000 / period2);
+ if (dif2 < 0)
+ dif2 = -dif2;
+ dif = (dif1 < dif2) ? dif1 : dif2;
+ period = (dif1 < dif2) ? period1 : period2;
+ if (dif < best_dif) {
+ best_dif = dif;
+ best_val = val;
+ best_period = period;
+ }
+ }
+ }
+
+ rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
+ best_period);
+ if (rc) {
+ BNX2X_ERR("Failed to set drift\n");
+ return -EFAULT;
+ }
+
+ DP(BNX2X_MSG_PTP, "Configrued val = %d, period = %d\n", best_val,
+ best_period);
+
+ return 0;
+}
+
+static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ u64 now;
+
+ DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
+
+ now = timecounter_read(&bp->timecounter);
+ now += delta;
+ /* Re-init the timecounter */
+ timecounter_init(&bp->timecounter, &bp->cyclecounter, now);
+
+ return 0;
+}
+
+static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ u64 ns;
+ u32 remainder;
+
+ ns = timecounter_read(&bp->timecounter);
+
+ DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ u64 ns;
+
+ ns = ts->tv_sec * 1000000000ULL;
+ ns += ts->tv_nsec;
+
+ DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
+
+ /* Re-init the timecounter */
+ timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
+
+ return 0;
+}
+
+/* Enable (or disable) ancillary features of the phc subsystem */
+static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+
+ BNX2X_ERR("PHC ancillary features are not supported\n");
+ return -ENOTSUPP;
+}
+
+void bnx2x_register_phc(struct bnx2x *bp)
+{
+ /* Fill the ptp_clock_info struct and register PTP clock*/
+ bp->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
+ bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
+ bp->ptp_clock_info.n_alarm = 0;
+ bp->ptp_clock_info.n_ext_ts = 0;
+ bp->ptp_clock_info.n_per_out = 0;
+ bp->ptp_clock_info.pps = 0;
+ bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
+ bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
+ bp->ptp_clock_info.gettime = bnx2x_ptp_gettime;
+ bp->ptp_clock_info.settime = bnx2x_ptp_settime;
+ bp->ptp_clock_info.enable = bnx2x_ptp_enable;
+
+ bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
+ if (IS_ERR(bp->ptp_clock)) {
+ bp->ptp_clock = NULL;
+ BNX2X_ERR("PTP clock registeration failed\n");
+ }
+}
+
static int bnx2x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -13129,6 +13505,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
"Unknown",
dev->base_addr, bp->pdev->irq, dev->dev_addr);
+ bnx2x_register_phc(bp);
+
return 0;
init_one_exit:
@@ -13155,6 +13533,11 @@ static void __bnx2x_remove(struct pci_dev *pdev,
struct bnx2x *bp,
bool remove_netdev)
{
+ if (bp->ptp_clock) {
+ ptp_clock_unregister(bp->ptp_clock);
+ bp->ptp_clock = NULL;
+ }
+
/* Delete storage MAC address */
if (!NO_FCOE(bp)) {
rtnl_lock();
@@ -14136,3 +14519,332 @@ int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
REG_RD(bp, pretend_reg);
return 0;
}
+
+static void bnx2x_ptp_task(struct work_struct *work)
+{
+ struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
+ int port = BP_PORT(bp);
+ u32 val_seq;
+ u64 timestamp, ns;
+ struct skb_shared_hwtstamps shhwtstamps;
+
+ /* Read Tx timestamp registers */
+ val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+ if (val_seq & 0x10000) {
+ /* There is a valid timestamp value */
+ timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
+ NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
+ timestamp <<= 32;
+ timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
+ NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
+ /* Reset timestamp register to allow new timestamp */
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+ ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(bp->ptp_tx_skb);
+ bp->ptp_tx_skb = NULL;
+
+ DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+ } else {
+ DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
+ /* Reschedule to keep checking for a valid timestamp value */
+ schedule_work(&bp->ptp_task);
+ }
+}
+
+void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
+{
+ int port = BP_PORT(bp);
+ u64 timestamp, ns;
+
+ timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
+ timestamp <<= 32;
+ timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
+
+ /* Reset timestamp register to allow new timestamp */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+
+ ns = timecounter_cyc2time(&bp->timecounter, timestamp);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+ DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+}
+
+/* Read the PHC */
+static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
+ int port = BP_PORT(bp);
+ u32 wb_data[2];
+ u64 phc_cycles;
+
+ REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
+ NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
+ phc_cycles = wb_data[1];
+ phc_cycles = (phc_cycles << 32) + wb_data[0];
+
+ DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
+
+ return phc_cycles;
+}
+
+static void bnx2x_init_cyclecounter(struct bnx2x *bp)
+{
+ memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
+ bp->cyclecounter.read = bnx2x_cyclecounter_read;
+ bp->cyclecounter.mask = CLOCKSOURCE_MASK(64);
+ bp->cyclecounter.shift = 1;
+ bp->cyclecounter.mult = 1;
+}
+
+static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
+{
+ struct bnx2x_func_state_params func_params = {NULL};
+ struct bnx2x_func_set_timesync_params *set_timesync_params =
+ &func_params.params.set_timesync;
+
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+ func_params.f_obj = &bp->func_obj;
+ func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
+
+ /* Function parameters */
+ set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
+ set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
+
+ return bnx2x_func_state_change(bp, &func_params);
+}
+
+int bnx2x_enable_ptp_packets(struct bnx2x *bp)
+{
+ struct bnx2x_queue_state_params q_params;
+ int rc, i;
+
+ /* send queue update ramrod to enable PTP packets */
+ memset(&q_params, 0, sizeof(q_params));
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
+ &q_params.params.update.update_flags);
+ __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
+ &q_params.params.update.update_flags);
+
+ /* send the ramrod on all the queues of the PF */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Set the appropriate Queue object */
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to enable PTP packets\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int bnx2x_configure_ptp_filters(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ int rc;
+
+ if (!bp->hwtstamp_ioctl_called)
+ return 0;
+
+ switch (bp->tx_type) {
+ case HWTSTAMP_TX_ON:
+ bp->flags |= TX_TIMESTAMPING_EN;
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ BNX2X_ERR("One-step timestamping is not supported\n");
+ return -ERANGE;
+ }
+
+ switch (bp->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ bp->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 events */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ /* Initialize PTP detection L2 events */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+ break;
+ }
+
+ /* Indicate to FW that this PF expects recorded PTP packets */
+ rc = bnx2x_enable_ptp_packets(bp);
+ if (rc)
+ return rc;
+
+ /* Enable sending PTP packets to host */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+ NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
+
+ return 0;
+}
+
+static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int rc;
+
+ DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
+ config.tx_type, config.rx_filter);
+
+ if (config.flags) {
+ BNX2X_ERR("config.flags is reserved for future use\n");
+ return -EINVAL;
+ }
+
+ bp->hwtstamp_ioctl_called = 1;
+ bp->tx_type = config.tx_type;
+ bp->rx_filter = config.rx_filter;
+
+ rc = bnx2x_configure_ptp_filters(bp);
+ if (rc)
+ return rc;
+
+ config.rx_filter = bp->rx_filter;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+/* Configures HW for PTP */
+static int bnx2x_configure_ptp(struct bnx2x *bp)
+{
+ int rc, port = BP_PORT(bp);
+ u32 wb_data[2];
+
+ /* Reset PTP event detection rules - will be configured in the IOCTL */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
+
+ /* Disable PTP packets to host - will be configured in the IOCTL*/
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
+ NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
+
+ /* Enable the PTP feature */
+ REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
+ NIG_REG_P0_PTP_EN, 0x3F);
+
+ /* Enable the free-running counter */
+ wb_data[0] = 0;
+ wb_data[1] = 0;
+ REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
+
+ /* Reset drift register (offset register is not reset) */
+ rc = bnx2x_send_reset_timesync_ramrod(bp);
+ if (rc) {
+ BNX2X_ERR("Failed to reset PHC drift register\n");
+ return -EFAULT;
+ }
+
+ /* Reset possibly old timestamps */
+ REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
+ NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
+ REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+ NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
+
+ return 0;
+}
+
+/* Called during load, to initialize PTP-related stuff */
+void bnx2x_init_ptp(struct bnx2x *bp)
+{
+ int rc;
+
+ /* Configure PTP in HW */
+ rc = bnx2x_configure_ptp(bp);
+ if (rc) {
+ BNX2X_ERR("Stopping PTP initialization\n");
+ return;
+ }
+
+ /* Init work queue for Tx timestamping */
+ INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
+
+ /* Init cyclecounter and timecounter. This is done only in the first
+ * load. If done in every load, PTP application will fail when doing
+ * unload / load (e.g. MTU change) while it is running.
+ */
+ if (!bp->timecounter_init_done) {
+ bnx2x_init_cyclecounter(bp);
+ timecounter_init(&bp->timecounter, &bp->cyclecounter,
+ ktime_to_ns(ktime_get_real()));
+ bp->timecounter_init_done = 1;
+ }
+
+ DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
+}