From 7776e8361d2d3cd795455a29e7cc3e426ca664c2 Mon Sep 17 00:00:00 2001
From: Dong Yibo
Date: Tue, 10 Sep 2024 14:38:45 +0800
Subject: [PATCH] net: mucse: Add support for Ethernet card drivers from Mucse Technology

These drivers add support for the following Mucse Ethernet cards:

* mucse n500/n210 (rnpgbe)
* mucse n500/n210 virtual function (rnpgbevf)
* mucse n10-2ports (rnp)
* mucse n10 virtual function (rnpvf)

Link: https://github.com/deepin-community/kernel/pull/413
Link: https://github.com/deepin-community/kernel/pull/441
Link: https://github.com/deepin-community/kernel/pull/447
Link: https://github.com/deepin-community/kernel/pull/448
Link: https://github.com/deepin-community/kernel/pull/449
Link: https://github.com/deepin-community/kernel/pull/468
Link: https://github.com/deepin-community/kernel/pull/495
Co-developed-by: WangYuli
Signed-off-by: WangYuli
Signed-off-by: Dong Yibo
---
 MAINTAINERS | 6 +
 drivers/net/ethernet/Kconfig | 1 +
 drivers/net/ethernet/Makefile | 1 +
 drivers/net/ethernet/mucse/Kconfig | 160 +
 drivers/net/ethernet/mucse/Makefile | 9 +
 drivers/net/ethernet/mucse/rnp/Makefile | 25 +
 drivers/net/ethernet/mucse/rnp/rnp.h | 1169 +++
 drivers/net/ethernet/mucse/rnp/rnp_common.c | 17 +
 drivers/net/ethernet/mucse/rnp/rnp_common.h | 383 +
 drivers/net/ethernet/mucse/rnp/rnp_dcb.c | 351 +
 drivers/net/ethernet/mucse/rnp/rnp_dcb.h | 36 +
 drivers/net/ethernet/mucse/rnp/rnp_debugfs.c | 522 ++
 drivers/net/ethernet/mucse/rnp/rnp_ethtool.c | 1927 ++++
 drivers/net/ethernet/mucse/rnp/rnp_ethtool.h | 125 +
 drivers/net/ethernet/mucse/rnp/rnp_lib.c | 1334 +++
 drivers/net/ethernet/mucse/rnp/rnp_main.c | 7943 +++++++++++++++++
 drivers/net/ethernet/mucse/rnp/rnp_mbx.c | 650 ++
 drivers/net/ethernet/mucse/rnp/rnp_mbx.h | 238 +
 drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c | 1495 ++++
 drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h | 1135 +++
 drivers/net/ethernet/mucse/rnp/rnp_mpe.c | 220 +
 drivers/net/ethernet/mucse/rnp/rnp_mpe.h | 12 +
 drivers/net/ethernet/mucse/rnp/rnp_n10.c | 4812 ++++++++++
 drivers/net/ethernet/mucse/rnp/rnp_param.c | 346 +
 drivers/net/ethernet/mucse/rnp/rnp_pcs.c | 33 +
 drivers/net/ethernet/mucse/rnp/rnp_pcs.h | 9 +
 drivers/net/ethernet/mucse/rnp/rnp_phy.h | 73 +
 drivers/net/ethernet/mucse/rnp/rnp_ptp.c | 688 ++
 drivers/net/ethernet/mucse/rnp/rnp_ptp.h | 99 +
 drivers/net/ethernet/mucse/rnp/rnp_regs.h | 820 ++
 drivers/net/ethernet/mucse/rnp/rnp_sriov.c | 1737 ++++
 drivers/net/ethernet/mucse/rnp/rnp_sriov.h | 41 +
 drivers/net/ethernet/mucse/rnp/rnp_sysfs.c | 2239 +++++
 .../net/ethernet/mucse/rnp/rnp_tc_u32_parse.h | 56 +
 drivers/net/ethernet/mucse/rnp/rnp_type.h | 1298 +++
 drivers/net/ethernet/mucse/rnp/version.h | 4 +
 drivers/net/ethernet/mucse/rnpgbe/Makefile | 23 +
 drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 1184 +++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 4165 +++++++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_common.c | 16 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_common.h | 384 +
 .../ethernet/mucse/rnpgbe/rnpgbe_debugfs.c | 321 +
 .../ethernet/mucse/rnpgbe/rnpgbe_ethtool.c | 2212 +++++
 .../ethernet/mucse/rnpgbe/rnpgbe_ethtool.h | 132 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 1118 +++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 7727 ++++++++++++++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c | 643 ++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h | 222 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c | 1587 ++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h | 1239 +++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_param.c | 424 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_phy.h | 65 +
.../net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c | 760 ++ .../net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h | 95 + .../net/ethernet/mucse/rnpgbe/rnpgbe_regs.h | 774 ++ .../net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c | 97 + .../net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h | 25 + .../net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c | 1652 ++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_sriov.h | 41 + .../net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c | 1247 +++ .../net/ethernet/mucse/rnpgbe/rnpgbe_type.h | 1362 +++ drivers/net/ethernet/mucse/rnpgbe/version.h | 4 + drivers/net/ethernet/mucse/rnpgbevf/Makefile | 15 + drivers/net/ethernet/mucse/rnpgbevf/defines.h | 394 + .../net/ethernet/mucse/rnpgbevf/rnpgbevf.h | 715 ++ .../mucse/rnpgbevf/rnpgbevf_ethtool.c | 895 ++ .../ethernet/mucse/rnpgbevf/rnpgbevf_main.c | 6287 +++++++++++++ .../ethernet/mucse/rnpgbevf/rnpgbevf_mbx.c | 668 ++ .../ethernet/mucse/rnpgbevf/rnpgbevf_mbx.h | 111 + .../ethernet/mucse/rnpgbevf/rnpgbevf_regs.h | 145 + .../ethernet/mucse/rnpgbevf/rnpgbevf_sysfs.c | 22 + drivers/net/ethernet/mucse/rnpgbevf/vf.c | 869 ++ drivers/net/ethernet/mucse/rnpgbevf/vf.h | 209 + drivers/net/ethernet/mucse/rnpvf/Makefile | 15 + drivers/net/ethernet/mucse/rnpvf/defines.h | 367 + drivers/net/ethernet/mucse/rnpvf/ethtool.c | 781 ++ drivers/net/ethernet/mucse/rnpvf/mbx.c | 624 ++ drivers/net/ethernet/mucse/rnpvf/mbx.h | 119 + drivers/net/ethernet/mucse/rnpvf/regs.h | 141 + drivers/net/ethernet/mucse/rnpvf/rnpvf.h | 738 ++ drivers/net/ethernet/mucse/rnpvf/rnpvf_main.c | 6434 +++++++++++++ drivers/net/ethernet/mucse/rnpvf/sysfs.c | 21 + drivers/net/ethernet/mucse/rnpvf/vf.c | 849 ++ drivers/net/ethernet/mucse/rnpvf/vf.h | 203 + 84 files changed, 78155 insertions(+) create mode 100644 drivers/net/ethernet/mucse/Kconfig create mode 100644 drivers/net/ethernet/mucse/Makefile create mode 100644 drivers/net/ethernet/mucse/rnp/Makefile create mode 100644 drivers/net/ethernet/mucse/rnp/rnp.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_common.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_common.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_dcb.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_dcb.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_debugfs.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_ethtool.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_ethtool.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_lib.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_main.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mbx.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mbx.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mpe.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_mpe.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_n10.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_param.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_pcs.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_pcs.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_phy.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_ptp.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_ptp.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_regs.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_sriov.c create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_sriov.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_sysfs.c create mode 100644 
drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h create mode 100644 drivers/net/ethernet/mucse/rnp/rnp_type.h create mode 100644 drivers/net/ethernet/mucse/rnp/version.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/Makefile create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_param.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_phy.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/version.h create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/Makefile create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/defines.h create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf.h create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_ethtool.c create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_main.c create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_mbx.c create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_mbx.h create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_regs.h create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_sysfs.c create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/vf.c create mode 100644 drivers/net/ethernet/mucse/rnpgbevf/vf.h create mode 100644 drivers/net/ethernet/mucse/rnpvf/Makefile create mode 100644 drivers/net/ethernet/mucse/rnpvf/defines.h create mode 100644 drivers/net/ethernet/mucse/rnpvf/ethtool.c create mode 100644 drivers/net/ethernet/mucse/rnpvf/mbx.c create mode 100644 drivers/net/ethernet/mucse/rnpvf/mbx.h create mode 100644 drivers/net/ethernet/mucse/rnpvf/regs.h create mode 100644 drivers/net/ethernet/mucse/rnpvf/rnpvf.h create mode 100644 drivers/net/ethernet/mucse/rnpvf/rnpvf_main.c create mode 100644 drivers/net/ethernet/mucse/rnpvf/sysfs.c create mode 100644 drivers/net/ethernet/mucse/rnpvf/vf.c create mode 100644 drivers/net/ethernet/mucse/rnpvf/vf.h diff --git a/MAINTAINERS b/MAINTAINERS index ae4c0cec50736..9d58ff1932b1f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23942,6 +23942,12 @@ L: linux-mm@kvack.org S: Maintained F: mm/zswap.c +MUCSE Ethernet Controller drivers +M: Yibo Dong 
+R: WangYuli
+S: Maintained
+F: drivers/net/ethernet/mucse/
+
 THE REST
 M: Linus Torvalds
 L: linux-kernel@vger.kernel.org
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 7d44291378615..691ee909e8e6e 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -86,6 +86,7 @@ source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/ibm/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
 source "drivers/net/ethernet/xscale/Kconfig"
+source "drivers/net/ethernet/mucse/Kconfig"

 config JME
 	tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9adeade0c1b88..f53c131cf3b07 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
 obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
 obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
+obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/
 obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
 obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/
 obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig
new file mode 100644
index 0000000000000..ec5690a581ac6
--- /dev/null
+++ b/drivers/net/ethernet/mucse/Kconfig
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Mucse network device configuration
+#
+
+config NET_VENDOR_MUCSE
+	bool "Mucse devices"
+	default y
+	help
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Mucse cards. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+
+if NET_VENDOR_MUCSE
+
+config MGBE
+	tristate "Mucse(R) 1GbE PCI Express adapters support"
+	depends on PCI
+	imply PTP_1588_CLOCK
+	help
+	  This driver supports the Mucse(R) 1GbE PCI Express family of
+	  adapters.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called rnpgbe.
+
+config MGBE_OPTM_WITH_LPAGE
+	bool "Reduce Memory Cost In Large PAGE_SIZE(>8192)"
+	default n
+	depends on MGBE
+	help
+	  Say Y here if you want to reduce memory cost when PAGE_SIZE is large.
+
+	  If unsure, say N.
+
+config MGBE_MSIX_COUNT
+	int "Number of MSI-X vectors"
+	default "26"
+	depends on MGBE
+	help
+	  MGBE range [2,26].
+
+config MGBEVF
+	tristate "Mucse(R) 1GbE PCI Express Virtual Function adapters support"
+	depends on PCI
+	imply PTP_1588_CLOCK
+	help
+	  This driver supports the Mucse(R) 1GbE PCI Express Virtual Function
+	  family of adapters.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called rnpgbevf.
+
+config MGBEVF_OPTM_WITH_LARGE
+	bool "Reduce Memory Cost In Large PAGE_SIZE(>8192)"
+	default n
+	depends on MGBEVF
+	help
+	  Say Y here if you want to reduce memory cost when PAGE_SIZE is large.
+
+	  If unsure, say N.
+
+config MXGBE
+	tristate "Mucse(R) 1/10/25/40GbE PCI Express adapters support"
+	depends on PCI
+	imply PTP_1588_CLOCK
+	help
+	  This driver supports the Mucse(R) 1/10/25/40GbE PCI Express family of
+	  adapters.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called rnp.
+
+config MXGBE_FIX_VF_QUEUE
+	bool "Fix VF Queue Used(pf)"
+	default y
+	depends on MXGBE
+	help
+	  Say Y here if you want to fix the VF queue order in the driver.
+
+	  If unsure, say N.
+
+config MXGBE_FIX_MAC_PADDING
+	bool "Close MAC Padding Function(pf)"
+	default y
+	depends on MXGBE
+	help
+	  Say Y here if you want to disable MAC padding in the driver.
+
+	  If unsure, say N.
+
+config MXGBE_OPTM_WITH_LARGE
+	bool "Reduce Memory Cost In Large PAGE_SIZE(>8192)"
+	default n
+	depends on MXGBE
+	help
+	  Say Y here if you want to reduce memory cost when PAGE_SIZE is large.
+
+	  If unsure, say N.
+
+config MXGBE_MSIX_COUNT
+	int "Number of MSI-X vectors"
+	default "64"
+	depends on MXGBE
+	help
+	  MXGBE range [2,64].
+
+config MXGBE_DCB
+	bool "Data Center Bridging (DCB) Support"
+	default y
+	depends on MXGBE && DCB
+	help
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+
+	  If unsure, say N.
+
+config MXGBEVF
+	tristate "Mucse(R) 1/10/25/40GbE PCI Express Virtual Function adapters support"
+	depends on PCI
+	help
+	  This driver supports the Mucse(R) 1/10/25/40GbE PCI Express Virtual Function
+	  family of adapters.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called rnpvf.
+
+config MXGBEVF_FIX_VF_QUEUE
+	bool "Fix VF Queue Used(vf)"
+	default y
+	depends on MXGBEVF
+	help
+	  Say Y here if you want to fix the VF queue order in the driver.
+
+	  If unsure, say N.
+
+config MXGBEVF_FIX_MAC_PADDING
+	bool "Close MAC Padding Function(vf)"
+	default y
+	depends on MXGBEVF
+	help
+	  Say Y here if you want to disable MAC padding in the driver.
+
+	  If unsure, say N.
+
+config MXGBEVF_OPTM_WITH_LARGE
+	bool "Reduce Memory Cost In Large PAGE_SIZE(>8192)"
+	default n
+	depends on MXGBEVF
+	help
+	  Say Y here if you want to reduce memory cost when PAGE_SIZE is large.
+
+	  If unsure, say N.
+
+endif # NET_VENDOR_MUCSE
+
diff --git a/drivers/net/ethernet/mucse/Makefile b/drivers/net/ethernet/mucse/Makefile
new file mode 100644
index 0000000000000..c1357aab64c86
--- /dev/null
+++ b/drivers/net/ethernet/mucse/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Mucse network device drivers.
+#
+
+obj-$(CONFIG_MGBE) += rnpgbe/
+obj-$(CONFIG_MGBEVF) += rnpgbevf/
+obj-$(CONFIG_MXGBE) += rnp/
+obj-$(CONFIG_MXGBEVF) += rnpvf/
diff --git a/drivers/net/ethernet/mucse/rnp/Makefile b/drivers/net/ethernet/mucse/rnp/Makefile
new file mode 100644
index 0000000000000..62d3acbba4586
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2022 - 2024 Mucse Corporation
+#
+# Makefile for the Mucse(R) 10GbE-2ports PCI Express Ethernet driver
+#
+#
+
+obj-$(CONFIG_MXGBE) += rnp.o
+rnp-objs := \
+	rnp_main.o \
+	rnp_common.o \
+	rnp_ethtool.o \
+	rnp_lib.o \
+	rnp_mbx.o \
+	rnp_pcs.o \
+	rnp_n10.o \
+	rnp_mbx_fw.o \
+	rnp_sriov.o \
+	rnp_param.o \
+	rnp_sysfs.o \
+	rnp_ptp.o \
+	rnp_mpe.o
+
+rnp-$(CONFIG_DCB) += rnp_dcb.o
+rnp-$(CONFIG_DEBUG_FS) += rnp_debugfs.o
diff --git a/drivers/net/ethernet/mucse/rnp/rnp.h b/drivers/net/ethernet/mucse/rnp/rnp.h
new file mode 100644
index 0000000000000..87da29f489e7a
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/rnp.h
@@ -0,0 +1,1169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation.
*/ + +#ifndef _RNP_H_ +#define _RNP_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rnp_type.h" +#include "rnp_common.h" +#include "rnp_dcb.h" + +/* common prefix used by pr_<> macros */ +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define RNP_ALLOC_PAGE_ORDER 0 +#define RNP_PAGE_BUFFER_NUMS(ring) \ + ((1 << RNP_ALLOC_PAGE_ORDER) * PAGE_SIZE / \ + ALIGN((rnp_rx_offset(ring) + rnp_rx_bufsz(ring) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ + RNP_RX_HWTS_OFFSET), \ + 1024)) + +#define RNP_DEFAULT_TX_WORK (128) +#define RNP_MIN_TX_WORK (32) +#define RNP_MAX_TX_WORK (512) +#define RNP_MIN_RX_WORK (32) +#define RNP_MAX_RX_WORK (512) +#define RNP_WORK_ALIGN (2) +#define RNP_MIN_TX_FRAME (1) +#define RNP_MAX_TX_FRAME (256) +#define RNP_MIN_TX_USEC (30) +#define RNP_MAX_TX_USEC (10000) + +#define RNP_MIN_RX_FRAME (1) +#define RNP_MAX_RX_FRAME (256) +#define RNP_MIN_RX_USEC (10) +#define RNP_MAX_RX_USEC (10000) + +#define RNP_MAX_TXD (4096) +#define RNP_MIN_TXD (64) + +#define ACTION_TO_MPE (130) +#define MPE_PORT (10) +#define AUTO_ALL_MODES 0 +/* TX/RX descriptor defines */ +#ifdef FEITENG +#define RNP_DEFAULT_TXD 4096 +#else +#define RNP_DEFAULT_TXD 512 +#endif + +#define RNP_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define RNP_REQ_RX_DESCRIPTOR_MULTIPLE 8 + +#ifdef FEITENG +#define RNP_DEFAULT_RXD 4096 +#else +#define RNP_DEFAULT_RXD 512 +#endif +#define RNP_MAX_RXD 4096 +#define RNP_MIN_RXD 64 + +/* flow control */ +#define RNP_MIN_FCRTL 0x40 +#define RNP_MAX_FCRTL 0x7FF80 +#define RNP_MIN_FCRTH 0x600 +#define RNP_MAX_FCRTH 0x7FFF0 +#define RNP_DEFAULT_FCPAUSE 0xFFFF +#define RNP10_DEFAULT_HIGH_WATER 0x320 +#define RNP10_DEFAULT_LOW_WATER 0x270 +#define RNP500_DEFAULT_HIGH_WATER 400 +#define RNP500_DEFAULT_LOW_WATER 256 +#define RNP_MIN_FCPAUSE 0 +#define RNP_MAX_FCPAUSE 0xFFFF + +/* Supported Rx Buffer Sizes */ +#define RNP_RXBUFFER_256 256 /* Used for skb receive header */ +#define RNP_RXBUFFER_1536 1536 +#define RNP_RXBUFFER_2K 2048 +#define RNP_RXBUFFER_3K 3072 +#define RNP_RXBUFFER_4K 4096 +#define RNP_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ +#define RNP_RXBUFFER_MAX (RNP_RXBUFFER_2K) + +#define MAX_Q_VECTORS 128 + +#define RNP_RING_COUNTS_PEER_PF 8 +#define RNP_GSO_PARTIAL_FEATURES \ + (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +/* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. + */ +#define RNP_RX_HDR_SIZE RNP_RXBUFFER_256 + +#define RNP_ITR_ADAPTIVE_MIN_INC 2 +#define RNP_ITR_ADAPTIVE_MIN_USECS 5 +#define RNP_ITR_ADAPTIVE_MAX_USECS 800 +#define RNP_ITR_ADAPTIVE_LATENCY 0x400 +#define RNP_ITR_ADAPTIVE_BULK 0x00 +#define RNP_ITR_ADAPTIVE_MASK_USECS \ + (RNP_ITR_ADAPTIVE_LATENCY - RNP_ITR_ADAPTIVE_MIN_INC) + +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#ifdef OPTM_WITH_LPAGE +#define RNP_RX_BUFFER_WRITE (PAGE_SIZE / 2048) /* Must be power of 2 */ +#else +#define RNP_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#endif +enum rnp_tx_flags { + /* cmd_type flags */ + RNP_TX_FLAGS_HW_VLAN = 0x01, + RNP_TX_FLAGS_TSO = 0x02, + RNP_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + RNP_TX_FLAGS_CC = 0x08, + RNP_TX_FLAGS_IPV4 = 0x10, + RNP_TX_FLAGS_CSUM = 0x20, + + /* software defined flags */ + RNP_TX_FLAGS_SW_VLAN = 0x40, + RNP_TX_FLAGS_FCOE = 0x80, +}; +#ifndef RNP_MAX_VF_CNT +#define RNP_MAX_VF_CNT 64 +#endif + +#define RNP_RX_RATE_HIGH 450000 +#define RNP_RX_COAL_TIME_HIGH 128 +#define RNP_RX_SIZE_THRESH 1024 +#define RNP_RX_RATE_THRESH (1000000 / RNP_RX_COAL_TIME_HIGH) +#define RNP_SAMPLE_INTERVAL 0 +#define RNP_AVG_PKT_SMALL 256 + +#define RNP_MAX_VF_MC_ENTRIES 30 +#define RNP_MAX_VF_FUNCTIONS RNP_MAX_VF_CNT +#define RNP_MAX_VFTA_ENTRIES 128 +#define MAX_EMULATION_MAC_ADDRS 16 +#define RNP_MAX_PF_MACVLANS_N10 15 +//#define RNP_MAX_PF_MACVLANS 15 +#define PF_RING_CNT_WHEN_IOV_ENABLED 2 +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) + +enum vf_link_state { + rnp_link_state_on, + rnp_link_state_auto, + rnp_link_state_off, + +}; + +struct vf_data_storage { + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[RNP_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 default_vf_vlan_id; + u16 vlans_enabled; + bool clear_to_send; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. */ + u16 vf_vlan; // vf just can set 1 vlan + u16 pf_qos; + u16 tx_rate; + int link_state; + u16 vlan_count; + u8 spoofchk_enabled; + u8 trusted; + bool promisc_mode; + unsigned long status; + unsigned int vf_api; +}; + +enum vf_state_t { + __VF_MBX_USED, +}; + +struct vf_macvlans { + struct list_head l; + int vf; + int rar_entry; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; +}; + +/* now tx max 4k for one desc */ +// feiteng use 12k can get better netperf performance +#define RNP_MAX_TXD_PWR 12 +#define RNP_MAX_DATA_PER_TXD (1 << RNP_MAX_TXD_PWR) +//#define RNP_MAX_DATA_PER_TXD (12 * 1024) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), RNP_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffers + */ +struct rnp_tx_buffer { + struct rnp_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + bool gso_need_padding; + + __be16 protocol; + __be16 priv_tags; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + union { + u32 mss_len_vf_num; + struct { + __le16 mss_len; + u8 vf_num; + u8 l4_hdr_len; + }; + }; + union { + u32 inner_vlan_tunnel_len; + struct { + u8 tunnel_hdr_len; + u8 inner_vlan_l; + u8 inner_vlan_h; + u8 resv; + }; + }; + bool ctx_flag; +}; + +struct rnp_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + __u32 page_offset; +#else + __u16 page_offset; +#endif + __u16 pagecnt_bias; +}; + +struct rnp_queue_stats { + u64 packets; + u64 bytes; +}; + +struct rnp_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; + u64 clean_desc; + u64 poll_count; + u64 irq_more_count; + u64 send_bytes; + u64 send_bytes_to_hw; + u64 todo_update; + u64 send_done_bytes; + u64 vlan_add; + u64 tx_next_to_clean; + u64 tx_irq_miss; + u64 tx_equal_count; + u64 tx_clean_times; + u64 tx_clean_count; 
+}; + +struct rnp_rx_queue_stats { + u64 driver_drop_packets; + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 alloc_rx_page; + u64 csum_err; + u64 csum_good; + u64 poll_again_count; + u64 vlan_remove; + u64 rx_next_to_clean; + u64 rx_irq_miss; + u64 rx_equal_count; + u64 rx_clean_times; + u64 rx_clean_count; +}; + +enum rnp_ring_state_t { + __RNP_RX_3K_BUFFER, + __RNP_RX_BUILD_SKB_ENABLED, + __RNP_TX_FDIR_INIT_DONE, + __RNP_TX_XPS_INIT_DONE, + __RNP_TX_DETECT_HANG, + __RNP_HANG_CHECK_ARMED, + __RNP_RX_CSUM_UDP_ZERO_ERR, + __RNP_RX_FCOE, +}; + +#define ring_uses_build_skb(ring) \ + test_bit(__RNP_RX_BUILD_SKB_ENABLED, &(ring)->state) +#define check_for_tx_hang(ring) test_bit(__RNP_TX_DETECT_HANG, &(ring)->state) +#define set_check_for_tx_hang(ring) \ + set_bit(__RNP_TX_DETECT_HANG, &(ring)->state) +#define clear_check_for_tx_hang(ring) \ + clear_bit(__RNP_TX_DETECT_HANG, &(ring)->state) +struct rnp_ring { + struct rnp_ring *next; /* pointer to next ring in q_vector */ + struct rnp_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ + void *desc; /* descriptor ring memory */ + union { + struct rnp_tx_buffer *tx_buffer_info; + struct rnp_rx_buffer *rx_buffer_info; + }; + unsigned long last_rx_timestamp; + unsigned long state; + u8 __iomem *ring_addr; + u8 __iomem *tail; + u8 __iomem *dma_int_stat; + u8 __iomem *dma_int_mask; + u8 __iomem *dma_int_clr; + dma_addr_t dma; /* phys. address of descriptor ring */ + unsigned int size; /* length in bytes */ + u32 ring_flags; +#define RNP_RING_FLAG_DELAY_SETUP_RX_LEN ((u32)(1 << 0)) +#define RNP_RING_FLAG_CHANGE_RX_LEN ((u32)(1 << 1)) +#define RNP_RING_FLAG_DO_RESET_RX_LEN ((u32)(1 << 2)) +#define RNP_RING_SKIP_TX_START ((u32)(1 << 3)) +#define RNP_RING_NO_TUNNEL_SUPPORT ((u32)(1 << 4)) +#define RNP_RING_SIZE_CHANGE_FIX ((u32)(1 << 5)) +#define RNP_RING_SCATER_SETUP ((u32)(1 << 6)) +#define RNP_RING_STAGS_SUPPORT ((u32)(1 << 7)) +#define RNP_RING_DOUBLE_VLAN_SUPPORT ((u32)(1 << 8)) +#define RNP_RING_VEB_MULTI_FIX ((u32)(1 << 9)) +#define RNP_RING_IRQ_MISS_FIX ((u32)(1 << 10)) +#define RNP_RING_OUTER_VLAN_FIX ((u32)(1 << 11)) +#define RNP_RING_CHKSM_FIX ((u32)(1 << 12)) +#define RNP_RING_LOWER_ITR ((u32)(1 << 13)) + u8 pfvfnum; + + u16 count; /* amount of descriptors */ + u16 temp_count; + u16 reset_count; + + u8 queue_index; /* queue_index needed for multiqueue queue management */ + u8 rnp_queue_idx; /* the real ring,used by dma */ + u16 next_to_use; + u16 next_to_clean; + + u16 device_id; +#ifdef OPTM_WITH_LPAGE + u16 rx_page_buf_nums; + u32 rx_per_buf_mem; + struct sk_buff *skb; +#endif + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; + + u8 dcb_tc; + struct rnp_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct rnp_tx_queue_stats tx_stats; + struct rnp_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +#define RING2ADAPT(ring) netdev_priv((ring)->netdev) + +enum rnp_ring_f_enum { + RING_F_NONE = 0, + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, + + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + +#define RNP_MAX_RSS_INDICES 128 +#define RNP_MAX_RSS_INDICES_UV3P 8 +#define RNP_MAX_VMDQ_INDICES 64 +#define RNP_MAX_FDIR_INDICES 63 /* based on q_vector limit */ +#define RNP_MAX_FCOE_INDICES 8 +#define MAX_RX_QUEUES (128) +#define MAX_TX_QUEUES (128) 
+struct rnp_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +} ____cacheline_internodealigned_in_smp; + +#define RNP_n10_VMDQ_8Q_MASK 0x78 +#define RNP_n10_VMDQ_4Q_MASK 0x7C +#define RNP_n10_VMDQ_2Q_MASK 0x7E + +/* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +static inline unsigned int rnp_rx_bufsz(struct rnp_ring *ring) +{ + return (RNP_RXBUFFER_1536 - NET_IP_ALIGN); +} + +static inline unsigned int rnp_rx_pg_order(struct rnp_ring *ring) +{ + /* fixed 1 page */ + /* we don't support 3k buffer */ + return 0; +} +#define rnp_rx_pg_size(_ring) (PAGE_SIZE << rnp_rx_pg_order(_ring)) + +struct rnp_ring_container { + struct rnp_ring *ring; /* pointer to linked list of rings */ + unsigned long next_update; /* jiffies value of last update */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + unsigned int total_packets_old; + u16 work_limit; /* total work allowed per interrupt */ + u16 count; /* total number of rings in vector */ + u16 itr; /* current ITR/MSIX vector setting for ring */ + u16 add_itr; +}; + +/* iterator for handling rings in ring container */ +#define rnp_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +#define MAX_RX_PACKET_BUFFERS ((adapter->flags & RNP_FLAG_DCB_ENABLED) ? 8 : 1) +#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +/* MAX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
+ */ + +#define SUPPORT_IRQ_AFFINITY_CHANGE +struct rnp_q_vector { + int old_rx_count; + int new_rx_count; + int new_tx_count; + int large_times; + int small_times; + int too_small_times; + int middle_time; + int large_times_tx; + int small_times_tx; + int too_small_times_tx; + int middle_time_tx; + struct rnp_adapter *adapter; + int v_idx; + /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr_rx; + u16 itr_tx; + struct rnp_ring_container rx, tx; + + struct napi_struct napi; + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + + u32 vector_flags; +#define RNP_QVECTOR_FLAG_IRQ_MISS_CHECK ((u32)(1 << 0)) +#define RNP_QVECTOR_FLAG_ITR_FEATURE ((u32)(1 << 1)) +#define RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS ((u32)(1 << 2)) + int irq_check_usecs; + struct hrtimer irq_miss_check_timer; + + char name[IFNAMSIZ + 9]; + + /* for dynamic allocation of rings associated with this q_vector */ + struct rnp_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +static inline __le16 rnp_test_ext_cmd(union rnp_rx_desc *rx_desc, + const u16 stat_err_bits) +{ + return rx_desc->wb.rev1 & cpu_to_le16(stat_err_bits); +} + +#ifdef RNP_HWMON + +#define RNP_HWMON_TYPE_LOC 0 +#define RNP_HWMON_TYPE_TEMP 1 +#define RNP_HWMON_TYPE_CAUTION 2 +#define RNP_HWMON_TYPE_MAX 3 +#define RNP_HWMON_TYPE_NAME 4 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct rnp_hw *hw; + struct rnp_thermal_diode_data *sensor; + char name[12]; +}; + +struct hwmon_buff { + struct attribute_group group; + const struct attribute_group *groups[2]; + struct attribute *attrs[RNP_MAX_SENSORS * 4 + 1]; + struct hwmon_attr hwmon_list[RNP_MAX_SENSORS * 4]; + unsigned int n_hwmon; +}; +#endif /* RNPM_HWMON */ + +/* + rnp_test_staterr - tests bits in Rx descriptor status and error fields +*/ +static inline __le16 rnp_test_staterr(union rnp_rx_desc *rx_desc, + const u16 stat_err_bits) +{ + return rx_desc->wb.cmd & cpu_to_le16(stat_err_bits); +} + +static inline __le16 rnp_get_stat(union rnp_rx_desc *rx_desc, + const u16 stat_mask) +{ + return rx_desc->wb.cmd & cpu_to_le16(stat_mask); +} + +static inline u16 rnp_desc_unused(struct rnp_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; +} + +static inline u16 rnp_desc_unused_rx(struct rnp_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +#define RNP_RX_DESC(R, i) (&(((union rnp_rx_desc *)((R)->desc))[i])) +#define RNP_TX_DESC(R, i) (&(((struct rnp_tx_desc *)((R)->desc))[i])) +#define RNP_TX_CTXTDESC(R, i) (&(((struct rnp_tx_ctx_desc *)((R)->desc))[i])) + +#define RNP_MAX_JUMBO_FRAME_SIZE 9590 /* Maximum Supported Size 9.5KB */ +#define RNP_MIN_MTU 68 +#define RNP500_MAX_JUMBO_FRAME_SIZE 9722 /* Maximum Supported Size 9728 */ + +#define OTHER_VECTOR 1 +#define NON_Q_VECTORS (OTHER_VECTOR) + +/* default to trying for four seconds */ +#define RNP_TRY_LINK_TIMEOUT (4 * HZ) + +#define RNP_MAX_USER_PRIO (8) +#define RNP_MAX_TCS_NUM (4) +struct rnp_pfc_cfg { + u8 pfc_max; /* hardware can enabled max pfc channel */ + u8 hw_pfc_map; /* enable the prio channel bit */ + u8 pfc_num; /* at present enabled the pfc-channel num */ + u8 pfc_en; /* enabled the pfc feature or not */ +}; + +struct rnp_dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; +}; + +struct rnp_dcb_cfg { + u8 tc_num; + u16 delay; /* pause time */ + u8 dcb_en; /* enabled the dcb feature or not */ + u8 dcbx_mode; + struct rnp_pfc_cfg pfc_cfg; + struct rnp_dcb_num_tcs num_tcs; + /* statistic info */ + u64 requests[RNP_MAX_TCS_NUM]; + u64 indications[RNP_MAX_TCS_NUM]; + enum rnp_fc_mode last_lfc_mode; +}; + +struct rnp_pps_cfg { + bool available; + struct timespec64 start; + struct timespec64 period; +}; + +enum rss_func_mode_enum { + rss_func_top, + rss_func_xor, + rss_func_order, +}; + +enum outer_vlan_type_enum { + outer_vlan_type_88a8, + outer_vlan_type_9100, + outer_vlan_type_9200, + outer_vlan_type_max, +}; + +enum irq_mode_enum { + irq_mode_legency, + irq_mode_msi, + irq_mode_msix, +}; + +/* board specific private data structure */ +struct rnp_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned long active_vlans_stags[BITS_TO_LONGS(VLAN_N_VID)]; + /* OS defined structs */ + u16 vf_vlan; + u16 vlan_count; + int miss_time; + struct net_device *netdev; + struct pci_dev *pdev; + bool quit_poll_thread; + struct task_struct *rx_poll_thread; + unsigned long state; + spinlock_t link_stat_lock; + + /* this var is used for auto itr modify */ + /* hw not Supported well */ + unsigned long last_moder_packets[MAX_RX_QUEUES]; + unsigned long last_moder_tx_packets; + unsigned long last_moder_bytes[MAX_RX_QUEUES]; + unsigned long last_moder_jiffies; + int last_moder_time[MAX_RX_QUEUES]; + /* only rx itr is Supported */ + int usecendcount; + u16 rx_usecs; + u16 rx_usecs_usr_set; + u16 rx_frames; + u16 usecstocount; + u16 tx_frames; + u16 tx_usecs; + u16 tx_usecs_usr_set; + u32 pkt_rate_low; + u16 rx_usecs_low; + u32 pkt_rate_high; + u16 rx_usecs_high; + u32 sample_interval; + u32 adaptive_rx_coal; + u32 adaptive_tx_coal; + u32 auto_rx_coal; + int napi_budge; + union { + int phy_addr; + struct { + u8 mod_abs : 1; + u8 fault : 1; + u8 tx_dis : 1; + u8 los : 1; + } sfp; + }; + + struct { + u32 main; + u32 pre; + u32 post; + u32 tx_boost; + } si; + + int speed; + + u8 an : 1; + u8 fec : 1; + u8 link_traing : 1; + u8 duplex : 1; + u8 rpu_inited : 1; + + /* Some features need tri-state capability, + * thus the additional *_CAPABLE flags. 
+ */ + u32 vf_num_for_pf; + u32 flags; +#define RNP_FLAG_MSI_CAPABLE ((u32)(1 << 0)) +#define RNP_FLAG_MSI_ENABLED ((u32)(1 << 1)) +#define RNP_FLAG_MSIX_CAPABLE ((u32)(1 << 2)) +#define RNP_FLAG_MSIX_ENABLED ((u32)(1 << 3)) +#define RNP_FLAG_RX_1BUF_CAPABLE ((u32)(1 << 4)) +#define RNP_FLAG_RX_PS_CAPABLE ((u32)(1 << 5)) +#define RNP_FLAG_RX_PS_ENABLED ((u32)(1 << 6)) +#define RNP_FLAG_IN_NETPOLL ((u32)(1 << 7)) +#define RNP_FLAG_DCA_ENABLED ((u32)(1 << 8)) +#define RNP_FLAG_DCA_CAPABLE ((u32)(1 << 9)) +#define RNP_FLAG_IMIR_ENABLED ((u32)(1 << 10)) +#define RNP_FLAG_MQ_CAPABLE ((u32)(1 << 11)) +#define RNP_FLAG_DCB_ENABLED ((u32)(1 << 12)) +#define RNP_FLAG_VMDQ_CAPABLE ((u32)(1 << 13)) +#define RNP_FLAG_VMDQ_ENABLED ((u32)(1 << 14)) +#define RNP_FLAG_FAN_FAIL_CAPABLE ((u32)(1 << 15)) +#define RNP_FLAG_NEED_LINK_UPDATE ((u32)(1 << 16)) +#define RNP_FLAG_NEED_LINK_CONFIG ((u32)(1 << 17)) +#define RNP_FLAG_FDIR_HASH_CAPABLE ((u32)(1 << 18)) +#define RNP_FLAG_FDIR_PERFECT_CAPABLE ((u32)(1 << 19)) +#define RNP_FLAG_FCOE_CAPABLE ((u32)(1 << 20)) +#define RNP_FLAG_FCOE_ENABLED ((u32)(1 << 21)) +#define RNP_FLAG_SRIOV_CAPABLE ((u32)(1 << 22)) +#define RNP_FLAG_SRIOV_ENABLED ((u32)(1 << 23)) +#define RNP_FLAG_VXLAN_OFFLOAD_CAPABLE ((u32)(1 << 24)) +#define RNP_FLAG_VXLAN_OFFLOAD_ENABLE ((u32)(1 << 25)) +#define RNP_FLAG_SWITCH_LOOPBACK_EN ((u32)(1 << 26)) +#define RNP_FLAG_SRIOV_INIT_DONE ((u32)(1 << 27)) +#define RNP_FLAG_IN_IRQ ((u32)(1 << 28)) +#define RNP_FLAG_VF_INIT_DONE ((u32)(1 << 29)) +#define RNP_FLAG_LEGACY_CAPABLE ((u32)(1 << 30)) +#define RNP_FLAG_LEGACY_ENABLED ((u32)(1 << 31)) + u32 flags2; +#define RNP_FLAG2_RSC_CAPABLE ((u32)(1 << 0)) +#define RNP_FLAG2_RSC_ENABLED ((u32)(1 << 1)) +#define RNP_FLAG2_TEMP_SENSOR_CAPABLE ((u32)(1 << 2)) +#define RNP_FLAG2_TEMP_SENSOR_EVENT ((u32)(1 << 3)) +#define RNP_FLAG2_SEARCH_FOR_SFP ((u32)(1 << 4)) +#define RNP_FLAG2_SFP_NEEDS_RESET ((u32)(1 << 5)) +#define RNP_FLAG2_RESET_REQUESTED ((u32)(1 << 6)) +#define RNP_FLAG2_FDIR_REQUIRES_REINIT ((u32)(1 << 7)) +#define RNP_FLAG2_RSS_FIELD_IPV4_UDP ((u32)(1 << 8)) +#define RNP_FLAG2_RSS_FIELD_IPV6_UDP ((u32)(1 << 9)) +#define RNP_FLAG2_PTP_ENABLED ((u32)(1 << 10)) +#define RNP_FLAG2_PTP_PPS_ENABLED ((u32)(1 << 11)) +#define RNP_FLAG2_BRIDGE_MODE_VEB ((u32)(1 << 12)) +#define RNP_FLAG2_VLAN_STAGS_ENABLED ((u32)(1 << 13)) +#define RNP_FLAG2_UDP_TUN_REREG_NEEDED ((u32)(1 << 14)) +#define RNP_FLAG2_RESET_PF ((u32)(1 << 15)) +#define RNP_FLAG2_CHKSM_FIX ((u32)(1 << 16)) + + u32 priv_flags; +#define RNP_PRIV_FLAG_MAC_LOOPBACK BIT(0) +#define RNP_PRIV_FLAG_SWITCH_LOOPBACK BIT(1) +#define RNP_PRIV_FLAG_VEB_ENABLE BIT(2) +#define RNP_PRIV_FLAG_FT_PADDING BIT(3) +#define RNP_PRIV_FLAG_PADDING_DEBUG BIT(4) +#define RNP_PRIV_FLAG_PTP_DEBUG BIT(5) +#define RNP_PRIV_FLAG_SIMUATE_DOWN BIT(6) +#define RNP_PRIV_FLAG_VXLAN_INNER_MATCH BIT(7) +#define RNP_PRIV_FLAG_ULTRA_SHORT BIT(8) +#define RNP_PRIV_FLAG_DOUBLE_VLAN BIT(9) +#define RNP_PRIV_FLAG_TCP_SYNC BIT(10) +#define RNP_PRIV_FLAG_PAUSE_OWN BIT(11) +#define RNP_PRIV_FLAG_JUMBO BIT(12) +#define RNP_PRIV_FLAG_TX_PADDING BIT(13) +#define RNP_PRIV_FLAG_RX_ALL BIT(14) +#define RNP_PRIV_FLAG_REC_HDR_LEN_ERR BIT(15) +#define RNP_PRIV_FLAG_RX_FCS BIT(16) +#define RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE BIT(17) +#define RNP_PRIV_FLGA_TEST_TX_HANG BIT(18) +#define RNP_PRIV_FLAG_RX_SKIP_EN BIT(19) +#define RNP_PRIV_FLAG_TCP_SYNC_PRIO BIT(20) +#define RNP_PRIV_FLAG_REMAP_PRIO BIT(21) +#define RNP_PRIV_FLAG_8023_PRIO BIT(22) +#define RNP_PRIV_FLAG_SRIOV_VLAN_MODE BIT(23) +#define 
RNP_PRIV_FLAG_REMAP_MODE BIT(24) +#define RNP_PRIV_FLAG_LLDP_EN_STAT BIT(25) +#define RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE BIT(26) +#define RNP_PRIV_FLAG_LINK_DOWN_BEFORE BIT(27) + +#define PRIV_DATA_EN BIT(7) + int rss_func_mode; + int outer_vlan_type; + int tcp_sync_queue; + int priv_skip_count; + u64 rx_drop_status; + int drop_time; + /* Tx fast path data */ + unsigned int num_tx_queues; + unsigned int max_ring_pair_counts; + u16 tx_work_limit; + __be16 vxlan_port; + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; + u32 eth_queue_idx; + u32 max_rate[MAX_TX_QUEUES]; + /* TX */ + struct rnp_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; + int tx_ring_item_count; + u64 restart_queue; + u64 lsc_int; + u32 tx_timeout_count; + /* RX */ + struct rnp_ring *rx_ring[MAX_RX_QUEUES]; + int rx_ring_item_count; + u64 hw_csum_rx_error; + u64 hw_csum_rx_good; + u64 hw_rx_no_dma_resources; + u64 rsc_total_count; + u64 rsc_total_flush; + u64 non_eop_descs; + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + int num_other_vectors; + int irq_mode; + struct rnp_q_vector *q_vector[MAX_Q_VECTORS]; + /* used for IEEE 1588 ptp clock start */ + u8 __iomem *ptp_addr; + int gmac4; + const struct rnp_hwtimestamp *hwts_ops; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_ops; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + u32 ptp_config_value; + spinlock_t ptp_lock; /* Used to protect the SYSTIME registers. */ + u64 clk_ptp_rate; /* uint is HZ 1MHz=1 000 000Hz */ + u32 sub_second_inc; + u32 systime_flags; + struct timespec64 ptp_prev_hw_time; + unsigned int default_addend; + bool ptp_tx_en; + bool ptp_rx_en; + struct work_struct tx_hwtstamp_work; + unsigned long tx_hwtstamp_start; + unsigned long tx_hwtstamp_skipped; + unsigned long tx_timeout_factor; + u64 tx_hwtstamp_timeouts; + /*used for IEEE 1588 ptp clock end */ + /* DCB parameters */ + struct rnp_dcb_cfg dcb_cfg; + u8 prio_tc_map[RNP_MAX_USER_PRIO * 2]; + u8 num_tc; + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* true count of q_vectors for device */ + struct rnp_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + u32 test_icr; + struct rnp_ring test_tx_ring; + struct rnp_ring test_rx_ring; + /* structs defined in rnp_hw.h */ + struct rnp_hw hw; + u16 msg_enable; + struct rnp_hw_stats hw_stats; + u64 tx_busy; + u32 link_speed; + bool link_up; + bool duplex_status; + u32 link_speed_old; + bool link_up_old; + bool duplex_old; + unsigned long link_check_timeout; + struct timer_list service_timer; + struct work_struct service_task; + /* fdir relative */ + struct hlist_head fdir_filter_list; + unsigned long fdir_overflow; /* number of times ATR was backed off */ + union rnp_atr_input fdir_mask; + int fdir_mode; + int fdir_filter_count; + int layer2_count; + int tuple_5_count; + u32 fdir_pballoc; + u32 atr_sample_rate; + spinlock_t fdir_perfect_lock; + u8 __iomem *io_addr_bar0; + u8 __iomem *io_addr; + u32 wol; + u16 bd_number; + u16 q_vector_off; + u16 eeprom_verh; + u16 eeprom_verl; + u16 eeprom_cap; + u16 stags_vid; + u32 sysfs_tx_ring_num; + u32 sysfs_rx_ring_num; + u32 sysfs_tx_desc_num; + u32 sysfs_rx_desc_num; + u32 interrupt_event; + u32 led_reg; + /* maintain */ + char *maintain_buf; + int maintain_buf_len; + void *maintain_dma_buf; + dma_addr_t maintain_dma_phy; + int maintain_dma_size; + int maintain_in_bytes; + /* SR-IOV */ + DECLARE_BITMAP(active_vfs, RNP_MAX_VF_FUNCTIONS); + unsigned int num_vfs; + struct 
vf_data_storage *vfinfo; + int vf_rate_link_speed; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + u32 timer_event_accumulator; + u32 vferr_refcount; + struct kobject *info_kobj; +#ifdef RNP_SYSFS +#ifdef RNP_HWMON + struct hwmon_buff *rnp_hwmon_buff; +#endif /* RNP_HWMON */ +#endif /* RNPM_SYSFS */ +#ifdef CONFIG_DEBUG_FS + struct dentry *rnp_dbg_adapter; +#endif /*CONFIG_DEBUG_FS*/ + u8 default_up; + u8 port; /* nr_pf_port: 0 or 1 */ + u8 portid_of_card; /* port num in card*/ +#define RNP_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[RNP_MAX_RETA_ENTRIES]; +#define RNP_MAX_TC_ENTRIES 8 + u8 rss_tc_tbl[RNP_MAX_TC_ENTRIES]; + int rss_indir_tbl_num; + int rss_tc_tbl_num; + u32 rss_tbl_setup_flag; +#define RNP_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u8 rss_key[RNP_RSS_KEY_SIZE]; + u32 rss_key_setup_flag; + u32 sysfs_is_phy_ext_reg; + u32 sysfs_phy_reg; + u32 sysfs_bar4_reg_val; + u32 sysfs_bar4_reg_addr; + u32 sysfs_pcs_lane_num; + int sysfs_input_arg_cnt; + bool dma2_in_1pf; + char name[60]; + void *csl_dma_buf; + dma_addr_t csl_dma_phy; + int csl_dma_size; +}; + +struct device_list_own { + unsigned short vendor; + unsigned short device; +}; + +struct rnp_fdir_filter { + struct hlist_node fdir_node; + union rnp_atr_input filter; + u16 sw_idx; + u16 hw_idx; + u32 vf_num; + u64 action; +}; + +enum rnp_state_t { + __RNP_TESTING, + __RNP_RESETTING, + __RNP_DOWN, + __RNP_SERVICE_SCHED, + __RNP_IN_SFP_INIT, + __RNP_READ_I2C, + __RNP_PTP_TX_IN_PROGRESS, + __RNP_USE_VFINFI, + __RNP_IN_IRQ, + __RNP_REMOVE, + __RNP_SERVICE_CHECK, +}; + +struct rnp_cb { + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; + dma_addr_t dma; + u16 append_cnt; + bool page_released; +}; +#define RNP_CB(skb) ((struct rnp_cb *)(skb)->cb) + +enum rnp_boards { + board_n10_709_1pf_2x10G, + board_vu440s, + board_n10, + board_n400, +}; + +#if IS_ENABLED(CONFIG_DCB) +extern const struct dcbnl_rtnl_ops dcbnl_ops; +#endif + +extern char rnp_driver_name[]; +extern const char rnp_driver_version[]; + +extern void rnp_up(struct rnp_adapter *adapter); +extern void rnp_down(struct rnp_adapter *adapter); +extern void rnp_reinit_locked(struct rnp_adapter *adapter); +extern void rnp_reset(struct rnp_adapter *adapter); +extern void rnp_set_ethtool_ops(struct net_device *netdev); +extern int rnp_setup_rx_resources(struct rnp_ring *, struct rnp_adapter *); +extern int rnp_setup_tx_resources(struct rnp_ring *, struct rnp_adapter *); +extern void rnp_free_rx_resources(struct rnp_ring *); +extern void rnp_free_tx_resources(struct rnp_ring *); +extern void rnp_configure_rx_ring(struct rnp_adapter *, struct rnp_ring *); +extern void rnp_configure_tx_ring(struct rnp_adapter *, struct rnp_ring *); +extern void rnp_disable_rx_queue(struct rnp_adapter *adapter, + struct rnp_ring *); +extern void rnp_update_stats(struct rnp_adapter *adapter); +extern int rnp_init_interrupt_scheme(struct rnp_adapter *adapter); +extern int rnp_wol_supported(struct rnp_adapter *adapter, u16 device_id, + u16 subdevice_id); +extern void rnp_clear_interrupt_scheme(struct rnp_adapter *adapter); +extern netdev_tx_t rnp_xmit_frame_ring(struct sk_buff *, struct rnp_adapter *, + struct rnp_ring *, bool); +extern void rnp_alloc_rx_buffers(struct rnp_ring *, u16); +extern int rnp_poll(struct napi_struct *napi, int budget); +extern int ethtool_ioctl(struct ifreq *ifr); +extern void rnp_release_hw_control(struct rnp_adapter *adapter); +extern void rnp_get_hw_control(struct rnp_adapter *adapter); +extern s32 
rnp_fdir_write_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *filter, u16 hw_id, + u8 queue, bool prio_flag); +extern void rnp_set_rx_mode(struct net_device *netdev); +#ifdef CONFIG_RNP_DCB +extern void rnp_set_rx_drop_en(struct rnp_adapter *adapter); +#endif +extern int rnp_setup_tx_maxrate(struct rnp_ring *tx_ring, u64 max_rate, + int sample_interval); +extern int rnp_setup_tc(struct net_device *dev, u8 tc); +void rnp_check_options(struct rnp_adapter *adapter); +extern int rnp_open(struct net_device *netdev); +extern int rnp_close(struct net_device *netdev); +void rnp_tx_ctxtdesc(struct rnp_ring *tx_ring, u32 mss_len_vf_num, + u32 inner_vlan_tunnel_len, int ignore_vlan, bool crc_pad); +void rnp_maybe_tx_ctxtdesc(struct rnp_ring *tx_ring, + struct rnp_tx_buffer *first, u32 type_tucmd); +extern void rnp_store_reta(struct rnp_adapter *adapter); +extern void rnp_store_key(struct rnp_adapter *adapter); +extern int rnp_init_rss_key(struct rnp_adapter *adapter); +extern int rnp_init_rss_table(struct rnp_adapter *adapter); +extern void rnp_setup_dma_rx(struct rnp_adapter *adapter, int count_in_dw); +extern s32 rnp_fdir_erase_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *input, u16 hw_id); +extern u32 rnp_rss_indir_tbl_entries(struct rnp_adapter *adapter); +extern u32 rnp_tx_desc_unused_sw(struct rnp_ring *tx_ring); +extern u32 rnp_tx_desc_unused_hw(struct rnp_hw *hw, struct rnp_ring *tx_ring); +extern s32 rnp_disable_rxr_maxrate(struct net_device *netdev, u8 queue_index); +extern s32 rnp_enable_rxr_maxrate(struct net_device *netdev, u8 queue_index, + u32 maxrate); +extern u32 rnp_rx_desc_used_hw(struct rnp_hw *hw, struct rnp_ring *rx_ring); +extern void rnp_do_reset(struct net_device *netdev); +#ifdef RNP_HWMON +extern void rnp_sysfs_exit(struct rnp_adapter *adapter); +extern int rnp_sysfs_init(struct rnp_adapter *adapter); +#endif /* CONFIG_RNP_HWMON */ +#ifdef CONFIG_DEBUG_FS +extern void rnp_dbg_adapter_init(struct rnp_adapter *adapter); +extern void rnp_dbg_adapter_exit(struct rnp_adapter *adapter); +extern void rnp_dbg_init(void); +extern void rnp_dbg_exit(void); +#else +static inline void rnp_dbg_adapter_init(struct rnp_adapter *adapter) +{ +} +static inline void rnp_dbg_adapter_exit(struct rnp_adapter *adapter) +{ +} +static inline void rnp_dbg_init(void) +{ +} +static inline void rnp_dbg_exit(void) +{ +} +#endif /* CONFIG_DEBUG_FS */ +static inline struct netdev_queue *txring_txq(const struct rnp_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +void rnp_service_event_schedule(struct rnp_adapter *adapter); +extern void rnp_ptp_init(struct rnp_adapter *adapter); +extern void rnp_ptp_stop(struct rnp_adapter *adapter); +extern void rnp_ptp_overflow_check(struct rnp_adapter *adapter); +extern void rnp_ptp_rx_hang(struct rnp_adapter *adapter); +extern void __rnp_ptp_rx_hwtstamp(struct rnp_q_vector *q_vector, + struct sk_buff *skb); +static inline void rnp_ptp_rx_hwtstamp(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + if (unlikely(!rnp_test_staterr(rx_desc, RNP_RXD_STAT_PTP))) + return; + /* + * Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. 
+ */ + rx_ring->last_rx_timestamp = jiffies; +} + +static inline int ignore_veb_vlan(struct rnp_adapter *adapter, + union rnp_rx_desc *rx_desc) +{ + if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_IGNORE_VLAN))) { + return 1; + } + return 0; +} + +static inline int ignore_veb_pkg_err(struct rnp_adapter *adapter, + union rnp_rx_desc *rx_desc) +{ + if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_PKG))) { + return 1; + } + return 0; +} + +int rnp_update_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct rnp_fdir_filter *input, u16 sw_idx); + +static inline int rnp_is_pf1(struct rnp_hw *hw) +{ + return !!(hw->pfvfnum & BIT(PF_BIT)); +} + +static inline int rnp_is_pf0(struct rnp_hw *hw) +{ + return !rnp_is_pf1(hw); +} + +static inline int rnp_get_fuc(struct pci_dev *pdev) +{ + return pdev->devfn; +} + +extern void rnp_service_task(struct work_struct *work); +extern void rnp_sysfs_exit(struct rnp_adapter *adapter); +extern int rnp_sysfs_init(struct rnp_adapter *adapter); + +#ifdef CONFIG_PCI_IOV +void rnp_sriov_reinit(struct rnp_adapter *adapter); +#endif + +#define SET_BIT(n, var) (var = (var | (1 << n))) +#define CLR_BIT(n, var) (var = (var & (~(1 << n)))) +#define CHK_BIT(n, var) (var & (1 << n)) +#define RNP_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +static inline bool rnp_removed(void __iomem *addr) +{ + return unlikely(!addr); +} +#define RNP_REMOVED(a) rnp_removed(a) +int rnp_fw_msg_handler(struct rnp_adapter *adapter); + +int rnp500_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, + int bytes); + +int rnp_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, + int bytes); +#define RNPM_FW_VERSION_NEW_ETHTOOL 0x00050010 +static inline bool rnp_fw_is_old_ethtool(struct rnp_hw *hw) +{ + return hw->fw_version >= RNPM_FW_VERSION_NEW_ETHTOOL ? false : true; +} + +#endif /* _RNP_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_common.c b/drivers/net/ethernet/mucse/rnp/rnp_common.c new file mode 100644 index 0000000000000..70e1686c4c845 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_common.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_common.h" +#include "rnp_mbx.h" + +unsigned int rnp_loglevel; +module_param(rnp_loglevel, uint, S_IRUSR | S_IWUSR); + + diff --git a/drivers/net/ethernet/mucse/rnp/rnp_common.h b/drivers/net/ethernet/mucse/rnp/rnp_common.h new file mode 100644 index 0000000000000..5de180c61b482 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_common.h @@ -0,0 +1,383 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_COMMON_H_ +#define _RNP_COMMON_H_ + +#include +#include +#include "rnp_type.h" +#include "rnp.h" +#include "rnp_regs.h" + +struct rnp_adapter; + +#define TRACE() printk(KERN_DEBUG "==[ %s %d ] ==\n", __func__, __LINE__) + +#ifdef CONFIG_RNP_RX_DEBUG +#define rx_debug_printk printk +#define rx_buf_dump buf_dump +#define rx_dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define rx_debug_printk(fmt, args...) +#define rx_buf_dump(a, b, c) +#define rx_dbg(fmt, args...) 
+#endif //CONFIG_RNP_RX_DEBUG + +#ifdef CONFIG_RNP_TX_DEBUG +#define desc_hex_dump(msg, buf, len) \ + print_hex_dump(KERN_WARNING, msg, DUMP_PREFIX_OFFSET, 16, 1, (buf), \ + (len), false) +#define rnp_skb_dump _rnp_skb_dump + +#define tx_dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define desc_hex_dump(msg, buf, len) +#define rnp_skb_dump(skb, full_pkt) +#define tx_dbg(fmt, args...) +#endif //CONFIG_RNP_TX_DEBUG + +#ifdef DEBUG +#define dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define dbg(fmt, args...) +#endif + +#ifdef CONFIG_RNP_VF_DEBUG +#define vf_dbg(fmt, args...) \ + printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args) +#else +#define vf_dbg(fmt, args...) +#endif + +int rnp_acquire_msix_vectors(struct rnp_adapter *adapter, int vectors); + +//================= registers read/write helper ===== +#define p_rnp_wr_reg(reg, val) \ + do { \ + printk(KERN_DEBUG " wr-reg: %p <== 0x%08x \t#%-4d %s\n", \ + (reg), (val), __LINE__, __FILE__); \ + iowrite32((val), (void *)(reg)); \ + } while (0) + +static inline unsigned int prnp_rd_reg(void *reg) +{ + unsigned int v = ioread32((void *)(reg)); + + printk(KERN_DEBUG " %p => 0x%08x\n", reg, v); + return v; +} + +#ifdef IO_PRINT +static inline unsigned int rnp_rd_reg(void *reg) +{ + unsigned int v = ioread32((void *)(reg)); + + dbg(" rd-reg: %p <== 0x%08x\n", reg, v); + return v; +} +#define rnp_wr_reg(reg, val) \ + do { \ + dbg(" wr-reg: %p <== 0x%08x \t#%-4d %s\n", (reg), (val), \ + __LINE__, __FILE__); \ + iowrite32((val), (void *)(reg)); \ + } while (0) +#else +#define rnp_rd_reg(reg) readl((void *)(reg)) +#define rnp_wr_reg(reg, val) writel((val), (void *)(reg)) +#endif + +#define rd32(hw, off) rnp_rd_reg((hw)->hw_addr + (off)) +#define wr32(hw, off, val) rnp_wr_reg((hw)->hw_addr + (off), (val)) + +#define nic_rd32(nic, off) rnp_rd_reg((nic)->nic_base_addr + (off)) +#define nic_wr32(nic, off, val) rnp_wr_reg((nic)->nic_base_addr + (off), (val)) + +#define dma_rd32(dma, off) rnp_rd_reg((dma)->dma_base_addr + (off)) +#define dma_wr32(dma, off, val) rnp_wr_reg((dma)->dma_base_addr + (off), (val)) + +#define dma_ring_rd32(dma, off) rnp_rd_reg((dma)->dma_ring_addr + (off)) +#define dma_ring_wr32(dma, off, val) \ + rnp_wr_reg((dma)->dma_ring_addr + (off), (val)) + +#define eth_rd32(eth, off) rnp_rd_reg((eth)->eth_base_addr + (off)) +#define eth_wr32(eth, off, val) rnp_wr_reg((eth)->eth_base_addr + (off), (val)) + +#define mac_rd32(mac, off) rnp_rd_reg((mac)->mac_addr + (off)) +#define mac_wr32(mac, off, val) rnp_wr_reg((mac)->mac_addr + (off), (val)) +#ifdef debug_ring +static inline unsigned int rnp_rd_reg_1(int ring, u32 off, void *reg) +{ + unsigned int v = ioread32((void *)(reg)); + + printk(KERN_DEBUG "%d rd-reg: %x <== 0x%08x\n", ring, off, v); + return v; +} + +#define ring_rd32(ring, off) \ + rnp_rd_reg_1(ring->rnp_queue_idx, off, (ring)->ring_addr + (off)) +#define ring_wr32(ring, off, val) rnp_wr_reg((ring)->ring_addr + (off), (val)) +#else +#define ring_rd32(ring, off) rnp_rd_reg((ring)->ring_addr + (off)) +#define ring_wr32(ring, off, val) rnp_wr_reg((ring)->ring_addr + (off), (val)) +#endif + +#define pwr32(hw, off, val) p_rnp_wr_reg((hw)->hw_addr + (off), (val)) + +#define rnp_mbx_rd(hw, off) rnp_rd_reg((hw)->ring_msix_base + (off)) +#define rnp_mbx_wr(hw, off, val) rnp_wr_reg((hw)->ring_msix_base + (off), val) + +static inline void hw_queue_strip_rx_vlan(struct rnp_hw *hw, u8 ring_num, + bool enable) +{ + u32 reg = 
RNP_ETH_VLAN_VME_REG(ring_num / 32); + u32 offset = ring_num % 32; + u32 data = rd32(hw, reg); + + if (enable == true) + data |= (1 << offset); + else + data &= ~(1 << offset); + wr32(hw, reg, data); +} + +#define rnp_set_reg_bit(hw, reg_def, bit) \ + do { \ + u32 reg = reg_def; \ + u32 value = rd32(hw, reg); \ + dbg("before set %x %x\n", reg, value); \ + value |= (0x01 << bit); \ + dbg("after set %x %x\n", reg, value); \ + wr32(hw, reg, value); \ + } while (0) + +#define rnp_clr_reg_bit(hw, reg_def, bit) \ + do { \ + u32 reg = reg_def; \ + u32 value = rd32(hw, reg); \ + dbg("before clr %x %x\n", reg, value); \ + value &= (~(0x01 << bit)); \ + dbg("after clr %x %x\n", reg, value); \ + wr32(hw, reg, value); \ + } while (0) + +#define rnp_vlan_filter_on(hw) \ + rnp_set_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30) +#define rnp_vlan_filter_off(hw) \ + rnp_clr_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30) + +#define DPRINTK(nlevel, klevel, fmt, args...) \ + ((NETIF_MSG_##nlevel & adapter->msg_enable) ? \ + (void)(netdev_printk(KERN_##klevel, adapter->netdev, fmt, \ + ##args)) : \ + NULL) + +//==== log helper === +#ifdef HW_DEBUG +#define hw_dbg(hw, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args) +#define eth_dbg(eth, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args) +#else +#define hw_dbg(hw, fmt, args...) +#define eth_dbg(hw, fmt, args...) +#endif + +//#define RNP_DEBUG_OPEN +#ifdef RNP_DEBUG_OPEN +#define rnp_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) +#else +#define rnp_dbg(fmt, args...) +#endif +#define rnp_info(fmt, args...) printk(KERN_DEBUG "rnp-info: " fmt, ##args) +#define rnp_warn(fmt, args...) printk(KERN_DEBUG "rnp-warn: " fmt, ##args) +#define rnp_err(fmt, args...) printk(KERN_ERR "rnp-err : " fmt, ##args) + +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ##arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ##arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ##arg) +#define e_crit(msglvl, format, arg...) \ + netif_crit(adapter, msglvl, adapter->netdev, format, ##arg) + +#define e_dev_info(format, arg...) dev_info(&adapter->pdev->dev, format, ##arg) +#define e_dev_warn(format, arg...) dev_warn(&adapter->pdev->dev, format, ##arg) +#define e_dev_err(format, arg...) 
dev_err(&adapter->pdev->dev, format, ##arg) + +#ifdef CONFIG_RNP_TX_DEBUG +static inline void buf_dump_line(const char *msg, int line, void *buf, int len) +{ + int i, offset = 0; + int msg_len = 1024; + u8 msg_buf[1024]; + u8 *ptr = (u8 *)buf; + + offset += snprintf(msg_buf + offset, msg_len, + "=== %s #%d line:%d buf:%p==\n000: ", msg, len, line, + buf); + + for (i = 0; i < len; ++i) { + if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) { + printk(KERN_DEBUG "%s\n", msg_buf); + offset = 0; + } + + if ((i != 0) && (i % 16) == 0) { + offset += snprintf(msg_buf + offset, msg_len, + "\n%03x: ", i); + } + offset += snprintf(msg_buf + offset, msg_len, "%02x ", ptr[i]); + } + + offset += snprintf(msg_buf + offset, msg_len, "\n"); + printk(KERN_DEBUG "%s\n", msg_buf); +} +#else +#define buf_dump_line(msg, line, buf, len) +#endif + +static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size) +{ + return cpu_to_le64(((u64)vlan_cmd << 32) | ((u64)mac_ip_len << 16) | + ((u64)size)); +} + +static inline void buf_dump(const char *msg, void *buf, int len) +{ + int i, offset = 0; + int msg_len = 1024; + u8 msg_buf[1024]; + u8 *ptr = (u8 *)buf; + + offset += snprintf(msg_buf + offset, msg_len, + "=== %s #%d ==\n000: ", msg, len); + + for (i = 0; i < len; ++i) { + if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) { + printk(KERN_DEBUG "%s\n", msg_buf); + offset = 0; + } + + if ((i != 0) && (i % 16) == 0) { + offset += snprintf(msg_buf + offset, msg_len, + "\n%03x: ", i); + } + offset += snprintf(msg_buf + offset, msg_len, "%02x ", ptr[i]); + } + + offset += snprintf(msg_buf + offset, msg_len, "\n=== done ==\n"); + printk(KERN_DEBUG "%s\n", msg_buf); +} + +#ifndef NO_SKB_DUMP +static inline void _rnp_skb_dump(const struct sk_buff *skb, bool full_pkt) +{ + static atomic_t can_dump_full = ATOMIC_INIT(5); +#ifdef DEBUG + struct skb_shared_info *sh = skb_shinfo(skb); +#endif + struct net_device *dev = skb->dev; + //struct sock *sk = skb->sk; + struct sk_buff *list_skb; + bool has_mac, has_trans; + int headroom, tailroom; + int i, len, seg_len; + const char *level = KERN_WARNING; + + if (full_pkt) + full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0; + + if (full_pkt) + len = skb->len; + else + len = min_t(int, skb->len, MAX_HEADER + 128); + + headroom = skb_headroom(skb); + tailroom = skb_tailroom(skb); + + has_mac = skb_mac_header_was_set(skb); + has_trans = skb_transport_header_was_set(skb); + + dbg("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n" + "mac=(%d,%d) net=(%d,%d) trans=%d\n" + "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n" + "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n" + "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n", + level, skb->len, headroom, skb_headlen(skb), tailroom, + has_mac ? skb->mac_header : -1, + has_mac ? (skb->network_header - skb->mac_header) : -1, + skb->network_header, has_trans ? skb_network_header_len(skb) : -1, + has_trans ? 
skb->transport_header : -1, sh->tx_flags, sh->nr_frags, + sh->gso_size, sh->gso_type, sh->gso_segs, skb->csum, skb->ip_summed, + skb->csum_complete_sw, skb->csum_valid, skb->csum_level, skb->hash, + skb->sw_hash, skb->l4_hash, ntohs(skb->protocol), skb->pkt_type, + skb->skb_iif); + + if (dev) + dbg("%sdev name=%s feat=0x%pNF\n", level, dev->name, + &dev->features); + + seg_len = min_t(int, skb_headlen(skb), len); + if (seg_len) + print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET, 16, + 1, skb->data, seg_len, false); + len -= seg_len; + + for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 p_len; + struct page *p; + u8 *vaddr; + + p = skb_frag_address(frag); + p_len = skb_frag_size(frag); + seg_len = min_t(int, p_len, len); + vaddr = kmap_atomic(p); + print_hex_dump(level, "skb frag: ", DUMP_PREFIX_OFFSET, 16, + 1, vaddr, seg_len, false); + kunmap_atomic(vaddr); + len -= seg_len; + if (!len) + break; + } + + if (full_pkt && skb_has_frag_list(skb)) { + dbg("skb fraglist:\n"); + skb_walk_frags(skb, list_skb) _rnp_skb_dump(list_skb, true); + } +} +#endif + +enum RNP_LOG_EVT { + LOG_MBX_IN, + LOG_MBX_OUT, + LOG_MBX_MSG_IN, + LOG_MBX_MSG_OUT, + LOG_LINK_EVENT, + LOG_ADPT_STAT, + LOG_MBX_ABLI, + LOG_MBX_LINK_STAT, + LOG_MBX_IFUP_DOWN, + LOG_MBX_LOCK, + LOG_ETHTOOL, + LOG_PHY, + +}; + +#define MII_BUSY 0x00000001 +#define MII_WRITE 0x00000002 +#define MII_DATA_MASK GENMASK(15, 0) + +extern unsigned int rnp_loglevel; + +#define rnp_logd(evt, fmt, args...) \ + do { \ + if (BIT(evt) & rnp_loglevel) { \ + printk(KERN_DEBUG fmt, ##args); \ + } \ + } while (0) + +#endif /* _RNP_COMMON_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_dcb.c b/drivers/net/ethernet/mucse/rnp/rnp_dcb.c new file mode 100644 index 0000000000000..65580d02a0a6f --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_dcb.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/
+
+#include
+
+#ifdef CONFIG_DCB
+#include "rnp.h"
+#include "rnp_dcb.h"
+#include "rnp_sriov.h"
+#include "rnp_common.h"
+
+static void rnp_config_prio_map(struct rnp_adapter *adapter, u8 pfc_map)
+{
+	int i, j;
+	u32 prio_map = 0;
+	u8 port = adapter->port;
+	u8 *prio_tc = adapter->prio_tc_map;
+	void __iomem *ioaddr = adapter->hw.hw_addr;
+	u8 num_tc = adapter->num_tc;
+
+	for (i = 0; i < num_tc; i++) {
+		if (i > RNP_MAX_TCS_NUM)
+			break;
+		for (j = 0; j < RNP_MAX_USER_PRIO; j++) {
+			dbg("prio_tc[%d]==%d tc_num[%d] pfc_map 0x%.2x\n", j,
+			    prio_tc[j], i, pfc_map);
+			if ((prio_tc[j] == i) && (pfc_map & BIT(j))) {
+				dbg("match rule tc_num %d prio_%d\n", i, j);
+				prio_map |= (i << (2 * j));
+				dbg("match prio_tc change to 0x%.2x\n",
+				    prio_map);
+			}
+		}
+	}
+	/* configure the fifo used for untagged packets */
+	/* only four tc fifos exist and one of them must be reserved for
+	 * untagged packets, so untagged traffic is mapped to the remaining
+	 * tc fifo
+	 */
+	prio_map |= i << RNP_FC_UNCTAGS_MAP_OFFSET;
+	prio_map |= (1 << 30) | (1 << 31);
+	rnp_wr_reg(ioaddr + RNP_FC_PORT_PRIO_MAP(port), prio_map);
+	dbg("tc_prio_map[%d] 0x%.2x\n", i, prio_map);
+
+	/* enable port prio_map config */
+	rnp_wr_reg(ioaddr + RNP_FC_EN_CONF_AVAILABLE, 1);
+}
+
+static int rnp_dcb_hw_pfc_config(struct rnp_adapter *adapter, u8 pfc_map)
+{
+	struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg;
+	void __iomem *ioaddr = adapter->hw.hw_addr;
+	u8 i = 0, j = 0;
+	u32 reg = 0;
+	u8 num_tc = adapter->num_tc;
+
+	if (!(adapter->flags & RNP_FLAG_DCB_ENABLED) ||
+	    adapter->num_rx_queues <= 1) {
+		dev_warn(&adapter->pdev->dev,
+			 "pfc is not supported when DCB is disabled or only one rx queue is in use (DCB flag %d)\n",
+			 adapter->flags & RNP_FLAG_DCB_ENABLED);
+		return 0;
+	}
+	/* 1. Enable Receive Priority Flow Control */
+	reg = RNP_RX_RFE | RNP_PFCE;
+	rnp_wr_reg(ioaddr + RNP_MAC_RX_FLOW_CTRL, reg);
+	/* 2. Configure which port will be in pfc mode */
+	reg = rnp_rd_reg(ioaddr + RNP_FC_PORT_ENABLE);
+	/* 3. Only the two-port version is supported for now, so just
+	 * enable flow control on the PF's port
+	 */
+	reg |= 1 << adapter->port;
+	rnp_wr_reg(ioaddr + RNP_FC_PORT_ENABLE, reg);
+
+	for (i = 0; i < num_tc; i++) {
+		int enabled = 0;
+
+		for (j = 0; j < RNP_MAX_USER_PRIO; j++) {
+			if ((adapter->prio_tc_map[j] == i) &&
+			    (pfc_map & BIT(j))) {
+				enabled = 1;
+				dcb->pfc_cfg.hw_pfc_map |= BIT(j);
+				dcb->pfc_cfg.pfc_num++;
+				break;
+			}
+		}
+		if (enabled) {
+			/* 4. Enable Transmit Priority Flow Control */
+			reg = RNP_TX_TFE |
+			      (RNP_PAUSE_28_SLOT_TIME
+			       << RNP_FC_TX_PLTH_OFFSET) |
+			      (RNP_DEFAULT_PAUSE_TIME << RNP_FC_TX_PT_OFFSET);
+
+			rnp_wr_reg(ioaddr + RNP_MAC_Q0_TX_FLOW_CTRL(j), reg);
+		}
+	}
+	/* the settings below can be left at their default configuration */
+	/* 5. config the prio_map */
+	rnp_config_prio_map(adapter, pfc_map);
+	/* 6. Configure PFC Rx high/low thresholds per TC */
+
+	/* 7. Configure Rx full/empty thresholds per TC */
+
+	/* 8. Configure pause time (3 TCs per register) */
+	/* 9. Configure flow control pause low threshold value */
+
+	return 0;
+}
+
+__maybe_unused static int rnp_dcb_hw_fc_enable(struct rnp_adapter *adapter)
+{
+	void __iomem *ioaddr = adapter->hw.hw_addr;
+
+	/* 1. Enable Transmit Flow Control */
+	rnp_wr_reg(ioaddr + RNP_MAC_Q0_TX_FLOW_CTRL(0), RNP_TX_TFE);
+	/* 2. Enable Receive Flow Control */
+	rnp_wr_reg(ioaddr + RNP_MAC_RX_FLOW_CTRL, RNP_RX_RFE);
+	/* 3. Configure the flow control pause time and pause low threshold;
+	 * the hardware defaults are left in place here.
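+	 *
+	 * If explicit values were wanted, they would go into the pause time
+	 * (RNP_PT, bits 31:16) and pause low threshold (RNP_TX_PLT, bits 6:4)
+	 * fields of the same register, e.g. (a sketch mirroring the PFC path
+	 * above, not a documented requirement):
+	 *   rnp_wr_reg(ioaddr + RNP_MAC_Q0_TX_FLOW_CTRL(0),
+	 *              RNP_TX_TFE |
+	 *              (RNP_PAUSE_28_SLOT_TIME << RNP_FC_TX_PLTH_OFFSET) |
+	 *              (RNP_DEFAULT_PAUSE_TIME << RNP_FC_TX_PT_OFFSET));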
+ */ + return 0; +} + +static int rnp_dcbnl_getpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + u8 i = 0, j = 0; + + memset(pfc, 0, sizeof(*pfc)); + pfc->pfc_cap = dcb->pfc_cfg.pfc_max; + /* Pfc setting is based on TC */ + for (i = 0; i < adapter->num_tc; i++) { + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + if ((adapter->prio_tc_map[j] == i) && + (dcb->pfc_cfg.hw_pfc_map & BIT(i))) + pfc->pfc_en |= BIT(j); + } + } + /* do we need to get the pfc statistic*/ + /* 1. get the tc channel send and recv pfc pkts*/ + /* + *for (i = 0; i < TSRN10_MAX_TC_NUM; i++) { + * pfc->requests[i] = dcb->requests[i]; + * pfc->indications[i] = dcb->indications[i]; + } + */ + + return 0; +} + +/* rnp Support IEEE 802.3 flow-control and + * Priority base flow control (PFC) + */ +static u8 rnp_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap) +{ + struct rnp_adapter *priv = netdev_priv(net_dev); + + switch (capid) { + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->dcb_cfg.dcbx_mode; + break; + default: + *cap = false; + break; + } + + return 0; +} + +static u8 rnp_dcbnl_getstate(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + return !!(adapter->flags & RNP_FLAG_DCB_ENABLED); +} + +static u8 rnp_dcbnl_setstate(struct net_device *netdev, u8 state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int err = 0; + + /* verify there is something to do, if not then exit */ + if (!state == !(adapter->flags & RNP_FLAG_DCB_ENABLED)) + goto out; + + err = rnp_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pfc_tcs : 0); +out: + return !!err; +} + +static u8 rnp_dcbnl_getdcbx(struct net_device *net_dev) +{ + struct rnp_adapter *adapter = netdev_priv(net_dev); + + return adapter->dcb_cfg.dcbx_mode; +} + +static u8 rnp_dcbnl_setdcbx(struct net_device *net_dev, u8 mode) +{ + struct rnp_adapter *adapter = netdev_priv(net_dev); + + adapter->dcb_cfg.dcbx_mode = mode; + + return 0; + return (mode != (adapter->dcb_cfg.dcbx_mode)) ? 
1 : 0; +} + +static int rnp_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PFC: + if (adapter->dcb_cfg.num_tcs.pfc_tcs > + RNP_MAX_TCS_NUM) { + rval = -EINVAL; + break; + } + *num = adapter->dcb_cfg.num_tcs.pfc_tcs; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; +} + +static int rnp_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + u8 rval = 0; + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + switch (tcid) { + case DCB_NUMTCS_ATTR_PFC: + adapter->dcb_cfg.num_tcs.pfc_tcs = num; + break; + default: + rval = -EINVAL; + break; + } + } else { + rval = -EINVAL; + } + + return rval; +} + +static int rnp_dcb_parse_config(struct rnp_dcb_cfg *dcb, struct ieee_pfc *pfc) +{ + u8 j = 0, pfc_en_num = 0, pfc_map = 0; + + for (j = 0; j < RNP_MAX_USER_PRIO; j++) { + if ((pfc->pfc_en & BIT(j))) { + pfc_map |= BIT(j); + pfc_en_num++; + } + } + dcb->pfc_cfg.pfc_num = pfc_en_num; + dcb->pfc_cfg.hw_pfc_map = pfc_map; + dbg("pfc_map 0x%.2x pfc->pfc_en 0x%.2x\n", pfc_map, pfc->pfc_en); + /* tc resource rebuild */ + /* we need to decide tx_ring bind to tc 4 fifo-mac*/ + return pfc_map; +} + +static int rnp_dcbnl_setpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + u8 pfc_map = 0; + + dbg("%s:%d pfc enabled %d\n", __func__, __LINE__, pfc->pfc_en); + if (pfc->pfc_en) { + /*set PFC Priority mask */ + pfc_map = rnp_dcb_parse_config(dcb, pfc); + rnp_dcb_hw_pfc_config(adapter, pfc_map); + } else { + /* set PAUSE mode */ + // fc is controlled by ethtool + //rnp_dcb_hw_fc_enable(adapter); + } + + return 0; +} + +static u8 rnp_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_pfc_cfg *pfc_cfg = &adapter->dcb_cfg.pfc_cfg; + + return pfc_cfg->pfc_en; +} + +static void rnp_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + adapter->dcb_cfg.pfc_cfg.pfc_en = state; +} + +const struct dcbnl_rtnl_ops rnp_dcbnl_ops = { + /*DCB PFC*/ + /*IEEE*/ + .ieee_getpfc = rnp_dcbnl_getpfc, + .ieee_setpfc = rnp_dcbnl_setpfc, + .getcap = rnp_dcbnl_getcap, + .setdcbx = rnp_dcbnl_setdcbx, + .getdcbx = rnp_dcbnl_getdcbx, + .getnumtcs = rnp_dcbnl_getnumtcs, + .setnumtcs = rnp_dcbnl_setnumtcs, + + /*CEE*/ + .getstate = rnp_dcbnl_getstate, + .setstate = rnp_dcbnl_setstate, + + .getpfcstate = rnp_dcbnl_getpfcstate, + .setpfcstate = rnp_dcbnl_setpfcstate, +}; + +int rnp_dcb_init(struct net_device *dev, struct rnp_adapter *adapter) +{ + struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; + struct rnp_hw *hw = &adapter->hw; + + if (hw->hw_type != rnp_hw_n10) + return 0; + + dcb->dcb_en = false; + dcb->pfc_cfg.pfc_max = RNP_MAX_TCS_NUM; + dcb->num_tcs.pfc_tcs = RNP_MAX_TCS_NUM; + dcb->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + dev->dcbnl_ops = &rnp_dcbnl_ops; + + return 0; +} +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_dcb.h b/drivers/net/ethernet/mucse/rnp/rnp_dcb.h new file mode 100644 index 0000000000000..23941b81eef5f --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_dcb.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef __RNP_DCB_H__ +#define __RNP_DCB_H__ +#include "rnp.h" + +enum rnp_pause_low_thrsh { + RNP_PAUSE_4_SLOT_TIME = 0, + RNP_PAUSE_28_SLOT_TIME, + RNP_PAUSE_36_SLOT_TIME, + RNP_PAUSE_144_SLOT_TIME, + RNP_PAUSE_256_SLOT_TIME, +}; +/*Rx Flow Ctrl */ +#define RNP_RX_RFE BIT(0) /* Receive Flow Control Enable */ +#define RNP_UP BIT(1) /* Unicast Pause Packet Detect */ +#define RNP_PFCE BIT(8) /* Priority Based Flow Control Enable. */ + +/*Tx Flow Ctrl */ +#define RNP_TX_FCB BIT(0) /* Tx Flow Control Busy. */ +#define RNP_TX_TFE BIT(1) /* Transmit Flow Control Enable.*/ +#define RNP_TX_PLT GENMASK(6, 4) /* Pause Low Threshold. */ +#define RNP_DZPQ BIT(7) /*Disable Zero-Quanta Pause.*/ +#define RNP_PT GENMASK(31, 16) /* Pause Time. */ + +#define RNP_DEFAULT_PAUSE_TIME (0x100) /* */ +#define RNP_FC_TX_PLTH_OFFSET (4) /* Pause Low Threshold */ +#define RNP_FC_TX_PT_OFFSET (16) /* Pause Time */ + +#define RNP_DCB_MAX_TCS_NUM (4) +#define RNP_DCB_MAX_PFC_NUM (4) + +struct rnp_adapter; +int rnp_dcb_init(struct net_device *dev, struct rnp_adapter *adapter); +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c b/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c new file mode 100644 index 0000000000000..e47758b58bcfa --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include + +#include "rnp.h" +#include "rnp_type.h" + +#ifdef HAVE_RNP_DEBUG_FS +static struct dentry *rnp_dbg_root; +static char rnp_dbg_reg_ops_buf[256] = ""; + +#define bus_to_virt phys_to_virt + +static int rnp_dbg_csl_open(struct inode *inode, struct file *filp) +{ + void *dma_buf = NULL; + dma_addr_t dma_phy; + int err, bytes = 4096; + struct rnp_adapter *adapter; + const char *name; + struct rnp_hw *hw; + + if (inode->i_private) { + filp->private_data = inode->i_private; + } else { + return -EIO; + } + + adapter = filp->private_data; + + if (adapter == NULL) { + return -EIO; + } + + if (adapter->csl_dma_buf != NULL) { + return 0; + } + hw = &adapter->hw; + name = adapter->name; + + dma_buf = + dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_ATOMIC); + if (!dma_buf) { + e_dev_err("%s: no dma buf", name); + return -ENOMEM; + } + memset(dma_buf, 0, bytes); + + adapter->csl_dma_buf = dma_buf; + adapter->csl_dma_phy = dma_phy; + adapter->csl_dma_size = bytes; + + err = rnp_mbx_ddr_csl_enable(hw, 1, dma_phy, bytes); + if (err) { + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + adapter->csl_dma_buf = NULL; + return -EIO; + } + + return 0; +} + +static int rnp_dbg_csl_release(struct inode *inode, struct file *filp) +{ + struct rnp_adapter *adapter = filp->private_data; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->csl_dma_buf) { + rnp_mbx_ddr_csl_enable(hw, 0, 0, 0); + dma_free_coherent(&hw->pdev->dev, adapter->csl_dma_size, + adapter->csl_dma_buf, adapter->csl_dma_phy); + adapter->csl_dma_buf = NULL; + } + + return 0; +} + +static int rnp_dbg_csl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long length; + struct rnp_adapter *adapter = filp->private_data; + void *dma_buf = adapter->csl_dma_buf; + dma_addr_t dma_phy = adapter->csl_dma_phy; + int dma_bytes = adapter->csl_dma_size; + int ret = 0; + + length = (unsigned long)(vma->vm_end - vma->vm_start); + + if (length > dma_bytes) { + return -EIO; + } + if (vma->vm_pgoff == 0) { + ret = dma_mmap_coherent(&adapter->pdev->dev, vma, dma_buf, dma_phy, length); + } else { + vma->vm_page_prot = 
pgprot_noncached(vma->vm_page_prot);
+		ret = remap_pfn_range(
+			vma, vma->vm_start,
+			PFN_DOWN(virt_to_phys(bus_to_virt(dma_phy))) +
+				vma->vm_pgoff,
+			length, vma->vm_page_prot);
+	}
+
+	if (ret < 0) {
+		printk(KERN_ERR "%s: remap failed (%d)\n", __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct file_operations rnp_dbg_csl_fops = {
+	.owner = THIS_MODULE,
+	.open = rnp_dbg_csl_open,
+	.release = rnp_dbg_csl_release,
+	.mmap = rnp_dbg_csl_mmap,
+};
+
+static ssize_t rnp_dbg_eth_info_read(struct file *filp, char __user *buffer,
+				     size_t count, loff_t *ppos)
+{
+	struct rnp_adapter *adapter = filp->private_data;
+	char *buf = NULL;
+	int len;
+
+	if (adapter == NULL) {
+		return -EIO;
+	}
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	buf = kasprintf(GFP_KERNEL, "bd:%d port%d %s %s\n", adapter->bd_number,
+			0, adapter->netdev->name, pci_name(adapter->pdev));
+	if (!buf)
+		return -ENOMEM;
+
+	if (count < strlen(buf)) {
+		kfree(buf);
+		return -ENOSPC;
+	}
+
+	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+	kfree(buf);
+	return len;
+}
+
+static const struct file_operations rnp_dbg_eth_info_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = rnp_dbg_eth_info_read,
+};
+
+static ssize_t rnp_dbg_mbx_cookies_info_read(struct file *filp, char __user *buffer,
+					     size_t count, loff_t *ppos)
+{
+	struct rnp_adapter *adapter = filp->private_data;
+	char *buf = NULL;
+	int len, i;
+	struct mbx_req_cookie_pool *cookie_pool = &(adapter->hw.mbx.cookie_pool);
+	struct mbx_req_cookie *cookie;
+	int free_cnt = 0, wait_timout_cnt = 0, alloced_cnt = 0;
+
+	if (adapter == NULL) {
+		return -EIO;
+	}
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+	for (i = 0; i < MAX_COOKIES_ITEMS; i++) {
+		cookie = &(cookie_pool->cookies[i]);
+		if (cookie->stat == COOKIE_FREE) {
+			free_cnt++;
+		} else if (cookie->stat == COOKIE_FREE_WAIT_TIMEOUT) {
+			wait_timout_cnt++;
+		} else if (cookie->stat == COOKIE_ALLOCED) {
+			alloced_cnt++;
+		}
+	}
+
+	buf = kasprintf(GFP_KERNEL, "pool items:cur:%d total: %d. 
free:%d wait_free:%d alloced:%d \n", cookie_pool->next_idx, MAX_COOKIES_ITEMS, + free_cnt, wait_timout_cnt, alloced_cnt); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static const struct file_operations rnp_dbg_mbx_cookies_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_mbx_cookies_info_read, +}; + +/** + * rnp_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name, + rnp_dbg_reg_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * rnp_dbg_reg_ops_write - write into reg_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + struct rnp_hw *hw = &adapter->hw; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(rnp_dbg_reg_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(rnp_dbg_reg_ops_buf, + sizeof(rnp_dbg_reg_ops_buf) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + rnp_dbg_reg_ops_buf[len] = '\0'; + + if (strncmp(rnp_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + + cnt = sscanf(&rnp_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + if (reg >= 0x30000000) { + rnp_mbx_reg_write(hw, reg, value); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, + value); + } else { + rnp_wr_reg(hw->hw_addr + reg, value); + value = rnp_rd_reg(hw->hw_addr + reg); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, + value); + } + } else { + e_dev_info("write \n"); + } + } else if (strncmp(rnp_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + + cnt = sscanf(&rnp_dbg_reg_ops_buf[4], "%x", ®); + if (cnt == 1) { + if (reg >= 0x30000000) { + value = rnp_mbx_fw_reg_read(hw, reg); + } else { + value = rnp_rd_reg(hw->hw_addr + reg); + } + snprintf(rnp_dbg_reg_ops_buf, + sizeof(rnp_dbg_reg_ops_buf), "0x%08x: 0x%08x", + reg, value); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read \n"); + } + } else { + e_dev_info("Unknown command %s\n", rnp_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read \n"); + e_dev_info(" write \n"); + } + return count; +} + +static const struct file_operations rnp_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_reg_ops_read, + .write = rnp_dbg_reg_ops_write, +}; + +static char rnp_dbg_netdev_ops_buf[256] = ""; + +/** + * rnp_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: 
file position offset + **/ +static ssize_t rnp_dbg_netdev_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name, + rnp_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * rnp_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t rnp_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(rnp_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(rnp_dbg_netdev_ops_buf, + sizeof(rnp_dbg_netdev_ops_buf) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + rnp_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(rnp_dbg_netdev_ops_buf, "stat", 4) == 0) { + rnp_info("adapter->stat=0x%lx\n", adapter->state); + rnp_info("adapter->tx_timeout_count=%d\n", + adapter->tx_timeout_count); + } else if (strncmp(rnp_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev, + UINT_MAX); + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", rnp_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static const struct file_operations rnp_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_netdev_ops_read, + .write = rnp_dbg_netdev_ops_write, +}; + +static ssize_t rnp_dbg_netdev_temp_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = filp->private_data; + struct rnp_hw *hw = &adapter->hw; + char *buf; + int len; + int temp = 0, voltage = 0; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + temp = rnp_mbx_get_temp(hw, &voltage); + + buf = kasprintf(GFP_KERNEL, "%s: temp: %d oC voltage:%d mV\n", + adapter->name, temp, voltage); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} +static const struct file_operations rnp_dbg_netdev_temp = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_netdev_temp_read, +}; + +/** + * rnp_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void rnp_dbg_adapter_init(struct rnp_adapter *adapter) +{ + const char *name = adapter->name; + struct dentry *pfile; + + adapter->rnp_dbg_adapter = debugfs_create_dir(name, rnp_dbg_root); + if (adapter->rnp_dbg_adapter) { + pfile = debugfs_create_file("reg_ops", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", 
name); + + pfile = debugfs_create_file("temp", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_netdev_temp); + if (!pfile) + e_dev_err("debugfs temp for %s failed\n", name); + if (rnp_is_pf1(&adapter->hw) == 0) { + pfile = debugfs_create_file_unsafe("csl", 0755, + adapter->rnp_dbg_adapter, + adapter, &rnp_dbg_csl_fops); + if (!pfile) + e_dev_err("debugfs csl failed\n"); + } + pfile = debugfs_create_file("info", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_eth_info_fops); + if (!pfile) + e_dev_err("debugfs info failed\n"); + pfile = debugfs_create_file("mbx_cookies_info", 0600, + adapter->rnp_dbg_adapter, adapter, + &rnp_dbg_mbx_cookies_info_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for mbx_cookies_info failed\n"); + } else { + e_dev_err("debugfs entry for %s failed\n", name); + } +} + +/** + * rnp_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void rnp_dbg_adapter_exit(struct rnp_adapter *adapter) +{ + debugfs_remove_recursive(adapter->rnp_dbg_adapter); + adapter->rnp_dbg_adapter = NULL; +} + +/** + * rnp_dbg_init - start up debugfs for the driver + **/ +void rnp_dbg_init(void) +{ + rnp_dbg_root = debugfs_create_dir(rnp_driver_name, NULL); + if (rnp_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * rnp_dbg_exit - clean out the driver's debugfs entries + **/ +void rnp_dbg_exit(void) +{ + debugfs_remove_recursive(rnp_dbg_root); +} +#endif /* HAVE_RNP_DEBUG_FS */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c new file mode 100644 index 0000000000000..8f1c7b236e453 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c @@ -0,0 +1,1927 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rnp.h" +#include "rnp_sriov.h" +#include "rnp_phy.h" +#include "rnp_mbx_fw.h" +#include "rnp_ethtool.h" + +int rnp_wol_exclusion(struct rnp_adapter *adapter, struct ethtool_wolinfo *wol) +{ + struct rnp_hw *hw = &adapter->hw; + int retval = 0; + + /* WOL not supported for all devices */ + if (!rnp_wol_supported(adapter, hw->device_id, + hw->subsystem_device_id)) { + retval = 1; + wol->supported = 0; + } + + return retval; +} + +void rnp_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + wol->wolopts = 0; + + /* we now can't wol */ + if (rnp_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* Only support magic */ + if (RNP_WOL_GET_SUPPORTED(adapter)) + wol->supported = hw->wol_supported; + if (RNP_WOL_GET_STATUS(adapter)) + wol->wolopts |= hw->wol_supported; +} + +int rnp_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (!!wol->wolopts) { + if ((wol->wolopts & (~hw->wol_supported)) || + !RNP_WOL_GET_SUPPORTED(adapter)) + return -EOPNOTSUPP; + } + + RNP_WOL_SET_SUPPORTED(adapter); + if (wol->wolopts & WAKE_MAGIC) { + RNP_WOL_SET_SUPPORTED(adapter); + RNP_WOL_SET_STATUS(adapter); + } else { + RNP_WOL_CLEAR_STATUS(adapter); + } + + rnp_mbx_wol_set(hw, RNP_WOL_GET_STATUS(adapter)); + device_set_wakeup_enable(&adapter->pdev->dev, !!wol->wolopts); + + return 0; +} + +/* ethtool register test data */ +struct rnp_reg_test { + u16 reg; + u8 array_len; + u8 test_type; + u32 mask; + u32 write; +}; + +/* In the hardware, registers are laid out either singly, in arrays + * spaced 0x40 bytes apart, or in contiguous tables. We assume + * most tests take place on arrays or single registers (handled + * as a single-element array) and special-case the tables. + * Table tests are always pattern tests. + * + * We also make provision for some required setup steps by specifying + * registers to be written without any read-back testing. 
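+ *
+ * For example, a 4-element ring-register array entry from the sample
+ * list below would be written as
+ *   { RNP_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ * i.e. pattern-test RDBAL for four rings spaced 0x40 bytes apart,
+ * masking the write and the read-back to the bits the register
+ * actually implements.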
+ */ + +#define PATTERN_TEST 1 +#define SET_READ_TEST 2 +#define WRITE_NO_TEST 3 +#define TABLE32_TEST 4 +#define TABLE64_TEST_LO 5 +#define TABLE64_TEST_HI 6 + +/* default n10 register test */ +static struct rnp_reg_test reg_test_n10[] = { + //{RNP_DMA_CONFIG, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF}, + /* + * { RNP_FCRTL_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + * { RNP_FCRTH_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + * { RNP_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 }, + * { RNP_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, + * { RNP_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, + * { RNP_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF }, + * { RNP_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 }, + * { RNP_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + * { RNP_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, + * { RNP_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, + * { RNP_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 }, + * { RNP_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + * { RNP_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, + * { RNP_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, + */ + { .reg = 0 }, +}; + +/* write and read check */ +static bool reg_pattern_test(struct rnp_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 pat, val, before; + static const u32 test_pattern[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, + 0xFFFFFFFF }; + + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { + before = readl(adapter->hw.hw_addr + reg); + printk("before reg %x is %x\n", reg, before); + writel((test_pattern[pat] & write), + (adapter->hw.hw_addr + reg)); + val = readl(adapter->hw.hw_addr + reg); + if (val != (test_pattern[pat] & write & mask)) { + e_err(drv, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", + reg, val, (test_pattern[pat] & write & mask)); + *data = reg; + writel(before, adapter->hw.hw_addr + reg); + return 1; + } + writel(before, adapter->hw.hw_addr + reg); + } + return 0; +} + +static bool reg_set_and_check(struct rnp_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + u32 val, before; + + before = readl(adapter->hw.hw_addr + reg); + writel((write & mask), (adapter->hw.hw_addr + reg)); + val = readl(adapter->hw.hw_addr + reg); + if ((write & mask) != (val & mask)) { + e_err(drv, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + writel(before, (adapter->hw.hw_addr + reg)); + return 1; + } + writel(before, (adapter->hw.hw_addr + reg)); + return 0; +} + +static bool rnp_reg_test(struct rnp_adapter *adapter, u64 *data) +{ + struct rnp_reg_test *test; + struct rnp_hw *hw = &adapter->hw; + u32 i; + + if (RNP_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; + return true; + } + + test = reg_test_n10; + /* + * Perform the remainder of the register test, looping through + * the test table until we either fail or reach the null entry. 
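+	 * The table is terminated by the { .reg = 0 } sentinel, and each
+	 * entry's test_type selects the register stride used below:
+	 * 0x40 for ring-register arrays, 4 or 8 bytes for the 32/64-bit
+	 * table variants.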
+ */ + while (test->reg) { + for (i = 0; i < test->array_len; i++) { + bool b = false; + + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 0x40), + test->mask, test->write); + break; + case SET_READ_TEST: + b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, test->write); + break; + case WRITE_NO_TEST: + wr32(hw, test->reg + (i * 0x40), test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, + test->reg + (i * 4), + test->mask, test->write); + break; + case TABLE64_TEST_LO: + b = reg_pattern_test(adapter, data, + test->reg + (i * 8), + test->mask, test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, + (test->reg + 4) + (i * 8), + test->mask, test->write); + break; + } + if (b) + return true; + } + test++; + } + + *data = 0; + return false; +} + +static int rnp_link_test(struct rnp_adapter *adapter, u64 *data) +{ + struct rnp_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + bool duplex; + *data = 0; + + hw->ops.check_link(hw, &link_speed, &link_up, &duplex, true); + if (!link_up) + *data = 1; + return *data; +} + +void rnp_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__RNP_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { + netdev_warn( + netdev, "%s", + "offline diagnostic is not supported when VFs " + "are present\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; + data[3] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__RNP_TESTING, + &adapter->state); + goto skip_ol_tests; + } + } + } + + /* Offline tests */ + e_info(hw, "offline testing starting\n"); + + /* bringing adapter down disables SFP+ optics */ + if (hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (rnp_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e_info(hw, "register testing starting\n"); + if (rnp_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[1] = 0; + data[2] = 0; + /* If SRIOV or VMDq is enabled then skip MAC + * loopback diagnostic. 
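+		 * Only data[3] (the loopback result) is affected; it is
+		 * reported as a pass so the other results remain valid,
+		 * since looping packets back would disturb traffic on the
+		 * active VF/VMDq queues.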
+ */ + if (adapter->flags & + (RNP_FLAG_SRIOV_ENABLED | RNP_FLAG_VMDQ_ENABLED)) { + e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } + + data[3] = 0; +skip_loopback: + /* clear testing bit and return adapter to previous state */ + clear_bit(__RNP_TESTING, &adapter->state); + } else { + e_info(hw, "online testing starting\n"); + + /* if adapter is down, SFP+ optics will be disabled */ + if (!if_running && hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + /* Online tests */ + if (rnp_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Offline tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__RNP_TESTING, &adapter->state); + } + + /* if adapter was down, ensure SFP+ optics are disabled again */ + if (!if_running && hw->ops.disable_tx_laser) + hw->ops.disable_tx_laser(hw); +skip_ol_tests: + msleep_interruptible(4 * 1000); +} + +int rnp_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + int err; + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + err = rnp_mbx_get_lane_stat(hw); + if (err) + return err; + + if (adapter->fec) { + fecparam->active_fec = ETHTOOL_FEC_BASER; + } else { + fecparam->active_fec = ETHTOOL_FEC_NONE; + } + fecparam->fec = ETHTOOL_FEC_BASER; + + return 0; +} + +int rnp_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (fecparam->fec & ETHTOOL_FEC_OFF) { + return rnp_set_lane_fun(hw, LANE_FUN_FEC, 0, 0, 0, 0); + } else if (fecparam->fec & ETHTOOL_FEC_BASER) { + return rnp_set_lane_fun(hw, LANE_FUN_FEC, 1, 0, 0, 0); + } + + return -EINVAL; +} + +u32 rnp_get_msglevel(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +void rnp_set_msglevel(struct net_device *netdev, u32 data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +int rnp_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + rnp_mbx_led_set(hw, 1); + return 2; + + case ETHTOOL_ID_ON: + rnp_mbx_led_set(hw, 2); + break; + + case ETHTOOL_ID_OFF: + rnp_mbx_led_set(hw, 3); + break; + + case ETHTOOL_ID_INACTIVE: + rnp_mbx_led_set(hw, 0); + break; + } + return 0; +} + +int rnp_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + /* For we just set it as pf0 */ + if (!(adapter->flags2 & RNP_FLAG2_PTP_ENABLED)) + return ethtool_op_get_ts_info(dev, info); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + dbg("phc_index is %d\n", info->phc_index); + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + 
BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static unsigned int rnp_max_channels(struct rnp_adapter *adapter) +{ + unsigned int max_combined; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* SR-IOV currently only allows 2 queue on the PF */ + max_combined = hw->sriov_ring_limit; + } else if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + /* dcb on max support 32 */ + max_combined = 32; + } else { + /* support up to 16 queues with RSS */ + max_combined = adapter->max_ring_pair_counts; + /* should not large than q_vectors ? */ + } + + return max_combined; +} + +void rnp_get_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = rnp_max_channels(adapter); + + /* report info for other vector */ + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + + /* record RSS queues */ + ch->combined_count = adapter->ring_feature[RING_F_RSS].indices; + + /* nothing else to report if RSS is disabled */ + if (ch->combined_count == 1) + return; + + /* we do not support ATR queueing if SR-IOV is enabled */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + return; + + /* same thing goes for being DCB enabled */ + if (netdev_get_num_tc(dev) > 1) + return; +} + +int rnp_set_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + return -EINVAL; + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* verify other_count has not changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + dbg("call set channels %d %d %d \n", count, ch->rx_count, ch->tx_count); + dbg("max channels %d\n", rnp_max_channels(adapter)); + /* verify the number of channels does not exceed hardware limits */ + if (count > rnp_max_channels(adapter)) + return -EINVAL; + + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + + if (count > adapter->max_ring_pair_counts) + count = adapter->max_ring_pair_counts; + adapter->ring_feature[RING_F_RSS].limit = count; + + /* use setup TC to update any traffic class queue mapping */ + return rnp_setup_tc(dev, netdev_get_num_tc(dev)); +} + +int rnp_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + u8 module_id, diag_supported; + int rc; + + rnp_mbx_get_lane_stat(hw); + + if (hw->is_sgmii) + return -EIO; + + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA0, SFF_MODULE_ID_OFFSET, 1, + &module_id); + if (rc || module_id == 0xff) { + return -EIO; + } + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA0, SFF_DIAG_SUPPORT_OFFSET, + 1, &diag_supported); + if (!rc) { + switch (module_id) { + case SFF_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + if (!diag_supported) + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP: + case SFF_MODULE_ID_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + printk("%s: module_id:0x%x 
diag_supported:0x%x\n", + __func__, module_id, diag_supported); + rc = -EOPNOTSUPP; + break; + } + } + + return rc; +} + +int rnp_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + u16 start = eeprom->offset, length = eeprom->len; + int rc = 0; + + rnp_mbx_get_lane_stat(hw); + + if (hw->is_sgmii) + return -EIO; + + memset(data, 0, eeprom->len); + + /* Read A0 portion of the EEPROM */ + if (start < ETH_MODULE_SFF_8436_LEN) { + if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) + length = ETH_MODULE_SFF_8436_LEN - start; + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA0, start, length, + data); + if (rc) + return rc; + start += length; + data += length; + length = eeprom->len - length; + } + + /* Read A2 portion of the EEPROM */ + if (length) { + start -= ETH_MODULE_SFF_8436_LEN; + rc = rnp_mbx_sfp_module_eeprom_info(hw, 0xA2, start, length, + data); + } + + return rc; +} + +void rnp_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + /* all ring share the same status*/ + + ring->rx_max_pending = RNP_MAX_RXD; + ring->tx_max_pending = RNP_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_ring_item_count; + ring->tx_pending = adapter->tx_ring_item_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +int rnp_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_ring *temp_ring; + int i, err = 0; + u32 new_rx_count, new_tx_count; + + /* sriov mode can't change ring param */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + return -EINVAL; + } + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + if ((ring->tx_pending < RNP_MIN_TXD) || + (ring->tx_pending > RNP_MAX_TXD) || + (ring->rx_pending < RNP_MIN_RXD) || + (ring->rx_pending > RNP_MAX_RXD)) { + netdev_info( + netdev, + "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, RNP_MIN_TXD, + RNP_MAX_TXD); + return -EINVAL; + } + + new_tx_count = clamp_t(u32, ring->tx_pending, RNP_MIN_TXD, RNP_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, RNP_REQ_TX_DESCRIPTOR_MULTIPLE); + new_rx_count = clamp_t(u32, ring->rx_pending, RNP_MIN_RXD, RNP_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, RNP_REQ_RX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_item_count) && + (new_rx_count == adapter->rx_ring_item_count)) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__RNP_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_item_count = new_tx_count; + adapter->rx_ring_item_count = new_rx_count; + goto clear_reset; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + temp_ring = vmalloc(i * sizeof(struct rnp_ring)); + if (!temp_ring) { + err = -ENOMEM; + goto 
clear_reset; + } + memset(temp_ring, 0x00, i * sizeof(struct rnp_ring)); + + if (new_rx_count != adapter->rx_ring_item_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + adapter->rx_ring[i]->reset_count = new_rx_count; + if (!(adapter->rx_ring[i]->ring_flags & + RNP_RING_SIZE_CHANGE_FIX)) + adapter->rx_ring[i]->ring_flags |= + RNP_RING_FLAG_CHANGE_RX_LEN; + } + } + rnp_down(adapter); + /* + * Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. + */ + if (new_tx_count != adapter->tx_ring_item_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct rnp_ring)); + + temp_ring[i].count = new_tx_count; + err = rnp_setup_tx_resources(&temp_ring[i], adapter); + if (err) { + while (i) { + i--; + rnp_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + rnp_free_tx_resources(adapter->tx_ring[i]); + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct rnp_ring)); + } + + adapter->tx_ring_item_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != adapter->rx_ring_item_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct rnp_ring)); + /* setup ring count */ + if (!(adapter->rx_ring[i]->ring_flags & + RNP_RING_FLAG_DELAY_SETUP_RX_LEN)) { + temp_ring[i].count = new_rx_count; + } else { + /* setup temp count */ + temp_ring[i].count = temp_ring[i].temp_count; + adapter->rx_ring[i]->reset_count = new_rx_count; + new_rx_count = temp_ring[i].temp_count; + } + err = rnp_setup_rx_resources(&temp_ring[i], adapter); + if (err) { + while (i) { + i--; + rnp_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + rnp_free_rx_resources(adapter->rx_ring[i]); + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct rnp_ring)); + } + adapter->rx_ring_item_count = new_rx_count; + } + +err_setup: + rnp_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__RNP_RESETTING, &adapter->state); + return err; +} + +int rnp_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + rnp_mbx_get_dump(&adapter->hw, 0, NULL, 0); + + dump->flag = adapter->hw.dump.flag; + dump->len = adapter->hw.dump.len; + dump->version = adapter->hw.dump.version; + + return 0; +} + +int rnp_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer) +{ + int err; + struct rnp_adapter *adapter = netdev_priv(netdev); + + err = rnp_mbx_get_dump(&adapter->hw, dump->flag, buffer, dump->len); + if (err) + return err; + + dump->flag = adapter->hw.dump.flag; + dump->len = adapter->hw.dump.len; + dump->version = adapter->hw.dump.version; + + return 0; +} + +int rnp_set_dump(struct net_device *netdev, struct ethtool_dump *dump) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + rnp_mbx_set_dump(&adapter->hw, dump->flag); + + return 0; +} + +int rnp_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + coal->use_adaptive_tx_coalesce = adapter->adaptive_tx_coal; + 
coal->tx_coalesce_usecs = adapter->tx_usecs_usr_set; + coal->tx_coalesce_usecs_irq = 0; + coal->tx_max_coalesced_frames = adapter->tx_frames; + coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + + coal->use_adaptive_rx_coalesce = adapter->adaptive_rx_coal; + coal->rx_coalesce_usecs_irq = 0; + coal->rx_coalesce_usecs = adapter->rx_usecs_usr_set; + coal->rx_max_coalesced_frames = adapter->rx_frames; + coal->rx_max_coalesced_frames_irq = adapter->napi_budge; + + /* this is not support */ + coal->pkt_rate_low = 0; + coal->pkt_rate_high = 0; + coal->rx_coalesce_usecs_low = 0; + coal->rx_max_coalesced_frames_low = 0; + coal->tx_coalesce_usecs_low = 0; + coal->tx_max_coalesced_frames_low = 0; + coal->rx_coalesce_usecs_high = 0; + coal->rx_max_coalesced_frames_high = 0; + coal->tx_coalesce_usecs_high = 0; + coal->tx_max_coalesced_frames_high = 0; + coal->rate_sample_interval = 0; + + return 0; +} + +int rnp_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + int reset = 0; + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 value; + /* we don't support close tx and rx coalesce */ + if (!(ec->use_adaptive_tx_coalesce) || !(ec->use_adaptive_rx_coalesce)) + return -EINVAL; + + /* check coalesce frame irq */ + if ((ec->tx_max_coalesced_frames_irq < RNP_MIN_TX_WORK) || + (ec->tx_max_coalesced_frames_irq > RNP_MAX_TX_WORK)) + return -EINVAL; + + value = clamp_t(u32, ec->tx_max_coalesced_frames_irq, RNP_MIN_TX_WORK, + RNP_MAX_TX_WORK); + value = ALIGN(value, RNP_WORK_ALIGN); + + if (adapter->tx_work_limit != value) { + reset = 1; + adapter->tx_work_limit = value; + } + + if ((ec->tx_max_coalesced_frames < RNP_MIN_TX_FRAME) || + (ec->tx_max_coalesced_frames > RNP_MAX_TX_FRAME)) + return -EINVAL; + + value = clamp_t(u32, ec->tx_max_coalesced_frames, RNP_MIN_TX_FRAME, + RNP_MAX_TX_FRAME); + if (adapter->tx_frames != value) { + reset = 1; + adapter->tx_frames = value; + } + + /* check vlaue */ + if ((ec->tx_coalesce_usecs < RNP_MIN_TX_USEC) || + (ec->tx_coalesce_usecs > RNP_MAX_TX_USEC)) + return -EINVAL; + + value = clamp_t(u32, ec->tx_coalesce_usecs, RNP_MIN_TX_USEC, + RNP_MAX_TX_USEC); + if (adapter->tx_usecs != value) { + reset = 1; + adapter->tx_usecs = value; + adapter->tx_usecs_usr_set = value; + } + + if ((ec->rx_max_coalesced_frames_irq < RNP_MIN_RX_WORK) || + (ec->rx_max_coalesced_frames_irq > RNP_MAX_RX_WORK)) + return -EINVAL; + + value = clamp_t(u32, ec->rx_max_coalesced_frames_irq, RNP_MIN_RX_WORK, + RNP_MAX_RX_WORK); + value = ALIGN(value, RNP_WORK_ALIGN); + + if (adapter->napi_budge != value) { + reset = 1; + adapter->napi_budge = value; + } + + if ((ec->rx_max_coalesced_frames < RNP_MIN_RX_FRAME) || + (ec->rx_max_coalesced_frames > RNP_MAX_RX_FRAME)) + return -EINVAL; + + value = clamp_t(u32, ec->rx_max_coalesced_frames, RNP_MIN_RX_FRAME, + RNP_MAX_RX_FRAME); + if (adapter->rx_frames != value) { + reset = 1; + adapter->rx_frames = value; + } + + /* check vlaue */ + if ((ec->rx_coalesce_usecs < RNP_MIN_RX_USEC) || + (ec->rx_coalesce_usecs > RNP_MAX_RX_USEC)) + return -EINVAL; + + value = clamp_t(u32, ec->rx_coalesce_usecs, RNP_MIN_RX_USEC, + RNP_MAX_RX_USEC); + + if (adapter->rx_usecs != value) { + reset = 1; + adapter->rx_usecs = value; + adapter->rx_usecs_usr_set = value; + } + /* other setup is not supported */ + if ((ec->pkt_rate_low) || (ec->pkt_rate_high) || + (ec->rx_coalesce_usecs_low) || (ec->rx_max_coalesced_frames_low) || + (ec->tx_coalesce_usecs_low) || 
(ec->tx_max_coalesced_frames_low) || + (ec->rx_coalesce_usecs_high) || + (ec->rx_max_coalesced_frames_high) || + (ec->tx_coalesce_usecs_high) || + (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval) || + (ec->tx_coalesce_usecs_irq) || (ec->rx_coalesce_usecs_irq)) + return -EINVAL; + + if (reset) + return rnp_setup_tc(netdev, netdev_get_num_tc(netdev)); + + return 0; +} + +static int rnp_get_rss_hash_opts(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + + /* Report default options for RSS on rnp */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + fallthrough; + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + return -EINVAL; + } + return 0; +} + +static int rnp_get_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct hlist_node *node2; + struct rnp_fdir_filter *rule = NULL; + + /* report total rule count */ + cmd->data = adapter->fdir_pballoc; + + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) + if (fsp->location <= rule->sw_idx) + break; + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case RNP_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case RNP_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case RNP_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case RNP_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + if (adapter->fdir_mode == fdir_mode_tuple5) { + fsp->h_u.usr_ip4_spec.proto = + rule->filter.formatted.inner_mac[0]; + fsp->m_u.usr_ip4_spec.proto = 0xff; + } else { + fsp->h_u.usr_ip4_spec.proto = + rule->filter.formatted.inner_mac[0] & + rule->filter.formatted.inner_mac_mask[0]; + fsp->m_u.usr_ip4_spec.proto = + rule->filter.formatted.inner_mac_mask[0]; + } + break; + case RNP_ATR_FLOW_TYPE_ETHER: + fsp->flow_type = ETHER_FLOW; + /* support proto and mask only in this mode */ + fsp->h_u.ether_spec.h_proto = rule->filter.layer2_formate.proto; + fsp->m_u.ether_spec.h_proto = 0xffff; + break; + default: + return -EINVAL; + } + if (rule->filter.formatted.flow_type != RNP_ATR_FLOW_TYPE_ETHER) { + /* not support mask in tuple 5 mode */ + if (adapter->fdir_mode == fdir_mode_tuple5) { + fsp->h_u.tcp_ip4_spec.psrc = + rule->filter.formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = + rule->filter.formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = + rule->filter.formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = + rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.psrc = 0xffff; + fsp->m_u.tcp_ip4_spec.pdst = 0xffff; + fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; + fsp->m_u.tcp_ip4_spec.ip4dst = 0xffffffff; + } else { + fsp->h_u.tcp_ip4_spec.psrc = + rule->filter.formatted.src_port & 
+ rule->filter.formatted.src_port_mask; + fsp->m_u.tcp_ip4_spec.psrc = + rule->filter.formatted.src_port_mask; + fsp->h_u.tcp_ip4_spec.pdst = + rule->filter.formatted.dst_port & + rule->filter.formatted.dst_port_mask; + fsp->m_u.tcp_ip4_spec.pdst = + rule->filter.formatted.dst_port_mask; + + fsp->h_u.tcp_ip4_spec.ip4src = + rule->filter.formatted.src_ip[0] & + rule->filter.formatted.src_ip_mask[0]; + fsp->m_u.tcp_ip4_spec.ip4src = + rule->filter.formatted.src_ip_mask[0]; + + fsp->h_u.tcp_ip4_spec.ip4dst = + rule->filter.formatted.dst_ip[0] & + rule->filter.formatted.dst_ip_mask[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = + rule->filter.formatted.dst_ip_mask[0]; + } + } + + /* record action */ + if (rule->action == RNP_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else { + int add = 0; + + if (rule->action & 0x1) + add = 1; + + if (rule->vf_num != 0) { + fsp->ring_cookie = ((u64)rule->vf_num << 32) | (add); + } else { + fsp->ring_cookie = rule->action; + } + } + + return 0; +} + +static int rnp_get_ethtool_fdir_all(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hlist_node *node2; + struct rnp_fdir_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = adapter->fdir_pballoc; + + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + +int rnp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + struct rnp_hw *hw = &adapter->hw; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* we fix 2 when srio on */ + cmd->data = hw->sriov_ring_limit; + } else { + cmd->data = adapter->num_rx_queues; + } + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = rnp_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = rnp_get_ethtool_fdir_all(adapter, cmd, (u32 *)rule_locs); + break; + case ETHTOOL_GRXFH: + ret = rnp_get_rss_hash_opts(adapter, cmd); + break; + default: + break; + } + + return ret; +} +#define UDP_RSS_FLAGS \ + (RNP_FLAG2_RSS_FIELD_IPV4_UDP | RNP_FLAG2_RSS_FIELD_IPV6_UDP) +static int rnp_set_rss_hash_opt(struct rnp_adapter *adapter, + struct ethtool_rxnfc *nfc) +{ + /* + * RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + return 0; +} + +static int rnp_flowspec_to_flow_type(struct rnp_adapter *adapter, + struct ethtool_rx_flow_spec *fsp, + uint8_t *flow_type, + struct rnp_fdir_filter *input) +{ + int i; + int ret = 1; + /* not support flow_ext */ + 
if (fsp->flow_type & FLOW_EXT) + return 0; + + switch (fsp->flow_type & ~FLOW_EXT) { + /* TODO: IPv6 is not handled yet */ + case TCP_V4_FLOW: + *flow_type = RNP_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = RNP_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = RNP_ATR_FLOW_TYPE_SCTPV4; + break; + case ETHER_FLOW: + /* layer 2 flow */ + *flow_type = RNP_ATR_FLOW_TYPE_ETHER; + input->filter.layer2_formate.proto = + fsp->h_u.ether_spec.h_proto; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = RNP_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = RNP_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = RNP_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + /* plain IPv4 with no src and no dst address */ + if (!(fsp->h_u.tcp_ip4_spec.ip4src) && + (!(fsp->h_u.tcp_ip4_spec.ip4dst))) { + /* no l4 proto given, fall back to layer2 */ + *flow_type = RNP_ATR_FLOW_TYPE_ETHER; + input->filter.layer2_formate.proto = + htons(0x0800); + } else { + /* only src or dst was given */ + *flow_type = RNP_ATR_FLOW_TYPE_IPV4; + } + break; + default: + /* any other l4 proto is matched as plain ipv4 */ + *flow_type = RNP_ATR_FLOW_TYPE_IPV4; + } + break; + default: + return 0; + } + /* layer2 flow */ + if (*flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + if (adapter->layer2_count < 0) { + e_err(drv, "layer2 count full\n"); + ret = 0; + } + /* should check dst mac filter */ + /* src and dst mac (and masks) must be all zeros */ + for (i = 0; i < ETH_ALEN; i++) { + if (fsp->h_u.ether_spec.h_source[i] != 0) + ret = 0; + + if (fsp->h_u.ether_spec.h_dest[i] != 0) + ret = 0; + + if (fsp->m_u.ether_spec.h_source[i] != 0) + ret = 0; + + if (fsp->m_u.ether_spec.h_dest[i] != 0) + ret = 0; + } + } else if (*flow_type == RNP_ATR_FLOW_TYPE_IPV4) { + if (adapter->fdir_mode == fdir_mode_tuple5) { + if (adapter->tuple_5_count < 0) { + e_err(drv, "tuple 5 count full\n"); + ret = 0; + } + if ((fsp->h_u.usr_ip4_spec.ip4src != 0) && + (fsp->m_u.usr_ip4_spec.ip4src != 0xffffffff)) { + e_err(drv, "ip src mask error\n"); + ret = 0; + } + if ((fsp->h_u.usr_ip4_spec.ip4dst != 0) && + (fsp->m_u.usr_ip4_spec.ip4dst != 0xffffffff)) { + e_err(drv, "ip dst mask error\n"); + ret = 0; + } + if ((fsp->h_u.usr_ip4_spec.proto != 0) && + (fsp->m_u.usr_ip4_spec.proto != 0xff)) { + e_err(drv, "ip l4 proto mask error\n"); + ret = 0; + } + } else { + if (adapter->tuple_5_count < 0) { + e_err(drv, "tcam count full\n"); + ret = 0; + } + /* tcam mode can support mask */ + } + /* l4_4_bytes is not supported */ + if ((fsp->h_u.usr_ip4_spec.l4_4_bytes != 0)) { + e_err(drv, "ip l4_4_bytes error\n"); + ret = 0; + } + } else { + if (adapter->fdir_mode == fdir_mode_tuple5) { + /* tuple5 mode requires full (all-ones) masks */ + if (adapter->tuple_5_count < 0) { + e_err(drv, "tuple 5 count full\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.ip4src != 0) && + (fsp->m_u.tcp_ip4_spec.ip4src != 0xffffffff)) { + e_err(drv, "src mask error\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.ip4dst != 0) && + (fsp->m_u.tcp_ip4_spec.ip4dst != 0xffffffff)) { + e_err(drv, "dst mask error\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.psrc != 0) && + (fsp->m_u.tcp_ip4_spec.psrc != 0xffff)) { + e_err(drv, "src port mask error\n"); + ret = 0; + } + if ((fsp->h_u.tcp_ip4_spec.pdst != 0) && + (fsp->m_u.tcp_ip4_spec.pdst != 0xffff)) { + e_err(drv, "dst port mask error\n"); + ret = 0; + } + } else { + if (adapter->tuple_5_count < 0) { + e_err(drv, "tcam count full\n"); + ret = 0; + } + } + /* l4 tos is not supported */ + if (fsp->h_u.tcp_ip4_spec.tos != 0) { +
e_err(drv, "tos error\n"); + ret = 0; + } + } + + return ret; +} + +int rnp_update_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct rnp_fdir_filter *input, u16 sw_idx) +{ + struct rnp_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct rnp_fdir_filter *rule, *parent; + bool deleted = false; + u16 hw_idx_layer2 = 0; + u16 hw_idx_tuple5 = 0; + + s32 err; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + /* only clear hw enable bits */ + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(adapter->netdev) && (!input)) { + err = rnp_fdir_erase_perfect_filter(adapter->fdir_mode, + hw, &rule->filter, + rule->hw_idx); + if (err) + return -EINVAL; + } + + adapter->fdir_filter_count--; + if (rule->filter.formatted.flow_type == + RNP_ATR_FLOW_TYPE_ETHER) { + /* used to determine hw reg offset */ + adapter->layer2_count++; + } else { + adapter->tuple_5_count++; + } + + hlist_del(&rule->fdir_node); + kfree(rule); + deleted = true; + } + + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. + */ + if (!input) + return deleted ? 0 : -EINVAL; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, &parent->fdir_node); + else + hlist_add_head(&input->fdir_node, &adapter->fdir_filter_list); + + /* we must setup all */ + /* should first earase all tcam and l2 rule */ + if (adapter->fdir_mode != fdir_mode_tcam) { + hw->ops.clr_all_layer2_remapping(hw); + /* earase all layer2 */ + } else { + hw->ops.clr_all_tuple5_remapping(hw); + /* earase all tcam */ + } + + /* setup hw */ + hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list, + fdir_node) { + if (netif_running(adapter->netdev)) { + /* hw_idx */ + if (rule->filter.formatted.flow_type == + RNP_ATR_FLOW_TYPE_ETHER) { + rule->hw_idx = hw_idx_layer2++; + } else { + rule->hw_idx = hw_idx_tuple5++; + } + + if ((!rule->vf_num) && + (rule->action != ACTION_TO_MPE)) { + int idx = rule->action; + + err = rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &rule->filter, + rule->hw_idx, + (rule->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + adapter->rx_ring[idx] + ->rnp_queue_idx, + (adapter->priv_flags & + RNP_PRIV_FLAG_REMAP_PRIO) ? + true : + false); + } else { + /* ACTION_TO_MPE use this */ + err = rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &rule->filter, + rule->hw_idx, + (rule->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + rule->action, + (adapter->priv_flags & + RNP_PRIV_FLAG_REMAP_PRIO) ? 
+ true : + false); + } + if (err) + return -EINVAL; + } + } + + /* update counts */ + adapter->fdir_filter_count++; + if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + /* used to determine hw reg offset */ + adapter->layer2_count--; + } else { + adapter->tuple_5_count--; + } + return 0; +} + +/* used to dbg flo_spec info */ +static void print_fsp(struct ethtool_rx_flow_spec *fsp) +{ + int i; + + switch (fsp->flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + for (i = 0; i < ETH_ALEN; i++) + dbg("src 0x%02x\n", fsp->h_u.ether_spec.h_source[i]); + for (i = 0; i < ETH_ALEN; i++) + dbg("dst 0x%02x\n", fsp->h_u.ether_spec.h_dest[i]); + for (i = 0; i < ETH_ALEN; i++) + dbg("src mask 0x%02x\n", + fsp->m_u.ether_spec.h_source[i]); + for (i = 0; i < ETH_ALEN; i++) + dbg("dst mask 0x%02x\n", fsp->m_u.ether_spec.h_dest[i]); + + dbg("proto type is %x\n", fsp->h_u.ether_spec.h_proto); + + break; + + default: + dbg("flow type is %x\n", fsp->flow_type); + + dbg("ip4 src ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4src); + dbg("ip4 src ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4src); + + dbg("ip4 dst ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4dst); + dbg("ip4 dst ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4dst); + + dbg("ip4 src port is %x\n", fsp->h_u.tcp_ip4_spec.psrc); + dbg("ip4 src port mask is %x\n", fsp->m_u.tcp_ip4_spec.psrc); + + dbg("ip4 dst port is %x\n", fsp->h_u.tcp_ip4_spec.pdst); + dbg("ip4 dst port mask is %x\n", fsp->m_u.tcp_ip4_spec.pdst); + + dbg("l4 proto type is %x\n", fsp->h_u.usr_ip4_spec.proto); + break; + } +} + +static int rnp_add_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct rnp_fdir_filter *input; + struct rnp_hw *hw = &adapter->hw; + /* we don't support mask */ + int err; + int vf_fix = 0; + + u32 ring_cookie_high = fsp->ring_cookie >> 32; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + vf_fix = 1; + + if (!(adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE)) + return -EOPNOTSUPP; + + /* + * Don't allow programming if the action is a queue greater than + * the number of online Rx queues. 
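+ * With SR-IOV enabled the upper 32 bits of ring_cookie carry the VF number + * and the low 32 bits the queue inside that VF; both parts are range + * checked below.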
+ */ + /* is sriov is on, allow vf and queue */ + /* vf should smaller than num_vfs */ + print_fsp(fsp); + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (((ring_cookie_high & 0xff) > adapter->num_vfs) || + ((fsp->ring_cookie & (u64)0xffffffff) >= + hw->sriov_ring_limit))) + /* return error if not mpe */ + if (fsp->ring_cookie != ACTION_TO_MPE) + return -EINVAL; + + } else { + if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && + (fsp->ring_cookie >= adapter->num_rx_queues)) { + /* ACTION_TO_MPE to mpe special */ + if (fsp->ring_cookie != ACTION_TO_MPE) + return -EINVAL; + } + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= (adapter->fdir_pballoc)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (!rnp_flowspec_to_flow_type( + adapter, fsp, &input->filter.formatted.flow_type, input)) { + e_err(drv, "Unrecognized flow type\n"); + goto err_out; + } + + if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + /* used to determine hw reg offset */ + } else if (input->filter.formatted.flow_type == + RNP_ATR_FLOW_TYPE_IPV4) { + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = + fsp->h_u.usr_ip4_spec.ip4src; + input->filter.formatted.src_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = + fsp->h_u.usr_ip4_spec.ip4dst; + input->filter.formatted.dst_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4dst; + input->filter.formatted.src_port = 0; + input->filter.formatted.src_port_mask = 0xffff; + input->filter.formatted.dst_port = 0; + input->filter.formatted.dst_port_mask = 0xffff; + input->filter.formatted.inner_mac[0] = + fsp->h_u.usr_ip4_spec.proto; + input->filter.formatted.inner_mac_mask[0] = + fsp->m_u.usr_ip4_spec.proto; + } else { + /* tcp or udp or sctp*/ + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = + fsp->h_u.tcp_ip4_spec.ip4src; + input->filter.formatted.src_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = + fsp->h_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.dst_ip_mask[0] = + fsp->m_u.usr_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + input->filter.formatted.src_port_mask = + fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + input->filter.formatted.dst_port_mask = + fsp->m_u.tcp_ip4_spec.pdst; + } + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = RNP_FDIR_DROP_QUEUE; + else { + input->vf_num = (fsp->ring_cookie >> 32) & 0xff; + if (input->vf_num) { + /* in vf mode input->action is the real queue nums */ + if (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_MODE) { + input->action = (fsp->ring_cookie & 0xffffffff); + } else { + input->action = + 2 * (((fsp->ring_cookie >> 32) & 0xff) + + vf_fix - 1) + + (fsp->ring_cookie & 0xffffffff); + } + } else + input->action = fsp->ring_cookie; + } + + spin_lock(&adapter->fdir_perfect_lock); + err = rnp_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out: + kfree(input); + return -EINVAL; +} + +static int rnp_del_ethtool_fdir_entry(struct rnp_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec 
*fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = rnp_update_ethtool_fdir_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +} + +int rnp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = rnp_add_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = rnp_del_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_SRXFH: + ret = rnp_set_rss_hash_opt(adapter, cmd); + break; + default: + break; + } + + return ret; +} + +u32 rnp_rss_indir_size(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + return rnp_rss_indir_tbl_entries(adapter); +} + +u32 rnp_get_rxfh_key_size(struct net_device *netdev) +{ + return RNP_RSS_KEY_SIZE; +} + +void rnp_get_reta(struct rnp_adapter *adapter, u32 *indir) +{ + int i, reta_size = rnp_rss_indir_tbl_entries(adapter); + u16 rss_m = adapter->ring_feature[RING_F_RSS].mask; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + rss_m = adapter->ring_feature[RING_F_RSS].indices - 1; + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i] & rss_m; +} + +int rnp_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (hfunc) { + switch (adapter->rss_func_mode) { + case rss_func_top: + *hfunc = ETH_RSS_HASH_TOP; + break; + case rss_func_xor: + *hfunc = ETH_RSS_HASH_XOR; + break; + case rss_func_order: + *hfunc = ETH_RSS_HASH_TOP; + break; + } + } + + if (indir) + rnp_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, rnp_get_rxfh_key_size(netdev)); + + return 0; +} + +enum { + PART_FW, + PART_CFG, + PART_MACSN, + PART_PCSPHY, + PART_PXE, +}; + +#define UCFG_OFF 0x41000 +#define UCFG_SZ (4096) +#define PXE_OFF 0x4a000 +#define PXE_SZ (512 * 1024) + +static int rnp_flash_firmware(struct rnp_adapter *adapter, int region, + const u8 *data, int bytes) +{ + struct rnp_hw *hw = &adapter->hw; + + switch (region) { + case PART_FW: { + if (*((u32 *)(data + 28)) != 0xA51BBEAF) { + return -EINVAL; + } + if (bytes > PXE_OFF) { + int err; + int wbytes_seg1 = bytes - PXE_OFF; + if (wbytes_seg1 > PXE_SZ) { + wbytes_seg1 = PXE_SZ; + } + + err = rnp_fw_update(hw, PART_FW, data, UCFG_OFF); + if (err) { + return err; + } + /* skip ucfg flush only pxe */ + err = rnp_fw_update(hw, PART_PXE, data + PXE_OFF, + wbytes_seg1); + if (err) { + return err; + } + return 0; + } + break; + } + case PART_CFG: { + if (*((u32 *)(data)) != 0x00010cf9) { + return -EINVAL; + } + break; + } + case PART_MACSN: { + break; + } + case PART_PCSPHY: { + if (*((u16 *)(data)) != 0x081d) { + return -EINVAL; + } + break; + } + case PART_PXE: { + if ((*((u16 *)(data)) != 0xaa55) && + (*((u16 *)(data)) != 0x5a4d)) { + return -EINVAL; + } + break; + } + default: { + return -EINVAL; + } + } + return rnp_fw_update(hw, region, data, bytes); +} + +static int rnp_flash_firmware_from_file(struct net_device *dev, + struct rnp_adapter *adapter, int region, + const char *filename) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, filename, &dev->dev); + if (rc != 0) { + netdev_err(dev, "Error %d requesting firmware file: %s\n", rc, + filename); + return rc; + } + + rc = rnp_flash_firmware(adapter, region, fw->data, fw->size); + release_firmware(fw); + return rc; +} + +int 
rnp_flash_device(struct net_device *dev, struct ethtool_flash *flash) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + if (IS_VF(adapter->hw.pfvfnum)) { + netdev_err(dev, + "flashdev not supported from a virtual function\n"); + return -EINVAL; + } + + return rnp_flash_firmware_from_file(dev, adapter, flash->region, + flash->data); +} +static int rnp_rss_indir_tbl_max(struct rnp_adapter *adapter) +{ + if (adapter->hw.rss_type == rnp_rss_uv3p) + return 8; + else if (adapter->hw.rss_type == rnp_rss_uv440) + return 128; + else if (adapter->hw.rss_type == rnp_rss_n10) + return 128; + else + return 128; +} + +int rnp_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int i; + u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + if ((indir) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + return -EINVAL; + } + + /* Fill out the redirection table */ + if (indir) { + int max_queues = min_t(int, adapter->num_rx_queues, + rnp_rss_indir_tbl_max(adapter)); + + /* Allow max 2 queues w/ SR-IOV. */ + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (max_queues > 2)) + max_queues = 2; + + /* Verify user input. */ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + /* store rss tbl */ + for (i = 0; i < reta_entries; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + rnp_store_reta(adapter); + } + + /* Fill out the rss hash key */ + if (key) { + memcpy(adapter->rss_key, key, rnp_get_rxfh_key_size(netdev)); + rnp_store_key(adapter); + } + + return 0; +} + +void rnp_set_ethtool_ops(struct net_device *netdev) +{ +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ethtool.h b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.h new file mode 100644 index 0000000000000..929c06f6e8336 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_ETHTOOL_H_ +#define _RNP_ETHTOOL_H_ + +enum { NETDEV_STATS, RNP_STATS }; + +struct rnp_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* rnp allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. 
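+ * Both macros therefore expand to the netdev's real_num_rx/tx_queues, + * which RNP_QUEUE_STATS_LEN below is sized from.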
+ */ +#define RNP_NUM_RX_QUEUES netdev->real_num_rx_queues +#define RNP_NUM_TX_QUEUES netdev->real_num_tx_queues + +#define RNP_NETDEV_STAT(_net_stat) \ + { \ + .stat_string = #_net_stat, \ + .sizeof_stat = \ + sizeof_field(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ + } + +#define RNP_HW_STAT(_name, _stat) \ + { \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(struct rnp_adapter, _stat), \ + .stat_offset = offsetof(struct rnp_adapter, _stat) \ + } + +struct rnp_tx_queue_ring_stat { + u64 hw_head; + u64 hw_tail; + u64 sw_to_clean; + u64 sw_to_next_to_use; +}; + +struct rnp_rx_queue_ring_stat { + u64 hw_head; + u64 hw_tail; + u64 sw_to_use; + u64 sw_to_clean; +}; + +#define RNP_QUEUE_STATS_LEN \ + (RNP_NUM_TX_QUEUES * \ + (sizeof(struct rnp_tx_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_tx_queue_ring_stat) / sizeof(u64)) + \ + RNP_NUM_RX_QUEUES * \ + (sizeof(struct rnp_rx_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_queue_stats) / sizeof(u64) + \ + sizeof(struct rnp_rx_queue_ring_stat) / sizeof(u64))) + +#define RNP_STATS_LEN \ + (RNP_GLOBAL_STATS_LEN + RNP_HWSTRINGS_STATS_LEN + RNP_QUEUE_STATS_LEN) + +int rnp_wol_exclusion(struct rnp_adapter *adapter, struct ethtool_wolinfo *wol); +void rnp_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol); +int rnp_wol_exclusion(struct rnp_adapter *adapter, struct ethtool_wolinfo *wol); +int rnp_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol); +void rnp_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data); +void rnp_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +int rnp_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +int rnp_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam); +int rnp_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fecparam); +u32 rnp_get_msglevel(struct net_device *netdev); +void rnp_set_msglevel(struct net_device *netdev, u32 data); +int rnp_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state); +int rnp_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info); +void rnp_get_channels(struct net_device *dev, struct ethtool_channels *ch); +int rnp_set_channels(struct net_device *dev, struct ethtool_channels *ch); +int rnp_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo); +int rnp_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data); +void rnp_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack); +int rnp_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack); +int rnp_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump); +int rnp_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, + void *buffer); +int rnp_set_dump(struct net_device *netdev, struct ethtool_dump *dump); +int rnp_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +int rnp_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack 
*extack); +int rnp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs); +int rnp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd); +u32 rnp_rss_indir_size(struct net_device *netdev); +u32 rnp_get_rxfh_key_size(struct net_device *netdev); +void rnp_get_reta(struct rnp_adapter *adapter, u32 *indir); +int rnp_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); +int rnp_flash_device(struct net_device *dev, struct ethtool_flash *flash); +int rnp_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc); +#define RNP_WOL_GET_SUPPORTED(adapter) (!!(adapter->wol & GENMASK(3, 0))) +#define RNP_WOL_GET_STATUS(adapter) (!!(adapter->wol & GENMASK(7, 4))) +#define RNP_WOL_SET_SUPPORTED(adapter) (adapter->wol |= BIT(0)) +#define RNP_WOL_SET_STATUS(adapter) (adapter->wol |= BIT(4)) +#define RNP_WOL_CLEAR_STATUS(adapter) (adapter->wol &= ~BIT(4)) + +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_lib.c b/drivers/net/ethernet/mucse/rnp/rnp_lib.c new file mode 100644 index 0000000000000..be5c6fddfb4bf --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_lib.c @@ -0,0 +1,1334 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include "rnp.h" +#include "rnp_sriov.h" +#include "rnp_common.h" + +#if IS_ENABLED(CONFIG_DCB) +/** + * rnp_cache_ring_dcb_sriov - Descriptor ring to register mapping for SRIOV + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool rnp_cache_ring_dcb_sriov(struct rnp_adapter *adapter) +{ + u8 tcs = netdev_get_num_tc(adapter->netdev); + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return false; + + return true; +} +#endif + +/** + * rnp_cache_ring_dcb - Descriptor ring to register mapping for DCB + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for DCB to the assigned rings. 
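+ * Ring registers are interleaved across TCs in steps of four, so TC0 uses + * rings 0/4/8/c, TC1 rings 1/5/9/d and so on.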
+ * + **/ +static bool rnp_cache_ring_dcb(struct rnp_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + unsigned int tx_idx, rx_idx; + int tc, offset, rss_i, i, step; + u8 num_tcs = netdev_get_num_tc(dev); + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + /* verify we have DCB queueing enabled before proceeding */ + if (num_tcs <= 1) + return false; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + + step = 4; + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + /* + * rings are interleaved across TCs, starting from the TC index: + * tc0 0 4 8 c + * tc1 1 5 9 d + * tc2 2 6 a e + * tc3 3 7 b f + */ + tx_idx = tc; + rx_idx = tc; + for (i = 0; i < rss_i; i++, tx_idx += step, rx_idx += step) { + ring = adapter->tx_ring[offset + i]; + + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(tx_idx); + ring->rnp_queue_idx = tx_idx; + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + + ring = adapter->rx_ring[offset + i]; + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(rx_idx); + ring->rnp_queue_idx = rx_idx; + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + } + } + + return true; +} + +/** + * rnp_cache_ring_sriov - Descriptor ring to register mapping for sriov + * @adapter: board private structure to initialize + * + * SR-IOV doesn't use any descriptor rings but changes the default if + * no other mapping is used. + * + */ +static bool rnp_cache_ring_sriov(struct rnp_adapter *adapter) +{ + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & RNP_FLAG_VMDQ_ENABLED)) + return false; + + return true; +} + +/** + * rnp_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS to the assigned rings. + * + **/ +static bool rnp_cache_ring_rss(struct rnp_adapter *adapter) +{ + int i; + /* setup here */ + int ring_step = 1; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + /* n400 uses rings 0, 4, 8, c */ + if (hw->hw_type == rnp_hw_n400) + ring_step = 4; + + /* some ring alloc rules can be added here */ + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + ring->rnp_queue_idx = i * ring_step; + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(ring->rnp_queue_idx); + + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + ring->rnp_queue_idx = i * ring_step; + ring->ring_addr = + dma->dma_ring_addr + RING_OFFSET(ring->rnp_queue_idx); + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + } + + return true; +} + +/** + * rnp_cache_ring_register - Descriptor ring to register mapping + * @adapter: board private structure to initialize + * + * Once we know the feature-set enabled for the device, we'll cache + * the register offset the descriptor ring is assigned to. + * + * Note, the order of the various feature calls is important.
It must start + * with the "most" features enabled at the same time, then trickle down to + * the least amount of features turned on at once. + **/ +static void rnp_cache_ring_register(struct rnp_adapter *adapter) +{ + /* start with default case */ + +#if IS_ENABLED(CONFIG_DCB) + if (rnp_cache_ring_dcb_sriov(adapter)) + return; + +#endif + if (rnp_cache_ring_dcb(adapter)) + return; + + /* sriov ring alloc is added before, this maybe no use */ + if (rnp_cache_ring_sriov(adapter)) + return; + + rnp_cache_ring_rss(adapter); +} + +#define RNP_RSS_128Q_MASK 0x7F +#define RNP_RSS_64Q_MASK 0x3F +#define RNP_RSS_16Q_MASK 0xF +#define RNP_RSS_32Q_MASK 0x1F +#define RNP_RSS_8Q_MASK 0x7 +#define RNP_RSS_4Q_MASK 0x3 +#define RNP_RSS_2Q_MASK 0x1 +#define RNP_RSS_DISABLED_MASK 0x0 + +#if IS_ENABLED(CONFIG_DCB) + +/** + * rnp_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. + * + **/ +static bool rnp_set_dcb_sriov_queues(struct rnp_adapter *adapter) +{ + int i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = RNP_n10_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = RNP_n10_VMDQ_4Q_MASK; + } + + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* + * We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 2; + adapter->ring_feature[RING_F_RSS].mask = RNP_RSS_DISABLED_MASK; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, i, 1, i); + + return true; +} +#endif + +static bool rnp_set_dcb_queues(struct rnp_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct rnp_ring_feature *f; + int rss_i, rss_m, i; + int tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ + rss_i = dev->num_tx_queues / tcs; + + /* we only support 4 tc , rss_i max is 32 */ + + /* 4 TC w/ 32 queues per TC */ + rss_i = min_t(u16, rss_i, 32); + rss_m = RNP_RSS_32Q_MASK; + + /* set RSS mask and indices */ + /* f->limit is relative with cpu_vector */ + f = &adapter->ring_feature[RING_F_RSS]; + /* use f->limit to change rss */ + rss_i = min_t(int, rss_i, f->limit); + 
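/* rss_i is now min(dev->num_tx_queues / tcs, 32, f->limit) */ +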
f->indices = rss_i; + f->mask = rss_m; + + /* disable ATR as it is not supported when multiple TCs are enabled */ + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + + /* setup queue tc num */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + + /* set the true queues */ + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} + +/** + * rnp_set_sriov_queues - Allocate queues for SR-IOV devices + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. + * + **/ +static bool rnp_set_sriov_queues(struct rnp_adapter *adapter) +{ + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = RNP_RSS_DISABLED_MASK; + struct rnp_hw *hw = &adapter->hw; + + /* only proceed if SR-IOV is enabled */ + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return false; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = + adapter->max_ring_pair_counts - 1; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->num_rx_queues = hw->sriov_ring_limit; + adapter->num_tx_queues = hw->sriov_ring_limit; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + + return true; +} + +u32 rnp_rss_indir_tbl_entries(struct rnp_adapter *adapter) +{ + if (adapter->hw.rss_type == rnp_rss_uv3p) + return 8; + else if (adapter->hw.rss_type == rnp_rss_uv440) + return 128; + else if (adapter->hw.rss_type == rnp_rss_n10) + return 128; + else + return 128; +} + +/** + * rnp_set_rss_queues - Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try + * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. + * + **/ +static bool rnp_set_rss_queues(struct rnp_adapter *adapter) +{ + struct rnp_ring_feature *f; + u16 rss_i; + + f = &adapter->ring_feature[RING_F_RSS]; + /* use thid to change ring num */ + rss_i = f->limit; + /* set limit -> indices */ + f->indices = rss_i; + + /* should init rss mask */ + switch (adapter->hw.rss_type) { + case rnp_rss_uv3p: + f->mask = RNP_RSS_8Q_MASK; + break; + case rnp_rss_uv440: + f->mask = RNP_RSS_64Q_MASK; + break; + case rnp_rss_n10: + /* maybe not good */ + f->mask = RNP_RSS_128Q_MASK; + break; + /* maybe not good */ + default: + f->mask = 0; + + break; + } + + adapter->num_tx_queues = + min_t(int, rss_i, adapter->max_ring_pair_counts); + adapter->num_rx_queues = adapter->num_tx_queues; + + rnp_dbg("[%s] limit:%d indices:%d queues:%d\n", adapter->name, f->limit, + f->indices, adapter->num_tx_queues); + + return true; +} + +/** + * rnp_set_num_queues - Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. The order here is very + * important, starting with the "most" number of features turned on at once, + * and ending with the smallest set of features. This way large combinations + * can be allocated if they're turned on, and smaller combinations are the + * fallthrough conditions. 
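+ * The order tried here is DCB plus SR-IOV, DCB, SR-IOV and finally plain + * RSS as the base case.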
+ * + **/ +static void rnp_set_num_queues(struct rnp_adapter *adapter) +{ + /* Start with base case */ + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + +#if IS_ENABLED(CONFIG_DCB) + if (rnp_set_dcb_sriov_queues(adapter)) + return; + +#endif + if (rnp_set_dcb_queues(adapter)) + return; + + if (rnp_set_sriov_queues(adapter)) + return; + /* at last we support rss */ + rnp_set_rss_queues(adapter); +} + +int rnp_acquire_msix_vectors(struct rnp_adapter *adapter, int vectors) +{ + int err; + + err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vectors, vectors); + if (err < 0) { + rnp_err("pci_enable_msix failed: req:%d err:%d\n", vectors, + err); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + return -EINVAL; + } + /* + * Adjust for only the vectors we'll use, which is minimum + * of max_msix_q_vectors + NON_Q_VECTORS, or the number of + * vectors we were allocated. + */ + vectors -= adapter->num_other_vectors; + adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); + /* in dcb we use max 32 q-vectors */ + /* each vectors for max 4 tcs */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + adapter->num_q_vectors = min(32, adapter->num_q_vectors); + + return 0; +} + +static void rnp_add_ring(struct rnp_ring *ring, struct rnp_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +static inline void rnp_irq_enable_queues(struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->rx) + rnp_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK)); +} + +static inline void rnp_irq_disable_queues(struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->tx) + rnp_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK)); +} + +static enum hrtimer_restart irq_miss_check(struct hrtimer *hrtimer) +{ + struct rnp_q_vector *q_vector; + struct rnp_ring *ring; + struct rnp_tx_desc *eop_desc; + struct rnp_adapter *adapter; + + int tx_next_to_clean; + int tx_next_to_use; + + struct rnp_tx_buffer *tx_buffer; + union rnp_rx_desc *rx_desc; + + q_vector = container_of(hrtimer, struct rnp_q_vector, + irq_miss_check_timer); + adapter = q_vector->adapter; + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + goto do_self_napi; + rnp_irq_disable_queues(q_vector); + /* check tx irq miss */ + rnp_for_each_ring(ring, q_vector->tx) { + tx_next_to_clean = ring->next_to_clean; + tx_next_to_use = ring->next_to_use; + /* have work to do */ + if (tx_next_to_use == tx_next_to_clean) + continue; + /* have tx done */ + tx_buffer = &ring->tx_buffer_info[tx_next_to_clean]; + eop_desc = tx_buffer->next_to_watch; + /* next_to_watch maybe null in some condition */ + if (eop_desc) { + if ((eop_desc->vlan_cmd & + cpu_to_le32(RNP_TXD_STAT_DD))) { + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32(ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + napi_schedule_irqoff(&q_vector->napi); + goto do_self_napi; + } + } + } + + /* check rx irq */ + rnp_for_each_ring(ring, q_vector->rx) { + rx_desc = RNP_RX_DESC(ring, ring->next_to_clean); + if (rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) { + int size; + + size = le16_to_cpu(rx_desc->wb.len); + + if (size) { + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32( + ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + 
q_vector->new_rx_count; + } + napi_schedule_irqoff(&q_vector->napi); + } else { + /* in sriov mode, request a pf reset */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + adapter->flags2 |= RNP_FLAG2_RESET_PF; + else + adapter->flags2 |= + RNP_FLAG2_RESET_REQUESTED; + } + goto do_self_napi; + } + } + /* re-enable queue irqs */ + rnp_irq_enable_queues(q_vector); +do_self_napi: + return HRTIMER_NORESTART; +} + +/** + * rnp_alloc_q_vector - Allocate memory for a single interrupt vector + * @adapter: board private structure to initialize + * @eth_queue_idx: first netdev queue index served by this vector + * @v_idx: index of vector in adapter struct + * @r_idx: index of the first hardware ring to attach + * @r_count: number of Tx/Rx ring pairs to attach to this vector + * @step: stride between consecutive hardware ring indices + * + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ +static int rnp_alloc_q_vector(struct rnp_adapter *adapter, int eth_queue_idx, + int v_idx, int r_idx, int r_count, int step) +{ + struct rnp_q_vector *q_vector; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + int node = NUMA_NO_NODE; + int cpu = -1; + int ring_count, size; + int txr_count, rxr_count, idx; + int rxr_idx = r_idx, txr_idx = r_idx; + int cpu_offset = 0; + + DPRINTK(PROBE, INFO, + "eth_queue_idx:%d v_idx:%d(off:%d) ring:%d ring_cnt:%d, " + "step:%d\n", + eth_queue_idx, v_idx, adapter->q_vector_off, r_idx, r_count, + step); + + txr_count = rxr_count = r_count; + + ring_count = txr_count + rxr_count; + size = sizeof(struct rnp_q_vector) + + (sizeof(struct rnp_ring) * ring_count); + + /* subtract adapter->q_vector_off to get the zero-based vector index */ + if (cpu_online(cpu_offset + v_idx - adapter->q_vector_off)) { + /* cpu 1 - 7 */ + cpu = cpu_offset + v_idx - adapter->q_vector_off; + node = cpu_to_node(cpu); + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + + q_vector->numa_node = node; + + /* initialize napi */ + netif_napi_add_weight(adapter->netdev, &q_vector->napi, rnp_poll, + adapter->napi_budge); + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx - adapter->q_vector_off] = q_vector; + q_vector->adapter = adapter; + q_vector->v_idx = v_idx; + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; + + for (idx = 0; idx < txr_count; idx++) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Tx values */ + rnp_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_item_count; + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + int rss_i; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + /* in dcb mode, spread queue indices by the rss size */ + ring->queue_index = eth_queue_idx + idx * rss_i; + } else { + ring->queue_index = eth_queue_idx + idx; + } + /* rnp_queue_idx may be remapped later */ + /* it is used to locate hw registers */ + ring->rnp_queue_idx = txr_idx; + ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(txr_idx); + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr
+ RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + ring->device_id = adapter->pdev->device; + ring->pfvfnum = hw->pfvfnum; + /* n10 should skip tx start control */ + if (hw->hw_type == rnp_hw_n10) + ring->ring_flags |= RNP_RING_SKIP_TX_START; + + if (hw->hw_type == rnp_hw_n400) + ring->ring_flags |= RNP_RING_SKIP_TX_START; + + /* assign ring to adapter */ + adapter->tx_ring[ring->queue_index] = ring; + + /* update count and index */ + txr_idx += step; + + rnp_dbg("\t\t%s:vector[%d] <--RNP TxRing:%d, eth_queue:%d\n", + adapter->name, v_idx, ring->rnp_queue_idx, + ring->queue_index); + + /* push pointer to next ring */ + ring++; + } + + for (idx = 0; idx < rxr_count; idx++) { + /* assign generic ring traits */ + ring->dev = &adapter->pdev->dev; + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ + ring->q_vector = q_vector; + + /* update q_vector Rx values */ + rnp_add_ring(ring, &q_vector->rx); + + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_item_count; + /* rnp_queue_idx can be changed after */ + /* it is used to location hw reg */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + int rss_i; + + rss_i = adapter->ring_feature[RING_F_RSS].indices; + /* in dcb mode should assign rss */ + ring->queue_index = eth_queue_idx + idx * rss_i; + } else { + ring->queue_index = eth_queue_idx + idx; + } + ring->rnp_queue_idx = rxr_idx; + ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(rxr_idx); + ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR; + ring->device_id = adapter->pdev->device; + ring->pfvfnum = hw->pfvfnum; + if (hw->hw_type == rnp_hw_n10) { + } else if (hw->hw_type == rnp_hw_n400) { + } + + /* assign ring to adapter */ + adapter->rx_ring[ring->queue_index] = ring; + rnp_dbg("\t\t%s:vector[%d] <--RNP RxRing:%d, eth_queue:%d\n", + adapter->name, v_idx, ring->rnp_queue_idx, + ring->queue_index); + + /* update count and index */ + rxr_idx += step; + + /* push pointer to next ring */ + ring++; + } + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + q_vector->vector_flags |= RNP_QVECTOR_FLAG_IRQ_MISS_CHECK; + q_vector->vector_flags |= RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS; + /* initialize timer */ + q_vector->irq_check_usecs = 1000; + hrtimer_init(&q_vector->irq_miss_check_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + q_vector->irq_miss_check_timer.function = irq_miss_check; + q_vector->new_rx_count = adapter->rx_frames; + q_vector->old_rx_count = adapter->rx_frames; + } + + return 0; +} + +/** + * rnp_free_q_vector - Free memory allocated for specific interrupt vector + * @adapter: board private structure to initialize + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
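+ * The irq-miss hrtimer is cancelled before the vector memory is released + * via kfree_rcu().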
+ **/ +static void rnp_free_q_vector(struct rnp_adapter *adapter, int v_idx) +{ + struct rnp_q_vector *q_vector = adapter->q_vector[v_idx]; + struct rnp_ring *ring; + + rnp_dbg("v_idx:%d\n", v_idx); + + rnp_for_each_ring(ring, q_vector->tx) + adapter->tx_ring[ring->queue_index] = NULL; + + rnp_for_each_ring(ring, q_vector->rx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + + /* must stop timer */ + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + hrtimer_cancel(&q_vector->irq_miss_check_timer); + + /* + * rnp_get_stats64() might access the rings on this vector, + * we must wait a grace period before freeing it. + */ + kfree_rcu(q_vector, rcu); +} + +/** + * rnp_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int rnp_alloc_q_vectors(struct rnp_adapter *adapter) +{ + int v_idx = adapter->q_vector_off; + int ring_idx = 0; + int r_remaing = + min_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + int ring_step = 1; + int err, ring_cnt, v_remaing = adapter->num_q_vectors; + int q_vector_nums = 0; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + ring_idx = 0; + /* only 2 rings when sriov enabled */ + /* from back */ + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + ring_idx = 0; + r_remaing = hw->sriov_ring_limit; + } else { + ring_idx = adapter->max_ring_pair_counts - + ring_step * hw->sriov_ring_limit; + r_remaing = hw->sriov_ring_limit; + } + } + + adapter->eth_queue_idx = 0; + BUG_ON(adapter->num_q_vectors == 0); + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + rnp_dbg("in dcb mode r_remaing %d, num_q_vectors %d\n", + r_remaing, v_remaing); + } + + rnp_dbg("r_remaing:%d, ring_step:%d num_q_vectors:%d\n", r_remaing, + ring_step, v_remaing); + + /* can support muti rings in one q_vector */ + for (; r_remaing > 0 && v_remaing > 0; v_remaing--) { + ring_cnt = DIV_ROUND_UP(r_remaing, v_remaing); + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + BUG_ON(ring_cnt != adapter->num_tc); + + err = rnp_alloc_q_vector(adapter, adapter->eth_queue_idx, v_idx, + ring_idx, ring_cnt, ring_step); + if (err) + goto err_out; + ring_idx += ring_step * ring_cnt; + r_remaing -= ring_cnt; + v_idx++; + q_vector_nums++; + /* dcb mode only add 1 */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + adapter->eth_queue_idx += 1; + else + adapter->eth_queue_idx += ring_cnt; + } + /* should fix the real used q_vectors_nums */ + adapter->num_q_vectors = q_vector_nums; + + return 0; + +err_out: + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + rnp_free_q_vector(adapter, v_idx); + + return -ENOMEM; +} + +/** + * rnp_free_q_vectors - Free memory allocated for interrupt vectors + * @adapter: board private structure to initialize + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. 
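+ * Queue and vector counts are zeroed here so a later call to + * rnp_init_interrupt_scheme() starts from a clean state.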
+ **/ +static void rnp_free_q_vectors(struct rnp_adapter *adapter) +{ + int v_idx = adapter->num_q_vectors; + + adapter->num_rx_queues = 0; + adapter->num_tx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) + rnp_free_q_vector(adapter, v_idx); +} + +static void rnp_reset_interrupt_capability(struct rnp_adapter *adapter) +{ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) + pci_disable_msix(adapter->pdev); + else if (adapter->flags & RNP_FLAG_MSI_CAPABLE) + pci_disable_msi(adapter->pdev); + + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + adapter->q_vector_off = 0; + + /* frist clean msix flags */ + adapter->flags &= (~RNP_FLAG_MSIX_ENABLED); + adapter->flags &= (~RNP_FLAG_MSI_ENABLED); +} + +/** + * rnp_set_interrupt_capability - set MSI-X or MSI if supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int rnp_set_interrupt_capability(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int vector, v_budget, err = 0; + int irq_mode_back = adapter->irq_mode; + + v_budget = min_t(int, adapter->num_tx_queues, adapter->num_rx_queues); + /* in one ring mode should reset v_budget */ + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += adapter->num_other_vectors; + v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); + + if (adapter->irq_mode == irq_mode_msix) { + adapter->msix_entries = kcalloc( + v_budget, sizeof(struct msix_entry), GFP_KERNEL); + + if (!adapter->msix_entries) { + rnp_err("alloc msix_entries failed!\n"); + return -EINVAL; + } + dbg("[%s] adapter:%p msix_entry:%p\n", __func__, adapter, + adapter->msix_entries); + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + err = rnp_acquire_msix_vectors(adapter, v_budget); + if (!err) { + if (adapter->num_other_vectors) + adapter->q_vector_off = 1; + rnp_dbg("adapter%d alloc vectors: cnt:%d [%d~%d] num_q_vectors:%d\n", + adapter->bd_number, v_budget, + adapter->q_vector_off, + adapter->q_vector_off + v_budget - 1, + adapter->num_q_vectors); + adapter->flags |= RNP_FLAG_MSIX_ENABLED; + + goto out; + } + /* if has msi capability try it */ + if (adapter->flags & RNP_FLAG_MSI_CAPABLE) + adapter->irq_mode = irq_mode_msi; + kfree(adapter->msix_entries); + rnp_dbg("acquire msix failed, try to use msi\n"); + } else { + rnp_dbg("adapter%d not in msix mode\n", adapter->bd_number); + } + /* if has msi capability or set irq_mode */ + if (adapter->irq_mode == irq_mode_msi) { + err = pci_enable_msi(adapter->pdev); + if (err) { + rnp_dbg("Failed to allocate MSI interrupt, falling back to legacy. 
Error"); + } else { + /* msi mode use only 1 irq */ + adapter->flags |= RNP_FLAG_MSI_ENABLED; + } + } + /* write back origin irq_mode */ + adapter->irq_mode = irq_mode_back; + /* legacy and msi only 1 vectors */ + adapter->num_q_vectors = 1; +out: + return err; +} + +static void rnp_print_ring_info(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *ring; + struct rnp_q_vector *q_vector; + + rnp_dbg("tx_queue count %d\n", adapter->num_tx_queues); + rnp_dbg("queue-mapping :\n"); + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + rnp_dbg(" queue %d , physical ring %d\n", i, + ring->rnp_queue_idx); + } + rnp_dbg("rx_queue count %d\n", adapter->num_rx_queues); + rnp_dbg("queue-mapping :\n"); + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + rnp_dbg(" queue %d , physical ring %d\n", i, + ring->rnp_queue_idx); + } + rnp_dbg("q_vector count %d\n", adapter->num_q_vectors); + rnp_dbg("vector-queue mapping:\n"); + for (i = 0; i < adapter->num_q_vectors; i++) { + q_vector = adapter->q_vector[i]; + rnp_dbg("vector %d\n", i); + rnp_for_each_ring(ring, q_vector->tx) + rnp_dbg(" tx physical ring %d\n", ring->rnp_queue_idx); + + rnp_for_each_ring(ring, q_vector->rx) + rnp_dbg(" rx physical ring %d\n", ring->rnp_queue_idx); + } +} + +/** + * rnp_init_interrupt_scheme - Determine proper interrupt scheme + * @adapter: board private structure to initialize + * + * We determine which interrupt scheme to use based on... + * - Hardware queue count (num_*_queues) + * - defined by miscellaneous hardware support/features (RSS, etc.) + **/ +int rnp_init_interrupt_scheme(struct rnp_adapter *adapter) +{ + int err; + + /* Number of supported queues */ + rnp_set_num_queues(adapter); + + /* Set interrupt mode */ + err = rnp_set_interrupt_capability(adapter); + if (err) { + e_dev_err("Unable to get interrupt\n"); + goto err_set_interrupt; + } + + err = rnp_alloc_q_vectors(adapter); + if (err) { + e_dev_err("Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + rnp_cache_ring_register(adapter); + + DPRINTK(PROBE, INFO, + "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n\n", + (adapter->num_rx_queues > 1) ? 
"Enabled" : "Disabled", + adapter->num_rx_queues, adapter->num_tx_queues); + rnp_print_ring_info(adapter); + + set_bit(__RNP_DOWN, &adapter->state); + + return 0; + +err_alloc_q_vectors: + rnp_reset_interrupt_capability(adapter); +err_set_interrupt:; + return err; +} + +/** + * rnp_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +void rnp_clear_interrupt_scheme(struct rnp_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + rnp_free_q_vectors(adapter); + rnp_reset_interrupt_capability(adapter); +} + +/** + * rnp_tx_ctxtdesc - Send a control desc to hw + * @tx_ring: target ring of this control desc + * @mss_seg_len: mss length + * @l4_hdr_len: l4 length + * @tunnel_hdr_len: tunnel_hdr_len + * @inner_vlan_tag: inner_vlan_tag + * @type_tucmd: cmd + * + **/ +void rnp_tx_ctxtdesc(struct rnp_ring *tx_ring, u32 mss_len_vf_num, + u32 inner_vlan_tunnel_len, int ignore_vlan, bool crc_pad) +{ + struct rnp_tx_ctx_desc *context_desc; + u16 i = tx_ring->next_to_use; + struct rnp_adapter *adapter = RING2ADAPT(tx_ring); + u32 type_tucmd = 0; + + context_desc = RNP_TX_CTXTDESC(tx_ring, i); + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= RNP_TXD_CTX_CTRL_DESC; + + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + if (!crc_pad) + type_tucmd |= RNP_TXD_MTI_CRC_PAD_CTRL; + /* close mac padding */ + } + + if (tx_ring->ring_flags & RNP_RING_OUTER_VLAN_FIX) { +#define VLAN_MASK (0x0000ffff) +#define VLAN_INSERT (0x00800000) + if (inner_vlan_tunnel_len & VLAN_MASK) + type_tucmd |= VLAN_INSERT; + + } else { + if (inner_vlan_tunnel_len & 0x00ffff00) { + /* if a inner vlan */ + type_tucmd |= RNP_TXD_CMD_INNER_VLAN; + } + } + + context_desc->mss_len_vf_num = cpu_to_le32(mss_len_vf_num); + context_desc->inner_vlan_tunnel_len = + cpu_to_le32(inner_vlan_tunnel_len); + context_desc->resv_cmd = cpu_to_le32(type_tucmd); + if (tx_ring->q_vector->adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + if (ignore_vlan) + context_desc->inner_vlan_tunnel_len |= + VF_VEB_IGNORE_VLAN; + } + buf_dump_line("ctx ", __LINE__, context_desc, sizeof(*context_desc)); +} + +void rnp_maybe_tx_ctxtdesc(struct rnp_ring *tx_ring, + struct rnp_tx_buffer *first, u32 ignore_vlan) +{ + /* sriov mode pf use the last vf */ + if (first->ctx_flag) { + rnp_tx_ctxtdesc(tx_ring, first->mss_len_vf_num, + first->inner_vlan_tunnel_len, ignore_vlan, + first->gso_need_padding); + } +} + +void rnp_store_reta(struct rnp_adapter *adapter) +{ + u32 i, reta_entries = rnp_rss_indir_tbl_entries(adapter); + struct rnp_hw *hw = &adapter->hw; + u32 reta = 0; + /* relative with rss table */ + struct rnp_ring *rx_ring; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + reta = adapter->rss_indir_tbl[i]; + } else { + rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]]; + reta = rx_ring->rnp_queue_idx; + } + hw->rss_indir_tbl[i] = reta; + } + hw->ops.set_rss_table(hw); +} + +void rnp_store_key(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + hw->ops.set_rss_key(hw, sriov_flag); +} + +int rnp_init_rss_key(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + bool 
sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + /* only init rss key once */ + /* no change rss key if user input one */ + if (!adapter->rss_key_setup_flag) { + netdev_rss_key_fill(adapter->rss_key, RNP_RSS_KEY_SIZE); + adapter->rss_key_setup_flag = 1; + } + hw->ops.set_rss_key(hw, sriov_flag); + + return 0; +} + +int rnp_init_rss_table(struct rnp_adapter *adapter) +{ + int rx_nums = adapter->num_rx_queues; + int i, j; + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring; + u32 reta = 0; + u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); + + if (adapter->flags & RNP_FLAG_DCB_ENABLED) { + rx_nums = rx_nums / adapter->num_tc; + for (i = 0, j = 0; i < 8; i++) { + adapter->rss_tc_tbl[i] = j; + hw->rss_tc_tbl[i] = j; + j = (j + 1) % adapter->num_tc; + } + } else { + for (i = 0, j = 0; i < 8; i++) { + hw->rss_tc_tbl[i] = 0; + adapter->rss_tc_tbl[i] = 0; + } + } + + /* adapter->num_q_vectors is not correct */ + for (i = 0, j = 0; i < reta_entries; i++) { + /* init with default value */ + if (!adapter->rss_tbl_setup_flag) + adapter->rss_indir_tbl[i] = j; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* in sriov mode reta in [0, rx_nums] */ + reta = j; + } else { + /* in no sriov, reta is real ring number */ + rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]]; + reta = rx_ring->rnp_queue_idx; + } + /* store rss_indir_tbl */ + hw->rss_indir_tbl[i] = reta; + + j = (j + 1) % rx_nums; + } + /* tbl only init once */ + adapter->rss_tbl_setup_flag = 1; + + hw->ops.set_rss_table(hw); + return 0; +} + +void rnp_setup_dma_rx(struct rnp_adapter *adapter, int count_in_dw) +{ + struct rnp_hw *hw = &adapter->hw; + u32 data; + + data = rd32(hw, RNP_DMA_CONFIG); + data &= (0x00000ffff); + data |= (count_in_dw << 16); + wr32(hw, RNP_DMA_CONFIG, data); +} + +/* setup to the hw */ +s32 rnp_fdir_write_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *filter, u16 hw_id, + u8 queue, bool prio_flag) +{ + if (filter->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) + hw->ops.set_layer2_remapping(hw, filter, hw_id, queue, + prio_flag); + else + hw->ops.set_tuple5_remapping(hw, filter, hw_id, queue, + prio_flag); + + return 0; +} + +s32 rnp_fdir_erase_perfect_filter(int fdir_mode, struct rnp_hw *hw, + union rnp_atr_input *input, u16 pri_id) +{ + /* just disable filter */ + if (input->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) { + hw->ops.clr_layer2_remapping(hw, pri_id); + dbg("disable layer2 %d\n", pri_id); + } else { + hw->ops.clr_tuple5_remapping(hw, pri_id); + dbg("disable tuple5 %d\n", pri_id); + } + + return 0; +} + +u32 rnp_tx_desc_unused_sw(struct rnp_ring *tx_ring) +{ + u16 ntu = tx_ring->next_to_use; + u16 ntc = tx_ring->next_to_clean; + u16 count = tx_ring->count; + + return ((ntu >= ntc) ? (count - ntu + ntc) : (ntc - ntu)); +} + +u32 rnp_rx_desc_used_hw(struct rnp_hw *hw, struct rnp_ring *rx_ring) +{ + u32 head = ring_rd32(rx_ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + u32 tail = ring_rd32(rx_ring, RNP_DMA_REG_RX_DESC_BUF_TAIL); + u16 count = rx_ring->count; + + return ((tail >= head) ? (count - tail + head) : (head - tail)); +} + +u32 rnp_tx_desc_unused_hw(struct rnp_hw *hw, struct rnp_ring *tx_ring) +{ + u32 head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + u32 tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + u16 count = tx_ring->count; + + return ((tail >= head) ? 
(count - tail + head) : (head - tail)); +} + +s32 rnp_disable_rxr_maxrate(struct net_device *netdev, u8 queue_index) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring = adapter->rx_ring[queue_index]; + u32 reg_idx = rx_ring->rnp_queue_idx; + + /* disable which dma ring in maxrate limit mode */ + wr32(hw, RNP_SELECT_RING_EN(reg_idx), 0); + /* Clear Tx Ring maxrate */ + wr32(hw, RNP_RX_RING_MAXRATE(reg_idx), 0); + + return 0; +} + +s32 rnp_enable_rxr_maxrate(struct net_device *netdev, u8 queue_index, + u32 maxrate) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *rx_ring = adapter->rx_ring[queue_index]; + u32 reg_idx = rx_ring->rnp_queue_idx; + u32 real_rate = maxrate / 16; + + if (!real_rate) + return -EINVAL; + + wr32(hw, RNP_RING_FC_ENABLE, true); + /* disable which dma ring in maxrate limit mode */ + wr32(hw, RNP_SELECT_RING_EN(reg_idx), true); + /* Clear Tx Ring maxrate */ + wr32(hw, RNP_RX_RING_MAXRATE(reg_idx), real_rate); + + return 0; +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_main.c b/drivers/net/ethernet/mucse/rnp/rnp_main.c new file mode 100644 index 0000000000000..db7927f70ceac --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_main.c @@ -0,0 +1,7943 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rnp_tc_u32_parse.h" +#include "rnp_common.h" +#include "rnp.h" +#include "rnp_dcb.h" +#include "rnp_sriov.h" +#include "rnp_ptp.h" +#include "rnp_ethtool.h" +#include "rnp_mpe.h" + +#ifdef CONFIG_ARM64 +#define NO_BQL_TEST +#endif + +char rnp_driver_name[] = "rnp"; +static const char rnp_driver_string[] = + "mucse 1/10/25/40 Gigabit PCI Express Network Driver"; +#define DRV_VERSION "1.0.1-rc2" +#include "version.h" + +const char rnp_driver_version[] = DRV_VERSION; +static const char rnp_copyright[] = + "Copyright (c) 2020-2024 mucse Corporation."; + +extern struct rnp_info rnp_n10_info; +extern struct rnp_info rnp_n400_info; + +static struct rnp_info *rnp_info_tbl[] = { + [board_n10] = &rnp_n10_info, + [board_n400] = &rnp_n400_info, +}; + +static int register_mbx_irq(struct rnp_adapter *adapter); +static void remove_mbx_irq(struct rnp_adapter *adapter); + +static void rnp_pull_tail(struct sk_buff *skb); +#ifdef OPTM_WITH_LPAGE +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi, + union rnp_rx_desc *rx_desc, u16 bufsz, + u64 fun_id); + +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer); +#else +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi); +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct sk_buff *skb); +#endif + +static struct pci_device_id rnp_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10), + .driver_data = board_n10 }, /* n10 40G 10G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10_X1), + .driver_data = board_n10 }, /* n10 40G 10G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10_TP), + .driver_data = board_n10 }, /* n10 10G TP */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400), 
+ .driver_data = board_n400 }, /* n400 4port 1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400C), + .driver_data = board_n400 }, /* n400 4port 1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400_X1), + .driver_data = board_n10 }, /* n400 1port 10G/1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N400C_X1), + .driver_data = board_n10 }, /* n400 1port 10G/1G */ + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N10C), + .driver_data = board_n10 }, /* n10c 40G 10G */ + /* required last entry */ + { + 0, + }, +}; + +MODULE_DEVICE_TABLE(pci, rnp_pci_tbl); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0000); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static unsigned int fix_eth_name; +module_param(fix_eth_name, uint, 0000); +MODULE_PARM_DESC(fix_eth_name, "set eth adapter name to rnpXX"); + +static int module_enable_ptp = 1; +module_param(module_enable_ptp, uint, 0000); +MODULE_PARM_DESC(module_enable_ptp, "enable ptp feature, disabled default"); + +unsigned int mpe_src_port; +module_param(mpe_src_port, uint, 0000); +MODULE_PARM_DESC(mpe_src_port, "mpe src port"); + +unsigned int mpe_pkt_version; +module_param(mpe_pkt_version, uint, 0000); +MODULE_PARM_DESC(mpe_pkt_version, "ipv4 or ipv6 src port"); + +MODULE_AUTHOR("Mucse Corporation, "); +MODULE_DESCRIPTION("Mucse(R) 1/10/25/40 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static struct workqueue_struct *rnp_wq; +static int enable_hi_dma; +extern void rnp_service_timer(struct timer_list *t); + +void rnp_service_event_schedule(struct rnp_adapter *adapter) +{ + if (!test_bit(__RNP_DOWN, &adapter->state) && + !test_and_set_bit(__RNP_SERVICE_SCHED, &adapter->state)) + queue_work(rnp_wq, &adapter->service_task); +} + +static void rnp_service_event_complete(struct rnp_adapter *adapter) +{ + BUG_ON(!test_bit(__RNP_SERVICE_SCHED, &adapter->state)); + + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__RNP_SERVICE_SCHED, &adapter->state); +} + + +/** + * rnp_set_ring_vector - set the ring_vector registers, mapping interrupt + * causes to vectors + * + * @adapter: pointer to adapter struct + * @queue: queue to map the corresponding interrupt to + * @msix_vector: the vector to map to the corresponding queue + * + */ +static void rnp_set_ring_vector(struct rnp_adapter *adapter, u8 rnp_queue, + u8 rnp_msix_vector) +{ + struct rnp_hw *hw = &adapter->hw; + u32 data = 0; + + data = hw->pfvfnum << 24; + data |= (rnp_msix_vector << 8); + data |= (rnp_msix_vector << 0); + + DPRINTK(IFUP, INFO, + "Set Ring-Vector queue:%d (reg:0x%x) <-- Rx-MSIX:%d, Tx-MSIX:%d\n", + rnp_queue, RING_VECTOR(rnp_queue), rnp_msix_vector, + rnp_msix_vector); + + rnp_wr_reg(hw->ring_msix_base + RING_VECTOR(rnp_queue), data); +} + +static void rnp_unmap_and_free_tx_resource(struct rnp_ring *ring, + struct rnp_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* tx_buffer must be completely set up in the 
transmit path */ +} + +static u64 rnp_get_tx_completed(struct rnp_ring *ring) +{ + return ring->stats.packets; +} + +static u64 rnp_get_tx_pending(struct rnp_ring *ring) +{ + u32 head = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + u32 tail = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + + if (head != tail) + return (head < tail) ? tail - head : + (tail + ring->count - head); + + return 0; +} + +static inline bool rnp_check_tx_hang(struct rnp_ring *tx_ring) +{ + u32 tx_done = rnp_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; + u32 tx_pending = rnp_get_tx_pending(tx_ring); + bool ret = false; + + clear_check_for_tx_hang(tx_ring); + + /* + * Check for a hung queue, but be thorough. This verifies + * that a transmit has been completed since the previous + * check AND there is at least one packet pending. The + * ARMED bit is set to indicate a potential hang. The + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. By + * requiring this to fail twice we avoid races with + * pfc clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ + if ((tx_done_old == tx_done) && tx_pending) { + /* make sure it is true for two checks in a row */ + ret = test_and_set_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state); + } else { + /* update completed stats and continue */ + tx_ring->tx_stats.tx_done_old = tx_done; + /* reset the countdown */ + clear_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state); + } + return ret; +} + +/** + * rnp_tx_timeout_reset - initiate reset due to Tx timeout + * @adapter: driver private struct + **/ +static void rnp_tx_timeout_reset(struct rnp_adapter *adapter) +{ + /* Do the reset outside of interrupt context */ + if (!test_bit(__RNP_DOWN, &adapter->state)) { + adapter->flags2 |= RNP_FLAG2_RESET_REQUESTED; + e_warn(drv, "initiating reset due to tx timeout\n"); + rnp_service_event_schedule(adapter); + } +} + +static void rnp_check_restart_tx(struct rnp_q_vector *q_vector, + struct rnp_ring *tx_ring) +{ + struct rnp_adapter *adapter = q_vector->adapter; +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (likely(netif_carrier_ok(tx_ring->netdev) && + (rnp_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
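+ * The smp_mb() below orders the next_to_clean update before the + * __netif_subqueue_stopped() check that follows.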
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__RNP_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } +} + +/** + * rnp_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean + **/ +static bool rnp_clean_tx_irq(struct rnp_q_vector *q_vector, + struct rnp_ring *tx_ring, int napi_budget) +{ + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_tx_buffer *tx_buffer; + struct rnp_tx_desc *tx_desc; + u64 total_bytes = 0, total_packets = 0; + int budget = q_vector->tx.work_limit; + int i = tx_ring->next_to_clean; + + if (test_bit(__RNP_DOWN, &adapter->state)) + return true; + tx_ring->tx_stats.poll_count++; + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = RNP_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct rnp_tx_desc *eop_desc = tx_buffer->next_to_watch; + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + rmb(); + + /* if eop DD is not set pending work has not been completed */ + if (!(eop_desc->vlan_cmd & cpu_to_le32(RNP_TXD_STAT_DD))) + break; + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = RNP_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = RNP_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + tx_ring->tx_stats.tx_clean_count += total_packets; + tx_ring->tx_stats.tx_clean_times++; + if (tx_ring->tx_stats.tx_clean_times > 10) { + tx_ring->tx_stats.tx_clean_times = 0; + tx_ring->tx_stats.tx_clean_count = 0; + } + + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + + tx_ring->tx_stats.send_done_bytes += total_bytes; +#ifdef NO_BQL_TEST +#else + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, + total_bytes); +#endif + + if (!(q_vector->vector_flags & RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS)) { +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if 
(likely(netif_carrier_ok(tx_ring->netdev) && + (rnp_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && + !test_bit(__RNP_DOWN, &adapter->state)) { + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } + } + } + + /* now we start tx queue later */ + return !!budget; +} + +static inline void rnp_rx_hash(struct rnp_ring *ring, + union rnp_rx_desc *rx_desc, struct sk_buff *skb) +{ + int rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; +#define RNP_RSS_TYPE_MASK 0xc0 + rss_type = rx_desc->wb.cmd & RNP_RSS_TYPE_MASK; + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash), + rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); +} + +/** + * rnp_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data + * @rx_desc: current Rx descriptor being processed + * @skb: skb currently being received and modified + **/ +static inline void rnp_rx_checksum(struct rnp_ring *ring, + union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + bool encap_pkt = false; + + skb_checksum_none_assert(skb); + /* Rx csum disabled */ + if (!(ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (!(ring->ring_flags & RNP_RING_NO_TUNNEL_SUPPORT)) { + if (rnp_get_stat(rx_desc, RNP_RXD_STAT_TUNNEL_MASK) == + RNP_RXD_STAT_TUNNEL_VXLAN) { + encap_pkt = true; + skb->encapsulation = 1; + skb->ip_summed = CHECKSUM_NONE; + } + } + /* if outer L3/L4 error */ + /* must in promisc mode or rx-all mode */ + if (rnp_test_staterr(rx_desc, RNP_RXD_STAT_ERR_MASK)) { + return; + } + ring->rx_stats.csum_good++; + /* at least it is a ip packet which has ip checksum */ + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (encap_pkt) { + /* If we checked the outer header let the stack know */ + skb->csum_level = 1; + } +} + +static inline void rnp_update_rx_tail(struct rnp_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + rnp_wr_reg(rx_ring->tail, val); +} + +#if (PAGE_SIZE < 8192) +#define RNP_MAX_2K_FRAME_BUILD_SKB (RNP_RXBUFFER_1536 - NET_IP_ALIGN) +#define RNP_2K_TOO_SMALL_WITH_PADDING \ + ((NET_SKB_PAD + RNP_RXBUFFER_1536) > SKB_WITH_OVERHEAD(RNP_RXBUFFER_2K)) + +static inline int rnp_compute_pad(int rx_buf_len) +{ + int page_size, pad_size; + + page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); + pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; + + return pad_size; +} + +static inline int rnp_skb_pad(void) +{ + int rx_buf_len; + + /* If a 2K buffer cannot handle a standard Ethernet frame then + * optimize padding for a 3K buffer instead of a 1.5K buffer. + * + * For a 3K buffer we need to add enough padding to allow for + * tailroom due to NET_IP_ALIGN possibly shifting us out of + * cache-line alignment. 
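+ * + * Either way the pad works out to the headroom left in the buffer once the + * frame data and the skb_shared_info overhead are accounted for.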
+ */ + if (RNP_2K_TOO_SMALL_WITH_PADDING) + rx_buf_len = RNP_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); + else + rx_buf_len = RNP_RXBUFFER_1536; + + /* if needed make room for NET_IP_ALIGN */ + rx_buf_len -= NET_IP_ALIGN; + return rnp_compute_pad(rx_buf_len); +} + +#define RNP_SKB_PAD rnp_skb_pad() +#else /* PAGE_SIZE < 8192 */ +#define RNP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#endif + +/** + * rnp_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, timestamp, protocol, and + * other fields within the skb. + **/ +static void rnp_process_skb_fields(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *dev = rx_ring->netdev; + struct rnp_adapter *adapter = netdev_priv(dev); + + rnp_rx_hash(rx_ring, rx_desc, skb); + rnp_rx_checksum(rx_ring, rx_desc, skb); + if (((dev->features & NETIF_F_HW_VLAN_CTAG_RX) + || (dev->features & NETIF_F_HW_VLAN_STAG_RX)) && + rnp_test_staterr(rx_desc, RNP_RXD_STAT_VLAN_VALID) && + !ignore_veb_vlan(rx_ring->q_vector->adapter, rx_desc)) { + + if (rx_ring->ring_flags & RNP_RING_DOUBLE_VLAN_SUPPORT) { + /* check outer vlan first */ + if (rnp_test_ext_cmd(rx_desc, REV_OUTER_VLAN)) { + u16 vid_inner = le16_to_cpu(rx_desc->wb.vlan); + u16 vid_outer; + u16 vlan_tci = htons(ETH_P_8021Q); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vid_inner); + /* check outer vlan type */ + if (rx_ring->ring_flags & + RNP_RING_STAGS_SUPPORT) { + if (rnp_test_staterr( + rx_desc, + RNP_RXD_STAT_STAG)) { + switch (rx_ring->q_vector + ->adapter + ->outer_vlan_type) { + case outer_vlan_type_88a8: + vlan_tci = htons( + ETH_P_8021AD); + break; + case outer_vlan_type_9100: + vlan_tci = htons( + ETH_P_QINQ1); + break; + case outer_vlan_type_9200: + vlan_tci = htons( + ETH_P_QINQ2); + break; + default: + vlan_tci = htons( + ETH_P_8021AD); + break; + } + } else { + vlan_tci = htons(ETH_P_8021Q); + } + } else { + vlan_tci = htons(ETH_P_8021Q); + } + vid_outer = le16_to_cpu(rx_desc->wb.mark); + /* if in stags mode should ignore only stags */ + if (adapter->flags2 & + RNP_FLAG2_VLAN_STAGS_ENABLED) { + /* push outer in if not equal stags or cvlan */ + if ((vid_outer != adapter->stags_vid) || + (vlan_tci == htons(ETH_P_8021Q))) { + /* push outer inner */ + skb = __vlan_hwaccel_push_inside( + skb); + __vlan_hwaccel_put_tag( + skb, vlan_tci, + vid_outer); + } + } else { + /* push outer */ + skb = __vlan_hwaccel_push_inside(skb); + __vlan_hwaccel_put_tag(skb, vlan_tci, + vid_outer); + } + } else { + /* only inner vlan */ + u16 vid = le16_to_cpu(rx_desc->wb.vlan); + if (rx_ring->ring_flags & RNP_RING_STAGS_SUPPORT) { + if (rnp_test_staterr(rx_desc, + RNP_RXD_STAT_STAG)) { + if ((adapter->flags2 & + RNP_FLAG2_VLAN_STAGS_ENABLED) && + (vid == + adapter->stags_vid)) { + } else + __vlan_hwaccel_put_tag( + skb, + htons(ETH_P_8021AD), + vid); + + } else { + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vid); + } + } else { + __vlan_hwaccel_put_tag( + skb, htons(ETH_P_8021Q), vid); + } + } + } else { + u16 vid = le16_to_cpu(rx_desc->wb.vlan); + if (rx_ring->ring_flags & RNP_RING_STAGS_SUPPORT) { + if (rnp_test_staterr(rx_desc, + RNP_RXD_STAT_STAG)) { + __vlan_hwaccel_put_tag( + skb, htons(ETH_P_8021AD), vid); + } else { + __vlan_hwaccel_put_tag( + skb, 
htons(ETH_P_8021Q), vid); + } + } else { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vid); + } + } + rx_ring->rx_stats.vlan_remove++; + } + + skb_record_rx_queue(skb, rx_ring->queue_index); + + skb->protocol = eth_type_trans(skb, dev); +} + +static void rnp_rx_skb(struct rnp_q_vector *q_vector, struct sk_buff *skb) +{ + struct rnp_adapter *adapter = q_vector->adapter; + + if (!(adapter->flags & RNP_FLAG_IN_NETPOLL)) + napi_gro_receive(&q_vector->napi, skb); + else + netif_rx(skb); +} + +/* drop this packets if error */ +static bool rnp_check_csum_error(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, unsigned int size, + unsigned int *driver_drop_packets) +{ + bool err = false; + + struct net_device *netdev = rx_ring->netdev; + + if (netdev->features & NETIF_F_RXCSUM) { + if (unlikely( + rnp_test_staterr(rx_desc, RNP_RXD_STAT_ERR_MASK))) { + rx_debug_printk("rx error: VEB:%s mark:0x%x cmd:0x%x\n", + (rx_ring->q_vector->adapter->flags & + RNP_FLAG_SRIOV_ENABLED) ? + "On" : + "Off", + rx_desc->wb.mark, rx_desc->wb.cmd); + /* push this packet to stack if in promisc mode */ + rx_ring->rx_stats.csum_err++; + + if ((!(netdev->flags & IFF_PROMISC) && + (!(netdev->features & NETIF_F_RXALL)))) { + if (rx_ring->ring_flags & RNP_RING_CHKSM_FIX) { + err = true; + goto skip_fix; + } + if (unlikely(rnp_test_staterr( + rx_desc, + RNP_RXD_STAT_L4_MASK) && + (!(rx_desc->wb.rev1 & + RNP_RX_L3_TYPE_MASK)))) { + rx_ring->rx_stats.csum_err--; + goto skip_fix; + } + /* we ignore sctp csum erro small than 60 */ + if (unlikely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_SCTP_MASK))) { + if ((size > 60) && + (rx_desc->wb.rev1 & + RNP_RX_L3_TYPE_MASK)) { + err = true; + } else { + /* sctp less than 60 hw report err by mistake */ + rx_ring->rx_stats.csum_err--; + } + } else { + err = true; + } + } + } + } +skip_fix: + if (err) { + u32 ntc = rx_ring->next_to_clean + 1; + struct rnp_rx_buffer *rx_buffer; +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = + ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(RNP_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + + /* if eop add drop_packets */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + *driver_drop_packets = *driver_drop_packets + 1; + + /* we are reusing so sync this buffer for CPU use */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +#ifdef OPTM_WITH_LPAGE + rnp_put_rx_buffer(rx_ring, rx_buffer); +#else + rnp_put_rx_buffer(rx_ring, rx_buffer, NULL); +#endif + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + } + return err; +} + +/** + * rnp_rx_ring_reinit - just reinit rx_ring with new count in ->reset_count + * @rx_ring: rx descriptor ring to transact packets on + */ +static int rnp_rx_ring_reinit(struct rnp_adapter *adapter, struct rnp_ring *rx_ring) +{ + struct rnp_ring *temp_ring; + int err = 0; + + if (rx_ring->count == rx_ring->reset_count) + return 0; + /* stop rx queue */ + + temp_ring = vzalloc(array_size(1, sizeof(struct rnp_ring))); + if (!temp_ring) + goto err_setup; + + rnp_disable_rx_queue(adapter, rx_ring); + /* reinit for this ring */ + memcpy(temp_ring, rx_ring, sizeof(struct rnp_ring)); + /* setup new count */ + temp_ring->count = rx_ring->reset_count; + err = rnp_setup_rx_resources(temp_ring, adapter); + if (err) { + rnp_free_rx_resources(temp_ring); + vfree(temp_ring); + goto err_setup; + } + rnp_free_rx_resources(rx_ring); + memcpy(rx_ring, temp_ring, sizeof(struct rnp_ring)); + rnp_configure_rx_ring(adapter, rx_ring); + /* start rx */ + vfree(temp_ring); + ring_wr32(rx_ring, RNP_DMA_RX_START, 1); + return 0; +err_setup: + return -1; +} + +#ifndef OPTM_WITH_LPAGE +/** + * rnp_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void rnp_alloc_rx_buffers(struct rnp_ring *rx_ring, u16 cleaned_count) +{ + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + u16 bufsz; + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = RNP_RX_DESC(rx_ring, i); + + BUG_ON(rx_desc == NULL); + + bi = &rx_ring->rx_buffer_info[i]; + + BUG_ON(bi == NULL); + + i -= rx_ring->count; + bufsz = rnp_rx_bufsz(rx_ring); + + do { + if (!rnp_alloc_mapped_page(rx_ring, bi)) + break; + + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + bi->page_offset, bufsz, + DMA_FROM_DEVICE); + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->pkt_addr = + cpu_to_le64(bi->dma + bi->page_offset + fun_id); + /* clean dd */ + rx_desc->resv_cmd = 0; + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = RNP_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + /* clear the hdr_addr for the next_to_use descriptor */ + cleaned_count--; + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + rnp_update_rx_tail(rx_ring, i); +} +#endif + +static inline unsigned int rnp_rx_offset(struct rnp_ring *rx_ring) +{ + return ring_uses_build_skb(rx_ring) ? 
RNP_SKB_PAD : 0; +} + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on + * @cleaned_count: number of buffers to replace + **/ +void rnp_alloc_rx_buffers(struct rnp_ring *rx_ring, u16 cleaned_count) +{ + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *bi; + u16 i = rx_ring->next_to_use; + u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + u16 bufsz; + /* nothing to do */ + if (!cleaned_count) + return; + + rx_desc = RNP_RX_DESC(rx_ring, i); + + BUG_ON(rx_desc == NULL); + + bi = &rx_ring->rx_buffer_info[i]; + + BUG_ON(bi == NULL); + + i -= rx_ring->count; + bufsz = rnp_rx_bufsz(rx_ring); + + do { + int count = 1; + struct page *page; + + if (!rnp_alloc_mapped_page(rx_ring, bi, rx_desc, bufsz, fun_id)) + break; + page = bi->page; + + rx_desc->resv_cmd = 0; + + rx_desc++; + i++; + bi++; + + if (unlikely(!i)) { + rx_desc = RNP_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + + rx_desc->resv_cmd = 0; + + cleaned_count--; + + while (count < rx_ring->rx_page_buf_nums && cleaned_count) { + dma_addr_t dma; + + bi->page_offset = rx_ring->rx_per_buf_mem * count + + rnp_rx_offset(rx_ring); + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, + bi->page_offset, bufsz, + DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + if (dma_mapping_error(rx_ring->dev, dma)) { + printk("map second error\n"); + rx_ring->rx_stats.alloc_rx_page_failed++; + break; + } + + bi->dma = dma; + bi->page = page; + page_ref_add(page, USHRT_MAX); + bi->pagecnt_bias = USHRT_MAX; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, + 0, bufsz, + DMA_FROM_DEVICE); + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id); + /* clean dd */ + rx_desc->resv_cmd = 0; + + rx_desc++; + bi++; + i++; + if (unlikely(!i)) { + rx_desc = RNP_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buffer_info; + i -= rx_ring->count; + } + count++; + /* clear the hdr_addr for the next_to_use descriptor */ + cleaned_count--; + } + } while (cleaned_count); + + i += rx_ring->count; + + if (rx_ring->next_to_use != i) + rnp_update_rx_tail(rx_ring, i); +} + +#endif /* OPTM_WITH_LPAGE */ +/** + * rnp_get_headlen - determine size of header for RSC/LRO/GRO/FCOE + * @data: pointer to the start of the headers + * @max_len: total length of section to find headers in + * + * This function is meant to determine the length of headers that will + * be recognized by hardware for LRO, GRO, and RSC offloads. The main + * motivation of doing this is to only perform one pull for IPv4 TCP + * packets so that we can do basic things like calculating the gso_size + * based on the average data per packet. 
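+ * + * For example, a plain IPv4/TCP frame with no options resolves to 54 bytes: + * a 14-byte Ethernet header, a 20-byte IPv4 header and a 20-byte TCP header.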
+ **/ +static unsigned int rnp_get_headlen(unsigned char *data, unsigned int max_len) +{ + union { + unsigned char *network; + /* l2 headers */ + struct ethhdr *eth; + struct vlan_hdr *vlan; + /* l3 headers */ + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + __be16 protocol; + u8 nexthdr = 0; /* default to not TCP */ + u8 hlen; + + /* this should never happen, but better safe than sorry */ + if (max_len < ETH_HLEN) + return max_len; + + /* initialize network frame pointer */ + hdr.network = data; + + /* set first protocol and move network header forward */ + protocol = hdr.eth->h_proto; + hdr.network += ETH_HLEN; + + /* handle any vlan tag if present */ + if (protocol == htons(ETH_P_8021Q)) { + if ((hdr.network - data) > (max_len - VLAN_HLEN)) + return max_len; + + protocol = hdr.vlan->h_vlan_encapsulated_proto; + hdr.network += VLAN_HLEN; + } + + /* handle L3 protocols */ + if (protocol == htons(ETH_P_IP)) { + if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) + return max_len; + + /* access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[0] & 0x0F) << 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return hdr.network - data; + + /* record next protocol if header is present */ + if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) + nexthdr = hdr.ipv4->protocol; + } else if (protocol == htons(ETH_P_IPV6)) { + if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) + return max_len; + + /* record next protocol */ + nexthdr = hdr.ipv6->nexthdr; + hlen = sizeof(struct ipv6hdr); + } else { + return hdr.network - data; + } + + /* relocate pointer to start of L4 header */ + hdr.network += hlen; + + /* finally sort out TCP/UDP */ + if (nexthdr == IPPROTO_TCP) { + if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) + return max_len; + + /* access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (hdr.network[12] & 0xF0) >> 2; + + /* verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return hdr.network - data; + + hdr.network += hlen; + } else if (nexthdr == IPPROTO_UDP) { + if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) + return max_len; + + hdr.network += sizeof(struct udphdr); + } + + /* + * If everything has gone correctly hdr.network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. + */ + if ((hdr.network - data) < max_len) + return hdr.network - data; + else + return max_len; +} + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool rnp_is_non_eop(struct rnp_ring *rx_ring, union rnp_rx_desc *rx_desc) +{ + u32 ntc = rx_ring->next_to_clean + 1; + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(RNP_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + return false; + /* place skb in next buffer to be received */ + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + return true; +} + +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi, + union rnp_rx_desc *rx_desc, u16 bufsz, + u64 fun_id) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + page = dev_alloc_pages(RNP_ALLOC_PAGE_ORDER); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->page_offset = rnp_rx_offset(rx_ring); + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, bi->page_offset, bufsz, + DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, RNP_ALLOC_PAGE_ORDER); + printk("map failed\n"); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + bi->dma = dma; + bi->page = page; + bi->page_offset = rnp_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + rx_ring->rx_stats.alloc_rx_page++; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, bufsz, + DMA_FROM_DEVICE); + + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ + rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id); + + return true; +} + +#else +static bool rnp_alloc_mapped_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) + return true; + + page = dev_alloc_pages(rnp_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, rnp_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + /* + * if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, rnp_rx_pg_order(rx_ring)); + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + bi->dma = dma; + bi->page = page; + bi->page_offset = rnp_rx_offset(rx_ring); + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + rx_ring->rx_stats.alloc_rx_page++; + + return true; +} + +/** + * rnp_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. 
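+ * + * A frame that spans several receive buffers shows up as a chain of non-EOP + * descriptors; the partially built skb is parked in rx_buffer_info[ntc].skb + * until the descriptor carrying EOP completes the chain.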
+ **/ +static bool rnp_is_non_eop(struct rnp_ring *rx_ring, union rnp_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(RNP_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ + if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) + return false; + /* place skb in next buffer to be received */ + rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + return true; +} + +#endif +/** + * rnp_pull_tail - rnp specific version of skb_pull_tail + * @skb: pointer to current skb being adjusted + * + * This function is an rnp specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void rnp_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = rnp_get_headlen(va, RNP_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static bool rnp_check_src_mac(struct sk_buff *skb, struct net_device *netdev) +{ + char *data = (char *)skb->data; + bool ret = false; + struct netdev_hw_addr *ha; + + if (is_multicast_ether_addr(data)) { + if (0 == memcmp(data + netdev->addr_len, netdev->dev_addr, + netdev->addr_len)) { + dev_kfree_skb_any(skb); + ret = true; + } + /* if src mac equal own mac */ + netdev_for_each_uc_addr(ha, netdev) { + if (0 == memcmp(data + netdev->addr_len, ha->addr, + netdev->addr_len)) { + dev_kfree_skb_any(skb); + ret = true; + } + } + } + return ret; +} + +/** + * rnp_cleanup_headers - Correct corrupted or empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being fixed + * + * Check if the skb is valid. In the XDP case it will be an error pointer. + * Return true in this case to abort processing and advance to next + * descriptor. + * + * Check for corrupted packet headers caused by senders on the local L2 + * embedded NIC switch not setting up their Tx Descriptors right. These + * should be very rare. + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. 
+ **/ +static bool rnp_cleanup_headers(struct rnp_ring __maybe_unused *rx_ring, + union rnp_rx_desc *rx_desc, struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + struct rnp_adapter *adapter = netdev_priv(netdev); +#ifdef OPTM_WITH_LPAGE +#else + /* XDP packets use error pointer so abort at this point */ + if (IS_ERR(skb)) + return true; +#endif + + /* place header in linear portion of buffer */ + if (!skb_headlen(skb)) + rnp_pull_tail(skb); + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (!(rx_ring->ring_flags & RNP_RING_VEB_MULTI_FIX))) + return rnp_check_src_mac(skb, rx_ring->netdev); + else + return false; +} + +/** + * rnp_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void rnp_reuse_rx_page(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *old_buff) +{ + struct rnp_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; + + new_buff = &rx_ring->rx_buffer_info[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* + * Transfer page from old buffer to new buffer. + * Move each member individually to avoid possible store + * forwarding stalls and unnecessary copy of skb. + */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static inline bool rnp_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +static bool rnp_can_reuse_rx_page(struct rnp_rx_buffer *rx_buffer, int size) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + +#ifdef OPTM_WITH_LPAGE + return false; +#endif + /* avoid re-using remote pages */ + if (unlikely(rnp_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) + return false; +#else + + /* + * The last offset is a bit aggressive in that we assume the + * worst case of FCoE being enabled and using a 3K buffer. + * However this should have minimal impact as the 1K extra is + * still less than one buffer in size. + */ +#define RNP_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE)) + if (rx_buffer->page_offset > (RNP_LAST_OFFSET - size)) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +/** + * rnp_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @skb: sk_buff to place the data into + * @size: size of data + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. 
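+ * (With 4K pages the data lands in one half of the page; larger pages are + * carved into truesize-sized slices.)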
+ * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static void rnp_add_rx_frag(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct sk_buff *skb, unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(RNP_SKB_PAD + size) : + SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +#ifdef OPTM_WITH_LPAGE +static struct rnp_rx_buffer *rnp_get_rx_buffer(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + const unsigned int size) +{ + struct rnp_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + rx_buf_dump("rx buf", + page_address(rx_buffer->page) + rx_buffer->page_offset, + rx_desc->wb.len); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, 0, size, + DMA_FROM_DEVICE); + /* skip_sync: */ + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} +#else + +static struct rnp_rx_buffer *rnp_get_rx_buffer(struct rnp_ring *rx_ring, + union rnp_rx_desc *rx_desc, + struct sk_buff **skb, + const unsigned int size) +{ + struct rnp_rx_buffer *rx_buffer; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + *skb = rx_buffer->skb; + + rx_buf_dump("rx buf", + page_address(rx_buffer->page) + rx_buffer->page_offset, + rx_desc->wb.len); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} +#endif + +#ifdef OPTM_WITH_LPAGE +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer) +{ + struct rnp_q_vector *q_vector = rx_ring->q_vector; + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; + + if (rnp_can_reuse_rx_page(rx_buffer, hw->dma_split_size)) { + /* hand second half of page back to the ring */ + rnp_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_bufsz(rx_ring), DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; +} + +#else +static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct sk_buff *skb) +{ + struct rnp_q_vector *q_vector = rx_ring->q_vector; + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; + + if (rnp_can_reuse_rx_page(rx_buffer, hw->dma_split_size)) { + /* hand second half of page back to the ring */ + rnp_reuse_rx_page(rx_ring, rx_buffer); + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + } + + /* clear contents of rx_buffer */ + rx_buffer->page = NULL; + rx_buffer->skb = NULL; +} +#endif + +#ifdef OPTM_WITH_LPAGE +static struct sk_buff 
*rnp_construct_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + union rnp_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + prefetchw(skb->data); + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > RNP_RX_HDR_SIZE) + headlen = rnp_get_headlen(va, RNP_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + + if (size) { + + skb_add_rx_frag(skb, 0, rx_buffer->page, + (va + headlen) - page_address(rx_buffer->page), + size, truesize); + rx_buffer->page_offset += truesize; + } else { + + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static struct sk_buff *rnp_build_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + union rnp_rx_desc *rx_desc, + unsigned int size) +{ + void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(size + RNP_SKB_PAD); + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(va - RNP_SKB_PAD, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, RNP_SKB_PAD); + __skb_put(skb, size); + /* record DMA address if this is the start of a + * chain of buffers + */ + + return skb; +} + +#else + +static struct sk_buff *rnp_construct_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union rnp_rx_desc *rx_desc) +{ + unsigned int size = xdp->data_end - xdp->data; +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = + SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(xdp->data); +#if L1_CACHE_BYTES < 128 + prefetch(xdp->data + L1_CACHE_BYTES); +#endif + /* allocate a skb to store the frags */ + skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + prefetchw(skb->data); + + if (size > RNP_RX_HDR_SIZE) { + + skb_add_rx_frag(skb, 0, rx_buffer->page, + xdp->data - page_address(rx_buffer->page), size, + truesize); +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + memcpy(__skb_put(skb, size), xdp->data, + ALIGN(size, sizeof(long))); + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static struct sk_buff *rnp_build_skb(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + struct xdp_buff *xdp, + union rnp_rx_desc *rx_desc) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + void *va = xdp->data_meta; +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 
+ SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); +#endif + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + /* build an skb around the page buffer */ + skb = build_skb(xdp->data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* update pointers within the skb to store the data */ + skb_reserve(skb, xdp->data - xdp->data_hard_start); + __skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); + /* update buffer offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + + return skb; +} + +#endif + +#define RNP_XDP_PASS 0 +#define RNP_XDP_CONSUMED 1 +#define RNP_XDP_TX 2 + +#ifndef OPTM_WITH_LPAGE +static void rnp_rx_buffer_flip(struct rnp_ring *rx_ring, + struct rnp_rx_buffer *rx_buffer, + unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; + + rx_buffer->page_offset ^= truesize; +#else + unsigned int truesize = ring_uses_build_skb(rx_ring) ? + SKB_DATA_ALIGN(RNP_SKB_PAD + size) : + SKB_DATA_ALIGN(size); + + rx_buffer->page_offset += truesize; +#endif +} +#endif + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed. + **/ + +static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, + struct rnp_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + unsigned int err_packets = 0; + unsigned int driver_drop_packets = 0; + struct sk_buff *skb = rx_ring->skb; + struct rnp_adapter *adapter = q_vector->adapter; + u16 cleaned_count = rnp_desc_unused_rx(rx_ring); + + while (likely(total_rx_packets < budget)) { + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *rx_buffer; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= RNP_RX_BUFFER_WRITE) { + rnp_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean); + + rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc)); + rx_debug_printk(" dd set: %s\n", + (rx_desc->wb.cmd & RNP_RXD_STAT_DD) ? 
"Yes" : + "No"); + + if (!rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_debug_printk( + "queue:%d rx-desc:%d has-data len:%d next_to_clean %d\n", + rx_ring->rnp_queue_idx, rx_ring->next_to_clean, + rx_desc->wb.len, rx_ring->next_to_clean); + + /* handle padding */ + if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) && + (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) { + if (likely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_EOP))) { + size = le16_to_cpu(rx_desc->wb.len) - + le16_to_cpu(rx_desc->wb.padding_len); + } else { + size = le16_to_cpu(rx_desc->wb.len); + } + } else { + /* size should not zero */ + size = le16_to_cpu(rx_desc->wb.len); + } + + if (!size) + break; + + /* + * should check csum err + * maybe one packet use multiple descs + * no problems hw set all csum_err in multiple descs + * maybe BUG if the last sctp desc less than 60 + */ + if (rnp_check_csum_error(rx_ring, rx_desc, size, + &driver_drop_packets)) { + cleaned_count++; + err_packets++; + if (err_packets + total_rx_packets > budget) + break; + continue; + } + + rx_buffer = rnp_get_rx_buffer(rx_ring, rx_desc, size); + + if (skb) { + rnp_add_rx_frag(rx_ring, rx_buffer, skb, size); + } else if (ring_uses_build_skb(rx_ring)) { + skb = rnp_build_skb(rx_ring, rx_buffer, rx_desc, size); + } else { + skb = rnp_construct_skb(rx_ring, rx_buffer, rx_desc, + size); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + if (module_enable_ptp && adapter->ptp_rx_en && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED) + rnp_ptp_get_rx_hwstamp(adapter, rx_desc, skb); + + rnp_put_rx_buffer(rx_ring, rx_buffer); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (rnp_is_non_eop(rx_ring, rx_desc)) + continue; + + /* verify the packet layout is correct */ + if (rnp_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + rnp_process_skb_fields(rx_ring, rx_desc, skb); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + rnp_rx_skb(q_vector, skb); + skb = NULL; + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + rx_ring->rx_stats.driver_drop_packets += driver_drop_packets; + rx_ring->rx_stats.rx_clean_count += total_rx_packets; + rx_ring->rx_stats.rx_clean_times++; + if (rx_ring->rx_stats.rx_clean_times > 10) { + rx_ring->rx_stats.rx_clean_times = 0; + rx_ring->rx_stats.rx_clean_count = 0; + } + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (total_rx_packets >= budget) + rx_ring->rx_stats.poll_again_count++; + + return total_rx_packets; +} + +#else +/** + * rnp_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to 
process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed. + **/ +static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, + struct rnp_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + unsigned int err_packets = 0; + unsigned int driver_drop_packets = 0; + struct rnp_adapter *adapter = q_vector->adapter; + u16 cleaned_count = rnp_desc_unused_rx(rx_ring); + bool xdp_xmit = false; + struct xdp_buff xdp; + + xdp.data = NULL; + xdp.data_end = NULL; + + while (likely(total_rx_packets < budget)) { + union rnp_rx_desc *rx_desc; + struct rnp_rx_buffer *rx_buffer; + struct sk_buff *skb; + unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= RNP_RX_BUFFER_WRITE) { + rnp_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean); + + rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc)); + rx_debug_printk(" dd set: %s\n", + (rx_desc->wb.cmd & RNP_RXD_STAT_DD) ? "Yes" : + "No"); + + if (!rnp_test_staterr(rx_desc, RNP_RXD_STAT_DD)) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the + * descriptor has been written back + */ + dma_rmb(); + + rx_debug_printk( + "queue:%d rx-desc:%d has-data len:%d next_to_clean %d\n", + rx_ring->rnp_queue_idx, rx_ring->next_to_clean, + rx_desc->wb.len, rx_ring->next_to_clean); + + /* handle padding */ + if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) && + (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) { + if (likely(rnp_test_staterr(rx_desc, + RNP_RXD_STAT_EOP))) { + size = le16_to_cpu(rx_desc->wb.len) - + le16_to_cpu(rx_desc->wb.padding_len); + } else { + size = le16_to_cpu(rx_desc->wb.len); + } + } else { + /* size should not zero */ + size = le16_to_cpu(rx_desc->wb.len); + } + + if (!size) + break; + + /* + * should check csum err + * maybe one packet use multiple descs + * no problems hw set all csum_err in multiple descs + * maybe BUG if the last sctp desc less than 60 + */ + if (rnp_check_csum_error(rx_ring, rx_desc, size, + &driver_drop_packets)) { + cleaned_count++; + err_packets++; + if (err_packets + total_rx_packets > budget) + break; + continue; + } + + rx_buffer = rnp_get_rx_buffer(rx_ring, rx_desc, &skb, size); + + if (!skb) { + xdp.data = page_address(rx_buffer->page) + + rx_buffer->page_offset; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - rnp_rx_offset(rx_ring); + xdp.data_end = xdp.data + size; + } + + if (IS_ERR(skb)) { + if (PTR_ERR(skb) == -RNP_XDP_TX) { + xdp_xmit = true; + rnp_rx_buffer_flip(rx_ring, rx_buffer, size); + } else { + rx_buffer->pagecnt_bias++; + } + total_rx_packets++; + total_rx_bytes += size; + } else if (skb) { + rnp_add_rx_frag(rx_ring, rx_buffer, skb, size); + } else if (ring_uses_build_skb(rx_ring)) { + skb = rnp_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); + } else { + skb = rnp_construct_skb(rx_ring, rx_buffer, &xdp, + rx_desc); + } + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + rx_buffer->pagecnt_bias++; + break; + } + if (module_enable_ptp && adapter->ptp_rx_en && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED) + 
rnp_ptp_get_rx_hwstamp(adapter, rx_desc, skb); + + rnp_put_rx_buffer(rx_ring, rx_buffer, skb); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ + if (rnp_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + /* verify the packet layout is correct */ + if (rnp_cleanup_headers(rx_ring, rx_desc, skb)) { + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + continue; + } + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, timestamp, VLAN, and protocol */ + rnp_process_skb_fields(rx_ring, rx_desc, skb); + + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; + + rnp_rx_skb(q_vector, skb); + + /* update budget accounting */ + total_rx_packets++; + } + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + rx_ring->rx_stats.driver_drop_packets += driver_drop_packets; + rx_ring->rx_stats.rx_clean_count += total_rx_packets; + rx_ring->rx_stats.rx_clean_times++; + if (rx_ring->rx_stats.rx_clean_times > 10) { + rx_ring->rx_stats.rx_clean_times = 0; + rx_ring->rx_stats.rx_clean_count = 0; + } + u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + + if (total_rx_packets >= budget) + rx_ring->rx_stats.poll_again_count++; + return total_rx_packets; +} +#endif + +/** + * rnp_configure_msix - Configure MSI-X hardware + * @adapter: board private structure + * + * rnp_configure_msix sets up the hardware to properly generate MSI-X + * interrupts. + **/ +static void rnp_configure_msix(struct rnp_adapter *adapter) +{ + struct rnp_q_vector *q_vector; + int i; + + /* + * configure ring-msix Registers table + */ + for (i = 0; i < adapter->num_q_vectors; i++) { + struct rnp_ring *ring; + + q_vector = adapter->q_vector[i]; + rnp_for_each_ring(ring, q_vector->rx) { + rnp_set_ring_vector(adapter, ring->rnp_queue_idx, + q_vector->v_idx); + } + } +} + +/** + * rnp_update_itr - update the dynamic ITR value based on statistics + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. + **/ +static void rnp_update_itr(struct rnp_q_vector *q_vector, + struct rnp_ring_container *ring_container, int type) +{ + unsigned int itr = RNP_ITR_ADAPTIVE_MIN_USECS | + RNP_ITR_ADAPTIVE_LATENCY; + unsigned int avg_wire_size, packets, bytes; + unsigned int packets_old; + unsigned long next_update = jiffies; + u32 old_itr; + u16 add_itr, add = 0; + /* 0 is tx ;1 is rx */ + if (type) + old_itr = q_vector->itr_rx; + else + old_itr = q_vector->itr_tx; + + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. 
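+	 * Note that this function only computes the new value and stores it in
+	 * ring_container->itr; rnp_set_itr() is what later pushes it to the
+	 * hardware coalescing register via rnp_write_eitr_rx().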
+ */ + if (!ring_container->ring) + return; + + packets_old = ring_container->total_packets_old; + packets = ring_container->total_packets; + bytes = ring_container->total_bytes; + add_itr = ring_container->add_itr; + /* If Rx and there are 1 to 23 packets and bytes are less than + * 12112 assume insufficient data to use bulk rate limiting + * approach. Instead we will focus on simply trying to target + * receiving 8 times as much data in the next interrupt. + */ + if (!packets) + return; + + if (packets && packets < 24 && bytes < 12112) { + itr = RNP_ITR_ADAPTIVE_LATENCY; + + avg_wire_size = (bytes + packets * 24); + avg_wire_size = + clamp_t(unsigned int, avg_wire_size, 128, 12800); + + goto adjust_for_speed; + } + + /* Less than 48 packets we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 48) { + if (add_itr) { + if (packets_old < packets) { + itr = (old_itr >> 2) + RNP_ITR_ADAPTIVE_MIN_INC; + if (itr > RNP_ITR_ADAPTIVE_MAX_USECS) + itr = RNP_ITR_ADAPTIVE_MAX_USECS; + add = 1; + + if (packets < 8) + itr += RNP_ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & + RNP_ITR_ADAPTIVE_LATENCY; + + } else { + /* we add itr before ,but not get more packets */ + itr = (old_itr >> 2) - RNP_ITR_ADAPTIVE_MIN_INC; + if (itr < RNP_ITR_ADAPTIVE_MIN_USECS) + itr = RNP_ITR_ADAPTIVE_MIN_USECS; + } + + } else { + /* we not add before, add itr */ + add = 1; + itr = (old_itr >> 2) + RNP_ITR_ADAPTIVE_MIN_INC; + if (itr > RNP_ITR_ADAPTIVE_MAX_USECS) + itr = RNP_ITR_ADAPTIVE_MAX_USECS; + + /* If sample size is 0 - 7 we should probably switch + * to latency mode instead of trying to control + * things as though we are in bulk. + * + * Otherwise if the number of packets is less than 48 + * we should maintain whatever mode we are currently + * in. The range between 8 and 48 is the cross-over + * point between latency and bulk traffic. + */ + if (packets < 8) + itr += RNP_ITR_ADAPTIVE_LATENCY; + else + itr += ring_container->itr & + RNP_ITR_ADAPTIVE_LATENCY; + } + goto clear_counts; + } + + /* Between 48 and 96 is our "goldilocks" zone where we are working + * out "just right". Just report that our current ITR is good for us. + */ + if (packets < 96) { + itr = old_itr >> 2; + goto clear_counts; + } + /* If packet count is 96 or greater we are likely looking at a slight + * overrun of the delay we want. Try halving our delay to see if that + * will cut the number of packets in half per interrupt. + */ + if (packets < 256) { + itr = old_itr >> 3; + if (itr < RNP_ITR_ADAPTIVE_MIN_USECS) + itr = RNP_ITR_ADAPTIVE_MIN_USECS; + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since number + * of packets is 256 or greater. We are just going to have to compute + * a value and try to bring the count under control, though for smaller + * packet sizes there isn't much we can do as NAPI polling will likely + * be kicking in sooner rather than later. + */ + itr = RNP_ITR_ADAPTIVE_BULK; + + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. 
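+	 * As a rough, purely illustrative example of the mapping below: a
+	 * 1518 byte average frame falls into the 1084-1980 bucket, giving
+	 * 1518 * 5 + 22420 = 30010, which is roughly 117 usecs (about 8.5K
+	 * interrupts/sec) once the link-speed adjustment further down divides
+	 * the value by 256.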
+ */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 60) { + /* Start at 50k ints/sec */ + avg_wire_size = 5120; + } else if (avg_wire_size <= 316) { + /* 50K ints/sec to 16K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 2720; + } else if (avg_wire_size <= 1084) { + /* 16K ints/sec to 9.2K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 9.2K ints/sec to 8K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 8K ints/sec */ + avg_wire_size = 32256; + } + +adjust_for_speed: + /* Resultant value is 256 times larger than it needs to be. This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. + */ + switch (q_vector->adapter->link_speed) { + case RNP_LINK_SPEED_10GB_FULL: + case RNP_LINK_SPEED_100_FULL: + default: + itr += DIV_ROUND_UP(avg_wire_size, + RNP_ITR_ADAPTIVE_MIN_INC * 256) * + RNP_ITR_ADAPTIVE_MIN_INC; + break; + case RNP_LINK_SPEED_1GB_FULL: + case RNP_LINK_SPEED_10_FULL: + itr += DIV_ROUND_UP(avg_wire_size, + RNP_ITR_ADAPTIVE_MIN_INC * 64) * + RNP_ITR_ADAPTIVE_MIN_INC; + break; + } + + /* In the case of a latency specific workload only allow us to + * reduce the ITR by at most 2us. By doing this we should dial + * in so that our number of interrupts is no more than 2x the number + * of packets for the least busy workload. So for example in the case + * of a TCP worload the ack packets being received would set the + * the interrupt rate as they are a latency specific workload. + */ + if ((itr & RNP_ITR_ADAPTIVE_LATENCY) && itr < ring_container->itr) + itr = ring_container->itr - RNP_ITR_ADAPTIVE_MIN_INC; + +clear_counts: + /* write back value */ + ring_container->itr = itr; + + /* next update should occur within next jiffy */ + ring_container->next_update = next_update + 1; + + ring_container->total_bytes = 0; + ring_container->total_packets_old = packets; + ring_container->add_itr = add; + ring_container->total_packets = 0; +} + +/** + * rnp_write_eitr - write EITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + * + * This function is made to be called by ethtool and by the driver + * when it needs to update EITR registers at runtime. Hardware + * specific quirks/differences are taken care of here. 
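+ * The stored itr_rx value is shifted back down to microseconds and scaled by
+ * hw->usecstocount before being written to each RX ring's
+ * RNP_DMA_REG_RX_INT_DELAY_TIMER register.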
+ */ +static void rnp_write_eitr_rx(struct rnp_q_vector *q_vector) +{ + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; + u32 itr_reg = q_vector->itr_rx >> 2; + struct rnp_ring *ring; + + itr_reg = itr_reg * hw->usecstocount; + + rnp_for_each_ring(ring, q_vector->rx) { + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, itr_reg); + } +} + +static void rnp_set_itr(struct rnp_q_vector *q_vector) +{ + u32 new_itr_rx; + + rnp_update_itr(q_vector, &q_vector->rx, 1); + new_itr_rx = q_vector->rx.itr; + new_itr_rx &= RNP_ITR_ADAPTIVE_MASK_USECS; + new_itr_rx <<= 2; + if (new_itr_rx != q_vector->itr_rx) { + /* save the algorithm value here */ + q_vector->itr_rx = new_itr_rx; + rnp_write_eitr_rx(q_vector); + } +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +static inline void rnp_irq_enable_queues(struct rnp_adapter *adapter, + struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->rx) + rnp_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK)); +} + +static inline void rnp_irq_disable_queues(struct rnp_q_vector *q_vector) +{ + struct rnp_ring *ring; + + rnp_for_each_ring(ring, q_vector->tx) { + if (q_vector->new_rx_count != q_vector->old_rx_count) { + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = q_vector->new_rx_count; + } + rnp_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK)); + } +} + +/** + * rnp_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static inline void rnp_irq_enable(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) + rnp_irq_enable_queues(adapter, adapter->q_vector[i]); +} + +static irqreturn_t rnp_msix_other(int irq, void *data) +{ + struct rnp_adapter *adapter = data; + set_bit(__RNP_IN_IRQ, &adapter->state); + + rnp_msg_task(adapter); + + clear_bit(__RNP_IN_IRQ, &adapter->state); + + return IRQ_HANDLED; +} + +static void rnp_htimer_start(struct rnp_q_vector *q_vector) +{ + unsigned long ns = q_vector->irq_check_usecs * NSEC_PER_USEC / 2; + + hrtimer_start_range_ns(&q_vector->irq_miss_check_timer, ns_to_ktime(ns), + ns, HRTIMER_MODE_REL); +} + +static void rnp_htimer_stop(struct rnp_q_vector *q_vector) +{ + hrtimer_cancel(&q_vector->irq_miss_check_timer); +} + +static irqreturn_t rnp_msix_clean_rings(int irq, void *data) +{ + struct rnp_q_vector *q_vector = data; + + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_stop(q_vector); + + /* disabled interrupts (on this vector) for us */ + rnp_irq_disable_queues(q_vector); + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +static void update_rx_count(int cleaned, struct rnp_q_vector *q_vector) +{ + struct rnp_adapter *adapter = q_vector->adapter; + u32 link_speed = adapter->link_speed; + struct rnp_ring *ring; + + if (link_speed == RNP_LINK_SPEED_10GB_FULL) { + if ((cleaned) && (cleaned != q_vector->new_rx_count)) { + if (cleaned < 5) { + q_vector->small_times = 0; + q_vector->large_times = 0; + q_vector->too_small_times++; + if (q_vector->too_small_times >= 2) { + q_vector->new_rx_count = 1; + } + } else if (cleaned < 30) { + q_vector->too_small_times = 0; + q_vector->middle_time++; + /* count is 5 -30 */ + if (cleaned < q_vector->new_rx_count) { + /* change small */ + q_vector->small_times = 0; + q_vector->new_rx_count -= + (1 << 
(q_vector->large_times++)); + if (q_vector->new_rx_count < 0) + q_vector->new_rx_count = 1; + + } else { + q_vector->large_times = 0; + + if (cleaned > 30) { + if (q_vector->new_rx_count == + (cleaned - 4)) { + } else { + q_vector->new_rx_count += + (1 + << (q_vector->small_times++)); + } + /* should no more than q_vector */ + if (q_vector->new_rx_count >= cleaned) { + q_vector->new_rx_count = + cleaned - 4; + q_vector->small_times = 0; + } + + } else { + if (q_vector->new_rx_count == + (cleaned - 1)) { + } else { + q_vector->new_rx_count += + (1 + << (q_vector->small_times++)); + } + /* should no more than q_vector */ + if (q_vector->new_rx_count >= cleaned) { + q_vector->new_rx_count = + cleaned - 1; + q_vector->small_times = 0; + } + } + } + } else { + q_vector->too_small_times = 0; + q_vector->new_rx_count = + max_t(int, 64, adapter->rx_frames); + q_vector->small_times = 0; + q_vector->large_times = 0; + } + } + } else { + rnp_for_each_ring(ring, q_vector->rx) { + if (ring->ring_flags & RNP_RING_LOWER_ITR) { + q_vector->new_rx_count = 1; + } else { + q_vector->new_rx_count = 32; + } + } + + + } +} + +/** + * rnp_poll - NAPI Rx polling callback + * @napi: structure for representing this polling device + * @budget: how many packets driver is allowed to clean + * + * This function is used for legacy and MSI, NAPI mode + **/ +int rnp_poll(struct napi_struct *napi, int budget) +{ + struct rnp_q_vector *q_vector = + container_of(napi, struct rnp_q_vector, napi); + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_ring *ring; + int per_ring_budget, work_done = 0; + bool clean_complete = true; + int cleaned_total = 0; + + rnp_for_each_ring(ring, q_vector->tx) { + if (!rnp_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling + */ + if (q_vector->rx.count > 1) + per_ring_budget = max(budget / q_vector->rx.count, 1); + else + per_ring_budget = budget; + + rnp_for_each_ring(ring, q_vector->rx) { + int cleaned = 0; + /* this ring is waitting to reset rx_len*/ + /* avoid to deal this ring until reset done */ + if (likely(!(ring->ring_flags & RNP_RING_FLAG_DO_RESET_RX_LEN))) + cleaned = rnp_clean_rx_irq(q_vector, ring, + per_ring_budget); + /* check delay rx setup */ + if (unlikely(ring->ring_flags & + RNP_RING_FLAG_DELAY_SETUP_RX_LEN)) { + int head; + + rnp_disable_rx_queue(adapter, ring); + head = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + if (head < RNP_MIN_RXD) { + /* it is time to delay set */ + /* stop rx */ + rnp_disable_rx_queue(adapter, ring); + ring->ring_flags &= + (~RNP_RING_FLAG_DELAY_SETUP_RX_LEN); + ring->ring_flags |= + RNP_RING_FLAG_DO_RESET_RX_LEN; + } else { + + ring_wr32(ring, RNP_DMA_RX_START, 1); + } + } + work_done += cleaned; + cleaned_total += cleaned; + if (cleaned >= per_ring_budget) + clean_complete = false; + } + + /* force close irq */ + if (test_bit(__RNP_DOWN, &adapter->state)) { + clean_complete = true; + } + /* all work done, exit the polling mode */ + if (!(q_vector->vector_flags & RNP_QVECTOR_FLAG_ITR_FEATURE)) + update_rx_count(cleaned_total, q_vector); + + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. 
We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. + */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + if (!test_bit(__RNP_DOWN, &adapter->state)) { + rnp_irq_enable_queues(adapter, q_vector); + /* we need this to ensure irq start before tx start */ + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) { + smp_mb(); + rnp_for_each_ring(ring, q_vector->tx) { + rnp_check_restart_tx(q_vector, + ring); + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32( + ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + } + } + if (!test_bit(__RNP_DOWN, &adapter->state)) { + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_start(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } + } + return min(work_done, budget - 1); + } + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) { + rnp_for_each_ring(ring, q_vector->tx) { + rnp_check_restart_tx(q_vector, ring); + /* update rx count if need */ + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32(ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + } + } + return budget; + } + + if (likely(napi_complete_done(napi, work_done))) { + /* try to do itr handle */ + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_ITR_FEATURE) + rnp_set_itr(q_vector); + + if (!test_bit(__RNP_DOWN, &adapter->state)) { + rnp_irq_enable_queues(adapter, q_vector); + smp_mb(); + /* we need this to ensure irq start before tx start */ + if (q_vector->vector_flags & + RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) { + rnp_for_each_ring(ring, q_vector->tx) { + rnp_check_restart_tx(q_vector, ring); + if (q_vector->new_rx_count != + q_vector->old_rx_count) { + ring_wr32( + ring, + RNP_DMA_REG_RX_INT_DELAY_PKTCNT, + q_vector->new_rx_count); + q_vector->old_rx_count = + q_vector->new_rx_count; + } + } + } + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_start(q_vector); + + } + } + + return min(work_done, budget - 1); +} + +/** + * rnp_irq_affinity_notify - Callback for affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. + **/ +static void rnp_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct rnp_q_vector *q_vector = + container_of(notify, struct rnp_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +/** + * rnp_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. 
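+ * The driver keeps no per-notifier state, so the callback body below is
+ * intentionally empty.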
+ **/ +static void rnp_irq_affinity_release(struct kref *ref) +{ +} + +static irqreturn_t rnp_intr(int irq, void *data) +{ + struct rnp_adapter *adapter = data; + struct rnp_q_vector *q_vector = adapter->q_vector[0]; + if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) + rnp_htimer_stop(q_vector); + + /* disabled interrupts (on this vector) for us */ + rnp_irq_disable_queues(q_vector); + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule_irqoff(&q_vector->napi); + + /* handle other */ + rnp_msg_task(adapter); + + return IRQ_HANDLED; +} + +/** + * rnp_request_msix_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * + * rnp_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int rnp_request_msix_irqs(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + int i = 0; + DPRINTK(IFUP, INFO, "[%s] num_q_vectors:%d\n", __func__, + adapter->num_q_vectors); + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct rnp_q_vector *q_vector = adapter->q_vector[i]; + struct msix_entry *entry = + &adapter->msix_entries[i + adapter->q_vector_off]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d-%d", netdev->name, "TxRx", i, + q_vector->v_idx); + } else { + WARN(!(q_vector->tx.ring && q_vector->rx.ring), + "%s vector%d tx rx is null, v_idx:%d\n", + netdev->name, i, q_vector->v_idx); + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &rnp_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { + e_err(probe, + "%s:request_irq failed for MSIX interrupt:%d " + "Error: %d\n", + netdev->name, entry->vector, err); + goto free_queue_irqs; + } + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = rnp_irq_affinity_notify; + q_vector->affinity_notify.release = rnp_irq_affinity_release; + irq_set_affinity_notifier(entry->vector, + &q_vector->affinity_notify); + DPRINTK(IFUP, INFO, "[%s] set %s affinity_mask\n", __func__, + q_vector->name); + + irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); + } + + return 0; + +free_queue_irqs: + while (i) { + i--; + irq_set_affinity_hint( + adapter->msix_entries[i + adapter->q_vector_off].vector, + NULL); + free_irq( + adapter->msix_entries[i + adapter->q_vector_off].vector, + adapter->q_vector[i]); + irq_set_affinity_notifier( + adapter->msix_entries[i + adapter->q_vector_off].vector, + NULL); + irq_set_affinity_hint( + adapter->msix_entries[i + adapter->q_vector_off].vector, + NULL); + } + return err; +} + +static int rnp_free_msix_irqs(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct rnp_q_vector *q_vector = adapter->q_vector[i]; + struct msix_entry *entry = + &adapter->msix_entries[i + adapter->q_vector_off]; + + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(entry->vector, NULL); + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + DPRINTK(IFDOWN, INFO, "free irq %s\n", q_vector->name); + free_irq(entry->vector, q_vector); + } + + return 0; +} + +/** + * rnp_request_irq - initialize interrupts + * @adapter: board private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
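+ * The fallback order below is MSI-X (one vector per queue vector), then MSI,
+ * then legacy INTx requested as a shared interrupt.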
+ **/
+static int rnp_request_irq(struct rnp_adapter *adapter)
+{
+	int err;
+
+	if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+		pr_info("msix mode is used\n");
+		err = rnp_request_msix_irqs(adapter);
+
+	} else if (adapter->flags & RNP_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		pr_info("msi mode is used\n");
+		err = request_irq(adapter->pdev->irq, rnp_intr, 0,
+				  adapter->netdev->name, adapter);
+		adapter->hw.mbx.other_irq_enabled = true;
+	} else {
+		pr_info("legacy mode is used\n");
+		err = request_irq(adapter->pdev->irq, rnp_intr, IRQF_SHARED,
+				  adapter->netdev->name, adapter);
+		adapter->hw.mbx.other_irq_enabled = true;
+	}
+
+	if (err)
+		e_err(probe, "request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+static void rnp_free_irq(struct rnp_adapter *adapter)
+{
+	if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+		rnp_free_msix_irqs(adapter);
+	} else if (adapter->flags & RNP_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		free_irq(adapter->pdev->irq, adapter);
+		adapter->hw.mbx.other_irq_enabled = false;
+	} else {
+		free_irq(adapter->pdev->irq, adapter);
+		adapter->hw.mbx.other_irq_enabled = false;
+	}
+
+}
+
+/**
+ * rnp_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void rnp_irq_disable(struct rnp_adapter *adapter)
+{
+	int i, j;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		rnp_irq_disable_queues(adapter->q_vector[i]);
+		j = i + adapter->q_vector_off;
+		if (adapter->flags & RNP_FLAG_MSIX_ENABLED)
+			synchronize_irq(adapter->msix_entries[j].vector);
+		else
+			synchronize_irq(adapter->pdev->irq);
+	}
+}
+
+int rnp_setup_tx_maxrate(struct rnp_ring *tx_ring, u64 max_rate,
+			 int samples_1sec)
+{
+	/* set hardware sampling interval to 1 second */
+	ring_wr32(tx_ring, RNP_DMA_REG_TX_FLOW_CTRL_TM, samples_1sec);
+	ring_wr32(tx_ring, RNP_DMA_REG_TX_FLOW_CTRL_TH, max_rate);
+
+	return 0;
+}
+
+/**
+ * rnp_tx_maxrate_own - set the maximum per-queue bitrate
+ * @adapter: board private structure
+ * @queue_index: Tx queue to set
+ *
+ * The desired maximum transmit bitrate (in Mbit/s) is taken from
+ * adapter->max_rate[queue_index].
+ **/
+static int rnp_tx_maxrate_own(struct rnp_adapter *adapter, int queue_index)
+{
+	struct rnp_ring *tx_ring = adapter->tx_ring[queue_index];
+	u64 real_rate = 0;
+	u32 maxrate = adapter->max_rate[queue_index];
+
+	if (!maxrate)
+		return rnp_setup_tx_maxrate(tx_ring, 0,
+					    adapter->hw.usecstocount * 1000000);
+	/* convert the rate from Mbit/s to bytes/s */
+	real_rate = ((u64)maxrate * 1024 * 1024) / 8;
+	rnp_setup_tx_maxrate(tx_ring, real_rate,
+			     adapter->hw.usecstocount * 1000000);
+
+	return 0;
+}
+
+/**
+ * rnp_configure_tx_ring - Configure Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
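+ * The ring is stopped, the descriptor base address and length are programmed,
+ * the software head/tail are resynchronized with hardware, fetch and
+ * interrupt-coalescing parameters are set, and the ring is restarted once the
+ * hardware reports TX_READY (unless RNP_RING_SKIP_TX_START is set).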
+ **/ +void rnp_configure_tx_ring(struct rnp_adapter *adapter, struct rnp_ring *ring) +{ + struct rnp_hw *hw = &adapter->hw; + + /* disable queue to avoid issues while updating state */ + + if (!(ring->ring_flags & RNP_RING_SKIP_TX_START)) + ring_wr32(ring, RNP_DMA_TX_START, 0); + + ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO, (u32)ring->dma); + ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI, + (u32)(((u64)ring->dma) >> 32) | (hw->pfvfnum << 24)); + ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_LEN, ring->count); + + ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + ring->next_to_use = ring->next_to_clean; + ring->tail = ring->ring_addr + RNP_DMA_REG_TX_DESC_BUF_TAIL; + rnp_wr_reg(ring->tail, ring->next_to_use); + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + ring_wr32(ring, RNP_DMA_REG_TX_DESC_FETCH_CTRL, + (8 << 0) /* max_water_flow */ + | (8 << 16) + /* max-num_descs_peer_read */ + ); + + } else { + ring_wr32(ring, RNP_DMA_REG_TX_DESC_FETCH_CTRL, + (64 << 0) /* max_water_flow */ + | (TSRN10_TX_DEFAULT_BURST << 16) + /* max-num_descs_peer_read */ + ); + } + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER, + adapter->tx_usecs * hw->usecstocount); + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_PKTCNT, adapter->tx_frames); + + rnp_tx_maxrate_own(adapter, ring->queue_index); + if (adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) { + ring->atr_sample_rate = adapter->atr_sample_rate; + ring->atr_count = 0; + set_bit(__RNP_TX_FDIR_INIT_DONE, &ring->state); + } else { + ring->atr_sample_rate = 0; + } + /* initialize XPS */ + if (!test_and_set_bit(__RNP_TX_XPS_INIT_DONE, &ring->state)) { + struct rnp_q_vector *q_vector = ring->q_vector; + + if (q_vector) + netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } + + clear_bit(__RNP_HANG_CHECK_ARMED, &ring->state); + + if (!(ring->ring_flags & RNP_RING_SKIP_TX_START)) { + /* should wait tx_ready before open tx start */ + int timeout = 0; + u32 status = 0; + + do { + status = ring_rd32(ring, RNP_DMA_TX_READY); + usleep_range(100, 200); + timeout++; + rnp_dbg("wait %d tx ready to 1\n", ring->rnp_queue_idx); + } while ((status != 1) && (timeout < 100)); + + if (timeout >= 100) + printk("wait tx ready timeout\n"); + ring_wr32(ring, RNP_DMA_TX_START, 1); + } +} + +/** + * rnp_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. 
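+ * TX AXI DMA access is enabled first, after which every TX ring is configured
+ * through rnp_configure_tx_ring().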
+ **/ +static void rnp_configure_tx(struct rnp_adapter *adapter) +{ + u32 i, dma_axi_ctl; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + /* dma_axi_en.tx_en must be before Tx queues are enabled */ + dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN); + dma_axi_ctl |= TX_AXI_RW_EN; + dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < (adapter->num_tx_queues); i++) + rnp_configure_tx_ring(adapter, adapter->tx_ring[i]); +} + +void rnp_disable_rx_queue(struct rnp_adapter *adapter, struct rnp_ring *ring) +{ + ring_wr32(ring, RNP_DMA_RX_START, 0); +} + +void rnp_configure_rx_ring(struct rnp_adapter *adapter, struct rnp_ring *ring) +{ + struct rnp_hw *hw = &adapter->hw; + u64 desc_phy = ring->dma; + u16 q_idx = ring->queue_index; + + /* disable queue to avoid issues while updating state */ + rnp_disable_rx_queue(adapter, ring); + + /* set descripts registers*/ + ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO, (u32)desc_phy); + ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI, + ((u32)(desc_phy >> 32)) | (hw->pfvfnum << 24)); + ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_LEN, ring->count); + + ring->tail = ring->ring_addr + RNP_DMA_REG_RX_DESC_BUF_TAIL; + ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + ring->next_to_use = ring->next_to_clean; + + if (ring->ring_flags & RNP_RING_SCATER_SETUP) + ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH, 96); + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + ring_wr32(ring, RNP_DMA_REG_RX_DESC_FETCH_CTRL, + 0 | (TSRN10_RX_DEFAULT_LINE << 0) /* rx-desc-flow */ + | (TSRN10_RX_DEFAULT_BURST << 16) + /* max-read-desc-cnt */ + ); + + } else { + ring_wr32(ring, RNP_DMA_REG_RX_DESC_FETCH_CTRL, + 0 | (TSRN10_RX_DEFAULT_LINE << 0) /* rx-desc-flow */ + | (TSRN10_RX_DEFAULT_BURST << 16) + /* max-read-desc-cnt */ + ); + } + /* setup rx drop */ + if (adapter->rx_drop_status & BIT(q_idx)) { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, + adapter->drop_time); + } else { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0); + } + + if (ring->ring_flags & RNP_RING_IRQ_MISS_FIX) + ring_wr32(ring, RNP_DMA_INT_TRIG, TX_INT_MASK | RX_INT_MASK); + + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + adapter->rx_usecs * hw->usecstocount); + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, adapter->rx_frames); + rnp_alloc_rx_buffers(ring, rnp_desc_unused_rx(ring)); +} + +static void rnp_configure_virtualization(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + u32 ring, vfnum; + u64 real_rate = 0; + int i, vf_ring, j; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + hw->ops.set_sriov_status(hw, false); + return; + } + + /* Enable only the PF's pool for Tx/Rx */ + + if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB) { + dma_wr32(dma, RNP_DMA_CONFIG, + dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; + } + ring = adapter->tx_ring[0]->rnp_queue_idx; + hw->ops.set_sriov_status(hw, true); + + /* store vfnum */ + vfnum = hw->max_vfs - 1; + hw->veb_ring = ring; + hw->vfnum = vfnum; + /* use last-vf's table entry. 
*/
+	adapter->vf_num_for_pf = 0x80 | vfnum;
+
+	/* setup vf tx rate here */
+	for (i = 0; i < adapter->num_vfs; i++) {
+		real_rate = (adapter->vfinfo[i].tx_rate * 1024 * 128) /
+			    hw->sriov_ring_limit;
+		for (j = 0; j < hw->sriov_ring_limit; j++) {
+			vf_ring = rnp_get_vf_ringnum(hw, i, j);
+			rnp_setup_ring_maxrate(adapter, vf_ring, real_rate);
+		}
+	}
+}
+
+static void rnp_set_rx_buffer_len(struct rnp_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN * 3;
+	struct rnp_ring *rx_ring;
+	int i;
+
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		clear_bit(__RNP_RX_3K_BUFFER, &rx_ring->state);
+		clear_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+		set_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#ifdef OPTM_WITH_LPAGE
+		rx_ring->rx_page_buf_nums = RNP_PAGE_BUFFER_NUMS(rx_ring);
+		rx_ring->rx_per_buf_mem =
+			ALIGN((rnp_rx_offset(rx_ring) + rnp_rx_bufsz(rx_ring) +
+			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+			       RNP_RX_HWTS_OFFSET),
+			      1024);
+#endif
+
+	}
+}
+
+/**
+ * rnp_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void rnp_configure_rx(struct rnp_adapter *adapter)
+{
+	struct rnp_hw *hw = &adapter->hw;
+	struct rnp_dma_info *dma = &hw->dma;
+	int i;
+	u32 rxctrl = 0, dma_axi_ctl;
+
+	/* disable receives while setting up the descriptors */
+	/* set_rx_buffer_len must be called before ring initialization */
+	rnp_set_rx_buffer_len(adapter);
+
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnp_configure_rx_ring(adapter, adapter->rx_ring[i]);
+
+	if (adapter->num_rx_queues > 0) {
+		wr32(hw, RNP_ETH_DEFAULT_RX_RING,
+		     adapter->rx_ring[0]->rnp_queue_idx);
+	}
+
+	/* enable all receives */
+	rxctrl |= 0;
+
+	dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN);
+	dma_axi_ctl |= RX_AXI_RW_EN;
+	dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl);
+}
+
+static int rnp_vlan_rx_add_vid(struct net_device *netdev,
+			       __always_unused __be16 proto, u16 vid)
+{
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	struct rnp_hw *hw = &adapter->hw;
+	bool veb_setup = true;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	if (sriov_flag) {
+		if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) {
+			if (hw->ops.set_veb_vlan_mask) {
+				if (hw->ops.set_veb_vlan_mask(
+					    hw, vid, hw->vfnum, true) != 0) {
+					dev_err(&adapter->pdev->dev,
+						"out of vlan entries in sriov mode\n");
+					return -EACCES;
+				}
+			}
+		} else {
+			/* in sriov mode */
+			if ((vid) && (adapter->vf_vlan) &&
+			    (vid != adapter->vf_vlan)) {
+				dev_err(&adapter->pdev->dev,
+					"only 1 vlan in sriov mode\n");
+				return -EACCES;
+			}
+
+			/* update this */
+			if (vid) {
+				adapter->vf_vlan = vid;
+				if (hw->ops.set_vf_vlan_mode) {
+					if (hw->feature_flags &
+					    RNP_NET_FEATURE_VF_FIXED)
+						hw->ops.set_vf_vlan_mode(
+							hw, vid, 0, true);
+					else
+						hw->ops.set_vf_vlan_mode(
+							hw, vid, hw->vfnum,
+							true);
+				}
+			}
+		}
+	}
+
+	if (vid) {
+		if (proto == htons(ETH_P_8021Q)) {
+			adapter->vlan_count++;
+		}
+	}
+
+	if (vid < VLAN_N_VID) {
+		if (proto != htons(ETH_P_8021Q)) {
+			set_bit(vid, adapter->active_vlans_stags);
+			veb_setup = false;
+		} else {
+			set_bit(vid, adapter->active_vlans);
+		}
+	}
+
+	if (hw->ops.set_vlan_filter) {
+		
hw->ops.set_vlan_filter(hw, vid, true, + (sriov_flag && veb_setup)); + } + + return 0; +} + +static int rnp_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int i; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + bool veb_setup = true; + + if (!vid) + return 0; + + if (sriov_flag) { + if (vid) { + int true_remove = 1; + /* clean this */ + adapter->vf_vlan = 0; + for (i = 0; i < adapter->num_vfs; i++) { + if (vid == adapter->vfinfo[i].vf_vlan) { + true_remove = 0; + } + if (vid == adapter->vfinfo[i].pf_vlan) { + true_remove = 0; + } + /* setup pf_vlan */ + } + /* if no vf use this vid */ + if (true_remove) { + /* if remove stags */ + if (proto != htons(ETH_P_8021Q)) { + veb_setup = false; + if (!test_bit(vid, + adapter->active_vlans)) + true_remove = 1; + } else { + /* if remove ctags */ + if (!test_bit(vid, + adapter->active_vlans_stags)) + true_remove = 1; + } + /* if no other tags use this vid */ + if (true_remove) { + if ((adapter->flags2 & + RNP_FLAG2_VLAN_STAGS_ENABLED) && + (vid != adapter->stags_vid)) + /* should also check stags */ + hw->ops.set_vlan_filter( + hw, vid, false, + veb_setup); + } + } + /* always clean veb */ + hw->ops.set_vlan_filter(hw, vid, true, false); + + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & + RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode(hw, vid, 0, + false); + else + hw->ops.set_vf_vlan_mode( + hw, vid, hw->vfnum, false); + } + + /* remove veb */ + if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) { + if (hw->ops.set_veb_vlan_mask) { + hw->ops.set_veb_vlan_mask( + hw, vid, hw->vfnum, false); + } + } + } + } else { + int true_remove = 0; + if (proto != htons(ETH_P_8021Q)) { + veb_setup = false; + if (!test_bit(vid, adapter->active_vlans)) + true_remove = 1; + + } else { + /* if remove ctags */ + if (!test_bit(vid, adapter->active_vlans_stags)) + true_remove = 1; + } + if (true_remove) { + if ((adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) && + (vid != adapter->stags_vid)) + /* should also check stags */ + hw->ops.set_vlan_filter(hw, vid, false, false); + } + } + + /* need set ncsi vfta again */ + if (hw->ncsi_en) + eth->ops.ncsi_set_vfta(eth); + + if (vid) { + if (proto == htons(ETH_P_8021Q)) { + /* should check proto todo */ + adapter->vlan_count--; + } + } + if (proto == htons(ETH_P_8021Q)) + clear_bit(vid, adapter->active_vlans); + /* clear stags */ + if (proto != htons(ETH_P_8021Q)) + clear_bit(vid, adapter->active_vlans_stags); + return 0; +} + +/** + * rnp_vlan_strip_disable - helper to disable hw vlan stripping + * @adapter: driver data + */ +static void rnp_vlan_strip_disable(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *tx_ring; + struct rnp_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->num_rx_queues; i++) { + tx_ring = adapter->rx_ring[i]; + hw->ops.set_vlan_strip(hw, tx_ring->rnp_queue_idx, false); + } +} + +/** + * rnp_vlan_strip_enable - helper to enable hw vlan stripping + * @adapter: driver data + */ +static void rnp_vlan_strip_enable(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *tx_ring; + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + tx_ring = adapter->rx_ring[i]; + + hw->ops.set_vlan_strip(hw, tx_ring->rnp_queue_idx, true); + } +} + +static void rnp_remove_vlan(struct rnp_adapter *adapter) +{ + adapter->vlan_count = 0; +} + +static void rnp_restore_vlan(struct 
rnp_adapter *adapter) +{ + u16 vid; + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int i; + + /* in stags open, set stags_vid to vlan filter */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + eth->ops.set_vfta(eth, adapter->stags_vid, true); + + rnp_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + rnp_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); + /* config vlan mode for mac */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + for (i = 0; i < adapter->num_vfs; i++) { + vid = adapter->vfinfo[i].vf_vlan; + if (vid) { + rnp_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); + } + vid = adapter->vfinfo[i].pf_vlan; + if (vid) { + rnp_vlan_rx_add_vid(adapter->netdev, + htons(ETH_P_8021Q), vid); + } + } + } +} + +/** + * rnp_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_method entry point is called whenever the unicast/multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast and + * promiscuous mode. + **/ +void rnp_set_rx_mode(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + netdev_features_t features; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + hw->ops.set_rx_mode(hw, netdev, sriov_flag); + + if (sriov_flag) { + if (!test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { + rnp_restore_vf_macvlans(adapter); + rnp_restore_vf_macs(adapter); + clear_bit(__RNP_USE_VFINFI, &adapter->state); + } + } + + features = netdev->features; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); + /* stags */ + /* only do this if hw support stags */ + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + if (features & NETIF_F_HW_VLAN_STAG_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); + } +} + +static void rnp_napi_enable_all(struct rnp_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_enable(&adapter->q_vector[q_idx]->napi); +} + +static void rnp_napi_disable_all(struct rnp_adapter *adapter) +{ + int q_idx; + + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_disable(&adapter->q_vector[q_idx]->napi); +} + +static void rnp_fdir_filter_restore(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct rnp_fdir_filter *filter; + + spin_lock(&adapter->fdir_perfect_lock); + + /* enable tcam if set tcam mode */ + if (adapter->fdir_mode == fdir_mode_tcam) { + wr32(hw, RNP_ETH_TCAM_EN, 1); + wr32(hw, RNP_TOP_ETH_TCAM_CONFIG_ENABLE, 1); + wr32(hw, RNP_TCAM_CACHE_ENABLE, 0); + } + + /* setup ntuple */ + hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, + fdir_node) { + if ((!filter->vf_num) && + (filter->action != ACTION_TO_MPE)) { + rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &filter->filter, filter->hw_idx, + (filter->action == RNP_FDIR_DROP_QUEUE) ? + RNP_FDIR_DROP_QUEUE : + adapter->rx_ring[filter->action] + ->rnp_queue_idx, + (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO) ? + true : + false); + } else { + rnp_fdir_write_perfect_filter( + adapter->fdir_mode, hw, &filter->filter, + filter->hw_idx, + (filter->action == RNP_FDIR_DROP_QUEUE) ? 
+ RNP_FDIR_DROP_QUEUE : + filter->action, + (adapter->priv_flags & + RNP_PRIV_FLAG_REMAP_PRIO) ? + true : + false); + } + } + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static void rnp_configure_pause(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + + hw->ops.set_pause_mode(hw); +} + +static void rnp_vlan_stags_flag(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + + /* stags is added */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + hw->ops.set_txvlan_mode(hw, false); + else + hw->ops.set_txvlan_mode(hw, true); +} + +static void rnp_configure(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); +#if (PAGE_SIZE < 8192) + struct rnp_ring *rx_ring = adapter->rx_ring[0]; +#endif + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + rnp_configure_virtualization(adapter); + + /* Unicast, Multicast and Promiscuous mode set */ + rnp_set_rx_mode(adapter->netdev); + /* reconfigure hw */ + hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); + + /* in sriov mode vlan is not reset */ + rnp_restore_vlan(adapter); + + /* we first update rx_offset */ +#if (PAGE_SIZE < 8192) + /* setup before calculate dma_split_size */ + rnp_set_rx_buffer_len(adapter); + hw->dma_split_size = rnp_rx_pg_size(rx_ring) / 2 - + rnp_rx_offset(rx_ring) - + sizeof(struct skb_shared_info); +#else + /* if mtu more than this */ + hw->dma_split_size = SKB_WITH_OVERHEAD(PAGE_SIZE) - RNP_SKB_PAD; + + if (hw->max_length_current >= 1536) + hw->dma_split_size = min_t(int, hw->dma_split_size, hw->max_length_current); + /* up to 16-asign */ + hw->dma_split_size = (hw->dma_split_size + 15) & (~0xf); +#endif + hw->ops.update_hw_info(hw); + + /* init setup pause */ + rnp_configure_pause(adapter); + rnp_vlan_stags_flag(adapter); + rnp_init_rss_key(adapter); + rnp_init_rss_table(adapter); + + if (adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) { + + } else if (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE) { + + rnp_fdir_filter_restore(adapter); + + } + + /* setup vxlan match mode */ + if (adapter->priv_flags & RNP_PRIV_FLAG_VXLAN_INNER_MATCH) + hw->ops.set_vxlan_mode(hw, true); + else + hw->ops.set_vxlan_mode(hw, false); + rnp_configure_tx(adapter); + rnp_configure_rx(adapter); +} + +static inline bool rnp_is_sfp(struct rnp_hw *hw) +{ + return true; +} + +/** + * rnp_sfp_link_config - set up SFP+ link + * @adapter: pointer to private adapter struct + **/ +static void rnp_sfp_link_config(struct rnp_adapter *adapter) +{ + /* + * We are assuming the worst case scenario here, and that + * is that an SFP was inserted/removed after the reset + * but before SFP detection was enabled. 
As such the best + * solution is to just start searching as soon as we start + */ + adapter->flags2 |= RNP_FLAG2_SFP_NEEDS_RESET; +} + +static void rnp_up_complete(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int i; + + rnp_configure_msix(adapter); + + /* enable the optics for n10 SFP+ fiber */ + if (hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + smp_mb__before_atomic(); + clear_bit(__RNP_DOWN, &adapter->state); + rnp_napi_enable_all(adapter); + + if (rnp_is_sfp(hw)) { + rnp_sfp_link_config(adapter); + } + /*clear any pending interrupts*/ + rnp_irq_enable(adapter); + + /* enable transmits */ + netif_tx_start_all_queues(adapter->netdev); + + /* enable rx transmit */ + for (i = 0; i < adapter->num_rx_queues; i++) + ring_wr32(adapter->rx_ring[i], RNP_DMA_RX_START, 1); + + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problems + */ + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->service_timer, jiffies); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + hw->link = 0; + rnp_mbx_force_speed(hw, hw->saved_force_link_speed); + hw->ops.set_mbx_link_event(hw, 1); + hw->ops.set_mbx_ifup(hw, 1); +} + +void rnp_reinit_locked(struct rnp_adapter *adapter) +{ + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ + while (test_and_set_bit(__RNP_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + rnp_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter + * back up to give the VFs time to respond to the reset. The + * two second wait is based upon the watchdog timer cycle in + * the VF driver. + */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + msleep(2000); + rnp_up(adapter); + + clear_bit(__RNP_RESETTING, &adapter->state); +} + +void rnp_up(struct rnp_adapter *adapter) +{ + /* hardware has been reset, we need to reload some things */ + rnp_configure(adapter); + rnp_up_complete(adapter); +} + +void rnp_reset(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int err; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + rnp_logd(LOG_ADPT_STAT, "%s\n", __func__); + + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state)) + usleep_range(1000, 2000); + + /* clear all SFP and link config related flags while holding SFP_INIT */ + adapter->flags2 &= + ~(RNP_FLAG2_SEARCH_FOR_SFP | RNP_FLAG2_SFP_NEEDS_RESET); + adapter->flags &= ~RNP_FLAG_NEED_LINK_CONFIG; + + err = hw->ops.init_hw(hw); + + if (err) { + e_dev_err("init_hw: Hardware Error: err:%d. line:%d\n", err, + __LINE__); + } + + clear_bit(__RNP_IN_SFP_INIT, &adapter->state); + + /* reprogram the RAR[0] in case user changed it. 
*/ + hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); + + if (module_enable_ptp) { + if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED && + (adapter->ptp_rx_en || adapter->ptp_tx_en)) + rnp_ptp_reset(adapter); + } +} + +#ifdef OPTM_WITH_LPAGE +/** + * rnp_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void rnp_clean_rx_ring(struct rnp_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct rnp_rx_buffer *rx_buffer; + + if (!rx_ring->rx_buffer_info) + return; + + if (rx_ring->skb) + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + rx_buffer = &rx_ring->rx_buffer_info[i]; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + if (!rx_buffer->page) + goto next_buffer; + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, + rnp_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + /* now this page is not used */ + rx_buffer->page = NULL; +next_buffer: + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +#else +/** + * rnp_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void rnp_clean_rx_ring(struct rnp_ring *rx_ring) +{ + u16 i = rx_ring->next_to_clean; + struct rnp_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; + + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
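+		 * The page is then unmapped and any remaining driver
+		 * references are dropped via __page_frag_cache_drain().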
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, + rnp_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, + rnp_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + RNP_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_buffer->page, + rx_buffer->pagecnt_bias); + /* now this page is not used */ + rx_buffer->page = NULL; + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} +#endif + +/** + * rnp_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void rnp_clean_tx_ring(struct rnp_ring *tx_ring) +{ + unsigned long size; + u16 i = tx_ring->next_to_clean; + struct rnp_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + + BUG_ON(tx_ring == NULL); + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + while (i != tx_ring->next_to_use) { + struct rnp_tx_desc *eop_desc, *tx_desc; + + dev_kfree_skb_any(tx_buffer->skb); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + eop_desc = tx_buffer->next_to_watch; + tx_desc = RNP_TX_DESC(tx_ring, i); + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = RNP_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + size = sizeof(struct rnp_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * rnp_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void rnp_clean_all_rx_rings(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + rnp_clean_rx_ring(adapter->rx_ring[i]); +} + +/** + * rnp_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void rnp_clean_all_tx_rings(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + rnp_clean_tx_ring(adapter->tx_ring[i]); +} + +static void rnp_fdir_filter_exit(struct rnp_adapter *adapter) +{ + struct hlist_node *node2; + struct rnp_fdir_filter *filter; + struct rnp_hw *hw = &adapter->hw; + + spin_lock(&adapter->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list, + fdir_node) { + /* call earase to hw */ + rnp_fdir_erase_perfect_filter(adapter->fdir_mode, hw, + &filter->filter, filter->hw_idx); + + hlist_del(&filter->fdir_node); + kfree(filter); + } + adapter->fdir_filter_count = 0; + adapter->layer2_count = hw->layer2_count; + adapter->tuple_5_count = hw->tuple5_count; + + spin_unlock(&adapter->fdir_perfect_lock); +} + +static int rnp_xmit_nop_frame_ring(struct rnp_adapter *adapter, + 
struct rnp_ring *tx_ring) +{ + u16 i = tx_ring->next_to_use; + struct rnp_tx_desc *tx_desc; + + tx_desc = RNP_TX_DESC(tx_ring, i); + + /* set length to 0 */ + tx_desc->blen_mac_ip_len = 0; + tx_desc->vlan_cmd = cpu_to_le32(RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS); + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + /* update tail */ + rnp_wr_reg(tx_ring->tail, 0); + return 0; +} + +static void print_status(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int i; + struct rnp_dma_info *dma = &hw->dma; + + printk("eth 0x120 %x\n", eth_rd32(eth, 0x120)); + printk("eth 0x124 %x\n", eth_rd32(eth, 0x124)); + + for (i = 0x300; i < 0x318; i = i + 4) { + printk("eth 0x%x %x\n", i, eth_rd32(eth, i)); + } + + printk("eth 0x%x %x\n", 0x98, eth_rd32(eth, 0x98)); + printk("eth 0x%x %x\n", 0x220, eth_rd32(eth, 0x220)); + + for (i = 0x138; i < 0x158; i = i + 4) { + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + } + i = 0x170; + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + i = 0x174; + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + for (i = 0x214; i < 0x220; i = i + 4) { + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + } + for (i = 0x234; i < 0x270; i = i + 4) { + printk("dma 0x%x %x\n", i, dma_rd32(dma, i)); + } +} + +void rnp_down(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + int i; + int free_tx_ealay = 0; + int err = 0; + /* signal that we are down to the interrupt handler */ + set_bit(__RNP_DOWN, &adapter->state); + if ((!hw->ncsi_en) && (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))) + hw->ops.set_mac_rx(hw, false); + + if (hw->ncsi_en) { + /* if we false down, we should set mac loopback */ + hw->ops.set_mac_rx(hw, false); + } + + hw->ops.set_mbx_link_event(hw, 0); + hw->ops.set_mbx_ifup(hw, 0); + + if (hw->ops.clean_link) + hw->ops.clean_link(hw); + + /* if carrier on before */ + if (netif_carrier_ok(netdev)) + e_info(drv, "NIC Link is Down\n"); + + rnp_remove_vlan(adapter); + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + usleep_range(5000, 10000); + /* if we have tx desc to clean */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *tx_ring = adapter->tx_ring[i]; + + if (!(tx_ring->ring_flags & RNP_RING_SKIP_TX_START)) { + int head, tail; + int timeout = 0; + + free_tx_ealay = 1; + + head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + + while (head != tail) { + usleep_range(30000, 50000); + head = ring_rd32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_HEAD); + tail = ring_rd32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_TAIL); + timeout++; + if ((timeout >= 100) && (timeout < 101)) { + e_info(drv, + "wait tx done timeout %x %x\n", + head, tail); + /* set this to hold hardware status */ + adapter->priv_flags |= + RNP_PRIV_FLGA_TEST_TX_HANG; + print_status(adapter); + err = 1; + } + if (timeout >= 200) { + e_info(drv, + "200 wait tx done timeout %x %x\n", + head, tail); + print_status(adapter); + break; + } + } + } + } + + { + int time = 0; + + while (test_bit(__RNP_SERVICE_CHECK, &adapter->state)) { + usleep_range(100, 200); + time++; + if (time > 100) + break; + } + } + + if (free_tx_ealay) + 
rnp_clean_all_tx_rings(adapter); + + usleep_range(2000, 5000); + + rnp_irq_disable(adapter); + + usleep_range(5000, 10000); + + netif_tx_disable(netdev); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rnp_disable_rx_queue(adapter, adapter->rx_ring[i]); + /* only handle when srio enable and change rx length setup */ + if ((((adapter->flags & RNP_FLAG_SRIOV_ENABLED) || + hw->ncsi_en)) && + (adapter->rx_ring[i]->ring_flags & + RNP_RING_FLAG_CHANGE_RX_LEN)) { + int head; + struct rnp_ring *ring = adapter->rx_ring[i]; + + head = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + adapter->rx_ring[i]->ring_flags &= + (~RNP_RING_FLAG_CHANGE_RX_LEN); + /* we should delay setup rx length to + * wait rx head to 0 + */ + if (head >= adapter->rx_ring[i]->reset_count) { + adapter->rx_ring[i]->ring_flags |= + RNP_RING_FLAG_DELAY_SETUP_RX_LEN; + /* set sw count to head + 1*/ + adapter->rx_ring[i]->temp_count = head + 1; + } + } + /* only down without rx_len change no need handle */ + } + /* call carrier off first to avoid false dev_watchdog timeouts */ + + rnp_napi_disable_all(adapter); + + adapter->flags2 &= + ~(RNP_FLAG2_FDIR_REQUIRES_REINIT | RNP_FLAG2_RESET_REQUESTED); + adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE; + + if (adapter->num_vfs) { + /* ping all the active vfs to let them know we are going down */ + rnp_ping_all_vfs(adapter); + /* Disable all VFTE/VFRE TX/RX */ + rnp_disable_tx_rx(adapter); + } + { + + u32 status = 0; + int timeout = 0; + + do { + status = rd32(hw, RNP_DMA_AXI_READY); + usleep_range(100, 200); + timeout++; + } while ((status != 0xffff) && (timeout < 100)); + + if (timeout > 100) + printk("wait axi ready timeout\n"); + } + + + /* disable transmits in the hardware now that interrupts are off */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *tx_ring = adapter->tx_ring[i]; + int count = tx_ring->count; + int head; + int timeout = 0; + + /* 1. stop queue */ + if (!err) { + if (!(tx_ring->ring_flags & RNP_RING_SKIP_TX_START)) { + ring_wr32(tx_ring, RNP_DMA_TX_START, 0); + } + } + /* 2. 
try to set tx head to 0 in sriov mode + * since we don't reset + */ + if ((((adapter->flags & RNP_FLAG_SRIOV_ENABLED) || + hw->ncsi_en)) && + (!(tx_ring->ring_flags & RNP_RING_SIZE_CHANGE_FIX))) { + /* only do this if hw not support tx head to zero auto */ + /* n10 should wait tx_ready */ + u32 status = 0; + + timeout = 0; + do { + status = ring_rd32(tx_ring, RNP_DMA_TX_READY); + usleep_range(100, 200); + timeout++; + rnp_dbg("wait %d tx ready to 1\n", + tx_ring->rnp_queue_idx); + } while ((status != 1) && (timeout < 100)); + + if (timeout >= 100) + printk("wait tx ready timeout\n"); + + head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + if (head != 0) { + u16 next_to_use = tx_ring->next_to_use; + + if (head != (count - 1)) { + /* 3 set len head + 1 */ + ring_wr32(tx_ring, + RNP_DMA_REG_TX_DESC_BUF_LEN, + head + 1); + } + /* set to use head */ + tx_ring->next_to_use = head; + /* 4 send a len zero packet */ + rnp_xmit_nop_frame_ring(adapter, tx_ring); + if (!(tx_ring->ring_flags & + RNP_RING_SKIP_TX_START)) + ring_wr32(tx_ring, RNP_DMA_TX_START, 1); + /* 5 wait head to zero */ + while ((head != 0) && (timeout < 1000)) { + head = ring_rd32( + tx_ring, + RNP_DMA_REG_TX_DESC_BUF_HEAD); + usleep_range(10000, 20000); + timeout++; + } + if (timeout >= 1000) { + printk("[%s] Wait Tx-ring %d head to zero time out\n", + netdev->name, + tx_ring->rnp_queue_idx); + } + /* 6 stop queue again*/ + if (!(tx_ring->ring_flags & + RNP_RING_SKIP_TX_START)) + ring_wr32(tx_ring, RNP_DMA_TX_START, 0); + /* 7 write back next_to_use maybe hw hang */ + tx_ring->next_to_use = next_to_use; + } + } + } + if (!err) { + if (!pci_channel_offline(adapter->pdev)) { + if (hw->ncsi_en == 0 && + !(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + rnp_reset(adapter); + } + } + } + /* power down the optics for n10 SFP+ fiber */ + if (hw->ops.disable_tx_laser) + hw->ops.disable_tx_laser(hw); + + if (!free_tx_ealay) + rnp_clean_all_tx_rings(adapter); + + rnp_clean_all_rx_rings(adapter); + + if (hw->ncsi_en) + hw->ops.set_mac_rx(hw, true); +} + +/** + * rnp_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void rnp_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + /* Do the reset outside of interrupt context */ + int i; + bool real_tx_hang = false; + +#define TX_TIMEO_LIMIT 16000 + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *tx_ring = adapter->tx_ring[i]; + + if (check_for_tx_hang(tx_ring) && rnp_check_tx_hang(tx_ring)) + real_tx_hang = true; + } + + if (real_tx_hang) { + printk("hw real hang!!!!"); + /* Do the reset outside of interrupt context */ + rnp_tx_timeout_reset(adapter); + } else { + printk("Fake Tx hang detected with timeout of %d " + "seconds\n", + netdev->watchdog_timeo / HZ); + + /* fake Tx hang - increase the kernel timeout */ + if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) + netdev->watchdog_timeo *= 2; + } +} + +/** + * rnp_sw_init - Initialize general software structures (struct rnp_adapter) + * @adapter: board private structure to initialize + * + * rnp_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
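+ *
+ * It also seeds the default interrupt coalescing parameters and the
+ * default Tx/Rx ring sizes (RNP_DEFAULT_TXD/RNP_DEFAULT_RXD) and
+ * leaves the adapter in the __RNP_DOWN state.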
+ **/ +static int rnp_sw_init(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned int rss = 0, fdir; + int rss_limit = num_online_cpus(); + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + /* if this hw can setup msix count */ + rss = min_t(int, adapter->max_ring_pair_counts, rss_limit); + rss = min_t(int, rss, + hw->mac.max_msix_vectors - adapter->num_other_vectors); + adapter->ring_feature[RING_F_RSS].limit = + min_t(int, rss, adapter->max_ring_pair_counts); + + adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_CAPABLE; + adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_ENABLE; + + adapter->max_q_vectors = hw->max_msix_vectors - 1; + adapter->atr_sample_rate = 20; + + fdir = min_t(int, adapter->max_q_vectors, rss_limit); + adapter->ring_feature[RING_F_FDIR].limit = fdir; + + if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER) { + spin_lock_init(&adapter->fdir_perfect_lock); + adapter->fdir_filter_count = 0; + adapter->fdir_mode = hw->fdir_mode; + /* fdir_pballoc not from zero, so add 2 */ + adapter->fdir_pballoc = 2 + hw->layer2_count + hw->tuple5_count; + adapter->layer2_count = hw->layer2_count; + adapter->tuple_5_count = hw->tuple5_count; + } + + /* itr sw setup here */ + adapter->sample_interval = 10; + adapter->adaptive_rx_coal = 1; + adapter->adaptive_tx_coal = 1; + adapter->auto_rx_coal = 0; + adapter->napi_budge = 64; + /* set default work limits */ + adapter->tx_work_limit = RNP_DEFAULT_TX_WORK; + adapter->rx_usecs = RNP_PKT_TIMEOUT; + adapter->rx_usecs_usr_set = RNP_PKT_TIMEOUT; + adapter->rx_frames = RNP_RX_PKT_POLL_BUDGET; + adapter->tx_usecs = RNP_PKT_TIMEOUT_TX; + adapter->tx_usecs_usr_set = RNP_PKT_TIMEOUT_TX; + adapter->tx_frames = RNP_TX_PKT_POLL_BUDGET; + + /* set default ring sizes */ + adapter->tx_ring_item_count = RNP_DEFAULT_TXD; + adapter->rx_ring_item_count = RNP_DEFAULT_RXD; + + set_bit(__RNP_DOWN, &adapter->state); + + return 0; +} + +/** + * rnp_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +int rnp_setup_tx_resources(struct rnp_ring *tx_ring, + struct rnp_adapter *adapter) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + size = sizeof(struct rnp_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct rnp_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) + goto err; + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + DPRINTK(IFUP, INFO, + "TxRing:%d, vector:%d ItemCounts:%d " + "desc:%p(0x%llx) node:%d\n", + tx_ring->rnp_queue_idx, tx_ring->q_vector->v_idx, + tx_ring->count, tx_ring->desc, (u64)tx_ring->dma, numa_node); + return 0; + +err: + 
vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); + return -ENOMEM; +} + +/** + * rnp_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int rnp_setup_all_tx_resources(struct rnp_adapter *adapter) +{ + int i, err = 0; + + tx_dbg("adapter->num_tx_queues:%d, adapter->tx_ring[0]:%p\n", + adapter->num_tx_queues, adapter->tx_ring[0]); + + for (i = 0; i < (adapter->num_tx_queues); i++) { + BUG_ON(adapter->tx_ring[i] == NULL); + err = rnp_setup_tx_resources(adapter->tx_ring[i], adapter); + if (!err) + continue; + + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + goto err_setup_tx; + } + + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + rnp_free_tx_resources(adapter->tx_ring[i]); + return err; +} + +/** + * rnp_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +int rnp_setup_rx_resources(struct rnp_ring *rx_ring, + struct rnp_adapter *adapter) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + BUG_ON(rx_ring == NULL); + + size = sizeof(struct rnp_rx_buffer) * rx_ring->count; + + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + if (!rx_ring->rx_buffer_info) + goto err; + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union rnp_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) + goto err; + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + DPRINTK(IFUP, INFO, + "RxRing:%d, vector:%d ItemCounts:%d " + "desc:%p(0x%llx) node:%d\n", + rx_ring->rnp_queue_idx, rx_ring->q_vector->v_idx, + rx_ring->count, rx_ring->desc, (u64)rx_ring->dma, numa_node); + + return 0; +err: + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); + return -ENOMEM; +} + +/** + * rnp_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
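+ *
+ * In SR-IOV operation the hardware RX descriptor head may already be
+ * larger than the requested ring count; such a ring is temporarily
+ * sized to head + 1 and flagged with RNP_RING_FLAG_DELAY_SETUP_RX_LEN
+ * so that its real length can be restored later.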
+ * + * Return 0 on success, negative on failure + **/ +static int rnp_setup_all_rx_resources(struct rnp_adapter *adapter) +{ + int i, err = 0; + u32 head; + + for (i = 0; i < adapter->num_rx_queues; i++) { + BUG_ON(adapter->rx_ring[i] == NULL); + + /* should check count and head */ + /* in sriov condition may head large than count */ + head = ring_rd32(adapter->rx_ring[i], + RNP_DMA_REG_RX_DESC_BUF_HEAD); + if (unlikely(head >= adapter->rx_ring[i]->count)) { + dbg("[%s] Ring %d head large than count", + adapter->netdev->name, + adapter->rx_ring[i]->rnp_queue_idx); + adapter->rx_ring[i]->ring_flags |= + RNP_RING_FLAG_DELAY_SETUP_RX_LEN; + adapter->rx_ring[i]->reset_count = + adapter->rx_ring[i]->count; + adapter->rx_ring[i]->count = head + 1; + } + err = rnp_setup_rx_resources(adapter->rx_ring[i], adapter); + if (!err) + continue; + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + rnp_free_rx_resources(adapter->rx_ring[i]); + return err; +} + +/** + * rnp_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +void rnp_free_tx_resources(struct rnp_ring *tx_ring) +{ + BUG_ON(tx_ring == NULL); + + rnp_clean_tx_ring(tx_ring); + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * rnp_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +static void rnp_free_all_tx_resources(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < (adapter->num_tx_queues); i++) + rnp_free_tx_resources(adapter->tx_ring[i]); +} + +/** + * rnp_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +void rnp_free_rx_resources(struct rnp_ring *rx_ring) +{ + BUG_ON(rx_ring == NULL); + + rnp_clean_rx_ring(rx_ring); + + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * rnp_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +static void rnp_free_all_rx_resources(struct rnp_adapter *adapter) +{ + int i; + + for (i = 0; i < (adapter->num_rx_queues); i++) + if (adapter->rx_ring[i]->desc) + rnp_free_rx_resources(adapter->rx_ring[i]); +} + +/** + * rnp_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int rnp_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN * 2; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < hw->min_length) || (max_frame > hw->max_length)) + return -EINVAL; + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + + if (netdev->mtu == new_mtu) + return 0; + + /* 
must set new MTU before calling down or up */ + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + rnp_reinit_locked(adapter); + + rnp_msg_post_status(adapter, PF_SET_MTU); + + return 0; +} + +/** + * rnp_tx_maxrate - callback to set the maximum per-queue bitrate + * @netdev: network interface device structure + * @queue_index: Tx queue to set + * @maxrate: desired maximum transmit bitrate Mbps + **/ +__maybe_unused static int rnp_tx_maxrate(struct net_device *netdev, + int queue_index, u32 maxrate) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_ring *tx_ring = adapter->tx_ring[queue_index]; + u64 real_rate = 0; + + adapter->max_rate[queue_index] = maxrate; + rnp_dbg("%s: queue:%d maxrate:%d\n", __func__, queue_index, maxrate); + if (!maxrate) + return rnp_setup_tx_maxrate(tx_ring, 0, + adapter->hw.usecstocount * 1000000); + /* we need turn it to bytes/s */ + real_rate = ((u64)maxrate * 1024 * 1024) / 8; + rnp_setup_tx_maxrate(tx_ring, real_rate, + adapter->hw.usecstocount * 1000000); + + return 0; +} + +/** + * rnp_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +int rnp_open(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int err; + + DPRINTK(IFUP, INFO, "ifup\n"); + + /* disallow open during test */ + if (test_bit(__RNP_TESTING, &adapter->state)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = rnp_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = rnp_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + rnp_configure(adapter); + + err = rnp_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); + if (err) + goto err_set_queues; + + if (module_enable_ptp) + rnp_ptp_register(adapter); + + rnp_up_complete(adapter); + + return 0; + +err_set_queues: + rnp_free_irq(adapter); +err_req_irq: + rnp_free_all_rx_resources(adapter); +err_setup_rx: + rnp_free_all_tx_resources(adapter); +err_setup_tx: + hw->ops.set_mbx_ifup(hw, 0); + rnp_reset(adapter); + + return err; +} + +/** + * rnp_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
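+ *
+ * In SR-IOV mode a link-down status message is also posted to all VFs
+ * and the function waits briefly so that every VF can pick it up.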
+ **/ +int rnp_close(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + DPRINTK(IFDOWN, INFO, "ifdown\n"); + + if (module_enable_ptp) + rnp_ptp_unregister(adapter); + + rnp_down(adapter); + rnp_free_irq(adapter); + rnp_free_all_tx_resources(adapter); + rnp_free_all_rx_resources(adapter); + + /* if in sriov mode send link down to all vfs */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + adapter->link_up = 0; + adapter->link_up_old = 0; + rnp_msg_post_status(adapter, PF_SET_LINK_STATUS); + /* wait all vf get this status */ + usleep_range(5000, 10000); + } + + return 0; +} + +#ifdef CONFIG_PM +static int rnp_resume(struct pci_dev *pdev) +{ + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + struct rnp_hw *hw = &adapter->hw; + + printk("call rnp_resume\n"); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. + */ + pci_save_state(pdev); + + err = pcim_enable_device(pdev); + if (err) { + e_dev_err("Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_wake_from_d3(pdev, false); + + switch (hw->hw_type) { + case rnp_hw_n10: + case rnp_hw_n400: + case rnp_hw_n20: + case rnp_hw_uv440: + wait_mbx_init_done(hw); +#ifdef FIX_VF_BUG + rnp_wr_reg(adapter->io_addr_bar0 + + (0x7982fc & (pci_resource_len(pdev, 0) - 1)), + 0); +#endif + break; + default: + + break; + } + + rtnl_lock(); + + err = rnp_init_interrupt_scheme(adapter); + if (!err) + err = register_mbx_irq(adapter); + + if (hw->ops.driver_status) + hw->ops.driver_status(hw, false, rnp_driver_suspuse); + + rnp_reset(adapter); + + if (!err && netif_running(netdev)) + err = rnp_open(netdev); + + rtnl_unlock(); + + if (err) + return err; + + netif_device_attach(netdev); + + return 0; +} +#endif + +static int __rnp_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + rtnl_lock(); + if (netif_running(netdev)) { + rnp_down(adapter); + rnp_free_irq(adapter); + rnp_free_all_tx_resources(adapter); + rnp_free_all_rx_resources(adapter); + /* should consider sriov mode ? 
*/ + } + rtnl_unlock(); + + if (hw->ops.driver_status) + hw->ops.driver_status(hw, true, rnp_driver_suspuse); + + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif + if (wufc) { + rnp_set_rx_mode(netdev); + + /* enable the optics for n10 SFP+ fiber as we can WoL */ + if (hw->ops.enable_tx_laser) + hw->ops.enable_tx_laser(hw); + + /* turn on all-multi mode if wake on multicast is enabled */ + } + + if (hw->ops.setup_wol) + hw->ops.setup_wol(hw, adapter->wol); + + pci_wake_from_d3(pdev, !!wufc); + *enable_wake = !!wufc; + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int rnp_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + printk("call rnp_suspend\n"); + + retval = __rnp_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} +#endif /* CONFIG_PM */ + +__maybe_unused static void rnp_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __rnp_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +/** + * rnp_update_stats - Update the board statistics counters. + * @adapter: board private structure + **/ +void rnp_update_stats(struct rnp_adapter *adapter) +{ + struct net_device_stats *net_stats = &adapter->netdev->stats; + struct rnp_hw *hw = &adapter->hw; + struct rnp_hw_stats *hw_stats = &adapter->hw_stats; + int i; + struct rnp_ring *ring; + u64 hw_csum_rx_error = 0; + u64 hw_csum_rx_good = 0; + + net_stats->tx_packets = 0; + net_stats->tx_bytes = 0; + net_stats->rx_packets = 0; + net_stats->rx_bytes = 0; + net_stats->rx_dropped = 0; + net_stats->rx_errors = 0; + hw_stats->vlan_strip_cnt = 0; + hw_stats->vlan_add_cnt = 0; + + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + for (i = 0; i < adapter->num_q_vectors; i++) { + rnp_for_each_ring(ring, adapter->q_vector[i]->rx) { + hw_csum_rx_error += ring->rx_stats.csum_err; + hw_csum_rx_good += ring->rx_stats.csum_good; + hw_stats->vlan_strip_cnt += ring->rx_stats.vlan_remove; + net_stats->rx_packets += ring->stats.packets; + net_stats->rx_bytes += ring->stats.bytes; + } + rnp_for_each_ring(ring, adapter->q_vector[i]->tx) { + hw_stats->vlan_add_cnt += ring->tx_stats.vlan_add; + net_stats->tx_packets += ring->stats.packets; + net_stats->tx_bytes += ring->stats.bytes; + } + } + net_stats->rx_errors += hw_csum_rx_error; + + hw->ops.update_hw_status(hw, hw_stats, net_stats); + + adapter->hw_csum_rx_error = hw_csum_rx_error; + adapter->hw_csum_rx_good = hw_csum_rx_good; + net_stats->rx_errors = hw_csum_rx_error; +} + +/** + * rnp_check_hang_subtask - check for hung queues and dropped interrupts + * @adapter: pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. Secondly it sets the + * bits needed to check for TX hangs. As a result we should immediately + * determine if a hang has occurred. 
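+ *
+ * As a fallback for missed interrupts, rings whose next_to_clean has
+ * stalled while work is still outstanding get their NAPI context
+ * rescheduled directly; a completed RX descriptor reporting a zero
+ * length escalates to a RNP_FLAG2_RESET_REQUESTED adapter reset.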
+ */ +static void rnp_check_hang_subtask(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *tx_ring; + u64 tx_next_to_clean_old; + u64 tx_next_to_clean; + u64 tx_next_to_use; + struct rnp_ring *rx_ring; + u64 rx_next_to_clean_old; + u64 rx_next_to_clean; + union rnp_rx_desc *rx_desc; + + /* If we're down or resetting, just bail */ + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + set_bit(__RNP_SERVICE_CHECK, &adapter->state); + + /* Force detection of hung controller */ + if (netif_carrier_ok(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_check_for_tx_hang(adapter->tx_ring[i]); + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + tx_ring = adapter->tx_ring[i]; + /* get the last next_to_clean */ + tx_next_to_clean_old = tx_ring->tx_stats.tx_next_to_clean; + tx_next_to_clean = tx_ring->next_to_clean; + tx_next_to_use = tx_ring->next_to_use; + + /* if we have tx desc to clean */ + if (tx_next_to_use != tx_next_to_clean) { + if (tx_next_to_clean == tx_next_to_clean_old) { + tx_ring->tx_stats.tx_equal_count++; + if (tx_ring->tx_stats.tx_equal_count > 2) { + /* maybe not so good */ + struct rnp_q_vector *q_vector = + tx_ring->q_vector; + + /* stats */ + if (q_vector->rx.ring || + q_vector->tx.ring) + napi_schedule_irqoff( + &q_vector->napi); + + tx_ring->tx_stats.tx_irq_miss++; + tx_ring->tx_stats.tx_equal_count = 0; + } + } else { + tx_ring->tx_stats.tx_equal_count = 0; + } + /* update */ + /* record this next_to_clean */ + tx_ring->tx_stats.tx_next_to_clean = tx_next_to_clean; + } else { + /* clean record to -1 */ + tx_ring->tx_stats.tx_next_to_clean = -1; + } + } + + /* check if we lost rx irq */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + /* get the last next_to_clean */ + rx_next_to_clean_old = rx_ring->rx_stats.rx_next_to_clean; + /* get the now clean */ + rx_next_to_clean = rx_ring->next_to_clean; + + if (rx_next_to_clean == rx_next_to_clean_old) { + rx_ring->rx_stats.rx_equal_count++; + + if ((rx_ring->rx_stats.rx_equal_count > 2) && + (rx_ring->rx_stats.rx_equal_count < 5)) { + rx_desc = RNP_RX_DESC(rx_ring, + rx_ring->next_to_clean); + if (rnp_test_staterr(rx_desc, + RNP_RXD_STAT_DD)) { + int size; + struct rnp_q_vector *q_vector = + rx_ring->q_vector; + + size = le16_to_cpu(rx_desc->wb.len); + if (size) { + rx_ring->rx_stats.rx_irq_miss++; + if (q_vector->rx.ring || + q_vector->tx.ring) + napi_schedule_irqoff( + &q_vector->napi); + } else { + printk("set RNP_FLAG2_RESET_REQUESTED since size is 0\n"); + adapter->flags2 |= + RNP_FLAG2_RESET_REQUESTED; + } + } + } + if (rx_ring->rx_stats.rx_equal_count > 1000) + rx_ring->rx_stats.rx_equal_count = 0; + } else { + rx_ring->rx_stats.rx_equal_count = 0; + } + rx_ring->rx_stats.rx_next_to_clean = rx_next_to_clean; + } + + clear_bit(__RNP_SERVICE_CHECK, &adapter->state); +} + +static void update_ring_delay(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + adapter->rx_usecs * hw->usecstocount); + ring = adapter->tx_ring[i]; + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER, + adapter->tx_usecs * hw->usecstocount); + } +} + +/** + * rnp_watchdog_update_link - update the link status + * @adapter: pointer to the device adapter structure + * @link_speed: pointer to a u32 to store the link_speed + **/ +static void 
rnp_watchdog_update_link(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + u32 link_speed = adapter->link_speed; + bool link_up = adapter->link_up; + bool duplex = adapter->duplex_old; + bool flow_rx = true, flow_tx = true; + + if (!(adapter->flags & RNP_FLAG_NEED_LINK_UPDATE)) + return; + + if (hw->ops.check_link) { + hw->ops.check_link(hw, &link_speed, &link_up, &duplex, false); + } else { + /* always assume link is up, if no check link function */ + link_speed = RNP_LINK_SPEED_10GB_FULL; + link_up = true; + } + + if (link_up || time_after(jiffies, (adapter->link_check_timeout + + RNP_TRY_LINK_TIMEOUT))) { + adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE; + } + adapter->link_up = link_up; + adapter->link_speed = link_speed; + adapter->duplex_old = duplex; + + if (hw->ops.get_pause_mode) + hw->ops.get_pause_mode(hw); + switch (hw->fc.current_mode) { + case rnp_fc_none: + flow_rx = false; + flow_tx = false; + break; + case rnp_fc_tx_pause: + flow_rx = false; + flow_tx = true; + + break; + case rnp_fc_rx_pause: + flow_rx = true; + flow_tx = false; + break; + + case rnp_fc_full: + flow_rx = true; + flow_tx = true; + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + } + /* if we detect changed link setup new */ + if (adapter->link_up) { + if (hw->ops.set_mac_speed) + hw->ops.set_mac_speed(hw, true, link_speed, duplex); + /* we should also update pause mode */ + if (hw->ops.set_pause_mode) + hw->ops.set_pause_mode(hw); + + e_info(drv, "NIC Link is Up %s, %s Duplex, Flow Control: %s\n", + (link_speed == RNP_LINK_SPEED_40GB_FULL ? + "40 Gbps" : + (link_speed == RNP_LINK_SPEED_25GB_FULL ? + "25 Gbps" : + (link_speed == RNP_LINK_SPEED_10GB_FULL ? + "10 Gbps" : + (link_speed == RNP_LINK_SPEED_1GB_FULL ? + "1 Gbps" : + (link_speed == RNP_LINK_SPEED_100_FULL ? + "100 Mbps" : + (link_speed == RNP_LINK_SPEED_10_FULL ? + "10 Mbps" : + "unknown speed")))))), + ((duplex) ? "Full" : "Half"), + ((flow_rx && flow_tx) ? + "RX/TX" : + (flow_rx ? "RX" : (flow_tx ? 
"TX" : "None")))); + /* we should update rx irq delay and tx irq delay */ + if (link_speed == RNP_LINK_SPEED_10GB_FULL) { + adapter->rx_usecs = adapter->rx_usecs_usr_set; + adapter->tx_usecs = adapter->tx_usecs_usr_set; + } else { + adapter->rx_usecs = adapter->rx_usecs_usr_set * 6; + adapter->tx_usecs = adapter->tx_usecs_usr_set * 2; + } + update_ring_delay(adapter); + } else { + if (hw->ops.set_mac_speed) + hw->ops.set_mac_speed(hw, false, 0, false); + } +} + +/** + * rnp_watchdog_link_is_up - update netif_carrier status and + * print link up message + * @adapter: pointer to the device adapter structure + **/ +static void rnp_watchdog_link_is_up(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + + /* only continue if link was previously down */ + if (netif_carrier_ok(netdev)) + return; + + adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP; + switch (hw->mac.type) { + default: + break; + } + + netif_carrier_on(netdev); + + netif_tx_wake_all_queues(netdev); + + hw->ops.set_mac_rx(hw, true); +} + +/** + * rnp_watchdog_link_is_down - update netif_carrier status and + * print link down message + * @adapter: pointer to the adapter structure + **/ +static void rnp_watchdog_link_is_down(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; + + adapter->link_up = false; + adapter->link_speed = 0; + + /* only continue if link was up previously */ + if (!netif_carrier_ok(netdev)) + return; + + /* poll for SFP+ cable when link is down */ + if (rnp_is_sfp(hw)) + adapter->flags2 |= RNP_FLAG2_SEARCH_FOR_SFP; + + e_info(drv, "NIC Link is Down\n"); + + netif_carrier_off(netdev); + + netif_tx_stop_all_queues(netdev); + + hw->ops.set_mac_rx(hw, false); +} + +static void rnp_update_link_to_vf(struct rnp_adapter *adapter) +{ + /* maybe confict with vf */ + if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE)) + return; + + if ((adapter->link_up_old != adapter->link_up) || + (adapter->link_speed_old != adapter->link_speed)) { + /* if change send mbx to all vf */ + if (!test_bit(__RNP_IN_IRQ, &adapter->state)) { + if (0 == + rnp_msg_post_status(adapter, PF_SET_LINK_STATUS)) { + /* maybe delay if we are in other irq? 
*/ + adapter->link_up_old = adapter->link_up; + adapter->link_speed_old = adapter->link_speed; + } + } + } +} +/** + * rnp_watchdog_subtask - check and bring link up + * @adapter: pointer to the device adapter structure + **/ +static void rnp_watchdog_subtask(struct rnp_adapter *adapter) +{ + /* if interface is down do nothing */ + /* should do link status if in sriov */ + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + rnp_watchdog_update_link(adapter); + + if (adapter->link_up) + rnp_watchdog_link_is_up(adapter); + else + rnp_watchdog_link_is_down(adapter); + + rnp_update_link_to_vf(adapter); + + rnp_update_stats(adapter); +} + +/** + * rnp_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +void rnp_service_timer(struct timer_list *t) +{ + struct rnp_adapter *adapter = from_timer(adapter, t, service_timer); + unsigned long next_event_offset; + bool ready = true; + + /* poll faster when waiting for link */ + if (adapter->flags & RNP_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ; + /* Reset the timer */ + if (!test_bit(__RNP_REMOVE, &adapter->state)) + mod_timer(&adapter->service_timer, next_event_offset + jiffies); + + if (ready) + rnp_service_event_schedule(adapter); +} + +static void rnp_reset_pf_subtask(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 err; + + if (!(adapter->flags2 & RNP_FLAG2_RESET_PF)) + return; + + rtnl_lock(); + netif_device_detach(netdev); + if (netif_running(netdev)) { + rnp_down(adapter); + rnp_free_irq(adapter); + rnp_free_all_tx_resources(adapter); + rnp_free_all_rx_resources(adapter); + } + rtnl_unlock(); + + adapter->link_up = 0; + adapter->link_up_old = 0; + rnp_msg_post_status(adapter, PF_SET_LINK_STATUS); + /* wait all vf get this status */ + usleep_range(500, 1000); + + rnp_reset(adapter); + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); + + rtnl_lock(); + err = rnp_init_interrupt_scheme(adapter); + + register_mbx_irq(adapter); + + if (!err && netif_running(netdev)) + err = rnp_open(netdev); + + rtnl_unlock(); + rnp_msg_post_status(adapter, PF_SET_RESET); + netif_device_attach(netdev); + adapter->flags2 &= (~RNP_FLAG2_RESET_PF); +} + +static void rnp_reset_subtask(struct rnp_adapter *adapter) +{ + if (!(adapter->flags2 & RNP_FLAG2_RESET_REQUESTED)) + return; + + adapter->flags2 &= ~RNP_FLAG2_RESET_REQUESTED; + + /* If we're already down or resetting, just bail */ + if (test_bit(__RNP_DOWN, &adapter->state) || + test_bit(__RNP_RESETTING, &adapter->state)) + return; + + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + rtnl_lock(); + rnp_reinit_locked(adapter); + rtnl_unlock(); +} + +static void rnp_rx_len_reset_subtask(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *rx_ring; + + for (i = 0; i < adapter->num_tx_queues; i++) { + rx_ring = adapter->rx_ring[i]; + if (unlikely(rx_ring->ring_flags & + RNP_RING_FLAG_DO_RESET_RX_LEN)) { + dbg("[%s] Rx-ring %d count reset\n", + adapter->netdev->name, rx_ring->rnp_queue_idx); + if (!rnp_rx_ring_reinit(adapter, rx_ring)) { + rx_ring->ring_flags &= + (~RNP_RING_FLAG_DO_RESET_RX_LEN); + } + } + } +} + +static void rnp_auto_itr_moderation(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *rx_ring; + u64 period = (u64)(jiffies - adapter->last_moder_jiffies); + + if (!adapter->adaptive_rx_coal || + period < adapter->sample_interval * HZ) { + return; + } + + 
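+	/* Rough per-ring RX rate estimate, in packets per second:
+	 *
+	 *	rate = rx_pkt_diff * HZ / period
+	 *
+	 * where period is the number of jiffies since the last sample and
+	 * rx_pkt_diff the packets the ring received in that window. For
+	 * example, with the default sample_interval of 10, about 150000
+	 * packets over a ~10 second window give rate ~ 15000 pps, below
+	 * the 20000 pps threshold, so the ring is flagged
+	 * RNP_RING_LOWER_ITR; busier rings get the flag cleared again.
+	 */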
adapter->last_moder_jiffies = jiffies; + + /* it is time to check moderation */ + for (i = 0; i < adapter->num_rx_queues; i++) { + u64 x, y, rate; + u64 rx_packets, packets, rx_pkt_diff; + + rx_ring = adapter->rx_ring[i]; + rx_packets = READ_ONCE(rx_ring->stats.packets); + rx_pkt_diff = rx_packets - + adapter->last_moder_packets[rx_ring->queue_index]; + packets = rx_pkt_diff; + + x = packets * HZ; + y = do_div(x, period); + rate = x; + + + if (rate == 0) { + + } else if (rate < 20000) { + + rx_ring->ring_flags |= RNP_RING_LOWER_ITR; + } else { + + rx_ring->ring_flags &= (~RNP_RING_LOWER_ITR); + } + + /* write back new count */ + adapter->last_moder_packets[rx_ring->queue_index] = rx_packets; + } +} + +/** + * rnp_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +void rnp_service_task(struct work_struct *work) +{ + struct rnp_adapter *adapter = + container_of(work, struct rnp_adapter, service_task); + + rnp_reset_subtask(adapter); + rnp_reset_pf_subtask(adapter); + rnp_watchdog_subtask(adapter); + rnp_rx_len_reset_subtask(adapter); + rnp_auto_itr_moderation(adapter); + rnp_check_hang_subtask(adapter); + rnp_service_event_complete(adapter); +} + +static int rnp_tso(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, + u32 *mac_ip_len, u8 *hdr_len, u32 *tx_flags) +{ + struct sk_buff *skb = first->skb; + struct net_device *netdev = tx_ring->netdev; + struct rnp_adapter *adapter = netdev_priv(netdev); + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; + int err; + u8 *inner_mac; + u16 gso_segs, gso_size; + u16 gso_need_pad; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + inner_mac = skb->data; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->tot_len = 0; + ip.v4->check = 0x0000; + } else { + ip.v6->payload_len = 0; + } + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + } + /* we should alayws do this */ + inner_mac = skb_inner_mac_header(skb); + first->tunnel_hdr_len = (inner_mac - skb->data); + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { + *tx_flags |= RNP_TXD_TUNNEL_VXLAN; + l4.udp->check = 0; + tx_dbg("set outer l4.udp to 0\n"); + } else { + *tx_flags |= RNP_TXD_TUNNEL_NVGRE; + } + + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } + + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->tot_len = 0; + ip.v4->check = 0x0000; + + } else { + ip.v6->payload_len = 0; + /* set ipv6 type */ + *tx_flags |= RNP_TXD_FLAG_IPv6; + } + + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + paylen = skb->len - l4_offset; + + if (skb->csum_offset == offsetof(struct tcphdr, check)) { + *tx_flags |= RNP_TXD_L4_TYPE_TCP; + /* compute length of segmentation header */ + *hdr_len = 
(l4.tcp->doff * 4) + l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + l4.tcp->psh = 0; + } else { + *tx_flags |= RNP_TXD_L4_TYPE_UDP; + /* compute length of segmentation header */ + *hdr_len = sizeof(*l4.udp) + l4_offset; + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + } + + *mac_ip_len = (l4.hdr - ip.hdr) | ((ip.hdr - inner_mac) << 9); + + /* compute header lengths */ + /* pull values out of skb_shinfo */ + gso_size = skb_shinfo(skb)->gso_size; + gso_segs = skb_shinfo(skb)->gso_segs; + + /* if we close padding check gso confition */ + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + gso_need_pad = (first->skb->len - *hdr_len) % gso_size; + if (gso_need_pad) { + if ((gso_need_pad + *hdr_len) <= 60) { + gso_need_pad = 60 - (gso_need_pad + *hdr_len); + first->gso_need_padding = !!gso_need_pad; + } + } + } + + /* update gso size and bytecount with header size */ + /* to fix tx status */ + first->gso_segs = gso_segs; + first->bytecount += (first->gso_segs - 1) * *hdr_len; + if (skb->csum_offset == offsetof(struct tcphdr, check)) { + first->mss_len_vf_num |= + (gso_size | ((l4.tcp->doff * 4) << 24)); + } else { + first->mss_len_vf_num |= (gso_size | ((8) << 24)); + } + + *tx_flags |= RNP_TXD_FLAG_TSO | RNP_TXD_IP_CSUM | RNP_TXD_L4_CSUM; + + first->ctx_flag = true; + return 1; +} + +static int rnp_tx_csum(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, + u32 *mac_ip_len, u32 *tx_flags) +{ + struct sk_buff *skb = first->skb; + u8 l4_proto = 0; + u8 ip_len = 0; + u8 mac_len = 0; + u8 *inner_mac = skb->data; + u8 *exthdr; + __be16 frag_off; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + inner_mac = skb->data; + + /* outer protocol */ + if (skb->encapsulation) { + /* define outer network header type */ + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } + + /* define outer transport */ + switch (l4_proto) { + case IPPROTO_UDP: + l4.udp->check = 0; + *tx_flags |= RNP_TXD_TUNNEL_VXLAN; + break; + case IPPROTO_GRE: + *tx_flags |= RNP_TXD_TUNNEL_NVGRE; + /* There was a long-standing issue in GRE where GSO + * was not setting the outer transport header unless + * a GRE checksum was requested. This was fixed in + * the 4.6 version of the kernel. In the 4.7 kernel + * support for GRE over IPv6 was added to GSO. So we + * can assume this workaround for all IPv4 headers + * without impacting later versions of the GRE. 
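+ * The workaround below simply recomputes the outer L4 header offset
+ * from ihl for IPv4 GRE frames before switching to the inner headers.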
+ */ + if (ip.v4->version == 4) + l4.hdr = ip.hdr + (ip.v4->ihl * 4); + break; + default: + skb_checksum_help(skb); + return -1; + } + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + inner_mac = skb_inner_mac_header(skb); + first->tunnel_hdr_len = inner_mac - skb->data; + first->ctx_flag = true; + tx_dbg("tunnel length is %d\n", first->tunnel_hdr_len); + } + + mac_len = (ip.hdr - inner_mac); // mac length + *mac_ip_len = (ip.hdr - inner_mac) << 9; + tx_dbg("inner checksum needed %d", skb_checksum_start_offset(skb)); + tx_dbg("skb->encapsulation %d\n", skb->encapsulation); + ip_len = (l4.hdr - ip.hdr); + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, + &frag_off); + *tx_flags |= RNP_TXD_FLAG_IPv6; + } + /* Enable L4 checksum offloads */ + switch (l4_proto) { + case IPPROTO_TCP: + *tx_flags |= RNP_TXD_L4_TYPE_TCP | RNP_TXD_L4_CSUM; + break; + case IPPROTO_SCTP: + tx_dbg("sctp checksum packet\n"); + *tx_flags |= RNP_TXD_L4_TYPE_SCTP | RNP_TXD_L4_CSUM; + break; + case IPPROTO_UDP: + *tx_flags |= RNP_TXD_L4_TYPE_UDP | RNP_TXD_L4_CSUM; + break; + default: + skb_checksum_help(skb); + return 0; + } + + /* should consider stags mode */ + if ((tx_ring->ring_flags & RNP_RING_NO_TUNNEL_SUPPORT) && + (first->ctx_flag)) { + /* if not support tunnel */ + *tx_flags &= (~RNP_TXD_TUNNEL_MASK); + if (!(first->priv_tags)) { + first->ctx_flag = false; + mac_len += first->tunnel_hdr_len; + first->tunnel_hdr_len = 0; + } + } + *mac_ip_len = (mac_len << 9) | ip_len; + + return 0; +} + +static int __rnp_maybe_stop_tx(struct rnp_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(rnp_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int rnp_maybe_stop_tx(struct rnp_ring *tx_ring, u16 size) +{ + if (likely(rnp_desc_unused(tx_ring) >= size)) + return 0; + return __rnp_maybe_stop_tx(tx_ring, size); +} + +static int rnp_tx_map(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, + u32 mac_ip_len, u32 tx_flags) +{ + struct sk_buff *skb = first->skb; + struct rnp_tx_buffer *tx_buffer; + struct rnp_tx_desc *tx_desc; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int data_len, size; + u16 i = tx_ring->next_to_use; + u64 fun_id = ((u64)(tx_ring->pfvfnum) << (56)); + + tx_desc = RNP_TX_DESC(tx_ring, i); + size = skb_headlen(skb); + data_len = skb->data_len; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + /* 1st desc */ + tx_desc->pkt_addr = cpu_to_le64(dma | fun_id); + + while (unlikely(size > RNP_MAX_DATA_PER_TXD)) { + tx_desc->vlan_cmd_bsz = build_ctob( + tx_flags, mac_ip_len, RNP_MAX_DATA_PER_TXD); + /* ==== desc== */ + buf_dump_line("tx0 ", __LINE__, tx_desc, + sizeof(*tx_desc)); + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = RNP_TX_DESC(tx_ring, 0); + i = 0; + } + dma += RNP_MAX_DATA_PER_TXD; + size -= RNP_MAX_DATA_PER_TXD; + + tx_desc->pkt_addr = cpu_to_le64(dma | fun_id); + } + + buf_dump_line("tx1 ", __LINE__, tx_desc, sizeof(*tx_desc)); + if (likely(!data_len)) + break; + tx_desc->vlan_cmd_bsz = build_ctob(tx_flags, mac_ip_len, size); + buf_dump_line("tx2 ", __LINE__, tx_desc, sizeof(*tx_desc)); + + /* ==== frag== */ + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = RNP_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + tx_desc->vlan_cmd_bsz = build_ctob( + tx_flags | RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS, mac_ip_len, size); + buf_dump_line("tx3 ", __LINE__, tx_desc, sizeof(*tx_desc)); + + /* set the timestamp */ + first->time_stamp = jiffies; + + tx_ring->tx_stats.send_bytes += first->bytecount; +#ifdef NO_BQL_TEST +#else + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); +#endif + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
+ */ + /* timestamp the skb as late as possible, just prior to notifying + * the MAC that it should transmit this packet + */ + wmb(); + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + + buf_dump_line("tx4 ", __LINE__, tx_desc, sizeof(*tx_desc)); + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + /* need this */ + rnp_maybe_stop_tx(tx_ring, DESC_NEEDED); + skb_tx_timestamp(skb); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { + tx_ring->tx_stats.send_bytes_to_hw += first->bytecount; + tx_ring->tx_stats.send_bytes_to_hw += + tx_ring->tx_stats.todo_update; + tx_ring->tx_stats.todo_update = 0; + rnp_wr_reg(tx_ring->tail, i); + } else { + tx_ring->tx_stats.todo_update += first->bytecount; + } + return 0; +dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { + tx_buffer = &tx_ring->tx_buffer_info[i]; + rnp_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) + break; + if (i == 0) + i += tx_ring->count; + i--; + } + dev_kfree_skb_any(first->skb); + first->skb = NULL; + tx_ring->next_to_use = i; + + return -1; +} + +static void rnp_force_src_mac(struct sk_buff *skb, struct net_device *netdev) +{ + u8 *data = skb->data; + bool ret = false; + struct netdev_hw_addr *ha; + /* force all src mac to myself */ + if (is_multicast_ether_addr(data)) { + if (0 == memcmp(data + netdev->addr_len, netdev->dev_addr, + netdev->addr_len)) { + ret = true; + goto DONE; + } + netdev_for_each_uc_addr(ha, netdev) { + if (0 == memcmp(data + netdev->addr_len, ha->addr, + netdev->addr_len)) { + ret = true; + goto DONE; + } + } + /* if not src mac, force to src mac */ + if (!ret) + memcpy(data + netdev->addr_len, netdev->dev_addr, + netdev->addr_len); + } +DONE: + return; +} + +netdev_tx_t rnp_xmit_frame_ring(struct sk_buff *skb, + struct rnp_adapter *adapter, + struct rnp_ring *tx_ring, bool tx_padding) +{ + struct rnp_tx_buffer *first; + int tso; + u32 tx_flags = 0; + unsigned short f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); + __be16 protocol = skb->protocol; + u8 hdr_len = 0; + int ignore_vlan = 0; + /* default len should not 0 (hw request) */ + u32 mac_ip_len = 20; + + tx_dbg("=== begin ====\n"); + tx_dbg("rnp skb:%p, skb->len:%d headlen:%d, data_len:%d\n", skb, + skb->len, skb_headlen(skb), skb->data_len); + tx_dbg("next_to_clean %d, next_to_use %d\n", tx_ring->next_to_clean, + tx_ring->next_to_use); + /* + * need: 1 descriptor per page * PAGE_SIZE/RNP_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/RNP_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + skb_frag_t *frag_temp = &skb_shinfo(skb)->frags[f]; + + count += TXD_USE_COUNT(skb_frag_size(frag_temp)); + tx_dbg(" rnp #%d frag: size:%d\n", f, skb_frag_size(frag_temp)); + } + + if (rnp_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + (!(tx_ring->ring_flags & RNP_RING_VEB_MULTI_FIX))) + rnp_force_src_mac(skb, tx_ring->netdev); + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + first->priv_tags = 0; + + first->mss_len_vf_num = 0; + first->inner_vlan_tunnel_len = 0; + + 
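+	/* ctx_flag marks packets that need an extra context descriptor:
+	 * the SR-IOV vf number, TSO mss, stags/outer VLAN and tunnel
+	 * header length all travel in it, and rnp_maybe_tx_ctxtdesc() is
+	 * expected to emit it just before the data descriptors are mapped.
+	 */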
first->ctx_flag = (adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true : + false; + + /* if we have a HW VLAN tag being added default to the HW one */ + /* RNP_TXD_VLAN_VALID is used for veb */ + /* setup padding flag */ + + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + first->ctx_flag = true; + /* should consider sctp */ + first->gso_need_padding = tx_padding; + } + + /* RNP_FLAG2_VLAN_STAGS_ENABLED and + * tx-stags-offload not support together + */ + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) { + /* always add a stags for any packets out */ + if (tx_ring->ring_flags & RNP_RING_OUTER_VLAN_FIX) { + /* set outer_vlan to ctx */ + first->inner_vlan_tunnel_len |= (adapter->stags_vid); + first->priv_tags = 1; + first->ctx_flag = true; + + if (skb_vlan_tag_present(skb)) { + tx_flags |= RNP_TXD_VLAN_VALID | + RNP_TXD_VLAN_CTRL_INSERT_VLAN; + tx_flags |= skb_vlan_tag_get(skb); + /* else if it is a SW VLAN check the next + * protocol and store the tag + */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer( + skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI); + tx_flags |= RNP_TXD_VLAN_VALID; + } + + } else { + /* sriov mode not support this */ + tx_flags |= adapter->stags_vid; + tx_flags |= RNP_TXD_VLAN_CTRL_INSERT_VLAN; + if (skb_vlan_tag_present(skb)) { + tx_flags |= RNP_TXD_VLAN_VALID; + first->inner_vlan_tunnel_len |= + (skb_vlan_tag_get(skb) << 8); + first->ctx_flag = true; + /* else if it is a SW VLAN check the next + * protocol and store the tag + */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer( + skb, ETH_HLEN, sizeof(_vhdr), &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= RNP_TXD_VLAN_VALID; + } + } + } else { + /* normal mode*/ + if (skb_vlan_tag_present(skb)) { + if (skb->vlan_proto != htons(ETH_P_8021Q)) { + /* veb only use ctags */ + tx_flags |= skb_vlan_tag_get(skb); + tx_flags |= RNP_TXD_SVLAN_TYPE | + RNP_TXD_VLAN_CTRL_INSERT_VLAN; + } else { + tx_flags |= skb_vlan_tag_get(skb); + tx_flags |= RNP_TXD_VLAN_VALID | + RNP_TXD_VLAN_CTRL_INSERT_VLAN; + } + tx_ring->tx_stats.vlan_add++; + /* else if it is a SW VLAN check the next + * protocol and store the tag + */ + /* veb only use ctags */ + } else if (protocol == htons(ETH_P_8021Q)) { + struct vlan_hdr *vhdr, _vhdr; + + vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), + &_vhdr); + if (!vhdr) + goto out_drop; + + protocol = vhdr->h_vlan_encapsulated_proto; + tx_flags |= ntohs(vhdr->h_vlan_TCI); + tx_flags |= RNP_TXD_VLAN_VALID; + ignore_vlan = 1; + } + } + protocol = vlan_get_protocol(skb); + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->flags2 & RNP_FLAG2_PTP_ENABLED && adapter->ptp_tx_en) { + if (!test_and_set_bit_lock(__RNP_PTP_TX_IN_PROGRESS, + &adapter->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= RNP_TXD_FLAG_PTP; + adapter->ptp_tx_skb = skb_get(skb); + adapter->tx_hwtstamp_start = jiffies; + schedule_work(&adapter->tx_hwtstamp_work); + } else { + printk("ptp_tx_skb miss\n"); + } + } + /* record initial flags and protocol */ + tso = rnp_tso(tx_ring, first, &mac_ip_len, &hdr_len, &tx_flags); + if (tso < 0) + goto out_drop; + else if (!tso) + rnp_tx_csum(tx_ring, first, &mac_ip_len, &tx_flags); + /* check sriov mode */ + /* in this mode pf send msg should with vf_num */ + if 
(unlikely(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + first->ctx_flag = true; + first->mss_len_vf_num |= (adapter->vf_num_for_pf << 16); + } + + /* add control desc */ + rnp_maybe_tx_ctxtdesc(tx_ring, first, ignore_vlan); + /* add the ATR filter if ATR is on */ + if (rnp_tx_map(tx_ring, first, mac_ip_len, tx_flags)) { + goto cleanup_tx_tstamp; + } + tx_dbg("=== end ====\n\n\n\n"); + return NETDEV_TX_OK; + +out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; +cleanup_tx_tstamp: + if (unlikely(tx_flags & RNP_TXD_FLAG_PTP)) { + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + cancel_work_sync(&adapter->tx_hwtstamp_work); + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + } + + return NETDEV_TX_OK; +} + +static bool check_sctp_no_padding(struct sk_buff *skb) +{ + bool no_padding = false; + u8 l4_proto = 0; + u8 *exthdr; + __be16 frag_off; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else { + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, + &frag_off); + } + /* sctp set no_padding to true */ + switch (l4_proto) { + case IPPROTO_SCTP: + no_padding = true; + break; + default: + + break; + } + return no_padding; +} + +static netdev_tx_t rnp_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_ring *tx_ring; + bool tx_padding = false; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. 
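+ * When RNP_PRIV_FLAG_TX_PADDING is set, short non-SCTP frames are
+ * padded to 60 bytes in software here; short SCTP frames are left
+ * as-is and tx_padding is passed to rnp_xmit_frame_ring instead
+ * (see gso_need_padding), since SCTP must not be padded here.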
+ */ + if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) { + if (skb->len < 60) { + if (!check_sctp_no_padding(skb)) { + if (skb_put_padto(skb, 60)) + return NETDEV_TX_OK; + } else { + /* if sctp smaller than 60, never padding */ + tx_padding = true; + } + } + } else { + if (skb_put_padto(skb, 33)) + return NETDEV_TX_OK; + } + tx_ring = adapter->tx_ring[skb->queue_mapping]; + + return rnp_xmit_frame_ring(skb, adapter, tx_ring, tx_padding); +} + +/** + * rnp_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int rnp_set_mac(struct net_device *netdev, void *p) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + + dbg("[%s] call set mac\n", netdev->name); + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); + + /* reset veb table */ + rnp_configure_virtualization(adapter); + return 0; +} + +static int rnp_mdio_read(struct net_device *netdev, int prtad, int devad, + u32 addr, u32 *phy_value) +{ + int rc = -EIO; + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u16 value; + + rc = hw->ops.phy_read_reg(hw, addr, 0, &value); + *phy_value = value; + + return rc; +} + +static int rnp_mdio_write(struct net_device *netdev, int prtad, int devad, + u16 addr, u16 value) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + return hw->ops.phy_write_reg(hw, addr, 0, value); +} + +static int rnp_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr->ifr_data; + int prtad, devad, ret; + u32 phy_value; + + prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; + devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); + + if (cmd == SIOCGMIIREG) { + ret = rnp_mdio_read(netdev, prtad, devad, mii->reg_num, + &phy_value); + if (ret < 0) + return ret; + mii->val_out = phy_value; + return 0; + } else { + return rnp_mdio_write(netdev, prtad, devad, mii->reg_num, + mii->val_in); + } +} + +static int rnp_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + /* ptp 1588 used this */ + switch (cmd) { + case SIOCGHWTSTAMP: + if (module_enable_ptp) + return rnp_ptp_get_ts_config(adapter, req); + break; + case SIOCSHWTSTAMP: + if (module_enable_ptp) + return rnp_ptp_set_ts_config(adapter, req); + break; + case SIOCGMIIPHY: + return 0; + break; + case SIOCGMIIREG: + /* n400 use this */ + /* fall through */ + case SIOCSMIIREG: + return rnp_mii_ioctl(netdev, req, cmd); + break; + } + return -EINVAL; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
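+ * Here that simply means running every queue vector's interrupt
+ * handler by hand via rnp_msix_clean_rings().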
+ */ +static void rnp_netpoll(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(__RNP_DOWN, &adapter->state)) + return; + + adapter->flags |= RNP_FLAG_IN_NETPOLL; + for (i = 0; i < adapter->num_q_vectors; i++) + rnp_msix_clean_rings(0, adapter->q_vector[i]); + adapter->flags &= ~RNP_FLAG_IN_NETPOLL; +} + +#endif + +static void rnp_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int i; + + rcu_read_lock(); + for (i = 0; i < adapter->num_rx_queues; i++) { + struct rnp_ring *ring = READ_ONCE(adapter->rx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct rnp_ring *ring = READ_ONCE(adapter->tx_ring[i]); + u64 bytes, packets; + unsigned int start; + + if (ring) { + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + stats->tx_packets += packets; + stats->tx_bytes += bytes; + } + } + rcu_read_unlock(); + /* following stats updated by rnp_watchdog_task() */ + stats->multicast = netdev->stats.multicast; + stats->rx_errors = netdev->stats.rx_errors; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + +} + +/** + * rnp_setup_tc - configure net_device for multiple traffic classes + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int rnp_setup_tc(struct net_device *dev, u8 tc) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + if (hw->hw_type != rnp_hw_n10) + return -EINVAL; + /* if now we are in force mode, never need force, if not force it */ + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + hw->ops.set_mac_rx(hw, false); + if (hw->ops.driver_status) + hw->ops.driver_status(hw, true, + rnp_driver_force_control_mac); + } + + /* Hardware supports up to 8 traffic classes */ + if ((tc > RNP_MAX_TCS_NUM) || (tc == 1)) + return -EINVAL; + /* we canot support tc with sriov mode */ + if ((tc) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return -EINVAL; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
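+ * The interface is therefore closed, the interrupt scheme rebuilt
+ * with the new TC count and the device reopened below.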
+ */ + while (test_and_set_bit(__RNP_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (netif_running(dev)) + rnp_close(dev); + + rnp_fdir_filter_exit(adapter); + adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC); + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); + adapter->num_tc = tc; + + if (tc) { + netdev_set_num_tc(dev, tc); + adapter->flags |= RNP_FLAG_DCB_ENABLED; + } else { + netdev_reset_tc(dev); + adapter->flags &= ~RNP_FLAG_DCB_ENABLED; + } + + rnp_init_interrupt_scheme(adapter); + + register_mbx_irq(adapter); + /* rss table must reset */ + adapter->rss_tbl_setup_flag = 0; + + if (netif_running(dev)) + ret = rnp_open(dev); + + /* if we not set force now */ + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + hw->ops.set_mac_rx(hw, false); + if (hw->ops.driver_status) + hw->ops.driver_status(hw, false, + rnp_driver_force_control_mac); + } + + clear_bit(__RNP_RESETTING, &adapter->state); + return ret; +} + +#ifdef CONFIG_PCI_IOV +void rnp_sriov_reinit(struct rnp_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + rtnl_lock(); + rnp_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); + usleep_range(10000, 20000); +} +#endif + +static int rnp_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) +{ + /* 1. check weather filter rule is ingress root */ + struct rnp_adapter *adapter = netdev_priv(dev); + u32 loc = cls->knode.handle & 0xfffff; + u32 uhtid = TC_U32_USERHTID(cls->knode.handle); + int ret; + + if ((uhtid != 0x800)) + return -EINVAL; + + spin_lock(&adapter->fdir_perfect_lock); + ret = rnp_update_ethtool_fdir_entry(adapter, NULL, loc); + spin_unlock(&adapter->fdir_perfect_lock); + + return ret; +} + +#ifdef CONFIG_NET_CLS_ACT +static int rnp_action_parse(struct tcf_exts *exts, u64 *action, u8 *queue) +{ + const struct tc_action *a; + int j; + + if (!tcf_exts_has_actions(exts)) + return -EINVAL; + + tcf_exts_for_each_action(j, a, exts) { + /* Drop action */ + if (is_tcf_gact_shot(a)) { + *action = RNP_FDIR_DROP_QUEUE; + *queue = RNP_FDIR_DROP_QUEUE; + return 0; + } + /* Redirect to a VF or a offloaded macvlan */ + if (is_tcf_mirred_egress_redirect(a)) { + + struct net_device *dev = tcf_mirred_dev(a); + + if (!dev) + return -EINVAL; + } + + return -EINVAL; + } + + return 0; +} + +#else +static int rnp_action_parse(struct tcf_exts *exts, u64 *action, u8 *queue) +{ + return -EINVAL; +} +#endif + +static int rnp_clsu32_build_input(struct tc_cls_u32_offload *cls, + struct rnp_fdir_filter *input, + const struct rnp_match_parser *parsers) +{ + int i = 0, j = 0, err = -1; + __be32 val, mask, off; + bool found; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + off = cls->knode.sel->keys[i].off; + val = cls->knode.sel->keys[i].val; + mask = cls->knode.sel->keys[i].mask; + dbg("cls-key[%d] off %d val %d mask %d\n ", i, off, val, mask); + found = false; + for (j = 0; parsers[j].val; j++) { + /* according the off select parser */ + if (off == parsers[j].off) { + found = true; + err = parsers[j].val(input, val, mask); + if (err) + return err; + + break; + } + } + /* if the rule can't parse that we don't support the rule */ + if (!found) + return -EINVAL; + } + + return 0; +} + +static int rnp_config_knode(struct net_device *dev, __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + /*1. check ethernet hw-feature U32 can offload */ + /*2. check U32 protocol We just support IPV4 offloading For now*/ + /*3. check if this cls is a cls of root u32 or cls of class u32*/ + /*4. 
check if this cls has been added. + * the filter extry create but the match val and mask don't fill + * so we can use it. + * find a exist extry and the match val and mask is added before + * so we don't need add it again + */ + u32 uhtid, link_uhtid; + int ret; + struct rnp_adapter *adapter = netdev_priv(dev); + u8 queue; + struct rnp_fdir_filter *input; + // struct rnp_hw *hw = &adapter->hw; + u32 loc = cls->knode.handle & 0xfffff; + + if (protocol != htons(ETH_P_IP)) + return -EOPNOTSUPP; + + uhtid = TC_U32_USERHTID(cls->knode.handle); + link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); + + netdev_info(dev, "uhtid %d link_uhtid %d protocol 0x%2x\n", uhtid, + link_uhtid, ntohs(protocol)); + /* For now just support handle root ingress + * TODO more feature + */ + if (uhtid != 0x800) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + /*be carefull this input mem need to free */ + ret = rnp_clsu32_build_input(cls, input, rnp_ipv4_parser); + if (ret) { + netdev_warn(dev, "This Rules We Can't Support It\n"); + goto out; + } + ret = rnp_action_parse(cls->knode.exts, &input->action, &queue); + if (ret) + goto out; + + dbg("tc filter rule sw_location %d\n", loc); + + /* maybe bug here */ + input->hw_idx = adapter->tuple_5_count++; + input->sw_idx = loc; + spin_lock(&adapter->fdir_perfect_lock); + rnp_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + return 0; +out: + kfree(input); + return -EOPNOTSUPP; +} + +static int rnp_setup_tc_cls_u32(struct net_device *dev, + struct tc_cls_u32_offload *cls_u32) +{ + __be16 proto = cls_u32->common.protocol; + dbg("cls_u32->command is %d\n", cls_u32->command); + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return rnp_config_knode(dev, proto, cls_u32); + case TC_CLSU32_DELETE_KNODE: + return rnp_delete_knode(dev, cls_u32); + default: + return -EOPNOTSUPP; + } +} + +static int rnp_setup_tc_block_ingress_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct net_device *dev = cb_priv; + struct rnp_adapter *adapter = netdev_priv(dev); + + if (test_bit(__RNP_DOWN, &adapter->state)) { + netdev_err( + adapter->netdev, + "Failed to setup tc on port %d. Link Down? 
0x%.2lx\n", + adapter->port, adapter->state); + return -EINVAL; + } + if (!tc_cls_can_offload_and_chain0(dev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSU32: + return rnp_setup_tc_cls_u32(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(rnp_block_cb_list); + +static int rnp_setup_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return rnp_setup_tc(dev, mqprio->num_tc); +} + +static int __rnp_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + switch (type) { + case TC_SETUP_BLOCK: { + struct flow_block_offload *f = + (struct flow_block_offload *)type_data; + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return flow_block_cb_setup_simple( + type_data, &rnp_block_cb_list, + rnp_setup_tc_block_ingress_cb, adapter, adapter, + true); + else + return -EOPNOTSUPP; + } + case TC_SETUP_CLSU32: + return rnp_setup_tc_cls_u32(netdev, type_data); + case TC_SETUP_QDISC_MQPRIO: + return rnp_setup_mqprio(netdev, type_data); + default: + return -EOPNOTSUPP; + } + + return 0; +} + +void rnp_do_reset(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + rnp_reinit_locked(adapter); + else + rnp_reset(adapter); +} + +static netdev_features_t rnp_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; + + /* close rx csum when rx fcs on */ + if (!(adapter->flags2 & RNP_FLAG2_CHKSM_FIX)) { + if (features & NETIF_F_RXFCS) + features &= (~NETIF_F_RXCSUM); + } + /* Turn off LRO if not RSC capable */ + if (!(adapter->flags2 & RNP_FLAG2_RSC_CAPABLE)) + features &= ~NETIF_F_LRO; + if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) { + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; + } + + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) { + if (!(features & NETIF_F_HW_VLAN_STAG_FILTER)) + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) { + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + } + + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + if (!(features & NETIF_F_HW_VLAN_STAG_RX)) + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } + + if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) { + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + } + + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + if (!(features & NETIF_F_HW_VLAN_STAG_TX)) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + } + + return features; +} + +static int rnp_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = netdev->features ^ features; + bool need_reset = false; + struct rnp_hw *hw = &adapter->hw; + + netdev->features = features; + + /* if changed ntuple should close all */ + if (changed & NETIF_F_NTUPLE) { + if (!(features & NETIF_F_NTUPLE)) { + rnp_fdir_filter_exit(adapter); + } + } + + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & 
RNP_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + + adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE; + adapter->flags |= RNP_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~RNP_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if SR-IOV is enabled */ + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= RNP_FLAG_FDIR_HASH_CAPABLE; + break; + } + + /* vlan filter changed */ + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & (NETIF_F_HW_VLAN_CTAG_FILTER)) + hw->ops.set_vlan_filter_en(hw, true); + else + hw->ops.set_vlan_filter_en(hw, false); + rnp_msg_post_status(adapter, PF_VLAN_FILTER_STATUS); + } + + /* rss hash changed */ + if (changed & (NETIF_F_RXHASH)) { + bool iov_en = (adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true : + false; + + if (netdev->features & (NETIF_F_RXHASH)) + hw->ops.set_rx_hash(hw, true, iov_en); + else + hw->ops.set_rx_hash(hw, false, iov_en); + } + + /* rx fcs changed */ + /* in this mode rx l4/sctp checksum will get error */ + if (changed & NETIF_F_RXFCS) { + + if (features & NETIF_F_RXFCS) { + adapter->priv_flags |= RNP_PRIV_FLAG_RX_FCS; + hw->ops.set_fcs_mode(hw, true); + /* if in rx fcs mode ,hw rxcsum may error, + * close rxcusm + */ + } else { + adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_FCS); + hw->ops.set_fcs_mode(hw, false); + } + rnp_msg_post_status(adapter, PF_FCS_STATUS); + } + + if (changed & NETIF_F_RXALL) + need_reset = true; + + if (features & NETIF_F_RXALL) + adapter->priv_flags |= RNP_PRIV_FLAG_RX_ALL; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_ALL); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rnp_vlan_strip_enable(adapter); + else + rnp_vlan_strip_disable(adapter); + + if (need_reset) + rnp_do_reset(netdev); + + return 0; +} + +static int rnp_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + __always_unused u16 flags, + struct netlink_ext_ack __always_unused *ext) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + struct nlattr *attr, *br_spec; + int rem; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if (mode == BRIDGE_MODE_VEPA) { + adapter->flags2 &= ~RNP_FLAG2_BRIDGE_MODE_VEB; + wr32(hw, RNP_DMA_CONFIG, + rd32(hw, RNP_DMA_CONFIG) | DMA_VEB_BYPASS); + } else if (mode == BRIDGE_MODE_VEB) { + adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; + wr32(hw, RNP_DMA_CONFIG, + rd32(hw, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + + } else + return -EINVAL; + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + } + + return 0; +} + +static int rnp_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, + u32 __maybe_unused filter_mask, int nlflags) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + u16 mode; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return 0; + + if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB) + mode = BRIDGE_MODE_VEB; + else + mode = BRIDGE_MODE_VEPA; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, + filter_mask, NULL); +} + +#define RNP_MAX_TUNNEL_HDR_LEN 80 +#define RNP_MAX_MAC_HDR_LEN 127 +#define RNP_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t rnp_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > RNP_MAX_MAC_HDR_LEN)) + return features & + ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > RNP_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_TSO | NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + +static void rnp_clear_udp_tunnel_port(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + + if (!(adapter->flags & (RNP_FLAG_VXLAN_OFFLOAD_CAPABLE))) + return; + + adapter->vxlan_port = 0; + hw->ops.set_vxlan_port(hw, adapter->vxlan_port); +} + +/** + * rnp_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +__maybe_unused static void rnp_add_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_hw *hw = &adapter->hw; + __be16 port = ti->port; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & RNP_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port == port) + return; + + if (adapter->vxlan_port) { + netdev_info(dev, + "VXLAN port %d set, not adding port %d\n", + ntohs(adapter->vxlan_port), ntohs(port)); + return; + } + + adapter->vxlan_port = port; + break; + default: + return; + } + hw->ops.set_vxlan_port(hw, ntohs(adapter->vxlan_port)); +} + +/** + * rnp_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports + * @dev: The port's netdev + * @ti: Tunnel endpoint information + **/ +__maybe_unused static void rnp_del_udp_tunnel_port(struct net_device *dev, + struct udp_tunnel_info *ti) +{ + struct rnp_adapter *adapter = netdev_priv(dev); + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + if (ti->sa_family != AF_INET) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!(adapter->flags & RNP_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { + netdev_info(dev, "VXLAN port %d not found\n", + ntohs(ti->port)); + return; + } + + break; + default: + return; + } + + rnp_clear_udp_tunnel_port(adapter); + adapter->flags2 |= RNP_FLAG2_UDP_TUN_REREG_NEEDED; +} + +const struct net_device_ops 
rnp10_netdev_ops = { + .ndo_open = rnp_open, + .ndo_stop = rnp_close, + .ndo_start_xmit = rnp_xmit_frame, + .ndo_set_rx_mode = rnp_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_eth_ioctl = rnp_ioctl, + .ndo_change_mtu = rnp_change_mtu, + .ndo_get_stats64 = rnp_get_stats64, + .ndo_tx_timeout = rnp_tx_timeout, + .ndo_set_tx_maxrate = rnp_tx_maxrate, + .ndo_set_mac_address = rnp_set_mac, + .ndo_vlan_rx_add_vid = rnp_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = rnp_vlan_rx_kill_vid, + .ndo_set_vf_mac = rnp_ndo_set_vf_mac, + .ndo_set_vf_vlan = rnp_ndo_set_vf_vlan, + .ndo_set_vf_rate = rnp_ndo_set_vf_bw, + .ndo_set_vf_spoofchk = rnp_ndo_set_vf_spoofchk, + .ndo_set_vf_link_state = rnp_ndo_set_vf_link_state, + .ndo_set_vf_trust = rnp_ndo_set_vf_trust, + .ndo_get_vf_config = rnp_ndo_get_vf_config, + .ndo_setup_tc = __rnp_setup_tc, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rnp_netpoll, +#endif + .ndo_bridge_setlink = rnp_ndo_bridge_setlink, + .ndo_bridge_getlink = rnp_ndo_bridge_getlink, + .ndo_features_check = rnp_features_check, + .ndo_set_features = rnp_set_features, + .ndo_fix_features = rnp_fix_features, +}; + +static void rnp_assign_netdev_ops(struct net_device *dev) +{ + /* different hw can assign difference fun */ + dev->netdev_ops = &rnp10_netdev_ops; + rnp_set_ethtool_ops(dev); + dev->watchdog_timeo = 5 * HZ; +} + +/** + * rnp_wol_supported - Check whether device supports WoL + * @hw: hw specific details + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * + * This function is used by probe and ethtool to determine + * which devices have WoL support + * + **/ +int rnp_wol_supported(struct rnp_adapter *adapter, u16 device_id, + u16 subdevice_id) +{ + int is_wol_supported = 0; + + struct rnp_hw *hw = &adapter->hw; + + if (hw->wol_supported) + is_wol_supported = 1; + return is_wol_supported; +} + +static inline unsigned long rnp_tso_features(struct rnp_hw *hw) +{ + unsigned long features = 0; + + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + features |= NETIF_F_TSO; + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + features |= NETIF_F_TSO6; + features |= NETIF_F_GSO_PARTIAL; + if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) + features |= RNP_GSO_PARTIAL_FEATURES; + + return features; +} + +static void remove_mbx_irq(struct rnp_adapter *adapter) +{ + /* mbx */ + if (adapter->num_other_vectors) { + /* only msix use indepented intr */ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + adapter->hw.mbx.ops.configure( + &adapter->hw, adapter->msix_entries[0].entry, + false); + free_irq(adapter->msix_entries[0].vector, adapter); + + adapter->hw.mbx.other_irq_enabled = false; + } + } +} + +static int register_mbx_irq(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err = 0; + + /* for mbx:vector0 */ + if (adapter->num_other_vectors) { + /* only do this in msix mode */ + if (adapter->flags & RNP_FLAG_MSIX_ENABLED) { + err = request_irq(adapter->msix_entries[0].vector, + rnp_msix_other, 0, netdev->name, + adapter); + if (err) { + e_err(probe, + "request_irq for msix_other failed: %d\n", + err); + goto err_mbx; + } + hw->mbx.ops.configure( + hw, adapter->msix_entries[0].entry, true); + adapter->hw.mbx.other_irq_enabled = true; + } + } + +err_mbx: + return err; +} + +static int rnp_rm_adpater(struct rnp_adapter *adapter) +{ + struct net_device *netdev; + struct rnp_hw *hw = &adapter->hw; + + netdev = adapter->netdev; + pr_info("= remove adapter:%s =\n", netdev->name); + + 
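+ /* Tear down roughly in reverse probe order: debugfs, PTP and
+ * service work, sysfs, fdir filters, netdev, mailbox irq,
+ * interrupt scheme and finally the BAR mappings.
+ */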
rnp_dbg_adapter_exit(adapter); + + netif_carrier_off(netdev); + + set_bit(__RNP_DOWN, &adapter->state); + set_bit(__RNP_REMOVE, &adapter->state); + if (module_enable_ptp) { + while (test_bit(__RNP_PTP_TX_IN_PROGRESS, &adapter->state)) { + usleep_range(10000, 20000); + } + cancel_work_sync(&adapter->tx_hwtstamp_work); + } + cancel_work_sync(&adapter->service_task); + + del_timer_sync(&adapter->service_timer); + rnp_sysfs_exit(adapter); + rnp_fdir_filter_exit(adapter); + adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC); + + if (adapter->rpu_inited) { + rnp_rpu_mpe_stop(adapter); + adapter->rpu_inited = 0; + } + + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + adapter->netdev = NULL; + + if (hw->ops.driver_status) + hw->ops.driver_status(hw, false, rnp_driver_insmod); + + remove_mbx_irq(adapter); + + rnp_clear_interrupt_scheme(adapter); + + if (hw->ncsi_en) { + rnp_mbx_probe_stat_set(hw, MBX_REMOVE); + } + + if (adapter->io_addr) + iounmap(adapter->io_addr); + + if (adapter->io_addr_bar0) + iounmap(adapter->io_addr_bar0); + + free_netdev(netdev); + + pr_info("remove complete\n"); + + return 0; +} + +static void rnp_fix_dma_tx_status(struct rnp_adapter *adapter) +{ + int i; + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + for (i = 0; i < dma->max_tx_queues; i++) + dma_ring_wr32(dma, RING_OFFSET(i) + RNP_DMA_TX_START, + 1); + } +} + +static u8 rnp10_pfnum(u8 __iomem *hw_addr_bar0, struct pci_dev *pdev) +{ + /* n10 read this from bar0 */ + u16 vf_num = -1; + u32 pfvfnum_reg; +#define PF_NUM_REG_N10 (0x75f000) + pfvfnum_reg = (PF_NUM_REG_N10 & (pci_resource_len(pdev, 0) - 1)); + vf_num = readl(hw_addr_bar0 + pfvfnum_reg); +#define VF_NUM_MASK_TEMP (0x400) +#define VF_NUM_OFF (4) + return ((vf_num & VF_NUM_MASK_TEMP) >> VF_NUM_OFF); +} + +static int rnp_can_rpu_start(struct rnp_adapter *adapter) +{ + if (adapter->hw.rpu_addr == NULL) + return 0; + if ((adapter->pdev->device & 0xff00) == 0x1c00) { + return 1; + } + if (adapter->hw.rpu_availble) { + return 1; + } + return 0; +} + +static int rnp_add_adpater(struct pci_dev *pdev, struct rnp_info *ii, + struct rnp_adapter **padapter) +{ + int i, err = 0; + struct rnp_adapter *adapter = NULL; + struct net_device *netdev; + struct rnp_hw *hw; + u8 __iomem *hw_addr = NULL; + u8 __iomem *hw_addr_bar0 = NULL; + + u32 dma_version = 0; + u32 nic_version = 0; + u32 queues = ii->total_queue_pair_cnts; + static int bd_number; + + pr_info("==== add adapter queues:%d ====", queues); + netdev = alloc_etherdev_mq(sizeof(struct rnp_adapter), queues); + if (!netdev) + return -ENOMEM; + + if (!fix_eth_name) + SET_NETDEV_DEV(netdev, &pdev->dev); + + adapter = netdev_priv(netdev); + + memset((char *)adapter, 0x00, sizeof(struct rnp_adapter)); + adapter->netdev = netdev; + adapter->pdev = pdev; + + adapter->max_ring_pair_counts = queues; + if (padapter) + *padapter = adapter; + + adapter->bd_number = bd_number++; + adapter->port = 0; + snprintf(adapter->name, sizeof(netdev->name), "%s%d%d", rnp_driver_name, + 1, adapter->bd_number); + pci_set_drvdata(pdev, adapter); + + hw = &adapter->hw; + hw->back = adapter; + /* first setup hw type */ + hw->rss_type = ii->rss_type; + hw->hw_type = ii->hw_type; + switch (hw->hw_type) { + case rnp_hw_n10: + case rnp_hw_n20: + case rnp_hw_n400: + case rnp_hw_uv440: + hw_addr_bar0 = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!hw_addr_bar0) { + dev_err(&pdev->dev, "pcim_iomap bar%d 
failed!\n", 0); + return -EIO; + } +#ifdef FIX_VF_BUG + rnp_wr_reg(hw_addr_bar0 + + (0x7982fc & (pci_resource_len(pdev, 0) - 1)), + 0); +#endif + + /* n10 use bar4 */ +#define RNP_NIC_BAR_N10 4 + hw_addr = ioremap(pci_resource_start(pdev, RNP_NIC_BAR_N10), + pci_resource_len(pdev, RNP_NIC_BAR_N10)); + if (!hw_addr) { + dev_err(&pdev->dev, "pcim_iomap bar%d failed!\n", + RNP_NIC_BAR_N10); + return -EIO; + } + pr_info("[bar%d]:%p %llx len=%d MB\n", RNP_NIC_BAR_N10, hw_addr, + (unsigned long long)pci_resource_start(pdev, + RNP_NIC_BAR_N10), + (int)pci_resource_len(pdev, RNP_NIC_BAR_N10) / 1024 / + 1024); + /* get dma version */ + dma_version = rnp_rd_reg(hw_addr); + + if (rnp10_pfnum(hw_addr_bar0, pdev)) + hw->pfvfnum = PF_NUM(1); + else + hw->pfvfnum = PF_NUM(0); + +#ifdef FIX_VF_BUG + if (hw->pfvfnum) + hw->hw_addr = hw_addr + 0x100000; + else + hw->hw_addr = hw_addr; +#else + hw->hw_addr = hw_addr; +#endif + /* setup msix base */ +#ifdef FIX_VF_BUG + if (hw->pfvfnum) + hw->ring_msix_base = hw->hw_addr + 0xa4000 + 0x200; + else + hw->ring_msix_base = hw->hw_addr + 0xa4000; +#else + hw->ring_msix_base = hw->hw_addr + 0xa4000; +#endif + nic_version = rd32(hw, RNP_TOP_NIC_VERSION); + adapter->irq_mode = irq_mode_msix; + adapter->flags |= RNP_FLAG_MSIX_CAPABLE; + + break; + default: +#ifdef FIX_VF_BUG + hw_addr_bar0 = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); +#endif + hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + goto err_free_net; + break; + } + + /* setup FT_PADDING */ + { +#ifdef FT_PADDING + u32 data; + + data = rnp_rd_reg(hw->hw_addr + RNP_DMA_CONFIG); + SET_BIT(8, data); + rnp_wr_reg(hw->hw_addr + RNP_DMA_CONFIG, data); + adapter->priv_flags |= RNP_PRIV_FLAG_FT_PADDING; +#endif + } + + /* assign to adapter */ + adapter->io_addr = hw_addr; + adapter->io_addr_bar0 = hw_addr_bar0; + if (pci_resource_len(pdev, 0) == (8 * 1024 * 1024)) { + hw->rpu_addr = hw_addr_bar0; + } + + hw->pdev = pdev; + hw->dma_version = dma_version; + adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV +#ifdef MSG_PROBE_ENABLE + | NETIF_MSG_PROBE +#endif +#ifdef MSG_IFUP_ENABLE + | NETIF_MSG_IFUP +#endif +#ifdef MSG_IFDOWN_ENABLE + | NETIF_MSG_IFDOWN +#endif + ); + + /* we have other irq */ + adapter->num_other_vectors = 1; + /* get software info */ + ii->get_invariants(hw); + + spin_lock_init(&adapter->link_stat_lock); + + if (adapter->num_other_vectors) { + /* Mailbox */ + rnp_init_mbx_params_pf(hw); + memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + if (dma_version >= 0x20210111) { + rnp_mbx_link_event_enable(hw, 0); + if ((hw->hw_type == rnp_hw_n10) || + (hw->hw_type == rnp_hw_n400)) + rnp_mbx_force_speed(hw, 0); + if (rnp_mbx_get_capability(hw, ii)) { + dev_err(&pdev->dev, + "rnp_mbx_get_capability failed!\n"); + err = -EIO; + goto err_free_net; + } + + /* should check eco */ +#ifdef VF_PROMISC_SUPPORT + if (!hw->eco) { + dev_err(&pdev->dev, + "only v2 chips support vf promisc!\n"); + err = -EIO; + goto err_free_net; + + } +#endif + adapter->portid_of_card = hw->port_id[0]; + if (hw->eco) { + hw->eth.num_rar_entries -= 1; + hw->mac.num_rar_entries -= 1; + hw->num_rar_entries -= 1; + } + + adapter->portid_of_card = hw->pfvfnum ? 
1 : 0; + adapter->wol = hw->wol; + } + } + if (hw->ncsi_en) { + hw->eth.num_rar_entries -= hw->ncsi_rar_entries; + hw->mac.num_rar_entries -= hw->ncsi_rar_entries; + hw->num_rar_entries -= hw->ncsi_rar_entries; + } + + if (hw->force_status) + adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + hw->default_rx_queue = 0; + pr_info("%s %s: dma version:0x%x, nic version:0x%x, pfvfnum:0x%x\n", + adapter->name, pci_name(pdev), hw->dma_version, nic_version, + hw->pfvfnum); + + /* Setup hw api */ + hw->mac.type = ii->mac; + /* EEPROM */ + if (ii->eeprom_ops) + memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); + + hw->phy.sfp_type = rnp_sfp_type_unknown; + + hw->ops.setup_ethtool(netdev); + rnp_assign_netdev_ops(netdev); + rnp_check_options(adapter); + /* setup the private structure */ + /* this private is used only once + */ + err = rnp_sw_init(adapter); + if (err) + goto err_sw_init; + + err = hw->ops.reset_hw(hw); + hw->phy.reset_if_overtemp = false; + if (err) { + e_dev_err("HW Init failed: %d\n", err); + goto err_sw_init; + } + if (hw->ops.driver_status) + hw->ops.driver_status(hw, true, rnp_driver_insmod); + if (hw->ops.driver_status) { + hw->ops.driver_status(hw, !!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE), + rnp_driver_force_control_mac); + } + +#ifdef CONFIG_PCI_IOV + if (adapter->num_other_vectors) { + rnp_enable_sriov(adapter); + pci_sriov_set_totalvfs(pdev, hw->max_vfs - 1); + } +#endif + + /* MTU range: 68 - 9710 */ + netdev->min_mtu = hw->min_length; + netdev->max_mtu = hw->max_length - (ETH_HLEN + 2 * ETH_FCS_LEN); + + if (hw->feature_flags & RNP_NET_FEATURE_SG) + netdev->features |= NETIF_F_SG; + if (hw->feature_flags & RNP_NET_FEATURE_TSO) + netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; + if (hw->feature_flags & RNP_NET_FEATURE_RX_HASH) + netdev->features |= NETIF_F_RXHASH; + if (hw->feature_flags & RNP_NET_FEATURE_RX_CHECKSUM) + netdev->features |= NETIF_F_RXCSUM; + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + netdev->features |= NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; + + if (hw->feature_flags & RNP_NET_FEATURE_USO) + netdev->features |= NETIF_F_GSO_UDP_L4; + + netdev->features |= NETIF_F_HIGHDMA; + + if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) { + netdev->gso_partial_features = RNP_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + RNP_GSO_PARTIAL_FEATURES; + } + + netdev->hw_features |= netdev->features; + + if (hw->ncsi_en) { + hw->feature_flags &= ~RNP_NET_FEATURE_VLAN_OFFLOAD; + } + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + } + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX; + } + netdev->hw_features |= NETIF_F_RXALL; + if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER) + netdev->hw_features |= NETIF_F_NTUPLE; + if (hw->feature_flags & RNP_NET_FEATURE_RX_FCS) + netdev->hw_features |= NETIF_F_RXFCS; + if (hw->feature_flags & RNP_NET_FEATURE_HW_TC) + netdev->hw_features |= NETIF_F_HW_TC; + + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; + netdev->mpls_features |= 
NETIF_F_HW_CSUM; + + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) + netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; + if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) { + netdev->features |= NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + } + if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) { + netdev->features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX; + } + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + + if (adapter->flags2 & RNP_FLAG2_RSC_CAPABLE) + netdev->hw_features |= NETIF_F_LRO; + + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->priv_flags |= IFF_SUPP_NOFCS; + +#if IS_ENABLED(CONFIG_DCB) + rnp_dcb_init(netdev, adapter); +#endif + + if (adapter->flags2 & RNP_FLAG2_RSC_ENABLED) + netdev->features |= NETIF_F_LRO; + + eth_hw_addr_set(netdev, hw->mac.perm_addr); + memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); + pr_info("dev mac:%pM \n", netdev->dev_addr); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); + err = -EIO; + goto err_sw_init; + } + ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); + + timer_setup(&adapter->service_timer, rnp_service_timer, 0); + + if (module_enable_ptp) { + /* setup ptp_addr according to mac type */ + switch (adapter->hw.mac.mac_type) { + case mac_dwc_xlg: + adapter->ptp_addr = adapter->hw.mac.mac_addr + 0xd00; + adapter->gmac4 = 1; + break; + case mac_dwc_g: + adapter->ptp_addr = adapter->hw.mac.mac_addr + 0x700; + adapter->gmac4 = 0; + break; + } + adapter->flags2 |= RNP_FLAG2_PTP_ENABLED; + if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED) { + adapter->tx_timeout_factor = 10; + INIT_WORK(&adapter->tx_hwtstamp_work, + rnp_tx_hwtstamp_work); + } + } + + INIT_WORK(&adapter->service_task, rnp_service_task); + clear_bit(__RNP_SERVICE_SCHED, &adapter->state); + + if (fix_eth_name) + strncpy(netdev->name, adapter->name, sizeof(netdev->name) - 1); + else { + strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + } + + err = rnp_init_interrupt_scheme(adapter); + if (err) + goto err_sw_init; + + err = register_mbx_irq(adapter); + if (err) + goto err_register; + +#ifdef CONFIG_PCI_IOV + rnp_enable_sriov_true(adapter); +#endif + + /* WOL not supported for all devices */ + { + struct ethtool_wolinfo wol; + + if (rnp_wol_exclusion(adapter, &wol) || + !device_can_wakeup(&adapter->pdev->dev)) + adapter->wol = 0; + } + /* reset the hardware with the new settings */ + err = hw->ops.start_hw(hw); + rnp_fix_dma_tx_status(adapter); + + if (!fix_eth_name) + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); + err = register_netdev(netdev); + if (err) { + e_dev_err("register_netdev failed!\n"); + goto err_register; + } + + /* power down the optics for n10 SFP+ fiber */ + if (hw->ops.disable_tx_laser) + hw->ops.disable_tx_laser(hw); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n", + adapter->num_vfs); + for (i = 0; i < adapter->num_vfs; i++) + rnp_vf_configuration(pdev, (i | 0x10000000)); + } + + if (rnp_mbx_lldp_status_get(hw) == 1) { + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + } + + if (rnp_sysfs_init(adapter)) + e_err(probe, "failed to allocate sysfs resources\n"); + + rnp_dbg_adapter_init(adapter); + /* only pf0 download mpe */ + if (rnp_is_pf0(&adapter->hw) && 
rnp_can_rpu_start(adapter)) { + rnp_rpu_mpe_start(adapter); + } + + if (hw->ncsi_en) { + hw->ops.set_mac_rx(hw, true); + rnp_mbx_probe_stat_set(hw, MBX_PROBE); + } + + return 0; +err_register: + remove_mbx_irq(adapter); + rnp_clear_interrupt_scheme(adapter); +err_sw_init: + rnp_disable_sriov(adapter); + adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP; +err_free_net: + free_netdev(netdev); + return err; +} + +/** + * rnp_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in rnp_pci_tbl + * + * Returns 0 on success, negative on failure + * + * rnp_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int rnp_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct rnp_adapter *adapter; + struct rnp_info *ii = rnp_info_tbl[id->driver_data]; + int err; + + /* Catch broken hardware that put the wrong VF device ID in + * the PCIe SR-IOV capability. + */ + if (pdev->is_virtfn) { + WARN(1, "%s (%hx:%hx) should not be a VF!\n", pci_name(pdev), + pdev->vendor, pdev->device); + return -EINVAL; + } + /* not support bus reset*/ + pdev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + err = pci_enable_device_mem(pdev); + if (err) + return err; + + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(56)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56))) { + enable_hi_dma = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + enable_hi_dma = 0; + } + + err = pci_request_mem_regions(pdev, rnp_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + pci_set_master(pdev); + pci_save_state(pdev); + + err = rnp_add_adpater(pdev, ii, &adapter); + if (err) + goto err_regions; + + return 0; +err_regions: + pci_release_mem_regions(pdev); +err_dma: +err_pci_reg: + return err; +} + +/** + * rnp_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * rnp_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void rnp_remove(struct pci_dev *pdev) +{ + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + +#ifdef CONFIG_PCI_IOV + /* + * Only disable SR-IOV on unload if the user specified the now + * deprecated max_vfs module parameter. 
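+ * (here the driver simply tears any VFs down unconditionally).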
+ */ + rnp_disable_sriov(adapter); +#endif + rnp_rm_adpater(adapter); + + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver rnp_driver = { + .name = rnp_driver_name, + .id_table = rnp_pci_tbl, + .probe = rnp_probe, + .remove = rnp_remove, +#ifdef CONFIG_PM + .suspend = rnp_suspend, + .resume = rnp_resume, +#endif + .shutdown = rnp_shutdown, + .sriov_configure = rnp_pci_sriov_configure, +}; + +static int __init rnp_init_module(void) +{ + int ret; + + pr_info("%s - version %s\n", rnp_driver_string, rnp_driver_version); + pr_info("%s \n", rnp_copyright); + rnp_wq = create_singlethread_workqueue(rnp_driver_name); + + if (!rnp_wq) { + pr_err("%s: Failed to create workqueue\n", rnp_driver_name); + return -ENOMEM; + } + + rnp_dbg_init(); + + ret = pci_register_driver(&rnp_driver); + if (ret) { + destroy_workqueue(rnp_wq); + rnp_dbg_exit(); + return ret; + } + + return 0; +} +module_init(rnp_init_module); + +static void __exit rnp_exit_module(void) +{ + pci_unregister_driver(&rnp_driver); + + destroy_workqueue(rnp_wq); + + rnp_dbg_exit(); + + rcu_barrier(); /* Wait for completion of call_rcu()'s */ +} + +module_exit(rnp_exit_module); diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx.c b/drivers/net/ethernet/mucse/rnp/rnp_mbx.c new file mode 100644 index 0000000000000..e4399ad1f73ae --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include "rnp.h" +#include "rnp_type.h" +#include "rnp_common.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" + +#define VF2PF_MBOX_VEC(mbx, vf) (mbx->vf2pf_mbox_vec_base + 4 * (vf)) +#define CPU2PF_MBOX_VEC(mbx) (mbx->cpu2pf_mbox_vec) +/* == PF <--> VF mailbox ==== */ +#define SHARE_MEM_BYTES 64 +#define PF_VF_SHM(mbx, vf) \ + (mbx->pf_vf_shm_base + \ + mbx->mbx_mem_size * vf) +/* for PF1 rtl will remap 6000 to 0xb000 */ +#define PF2VF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 0) +#define VF2PF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 4) +#define PF_VF_SHM_DATA(mbx, vf) (PF_VF_SHM(mbx, vf) + 8) +#define PF2VF_MBOX_CTRL(mbx, vf) (mbx->pf2vf_mbox_ctrl_base + 4 * vf) +#define PF_VF_MBOX_MASK_LO(mbx) (mbx->pf_vf_mbox_mask_lo) +#define PF_VF_MBOX_MASK_HI(mbx) (mbx->pf_vf_mbox_mask_hi) + +/* === CPU <--> PF === */ +#define CPU_PF_SHM(mbx) (mbx->cpu_pf_shm_base) +#define CPU2PF_COUNTER(mbx) (CPU_PF_SHM(mbx) + 0) +#define PF2CPU_COUNTER(mbx) (CPU_PF_SHM(mbx) + 4) +#define CPU_PF_SHM_DATA(mbx) (CPU_PF_SHM(mbx) + 8) +#define PF2CPU_MBOX_CTRL(mbx) (mbx->pf2cpu_mbox_ctrl) +#define CPU_PF_MBOX_MASK(mbx) (mbx->cpu_pf_mbox_mask) +#define MBOX_CTRL_REQ (1 << 0) /* WO */ +#define MBOX_CTRL_PF_HOLD_SHM (1 << 3) /* VF:RO, PF:WR */ +#define MBOX_IRQ_EN 0 +#define MBOX_IRQ_DISABLE 1 +#define mbx_prd32(hw, reg) prnp_rd_reg((hw)->hw_addr + (reg)) +#define mbx_rd32(hw, reg) rnp_rd_reg((hw)->hw_addr + (reg)) +#define mbx_pwr32(hw, reg, val) p_rnp_wr_reg((hw)->hw_addr + (reg), (val)) +#define mbx_wr32(hw, reg, val) rnp_wr_reg((hw)->hw_addr + (reg), (val)) + +/** + * rnp_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox/vfnum to read + * + * returns SUCCESS if it successfully read message from buffer + **/ +s32 rnp_read_mbx(struct rnp_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + /* limit read to size of mailbox */ + if 
(size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * rnp_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 rnp_write_mbx(struct rnp_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = 0; + + if (size > mbx->size) + ret_val = RNP_ERR_MBX; + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +static inline u16 rnp_mbx_get_req(struct rnp_hw *hw, int reg) +{ + mb(); + return ioread32(hw->hw_addr + reg) & 0xffff; +} + +static inline u16 rnp_mbx_get_ack(struct rnp_hw *hw, int reg) +{ + mb(); + return (mbx_rd32(hw, reg) >> 16); +} + +static inline void rnp_mbx_inc_pf_req(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + u16 req; + int reg; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 v; + + reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) : + PF2VF_COUNTER(mbx, mbx_id); + v = mbx_rd32(hw, reg); + + req = (v & 0xffff); + req++; + v &= ~(0x0000ffff); + v |= req; + mb(); + mbx_wr32(hw, reg, v); + + /* update stats */ + hw->mbx.stats.msgs_tx++; +} + +static inline void rnp_mbx_inc_pf_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + u16 ack; + struct rnp_mbx_info *mbx = &hw->mbx; + int reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) : + PF2VF_COUNTER(mbx, mbx_id); + u32 v = mbx_rd32(hw, reg); + + ack = (v >> 16) & 0xffff; + ack++; + v &= ~(0xffff0000); + v |= (ack << 16); + mb(); + mbx_wr32(hw, reg, v); + + /* update stats */ + hw->mbx.stats.msgs_rx++; +} + +/** + * rnp_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 rnp_check_for_msg(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * rnp_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 rnp_check_for_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * rnp_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 rnp_poll_for_msg(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + udelay(mbx->usec_delay); + } + +out: + return countdown ? 
0 : -ETIME; +} + +/** + * rnp_poll_for_ack - Wait for message acknowledgment + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgment + **/ +static s32 rnp_poll_for_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) { + printk("mbx poll for ack ack timeout\n"); + break; + } + udelay(mbx->usec_delay); + } + +out: + return countdown ? 0 : RNP_ERR_MBX; +} + +/** + * rnp_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 rnp_read_posted_mbx(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = rnp_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * rnp_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 rnp_write_posted_mbx(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg and hold buffer lock */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = rnp_poll_for_ack(hw, mbx_id); + +out: + return ret_val; +} + +/** + * rnp_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 rnp_check_for_msg_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + s32 ret_val = RNP_ERR_MBX; + u16 hw_req_count = 0; + struct rnp_mbx_info *mbx = &hw->mbx; + + if (mbx_id == MBX_CM3CPU) { + hw_req_count = rnp_mbx_get_req(hw, CPU2PF_COUNTER(mbx)); + if (mbx->mbx_feature & MBX_FEATURE_NO_ZERO) { + if ((hw_req_count != 0) && + (hw_req_count != hw->mbx.cpu_req)) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + + } else { + if (hw_req_count != hw->mbx.cpu_req) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + } + } else { + if (rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)) != + hw->mbx.vf_req[mbx_id]) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + } + + return ret_val; +} + +/** + * rnp_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 rnp_check_for_ack_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + s32 ret_val = RNP_ERR_MBX; + struct rnp_mbx_info *mbx = &hw->mbx; + + if (mbx_id 
== MBX_CM3CPU) { + if (rnp_mbx_get_ack(hw, CPU2PF_COUNTER(mbx)) != + hw->mbx.cpu_ack) { + ret_val = 0; + hw->mbx.stats.acks++; + } + } else { + if (rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id)) != + hw->mbx.vf_ack[mbx_id]) { + ret_val = 0; + hw->mbx.stats.acks++; + } + } + + return ret_val; +} + +/** + * rnp_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @mbx_id: the VF index or CPU + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 rnp_obtain_mbx_lock_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) +{ + int try_cnt = 5000; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + + while (try_cnt-- > 0) { + /* Take ownership of the buffer */ + mbx_wr32(hw, CTRL_REG, MBOX_CTRL_PF_HOLD_SHM); + wmb(); + /* reserve mailbox for cm3 use */ + if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_PF_HOLD_SHM) + return 0; + udelay(100); + } + + rnp_err("%s: failed to get:%d lock \n", __func__, mbx_id); + return EPERM; +} + +/** + * rnp_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + s32 ret_val = 0; + u16 i; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 DATA_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) : + PF_VF_SHM_DATA(mbx, mbx_id); + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + + if (size > RNP_VFMAILBOX_SIZE) { + printk("%s: size:%d should <%d\n", __func__, size, + RNP_VFMAILBOX_SIZE); + return -EINVAL; + } + + /* lock the mailbox to prevent pf/vf/cpu race condition */ + ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id); + if (ret_val) { + printk("%s: get mbx:%d wlock failed. ret:%d. req:0x%08x-0x%08x\n", + __func__, mbx_id, ret_val, msg[0], msg[1]); + goto out_no_write; + } + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) { + mbx_wr32(hw, DATA_REG + i * 4, msg[i]); + rnp_logd(LOG_MBX_OUT, " w-mbx:0x%x <= 0x%x\n", + DATA_REG + i * 4, msg[i]); + } + + /* flush msg and acks as we are overwriting the message buffer */ + if (mbx_id == MBX_CM3CPU) { + hw->mbx.cpu_ack = rnp_mbx_get_ack(hw, CPU2PF_COUNTER(mbx)); + } else { + hw->mbx.vf_ack[mbx_id] = + rnp_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id)); + } + rnp_mbx_inc_pf_req(hw, mbx_id); + + /* Interrupt VF/CM3 to tell it a message + * has been sent and release buffer + */ + if (mbx->mbx_feature & MBX_FEATURE_WRITE_DELAY) + udelay(300); + mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ); + +out_no_write: + /* sometimes happen */ + + return ret_val; +} + +/** + * rnp_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF/CPU request so no polling for message is needed. + **/ +static s32 rnp_read_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + s32 ret_val = -EIO; + u32 i; + struct rnp_mbx_info *mbx = &hw->mbx; + u32 BUF_REG = (mbx_id == MBX_CM3CPU) ? 
CPU_PF_SHM_DATA(mbx) : + PF_VF_SHM_DATA(mbx, mbx_id); + u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + if (size > RNP_VFMAILBOX_SIZE) { + printk("%s: size:%d should <%d\n", __func__, size, + RNP_VFMAILBOX_SIZE); + return -EINVAL; + } + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = rnp_obtain_mbx_lock_pf(hw, mbx_id); + if (ret_val) + goto out_no_read; + + mb(); + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) { + msg[i] = mbx_rd32(hw, BUF_REG + 4 * i); + rnp_logd(LOG_MBX_IN, " r-mbx:0x%x => 0x%x\n", BUF_REG + 4 * i, + msg[i]); + } + mbx_wr32(hw, BUF_REG, 0); + + /* update req. used by rnpvf_check_for_msg_vf */ + if (mbx_id == MBX_CM3CPU) { + hw->mbx.cpu_req = rnp_mbx_get_req(hw, CPU2PF_COUNTER(mbx)); + } else { + hw->mbx.vf_req[mbx_id] = + rnp_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)); + } + /* this ack maybe too earier? */ + /* Acknowledge receipt and release mailbox, then we're done */ + rnp_mbx_inc_pf_ack(hw, mbx_id); + + /* free ownership of the buffer */ + mbx_wr32(hw, CTRL_REG, 0); + +out_no_read: + + return ret_val; +} + +static void rnp_mbx_reset(struct rnp_hw *hw) +{ + int idx, v; + struct rnp_mbx_info *mbx = &hw->mbx; + + for (idx = 0; idx < hw->max_vfs; idx++) { + v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); + hw->mbx.vf_req[idx] = v & 0xffff; + hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff; + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + } + + v = mbx_rd32(hw, CPU2PF_COUNTER(mbx)); + hw->mbx.cpu_req = v & 0xffff; + hw->mbx.cpu_ack = (v >> 16) & 0xffff; + + printk("now mbx.cpu_req %d mbx.cpu_ack %d\n", hw->mbx.cpu_req, + hw->mbx.cpu_ack); + /* release pf->cm3 buffer lock */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); + + if (PF_VF_MBOX_MASK_LO(mbx)) + wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + 0); /* allow vf to vectors */ + if (PF_VF_MBOX_MASK_HI(mbx)) + wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + + /* allow CM3CPU to PF MBX IRQ */ + wr32(hw, CPU_PF_MBOX_MASK(mbx), 0); +} + +static int rnp_mbx_configure_pf(struct rnp_hw *hw, int nr_vec, bool enable) +{ + int idx = 0; + u32 v; + struct rnp_mbx_info *mbx = &hw->mbx; + + if (enable) { + for (idx = 0; idx < hw->max_vfs; idx++) { + v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); + hw->mbx.vf_req[idx] = v & 0xffff; + hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff; + + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + } + /* reset pf->cm3 status */ + v = mbx_rd32(hw, CPU2PF_COUNTER(mbx)); + hw->mbx.cpu_req = v & 0xffff; + hw->mbx.cpu_ack = (v >> 16) & 0xffff; + /* release pf->cm3 buffer lock */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); + /* allow VF to PF MBX IRQ */ + for (idx = 0; idx < hw->max_vfs; idx++) { + mbx_wr32(hw, VF2PF_MBOX_VEC(mbx, idx), + nr_vec); + /* vf to pf req interrupt */ + } + + if (PF_VF_MBOX_MASK_LO(mbx)) + wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + 0); + /* allow vf to vectors */ + + if (PF_VF_MBOX_MASK_HI(mbx)) + wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + + /* bind cm3cpu mbx to irq */ + wr32(hw, CPU2PF_MBOX_VEC(mbx), + nr_vec); + /* cm3 and VF63 share #63 irq */ + /* allow CM3CPU to PF MBX IRQ */ + wr32(hw, CPU_PF_MBOX_MASK(mbx), 0); + + rnp_dbg("[%s] mbx-vector:%d\n", __func__, nr_vec); + + } else { + if (PF_VF_MBOX_MASK_LO(mbx)) + wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + 0xffffffff); + if (PF_VF_MBOX_MASK_HI(mbx)) + wr32(hw, PF_VF_MBOX_MASK_HI(mbx), + 0xffffffff); + + /* disable CM3CPU to PF MBX IRQ */ + wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffffffff); + + /* reset vf->pf status/ctrl */ + for (idx = 0; idx < hw->max_vfs; idx++) + mbx_wr32(hw, 
PF2VF_MBOX_CTRL(mbx, idx), 0); + /* reset pf->cm3 ctrl */ + mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0); + /* used to sync link status */ + wr32(hw, RNP_DMA_DUMY, 0); + } + return 0; +} + +unsigned int rnp_mbx_change_timeout(struct rnp_hw *hw, int timeout_ms) +{ + unsigned int old_timeout = hw->mbx.timeout; + + hw->mbx.timeout = timeout_ms * 1000 / hw->mbx.usec_delay; + + return old_timeout; +} + +/** + * rnp_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 rnp_init_mbx_params_pf(struct rnp_hw *hw) +{ + struct rnp_mbx_info *mbx = &hw->mbx; + + mbx->usec_delay = 100; + /* wait 5s */ + mbx->timeout = (4 * 1000 * 1000) / mbx->usec_delay; + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + mbx->size = RNP_VFMAILBOX_SIZE; + mutex_init(&mbx->lock); + rnp_mbx_reset(hw); + + return 0; +} + +struct rnp_mbx_operations mbx_ops_generic = { + .init_params = rnp_init_mbx_params_pf, + .read = rnp_read_mbx_pf, + .write = rnp_write_mbx_pf, + .read_posted = rnp_read_posted_mbx, + .write_posted = rnp_write_posted_mbx, + .check_for_msg = rnp_check_for_msg_pf, + .check_for_ack = rnp_check_for_ack_pf, + .configure = rnp_mbx_configure_pf, +}; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx.h b/drivers/net/ethernet/mucse/rnp/rnp_mbx.h new file mode 100644 index 0000000000000..692f5b75ce89d --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_MBX_H_ +#define _RNP_MBX_H_ + +#include "rnp_type.h" + +#define RNP_VFMAILBOX_SIZE 14 /* 16 32 bit words - 64 bytes */ +#define RNP_ERR_MBX -100 +#define RNP_VT_MSGTYPE_ACK 0x80000000 +/* Messages below or'd with */ +/* this are the ACK */ +#define RNP_VT_MSGTYPE_NACK 0x40000000 +/* Messages below or'd with + * this are the NACK + */ +#define RNP_VT_MSGTYPE_CTS 0x20000000 +/* Indicates that VF is still + *clear to send requests + */ +#define RNP_VT_MSGINFO_SHIFT 14 +/* bits 23:16 are used for exra info for certain messages */ +#define RNP_VT_MSGINFO_MASK (0x7F << RNP_VT_MSGINFO_SHIFT) +/* VLAN pool filtering masks */ +#define RNP_VLVF_VIEN 0x80000000 /* filter is valid */ +#define RNP_VLVF_ENTRIES 64 +#define RNP_VLVF_VLANID_MASK 0x00000FFF +/* + * mailbox msg_data + * + * + * + */ +#define RNP_VNUM_OFFSET (21) +#define RNP_VF_MASK (0x7f << 21) +#define RNP_MAIL_CMD_MASK 0x3fff +/* mailbox API, legacy requests */ +#define RNP_VF_RESET 0x01 /* VF requests reset */ +#define RNP_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define RNP_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define RNP_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + +/* mailbox API, version 1.0 VF requests */ +#define RNP_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define RNP_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define RNP_VF_GET_MACADDR 0x07 /* get vf macaddr */ +#define RNP_VF_API_NEGOTIATE 0x08 /* negotiate API version */ + +/* mailbox API, version 1.1 VF requests */ +#define RNP_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define RNP_VF_SET_VLAN_STRIP 0x0a /* VF requests PF to set VLAN STRIP */ +#define RNP_VF_REG_RD 0x0b /* vf read reg */ +#define RNP_VF_GET_MTU 0x0c /* vf get pf ethtool setup */ +#define RNP_VF_SET_MTU 0x0d /* vf get pf ethtool setup */ +#define RNP_VF_GET_FW 0x0e /* vf get firmware 
version */ +#define RNP_VF_GET_LINK 0x10 /* get link status */ +#define RNP_VF_RESET_PF 0x11 +#define RNP_VF_GET_DMA_FRAG 0x12 +#define RNP_VF_SET_PROMISCE 0x16 +#define RNP_PF_SET_FCS 0x10 /* PF set fcs status */ +#define RNP_PF_SET_PAUSE 0x11 /* PF set pause status */ +#define RNP_PF_SET_FT_PADDING 0x12 /* PF set ft padding status */ +#define RNP_PF_SET_VLAN_FILTER 0x13 /* PF set ntuple status */ +#define RNP_PF_SET_VLAN 0x14 /* PF set ntuple status */ +#define RNP_PF_SET_LINK 0x15 /* PF set ntuple status */ +#define RNP_PF_SET_MTU 0x16 /* PF set ntuple status */ +#define RNP_PF_SET_RESET 0x17 /* PF set ntuple status */ +#define RNP_PF_LINK_UP (1 << 31) +#define RNP_PF_REMOVE 0x0f +/* GET_QUEUES return data indices within the mailbox */ +#define RNP_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define RNP_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define RNP_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define RNP_VF_DEF_QUEUE 4 /* Default queue offset */ +#define RNP_VF_QUEUE_START 5 /* Default queue offset */ +#define RNP_VF_QUEUE_DEPTH 6 /* ring depth */ + +/* length of permanent address message returned from PF */ +#define RNP_VF_PERMADDR_MSG_LEN 11 +/* word in permanent address message with the current multicast type */ +#define RNP_VF_MC_TYPE_WORD 3 +#define RNP_VF_DMA_VERSION_WORD 4 +#define RNP_VF_VLAN_WORD 5 +#define RNP_VF_PHY_TYPE_WORD 6 +#define RNP_VF_FW_VERSION_WORD 7 +#define RNP_VF_LINK_STATUS_WORD 8 +#define RNP_VF_AXI_MHZ 9 +#define PF_FEATRURE_VLAN_FILTER BIT(0) +#define PF_NCSI_EN BIT(1) +#define RNP_VF_FEATURE 10 + +#define RNP_PF_CONTROL_PRING_MSG 0x0100 /* PF control message */ + +#define RNP_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define RNP_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +enum MBX_ID { + MBX_VF0 = 0, + MBX_VF1, + MBX_VF2, + MBX_VF3, + MBX_VF4, + MBX_VF5, + MBX_VF6, + MBX_VF7, + MBX_VF8, + MBX_VF9, + MBX_VF10, + MBX_VF11, + MBX_VF12, + MBX_VF13, + MBX_VF14, + MBX_VF15, + MBX_VF16, + MBX_VF17, + MBX_VF18, + MBX_VF19, + MBX_VF20, + MBX_VF21, + MBX_VF22, + MBX_VF23, + MBX_VF24, + MBX_VF25, + MBX_VF26, + MBX_VF27, + MBX_VF28, + MBX_VF29, + MBX_VF30, + MBX_VF31, + MBX_VF32, + MBX_VF33, + MBX_VF34, + MBX_VF35, + MBX_VF36, + MBX_VF37, + MBX_VF38, + MBX_VF39, + MBX_VF40, + MBX_VF41, + MBX_VF42, + MBX_VF43, + MBX_VF44, + MBX_VF45, + MBX_VF46, + MBX_VF47, + MBX_VF48, + MBX_VF49, + MBX_VF50, + MBX_VF51, + MBX_VF52, + MBX_VF53, + MBX_VF54, + MBX_VF55, + MBX_VF56, + MBX_VF57, + MBX_VF58, + MBX_VF59, + MBX_VF60, + MBX_VF61, + MBX_VF62, + //... 
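+ /* the CM3 firmware CPU shares mailbox IRQ vector 63 with VF63 (see rnp_mbx_configure_pf()) */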
+ MBX_VF63, + MBX_CM3CPU, + MBX_FW = MBX_CM3CPU, + MBX_VFCNT +}; + +enum PF_STATUS { + PF_FCS_STATUS, + PF_PAUSE_STATUS, + PF_FT_PADDING_STATUS, + PF_VLAN_FILTER_STATUS, + PF_SET_VLAN_STATUS, + PF_SET_LINK_STATUS, + PF_SET_MTU, + PF_SET_RESET, +}; + +s32 rnp_read_mbx(struct rnp_hw *, u32 *, u16, enum MBX_ID); +s32 rnp_write_mbx(struct rnp_hw *, u32 *, u16, enum MBX_ID); +s32 rnp_check_for_msg(struct rnp_hw *, enum MBX_ID); +s32 rnp_check_for_ack(struct rnp_hw *, enum MBX_ID); +s32 rnp_check_for_rst(struct rnp_hw *, enum MBX_ID); +s32 rnp_init_mbx_params_pf(struct rnp_hw *); +extern struct rnp_mbx_operations mbx_ops_generic; +#define MBX_IFDOWN (0) +#define MBX_IFUP (1) +#define MBX_PROBE (2) +#define MBX_REMOVE (3) +void rnp_mbx_probe_stat_set(struct rnp_hw *hw, int stat); +int rnp_fw_get_macaddr(struct rnp_hw *hw, int pfvfnum, u8 *mac_addr, int lane); +int rnp_mbx_fw_reset_phy(struct rnp_hw *hw); +unsigned int rnp_mbx_change_timeout(struct rnp_hw *hw, int timeout_ms); +struct rnp_info; +int rnp_mbx_get_capability(struct rnp_hw *hw, struct rnp_info *info); +int rnp_mbx_link_event_enable(struct rnp_hw *hw, int enable); +int rnp_mbx_get_link_stat(struct rnp_hw *hw); +int rnp_mbx_ifup_down(struct rnp_hw *hw, int up); +int rnp_mbx_led_set(struct rnp_hw *hw, int value); +int rnp_mbx_get_dump(struct rnp_hw *hw, int flags, u8 *data_out, int buflen); +int rnp_mbx_set_dump(struct rnp_hw *hw, int flag); +int rnp_mbx_sfp_write(struct rnp_hw *hw, int sfp_addr, int reg, short v); +int rnp_mbx_sfp_module_eeprom_info(struct rnp_hw *hw, int sfp_addr, int reg, + int data_len, u8 *buf); +int rnp_mbx_get_temp(struct rnp_hw *hw, int *voltage); +int rnp_mbx_phy_link_set(struct rnp_hw *hw, int adv, int autoneg, int speed, + int duplex, int tp_mdix_ctrl); +int rnp_mbx_phy_pause_set(struct rnp_hw *hw, int pause_mode); +int rnp_mbx_phy_write(struct rnp_hw *hw, u32 reg, u32 val); +int rnp_mbx_phy_read(struct rnp_hw *hw, u32 reg, u32 *val); + +int rnp_maintain_req(struct rnp_hw *hw, int cmd, int arg0, int req_data_bytes, + int reply_bytes, dma_addr_t dma_phy_addr); +int rnp_mbx_get_lane_stat(struct rnp_hw *hw); +int rnp_mbx_wol_set(struct rnp_hw *hw, u32 mode); +int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status); +int rnp_mbx_ifinsmod(struct rnp_hw *hw, int status); +int rnp_mbx_ifforce_control_mac(struct rnp_hw *hw, int status); +int wait_mbx_init_done(struct rnp_hw *hw); +int rnp_set_lane_fun(struct rnp_hw *hw, int fun, int value0, int value1, + int value2, int value3); +void rnp_link_stat_mark(struct rnp_hw *hw, int up); +int rnp_mbx_reg_writev(struct rnp_hw *hw, int fw_reg, int value[4], int bytes); +int rnp_mbx_reg_write(struct rnp_hw *hw, int fw_reg, int value); +int rnp_mbx_fw_reg_read(struct rnp_hw *hw, int fw_reg); +int rnp_mbx_force_speed(struct rnp_hw *hw, int speed); + +#define cm3_reg_write32(hw, cm3_rpu_reg, v) \ + rnp_mbx_reg_write((hw), (cm3_rpu_reg), (v)) + +#define cm3_reg_read32(hw, cm3_rpu_reg) rnp_mbx_fw_reg_read((hw), (cm3_rpu_reg)) + +int rnp_mbx_lldp_status_get(struct rnp_hw *hw); +int rnp_mbx_lldp_port_enable(struct rnp_hw *hw, bool enable); +int rnp_mbx_ddr_csl_enable(struct rnp_hw *hw, int enable, dma_addr_t dma_phy, + int bytes); +#endif /* _RNP_MBX_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c new file mode 100644 index 0000000000000..dcd07c615fcc4 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c @@ -0,0 +1,1495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" + +#define RNP_FW_MAILBOX_SIZE RNP_VFMAILBOX_SIZE + +static bool is_cookie_valid(struct rnp_hw *hw, void *cookie) +{ + unsigned char* begin = (unsigned char*)(&hw->mbx.cookie_pool.cookies[0]); + unsigned char* end = (unsigned char*)(&hw->mbx.cookie_pool.cookies[MAX_COOKIES_ITEMS]); + if(((unsigned char*)cookie)>=begin && ((unsigned char*)cookie)< end){ + return true; + } + return false; +} + +static struct mbx_req_cookie *mbx_cookie_zalloc(struct rnp_hw *hw, int priv_len) +{ + struct mbx_req_cookie *cookie = NULL; + int loop_cnt = MAX_COOKIES_ITEMS, i; + bool find = false; + + u64 now_jiffies = get_jiffies_64(); + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] get mbx lock failed,priv_len:%d\n", __func__, priv_len); + return NULL; + } + i = hw->mbx.cookie_pool.next_idx; + while(loop_cnt--){ + cookie = &(hw->mbx.cookie_pool.cookies[i]); + if(cookie->stat == COOKIE_FREE || + /* force free cookie if cookie not freed after 120 seconds */ + time_after64(now_jiffies,cookie->alloced_jiffies + (2 * 60) * HZ)){ + find = true; + cookie->alloced_jiffies = get_jiffies_64(); + cookie->stat = COOKIE_ALLOCED; + hw->mbx.cookie_pool.next_idx = (i+1)%MAX_COOKIES_ITEMS; + break; + } + i = (i+1)%MAX_COOKIES_ITEMS; + } + mutex_unlock(&hw->mbx.lock); + + if (!find) { + rnp_err("[%s] no free cookies availble\n", __func__); + return NULL; + } + + cookie->timeout_jiffes = 30 * HZ; + cookie->priv_len = priv_len; + + return cookie; +} + +static void mbx_free_cookie(struct mbx_req_cookie *cookie, bool force_free) +{ + if (!cookie) + return; + + if (force_free) { + cookie->stat = COOKIE_FREE; + } else { + cookie->stat = COOKIE_FREE_WAIT_TIMEOUT; + } +} + +static int rnp_mbx_write_posted_locked(struct rnp_hw *hw, struct mbx_fw_cmd_req *req) +{ + int err = 0; + int retry = 3; + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] get mbx lock failed opcode:0x%x\n", __func__, + req->opcode); + return -EAGAIN; + } + + rnp_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__, + hw->pfvfnum, &hw->mbx.lock, hw, req->opcode); + +try_again: + retry--; + if (retry < 0) { + mutex_unlock(&hw->mbx.lock); + rnp_err("%s: write_posted failed! err:0x%x opcode:0x%x\n", + __func__, err, req->opcode); + return -EIO; + } + + err = hw->mbx.ops.write_posted( + hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + goto try_again; + } + mutex_unlock(&hw->mbx.lock); + + return err; +} + +static void rnp_link_stat_mark_reset(struct rnp_hw *hw) +{ + wr32(hw, RNP_DMA_DUMY, 0xa5a40000); +} + +static void rnp_link_stat_mark_disable(struct rnp_hw *hw) +{ + wr32(hw, RNP_DMA_DUMY, 0); +} + +static int rnp_mbx_fw_post_req(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, + struct mbx_req_cookie *cookie) +{ + int err = 0; + struct rnp_adapter *adpt = hw->back; + + cookie->errcode = 0; + cookie->done = 0; + init_waitqueue_head(&cookie->wait); + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] wait mbx lock timeout pfvf:0x%x opcode:0x%x\n", + __func__, hw->pfvfnum, req->opcode); + return -EAGAIN; + } + + rnp_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__, + hw->pfvfnum, &hw->mbx.lock, hw, req->opcode); + + err = rnp_write_mbx(hw, (u32 *)req, + (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + rnp_err("rnp_write_mbx failed! 
err:%d opcode:0x%x\n", err, + req->opcode); + mutex_unlock(&hw->mbx.lock); + return err; + } + + if (cookie->timeout_jiffes != 0) { + int retry_cnt = 4; +retry: + err = wait_event_interruptible_timeout(cookie->wait, + cookie->done == 1, + cookie->timeout_jiffes); + + if (err == -ERESTARTSYS && retry_cnt) { + retry_cnt--; + goto retry; + } + if (err == 0) { + rnp_err("[%s] %s failed! pfvfnum:0x%x hw:%p timeout err:%d opcode:%x\n", + adpt->name, __func__, hw->pfvfnum, hw, err, + req->opcode); + err = -ETIME; + } else if (err > 0) { + err = 0; + } + } else { + wait_event_interruptible(cookie->wait, cookie->done == 1); + } + + mutex_unlock(&hw->mbx.lock); + + if (cookie->errcode) { + err = cookie->errcode; + } + + return err; +} + +static int rnp_fw_send_cmd_wait(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, + struct mbx_fw_cmd_reply *reply) +{ + int err; + int retry_cnt = 3; + + if (!hw || !req || !reply || !hw->mbx.ops.read_posted) { + printk("error: hw:%p req:%p reply:%p\n", hw, req, reply); + return -EINVAL; + } + + if (mutex_lock_interruptible(&hw->mbx.lock)) { + rnp_err("[%s] get mbx lock failed opcode:0x%x\n", __func__, + req->opcode); + return -EAGAIN; + } + + rnp_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__, + hw->pfvfnum, &hw->mbx.lock, hw, req->opcode); + err = hw->mbx.ops.write_posted( + hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + if (err) { + rnp_err("%s: write_posted failed! err:0x%x opcode:0x%x\n", + __func__, err, req->opcode); + mutex_unlock(&hw->mbx.lock); + return err; + } + +retry: + retry_cnt--; + if (retry_cnt < 0) { + rnp_err("retry timeout opcode:0x%x\n", req->opcode); + return -EIO; + } + err = hw->mbx.ops.read_posted(hw, (u32 *)reply, sizeof(*reply) / 4, + MBX_FW); + if (err) { + rnp_err("%s: read_posted failed! 
err:0x%x opcode:0x%x\n", + __func__, err, req->opcode); + mutex_unlock(&hw->mbx.lock); + return err; + } + if (reply->opcode != req->opcode) + goto retry; + + mutex_unlock(&hw->mbx.lock); + + if (reply->error_code) { + rnp_err("%s: reply err:0x%x req:0x%x\n", __func__, + reply->error_code, req->opcode); + return -reply->error_code; + } + return 0; +} + +int wait_mbx_init_done(struct rnp_hw *hw) +{ + int count = 10000; + u32 v = rd32(hw, RNP_TOP_NIC_DUMMY); + + while (count) { + v = rd32(hw, RNP_TOP_NIC_DUMMY); + if (((v & 0xFF000000) == 0xa5000000) && (v & 0x80)) + break; + + usleep_range(500, 1000); + printk("waiting fw up\n"); + count--; + } + printk("fw init ok %x\n", v); + + return 0; +} + +int rnp_mbx_get_lane_stat(struct rnp_hw *hw) +{ + int err = 0; + struct mbx_fw_cmd_req req; + struct rnp_adapter *adpt = hw->back; + struct lane_stat_data *st; + struct mbx_req_cookie *cookie = NULL; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + + if (hw->mbx.other_irq_enabled) { + cookie = mbx_cookie_zalloc(hw,sizeof(struct lane_stat_data)); + if (!cookie) { + rnp_err("%s: no memory\n", __func__); + return -ENOMEM; + } + st = (struct lane_stat_data *)cookie->priv; + + build_get_lane_status_req(&req, hw->nr_lane, cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + rnp_err("%s: error:%d\n", __func__, err); + goto quit; + } + } else { + memset(&reply, 0, sizeof(reply)); + + build_get_lane_status_req(&req, hw->nr_lane, &req); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: 1 error:%d\n", __func__, err); + goto quit; + } + st = (struct lane_stat_data *)&(reply.data); + } + + hw->phy_type = st->phy_type; + hw->speed = adpt->speed = st->speed; + if ((st->is_sgmii) || (hw->phy_type == PHY_TYPE_10G_TP)) { + adpt->phy_addr = st->phy_addr; + } else { + adpt->sfp.fault = st->sfp.fault; + adpt->sfp.los = st->sfp.los; + adpt->sfp.mod_abs = st->sfp.mod_abs; + adpt->sfp.tx_dis = st->sfp.tx_dis; + } + adpt->si.main = st->si_main; + adpt->si.pre = st->si_pre; + adpt->si.post = st->si_post; + adpt->si.tx_boost = st->si_tx_boost; + adpt->link_traing = st->link_traing; + adpt->fec = st->fec; + hw->is_sgmii = st->is_sgmii; + hw->pci_gen = st->pci_gen; + hw->pci_lanes = st->pci_lanes; + adpt->speed = st->speed; + adpt->hw.link = st->linkup; + hw->is_backplane = st->is_backplane; + hw->supported_link = st->supported_link; + hw->advertised_link = st->advertised_link; + hw->tp_mdx = st->tp_mdx; + + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + if (hw->fw_version >= 0x00050000) { + hw->sfp_connector = st->sfp_connector; + hw->duplex = st->duplex; + adpt->an = st->autoneg; + } else { + hw->sfp_connector = 0xff; + hw->duplex = 1; + adpt->an = st->an; + } + if (hw->fw_version <= 0x00050000) { + hw->supported_link |= RNP_LINK_SPEED_10GB_FULL | + RNP_LINK_SPEED_1GB_FULL; + } + } + + rnp_logd( + LOG_MBX_LINK_STAT, + "%s:pma_type:0x%x phy_type:0x%x,linkup:%d duplex:%d auton:%d " + "fec:%d an:%d lt:%d is_sgmii:%d supported_link:0x%x, backplane:%d " + "speed:%d sfp_connector:0x%x\n", + adpt->name, st->pma_type, st->phy_type, st->linkup, st->duplex, + st->autoneg, st->fec, st->an, st->link_traing, st->is_sgmii, + hw->supported_link, hw->is_backplane, st->speed, + st->sfp_connector); +quit: + if (cookie) + mbx_free_cookie(cookie, err ? 
false : true); + + return err; +} + +int rnp_mbx_get_link_stat(struct rnp_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_get_link_status_req(&req, hw->nr_lane, &req); + return rnp_fw_send_cmd_wait(hw, &req, &reply); +} + +int rnp_mbx_fw_reset_phy(struct rnp_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int ret; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(hw,0); + + if (!cookie) { + return -ENOMEM; + } + + build_reset_phy_req(&req, cookie); + + ret = rnp_mbx_fw_post_req(hw, &req, cookie); + mbx_free_cookie(cookie,ret?false:true); + return ret; + } else { + build_reset_phy_req(&req, &req); + return rnp_fw_send_cmd_wait(hw, &req, &reply); + } +} + +int rnp_maintain_req(struct rnp_hw *hw, int cmd, int arg0, int req_data_bytes, + int reply_bytes, dma_addr_t dma_phy_addr) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + u64 address = dma_phy_addr; + + cookie = mbx_cookie_zalloc(hw,0); + if (!cookie) { + return -ENOMEM; + } + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + cookie->timeout_jiffes = 60 * HZ; + + build_maintain_req(&req, cookie, cmd, arg0, req_data_bytes, reply_bytes, + address & 0xffffffff, (address >> 32) & 0xffffffff); + + if (hw->mbx.other_irq_enabled) { + cookie->timeout_jiffes = 400 * HZ; + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + int old_mbx_timeout = hw->mbx.timeout; + hw->mbx.timeout = + (400 * 1000 * 1000) / hw->mbx.usec_delay; + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + hw->mbx.timeout = old_mbx_timeout; + } + + if (cookie) + mbx_free_cookie(cookie,err?false:true); + + return (err) ? -EIO : 0; +} + +int rnp_fw_get_macaddr(struct rnp_hw *hw, int pfvfnum, u8 *mac_addr, + int nr_lane) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + rnp_dbg("%s: pfvfnum:0x%x nr_lane:%d\n", __func__, pfvfnum, nr_lane); + + if (!mac_addr) { + rnp_err("%s: mac_addr is null\n", __func__); + return -EINVAL; + } + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = + mbx_cookie_zalloc(hw,sizeof(reply.mac_addr)); + struct mac_addr *mac = (struct mac_addr *)cookie->priv; + + if (!cookie) { + return -ENOMEM; + } + + build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return err; + } + hw->pcode = mac->pcode; + + if ((1 << nr_lane) & mac->lanes) { + memcpy(mac_addr, mac->addrs[nr_lane].mac, 6); + } + + mbx_free_cookie(cookie,true); + return 0; + } else { + build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, &req); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: failed. 
err:%d\n", __func__, err); + return err; + } + + hw->pcode = reply.mac_addr.pcode; + if ((1 << nr_lane) & reply.mac_addr.lanes) { + memcpy(mac_addr, reply.mac_addr.addrs[nr_lane].mac, 6); + return 0; + } + } + + return -ENODATA; +} + +static int rnp_mbx_sfp_read(struct rnp_hw *hw, int sfp_i2c_addr, int reg, + int cnt, u8 *out_buf) +{ + struct mbx_fw_cmd_req req; + int err = -EIO; + int nr_lane = hw->nr_lane; + + if ((cnt > MBX_SFP_READ_MAX_CNT) || !out_buf) { + rnp_err("%s: cnt:%d should <= %d out_buf:%p\n", __func__, cnt, + MBX_SFP_READ_MAX_CNT, out_buf); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(hw,cnt); + if (!cookie) { + return -ENOMEM; + } + build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt, + cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return err; + } else { + memcpy(out_buf, cookie->priv, cnt); + err = 0; + mbx_free_cookie(cookie,true); + } + } else { + struct mbx_fw_cmd_reply reply; + + memset(&reply, 0, sizeof(reply)); + build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt, + &reply); + + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err == 0) { + memcpy(out_buf, reply.sfp_read.value, cnt); + } + } + + return err; +} + +int rnp_mbx_sfp_module_eeprom_info(struct rnp_hw *hw, int sfp_addr, int reg, + int data_len, u8 *buf) +{ + int left = data_len; + int cnt, err; + + do { + cnt = (left > MBX_SFP_READ_MAX_CNT) ? MBX_SFP_READ_MAX_CNT : + left; + err = rnp_mbx_sfp_read(hw, sfp_addr, reg, cnt, buf); + if (err) { + rnp_err("%s: error:%d\n", __func__, err); + return err; + } + reg += cnt; + buf += cnt; + left -= cnt; + } while (left > 0); + + return 0; +} + +int rnp_mbx_sfp_write(struct rnp_hw *hw, int sfp_addr, int reg, short v) +{ + struct mbx_fw_cmd_req req; + int err; + int nr_lane = hw->nr_lane; + + memset(&req, 0, sizeof(req)); + + build_mbx_sfp_write(&req, nr_lane, sfp_addr, reg, v); + err = rnp_mbx_write_posted_locked(hw, &req); + + return err; +} + +int rnp_mbx_fw_reg_read(struct rnp_hw *hw, int fw_reg) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int err, ret = 0xffffffff; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->fw_version < 0x00050200) { + return -EOPNOTSUPP; + } + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = + mbx_cookie_zalloc(hw,sizeof(reply.r_reg)); + + build_readreg_req(&req, fw_reg, cookie); + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return ret; + } + ret = ((int *)(cookie->priv))[0]; + mbx_free_cookie(cookie,true); + } else { + build_readreg_req(&req, fw_reg, &reply); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: failed. 
err:%d\n", __func__, err); + return err; + } else { + ret = reply.r_reg.value[0]; + } + } + return ret; +} + +int rnp_mbx_reg_write(struct rnp_hw *hw, int fw_reg, int value) +{ + struct mbx_fw_cmd_req req; + int err; + memset(&req, 0, sizeof(req)); + + if (hw->fw_version < 0x00050200) { + return -EOPNOTSUPP; + } + + build_writereg_req(&req, NULL, fw_reg, 4, &value); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_reg_writev(struct rnp_hw *hw, int fw_reg, int value[4], int bytes) +{ + struct mbx_fw_cmd_req req; + int err; + memset(&req, 0, sizeof(req)); + + build_writereg_req(&req, NULL, fw_reg, bytes, value); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_wol_set(struct rnp_hw *hw, u32 mode) +{ + struct mbx_fw_cmd_req req; + int err; + int nr_lane = hw->nr_lane; + + memset(&req, 0, sizeof(req)); + + build_mbx_wol_set(&req, nr_lane, mode); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_set_dump(struct rnp_hw *hw, int flag) +{ + int err; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + build_set_dump(&req, hw->nr_lane, flag); + + err = rnp_mbx_write_posted_locked(hw, &req); + + return err; +} + +int rnp_mbx_force_speed(struct rnp_hw *hw, int speed) +{ + int cmd = 0x01150000; + + if (hw->force_10g_1g_speed_ablity == 0) + return -EINVAL; + + hw->saved_force_link_speed = speed; + if (speed == RNP_LINK_SPEED_10GB_FULL) { + cmd = 0x01150002; + hw->force_speed_stat = FORCE_SPEED_STAT_10G; + } else if (speed == RNP_LINK_SPEED_1GB_FULL) { + cmd = 0x01150001; + hw->force_speed_stat = FORCE_SPEED_STAT_1G; + } else { + cmd = 0x01150000; + hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED; + } + return rnp_mbx_set_dump(hw, cmd); +} + +int rnp_mbx_get_dump(struct rnp_hw *hw, int flags, u8 *data_out, int bytes) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + struct get_dump_reply *get_dump; + + void *dma_buf = NULL; + dma_addr_t dma_phy = 0; + u64 address; + + cookie = mbx_cookie_zalloc(hw,sizeof(*get_dump)); + if (!cookie) { + return -ENOMEM; + } + get_dump = (struct get_dump_reply *)cookie->priv; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (bytes > sizeof(get_dump->data)) { + dma_buf = dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, + GFP_ATOMIC); + if (!dma_buf) { + dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, + bytes); + err = -ENOMEM; + goto quit; + } + } + address = dma_phy; + build_get_dump_req(&req, cookie, hw->nr_lane, address & 0xffffffff, + (address >> 32) & 0xffffffff, bytes); + + if (hw->mbx.other_irq_enabled) { + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + get_dump = &reply.get_dump; + } + +quit: + if (err == 0) { + hw->dump.version = get_dump->version; + hw->dump.flag = get_dump->flags; + hw->dump.len = get_dump->bytes; + } + if (err == 0 && data_out) { + if (dma_buf) { + memcpy(data_out, dma_buf, bytes); + } else { + memcpy(data_out, get_dump->data, bytes); + } + } + if (dma_buf) + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + + if (cookie) + mbx_free_cookie(cookie, err ? false : true); + return err ? 
-err : 0; +} + +int rnp_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, int bytes) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + void *dma_buf = NULL; + dma_addr_t dma_phy; + + cookie = mbx_cookie_zalloc(hw,0); + if (!cookie) { + dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, 0); + return -ENOMEM; + } + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + dma_buf = + dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_ATOMIC); + if (!dma_buf) { + dev_err(&hw->pdev->dev, "%s: no memory:%d!", __func__, bytes); + err = -ENOMEM; + goto quit; + } + + memcpy(dma_buf, fw_bin, bytes); + + build_fw_update_req(&req, cookie, partition, dma_phy & 0xffffffff, + (dma_phy >> 32) & 0xffffffff, bytes); + if (hw->mbx.other_irq_enabled) { + cookie->timeout_jiffes = 400 * HZ; + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + int old_mbx_timeout = hw->mbx.timeout; + hw->mbx.timeout = + (400 * 1000 * 1000) / hw->mbx.usec_delay; + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + hw->mbx.timeout = old_mbx_timeout; + } + +quit: + if (dma_buf) + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + if (cookie) + mbx_free_cookie(cookie, err ? false : true); + printk("%s: %s (errcode:%d)\n", __func__, err ? " failed" : " success", + err); + return (err) ? -EIO : 0; +} + +int rnp_mbx_link_event_enable(struct rnp_hw *hw, int enable) +{ + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (enable) { + int v = rd32(hw, RNP_DMA_DUMY); + v &= 0x0000ffff; + v |= 0xa5a40000; + wr32(hw, RNP_DMA_DUMY, v); + } else { + wr32(hw, RNP_DMA_DUMY, 0); + } + + build_link_set_event_mask(&req, BIT(EVT_LINK_UP), + (enable & 1) << EVT_LINK_UP, &req); + err = rnp_mbx_write_posted_locked(hw, &req); + + return err; +} + +int rnp_fw_get_capability(struct rnp_hw *hw, struct phy_abilities *abil) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_phy_abalities_req(&req, &req); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + + if (err == 0) + memcpy(abil, &reply.phy_abilities, sizeof(*abil)); + + return err; +} + +static int to_mac_type(struct phy_abilities *ability) +{ + int lanes = hweight_long(ability->lane_mask); + if ((ability->phy_type == PHY_TYPE_40G_BASE_KR4) || + (ability->phy_type == PHY_TYPE_40G_BASE_LR4) || + (ability->phy_type == PHY_TYPE_40G_BASE_CR4) || + (ability->phy_type == PHY_TYPE_40G_BASE_SR4)) { + if (lanes == 1) { + return rnp_mac_n10g_x8_40G; + } else { + return rnp_mac_n10g_x8_10G; + } + } else if ((ability->phy_type == PHY_TYPE_10G_BASE_KR) || + (ability->phy_type == PHY_TYPE_10G_BASE_LR) || + (ability->phy_type == PHY_TYPE_10G_BASE_ER) || + (ability->phy_type == PHY_TYPE_10G_BASE_SR)) { + if (lanes == 1) { + return rnp_mac_n10g_x2_10G; + } else if (lanes == 2) { + return rnp_mac_n10g_x4_10G; + } else { + return rnp_mac_n10g_x8_10G; + } + } else if (ability->phy_type == PHY_TYPE_1G_BASE_KX) { + return rnp_mac_n10l_x8_1G; + } else if (ability->phy_type == PHY_TYPE_SGMII) { + return rnp_mac_n10l_x8_1G; + } + return rnp_mac_unknown; +} + +int rnp_set_lane_fun(struct rnp_hw *hw, int fun, int value0, int value1, + int value2, int value3) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + 
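/* build the lane-function request and post it via rnp_mbx_write_posted_locked(), which serializes callers on hw->mbx.lock */ +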
build_set_lane_fun(&req, hw->nr_lane, fun, value0, value1, value2, + value3); + + return rnp_mbx_write_posted_locked(hw, &req); +} + +int rnp_mbx_ifinsmod(struct rnp_hw *hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifinsmod(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + return err; +} + +int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifsuspuse(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + + return err; +} + +int rnp_mbx_ifforce_control_mac(struct rnp_hw *hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifforce(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + + return err; +} + +int rnp_mbx_ifup_down(struct rnp_hw *hw, int up) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifup_down(&req, hw->nr_lane, up); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d up:%d\n", __func__, + hw->nr_lane, up); + + /* force firmware report link-status */ + if (up) + rnp_link_stat_mark_reset(hw); + + return err; +} + +int rnp_mbx_led_set(struct rnp_hw *hw, int value) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_led_set(&req, hw->nr_lane, value, &reply); + + return rnp_mbx_write_posted_locked(hw, &req); +} + +int rnp_mbx_get_capability(struct rnp_hw *hw, struct rnp_info *info) +{ + int err; + struct phy_abilities ablity; + int try_cnt = 3; + + memset(&ablity, 0, sizeof(ablity)); + rnp_link_stat_mark_disable(hw); + + while (try_cnt--) { + err = rnp_fw_get_capability(hw, &ablity); + if (err == 0 && info) { + hw->lane_mask = ablity.lane_mask & 0xf; + info->mac = to_mac_type(&ablity); + info->adapter_cnt = hweight_long(hw->lane_mask); + hw->mode = ablity.nic_mode; + hw->pfvfnum = ablity.pfnum; + hw->speed = ablity.speed; + hw->nr_lane = 0; // PF1 + hw->fw_version = ablity.fw_version; + hw->mac_type = info->mac; + hw->phy_type = ablity.phy_type; + hw->axi_mhz = ablity.axi_mhz; + hw->port_ids = ablity.port_ids; + hw->bd_uid = ablity.bd_uid; + hw->phy_id = ablity.phy_id; + hw->wol = ablity.wol_status; + 
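/* cache the extended ability bits (ablity.e) reported in the firmware capability reply */ +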
hw->eco = ablity.e.v2; + hw->force_link_supported = + ablity.e.force_link_supported; + + if (ablity.e.force_link_supported && + (ablity.e.force_down_en & 0x1)) { + hw->force_status = 1; + } + + if ((hw->fw_version >= 0x00050201) && + (ablity.speed == SPEED_10000)) { + hw->force_speed_stat = + FORCE_SPEED_STAT_DISABLED; + hw->force_10g_1g_speed_ablity = 1; + } + if (ablity.ext_ablity != 0xffffffff && ablity.e.valid) { + hw->ncsi_en = (ablity.e.ncsi_en == 1); + hw->ncsi_rar_entries = 1; + hw->rpu_en = ablity.e.rpu_en; + if (hw->rpu_en) { + ablity.e.rpu_availble = 1; + } + hw->rpu_availble = ablity.e.rpu_availble; + hw->fw_lldp_ablity = ablity.e.fw_lldp_ablity; + } else { + hw->ncsi_rar_entries = 0; + } + + if (hw->force_link_supported == 0) { + hw->force_status = hw->ncsi_en ? 0 : 1; + } + + pr_info("%s: nic-mode:%d mac:%d adpt_cnt:%d lane_mask:0x%x, phy_type: " + "0x%x, " + "pfvfnum:0x%x, fw-version:0x%08x\n, axi:%d Mhz," + "port_id:%d bd_uid:0x%08x 0x%x ex-ablity:0x%x fs:%d speed:%d " + "ncsi_en:%u %d wol=0x%x rpu:%d-%d v2:%d force-status:%d,%d\n", + __func__, hw->mode, info->mac, + info->adapter_cnt, hw->lane_mask, hw->phy_type, + hw->pfvfnum, ablity.fw_version, ablity.axi_mhz, + ablity.port_id[0], hw->bd_uid, ablity.phy_id, + ablity.ext_ablity, + hw->force_10g_1g_speed_ablity, ablity.speed, + hw->ncsi_en, hw->ncsi_rar_entries, hw->wol, + hw->rpu_en, hw->rpu_availble, hw->eco, + hw->force_status, hw->force_link_supported); + if (hw->phy_type == PHY_TYPE_10G_TP) { + hw->supported_link = RNP_LINK_SPEED_10GB_FULL | + RNP_LINK_SPEED_1GB_FULL | + RNP_LINK_SPEED_1GB_HALF; + hw->phy.autoneg_advertised = hw->supported_link; + hw->autoneg = 1; + } + if (info->adapter_cnt != 0) + return 0; + } + } + + dev_err(&hw->pdev->dev, "%s: error!\n", __func__); + return -EIO; +} + +int rnp_mbx_get_temp(struct rnp_hw *hw, int *voltage) +{ + int err; + struct mbx_req_cookie *cookie = NULL; + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + struct get_temp *temp; + int temp_v = 0; + + cookie = mbx_cookie_zalloc(hw,sizeof(*temp)); + if (!cookie) { + return -ENOMEM; + } + temp = (struct get_temp *)cookie->priv; + + memset(&req, 0, sizeof(req)); + + build_get_temp(&req, cookie); + + if (hw->mbx.other_irq_enabled) { + err = rnp_mbx_fw_post_req(hw, &req, cookie); + } else { + memset(&reply, 0, sizeof(reply)); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + temp = &reply.get_temp; + } + + if (voltage) + *voltage = temp->volatage; + temp_v = temp->temp; + + if (cookie) { + mbx_free_cookie(cookie,err?false:true); + } + return temp_v; +} + +enum speed_enum { + speed_10, + speed_100, + speed_1000, + speed_10000, + speed_25000, + speed_40000, + +}; + +void rnp_link_stat_mark(struct rnp_hw *hw, int up) +{ + u32 v; + + v = rd32(hw, RNP_DMA_DUMY); + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + v &= ~(0xffff0000); + v |= 0xa5a40000; + if (up) { + v |= BIT(0); + } else { + v &= ~BIT(0); + } + } + wr32(hw, RNP_DMA_DUMY, v); +} + +void rnp_mbx_probe_stat_set(struct rnp_hw *hw, int stat) +{ +#define RNP10_DMA_DUMMY_PROBE_STAT_BIT (4) + u32 v; + + v = rd32(hw, RNP_DMA_DUMY); + if ((hw->hw_type == rnp_hw_n10) || (hw->hw_type == rnp_hw_n400)) { + v &= ~(0xffff0000); + v |= 0xa5a40000; + + if (stat == MBX_PROBE) { + v |= BIT(RNP10_DMA_DUMMY_PROBE_STAT_BIT); + } else if (stat == MBX_REMOVE) { + v = 0xFFA5A6A7; + } else { + v &= ~BIT(RNP10_DMA_DUMMY_PROBE_STAT_BIT); + } + } + wr32(hw, RNP_DMA_DUMY, v); +} + +static inline int rnp_mbx_fw_req_handler(struct rnp_adapter *adapter, + struct 
mbx_fw_cmd_req *req) +{ + struct rnp_hw *hw = &adapter->hw; + + switch (req->opcode) { + case LINK_STATUS_EVENT: + rnp_logd( + LOG_LINK_EVENT, + "[LINK_STATUS_EVENT:0x%x] %s:link changed: changed_lane:0x%x, " + "status:0x%x, speed:%d, duplex:%d\n", + req->opcode, adapter->name, + req->link_stat.changed_lanes, + req->link_stat.lane_status, req->link_stat.st[0].speed, + req->link_stat.st[0].duplex); + + if (req->link_stat.lane_status) { + adapter->hw.link = 1; + } else { + adapter->hw.link = 0; + } + if (req->link_stat.st[0].lldp_status) + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + + if (req->link_stat.port_st_magic == SPEED_VALID_MAGIC) { + hw->speed = req->link_stat.st[0].speed; + hw->duplex = req->link_stat.st[0].duplex; + + switch (hw->speed) { + case 10: + adapter->speed = RNP_LINK_SPEED_10_FULL; + break; + case 100: + adapter->speed = RNP_LINK_SPEED_100_FULL; + break; + case 1000: + adapter->speed = RNP_LINK_SPEED_1GB_FULL; + break; + case 10000: + adapter->speed = RNP_LINK_SPEED_10GB_FULL; + break; + case 25000: + adapter->speed = RNP_LINK_SPEED_25GB_FULL; + break; + case 40000: + adapter->speed = RNP_LINK_SPEED_40GB_FULL; + break; + } + } + if (req->link_stat.lane_status) { + rnp_link_stat_mark(hw, 1); + } else { + rnp_link_stat_mark(hw, 0); + } + + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + break; + } + rnp_service_event_schedule(adapter); + + return 0; +} + +static inline int rnp_mbx_fw_reply_handler(struct rnp_adapter *adapter, + struct mbx_fw_cmd_reply *reply) +{ + struct mbx_req_cookie *cookie; + + cookie = reply->cookie; + if (!cookie || is_cookie_valid(&adapter->hw,cookie)== false + || cookie->stat != COOKIE_ALLOCED) { + return -EIO; + } + + if (cookie->priv_len > 0) { + memcpy(cookie->priv, reply->data, cookie->priv_len); + } + + cookie->done = 1; + + if (reply->flags & FLAGS_ERR) { + cookie->errcode = reply->error_code; + } else { + cookie->errcode = 0; + } + + if(cookie->stat == COOKIE_ALLOCED){ + wake_up_interruptible(&cookie->wait); + } + /* not really free cookie, mark as free-able */ + mbx_free_cookie(cookie, false); + + return 0; +} + +static inline int rnp_rcv_msg_from_fw(struct rnp_adapter *adapter) +{ + u32 msgbuf[RNP_FW_MAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + s32 retval; + + retval = rnp_read_mbx(hw, msgbuf, RNP_FW_MAILBOX_SIZE, MBX_FW); + if (retval) { + printk("Error receiving message from FW:%d\n", retval); + return retval; + } + + rnp_logd(LOG_MBX_MSG_IN, + "msg from fw: msg[0]=0x%08x_0x%08x_0x%08x_0x%08x\n", msgbuf[0], + msgbuf[1], msgbuf[2], msgbuf[3]); + + /* this is a message we already processed, do nothing */ + if (((unsigned short *)msgbuf)[0] & FLAGS_DD) { + return rnp_mbx_fw_reply_handler( + adapter, (struct mbx_fw_cmd_reply *)msgbuf); + } else { + return rnp_mbx_fw_req_handler(adapter, + (struct mbx_fw_cmd_req *)msgbuf); + } +} + +static void rnp_rcv_ack_from_fw(struct rnp_adapter *adapter) +{ + /* do-nothing */ +} + +int rnp_fw_msg_handler(struct rnp_adapter *adapter) +{ + /* == check fw-req */ + if (!rnp_check_for_msg(&adapter->hw, MBX_FW)) + rnp_rcv_msg_from_fw(adapter); + + /* process any acks */ + if (!rnp_check_for_ack(&adapter->hw, MBX_FW)) + rnp_rcv_ack_from_fw(adapter); + + return 0; +} + +int rnp_mbx_phy_write(struct rnp_hw *hw, u32 reg, u32 val) +{ + struct mbx_fw_cmd_req req; + char nr_lane = hw->nr_lane; + memset(&req, 0, sizeof(req)); + + build_set_phy_reg(&req, NULL, PHY_EXTERNAL_PHY_MDIO, nr_lane, reg, val, + 0); + + return 
rnp_mbx_write_posted_locked(hw, &req); +} + +int rnp_mbx_phy_read(struct rnp_hw *hw, u32 reg, u32 *val) +{ + struct mbx_fw_cmd_req req; + int err = -EIO; + char nr_lane = hw->nr_lane; + int times = 0; +retry: + memset(&req, 0, sizeof(req)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(hw,4); + if (!cookie) { + return -ENOMEM; + } + build_get_phy_reg(&req, cookie, PHY_EXTERNAL_PHY_MDIO, nr_lane, + reg); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return err; + } else { + memcpy(val, cookie->priv, 4); + err = 0; + } + mbx_free_cookie(cookie,true); + } else { + struct mbx_fw_cmd_reply reply; + memset(&reply, 0, sizeof(reply)); + build_get_phy_reg(&req, &reply, PHY_EXTERNAL_PHY_MDIO, nr_lane, + reg); + + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err == 0) { + *val = reply.r_reg.value[0]; + } + } + if ((*(val) == 0xffff) && (times <= 5)) { + printk("%x warning mbx_phy_read 0xffff, addr %x\n", times, reg); + times++; + goto retry; + } + return err; +} + +int rnp_mbx_phy_link_set(struct rnp_hw *hw, int adv, int autoneg, int speed, + int duplex, int mdix_ctrl) +{ + int err; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + + printk("%s:lane:%d adv:0x%x\n", __func__, hw->nr_lane, adv); + printk("%s:autoneg %x, speed %x, duplex %x\n", __func__, autoneg, speed, + duplex); + + build_phy_link_set(&req, adv, hw->nr_lane, autoneg, speed, duplex, + mdix_ctrl); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + return err; +} + +int rnp_mbx_phy_pause_set(struct rnp_hw *hw, int pause_mode) +{ + int err; + struct mbx_fw_cmd_req req; + + memset(&req, 0, sizeof(req)); + + printk("%s:lane:%d pause:0x%x\n", __func__, hw->nr_lane, pause_mode); + + build_phy_pause_set(&req, pause_mode, hw->nr_lane); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + err = hw->mbx.ops.write_posted( + hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW); + + mutex_unlock(&hw->mbx.lock); + return err; +} + +int rnp_mbx_lldp_port_enable(struct rnp_hw *hw, bool enable) +{ + struct mbx_fw_cmd_req req; + int err; + int nr_lane = hw->nr_lane; + + if (!hw->fw_lldp_ablity) { + rnp_warn("lldp set not supported\n"); + return -EOPNOTSUPP; + } + + memset(&req, 0, sizeof(req)); + + build_lldp_ctrl_set(&req, nr_lane, enable); + + err = rnp_mbx_write_posted_locked(hw, &req); + return err; +} + +int rnp_mbx_lldp_status_get(struct rnp_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int err, ret = 0; + + if (!hw->fw_lldp_ablity) { + rnp_warn("fw lldp not supported\n"); + return -EOPNOTSUPP; + } + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + if (hw->mbx.other_irq_enabled) { + struct mbx_req_cookie *cookie = + mbx_cookie_zalloc(hw,sizeof(reply.lldp)); + + if (!cookie) { + return -ENOMEM; + } + build_lldp_ctrl_get(&req, hw->nr_lane, cookie); + + err = rnp_mbx_fw_post_req(hw, &req, cookie); + if (err) { + mbx_free_cookie(cookie,false); + return ret; + } + ret = ((int *)(cookie->priv))[0]; + mbx_free_cookie(cookie,true); + } else { + build_lldp_ctrl_get(&req, hw->nr_lane, &reply); + err = rnp_fw_send_cmd_wait(hw, &req, &reply); + if (err) { + rnp_err("%s: 1 error:%d\n", __func__, err); + return -EIO; + } + ret = reply.lldp.enable_stat; + } + return ret; +} + +int rnp_mbx_ddr_csl_enable(struct rnp_hw *hw, int 
enable, + dma_addr_t dma_phy, + int bytes) +{ + struct mbx_fw_cmd_req req; + memset(&req, 0, sizeof(req)); + + build_ddr_csl(&req, NULL, enable, dma_phy, bytes); + + if (hw->mbx.other_irq_enabled) { + return rnp_mbx_write_posted_locked(hw, &req); + } else { + struct mbx_fw_cmd_reply reply; + memset(&reply, 0, sizeof(reply)); + return rnp_fw_send_cmd_wait(hw, &req, &reply); + } +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h new file mode 100644 index 0000000000000..183e84d060d1c --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h @@ -0,0 +1,1135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef MBX_FW_CMD_H +#define MBX_FW_CMD_H + +#include +#include +#include + +#ifndef _PACKED_ALIGN4 +#define _PACKED_ALIGN4 __attribute__((packed, aligned(4))) +#endif + +enum GENERIC_CMD { + /* generat */ + GET_VERSION = 0x0001, + READ_REG = 0xFF03, + WRITE_REG = 0xFF04, + MODIFY_REG = 0xFF07, + /* virtualization */ + IFUP_DOWN = 0x0800, + SEND_TO_PF = 0x0801, + SEND_TO_VF = 0x0802, + DRIVER_INSMOD = 0x0803, + SYSTEM_SUSPUSE = 0x0804, + FORCE_LINK_ON_CLOSE = 0x0805, + /* link configuration admin commands */ + GET_PHY_ABALITY = 0x0601, + GET_MAC_ADDRESS = 0x0602, + RESET_PHY = 0x0603, + LED_SET = 0x0604, + GET_LINK_STATUS = 0x0607, + LINK_STATUS_EVENT = 0x0608, + SET_LANE_FUN = 0x0609, + GET_LANE_STATUS = 0x0610, + SFP_SPEED_CHANGED_EVENT = 0x0611, + SET_EVENT_MASK = 0x0613, + SET_LOOPBACK_MODE = 0x0618, + SET_PHY_REG = 0x0628, + GET_PHY_REG = 0x0629, + PHY_LINK_SET = 0x0630, + GET_PHY_STATISTICS = 0x0631, + PHY_PAUSE_SET = 0x0632, + /*sfp-module*/ + SFP_MODULE_READ = 0x0900, + SFP_MODULE_WRITE = 0x0901, + /* fw update */ + FW_UPDATE = 0x0700, + FW_MAINTAIN = 0x0701, + WOL_EN = 0x0910, + GET_DUMP = 0x0a00, + SET_DUMP = 0x0a10, + GET_TEMP = 0x0a11, + SET_WOL = 0x0a12, + LLDP_TX_CTL = 0x0a13, + SET_DDR_CSL = 0xFF11, +}; + +enum link_event_mask { + EVT_LINK_UP = 1, + EVT_NO_MEDIA = 2, + EVT_LINK_FAULT = 3, + EVT_PHY_TEMP_ALARM = 4, + EVT_EXCESSIVE_ERRORS = 5, + EVT_SIGNAL_DETECT = 6, + EVT_AUTO_NEGOTIATION_DONE = 7, + EVT_MODULE_QUALIFICATION_FAILED = 8, + EVT_PORT_TX_SUSPEND = 9, +}; + +enum pma_type { + PHY_TYPE_NONE = 0, + PHY_TYPE_1G_BASE_KX, + PHY_TYPE_SGMII, + PHY_TYPE_10G_BASE_KR, + PHY_TYPE_25G_BASE_KR, + PHY_TYPE_40G_BASE_KR4, + PHY_TYPE_10G_BASE_SR, + PHY_TYPE_40G_BASE_SR4, + PHY_TYPE_40G_BASE_CR4, + PHY_TYPE_40G_BASE_LR4, + PHY_TYPE_10G_BASE_LR, + PHY_TYPE_10G_BASE_ER, + PHY_TYPE_10G_TP +}; + +#define PHY_C45 (BIT(30)) +#define PHY_MMD(i) (i << 16) +#define PHY_MMD_PMAPMD PHY_MMD(1) +#define PHY_MMD_AN PHY_MMD(7) +#define PHY_MMD_VEND2 PHY_MMD(31) +#define PHY_826x_MDIX (PHY_C45 | PHY_MMD_VEND2 | 0xa430) +#define PHY_826x_SPEED (PHY_C45 | PHY_MMD_PMAPMD | 0) +#define PHY_826x_DUPLEX (PHY_C45 | PHY_MMD_VEND2 | 0xa44) +#define PHY_826x_AN (PHY_C45 | PHY_MMD_AN | 0) +#define PHY_826x_ADV (PHY_C45 | PHY_MMD_AN | 16) +#define PHY_826x_GBASE_ADV (PHY_C45 | PHY_MMD_AN | 0x20) +#define PHY_826x_GBASE_ADV_2 (PHY_C45 | PHY_MMD_VEND2 | 0xa412) +struct phy_abilities { + unsigned char link_stat; + unsigned char lane_mask; + int speed; + short phy_type; + short nic_mode; + short pfnum; + unsigned int fw_version; + unsigned int axi_mhz; + union { + unsigned char port_id[4]; + unsigned int port_ids; + }; + unsigned int bd_uid; + int phy_id; + int wol_status; + + union { + unsigned int ext_ablity; + struct { + unsigned int valid : 1; /* 0 */ + unsigned int wol_en : 1; /* 1 */ + unsigned int 
pci_preset_runtime_en : 1; /* 2 */ + unsigned int smbus_en : 1; /* 3 */ + unsigned int ncsi_en : 1; /* 4 */ + unsigned int rpu_en : 1; /* 5 */ + unsigned int v2 : 1; /* 6 */ + unsigned int pxe_en : 1; /* 7 */ + unsigned int mctp_en : 1; /* 8 */ + unsigned int yt8614 : 1; /* 9 */ + unsigned int pci_ext_reset : 1; /* 10 */ + unsigned int rpu_availble : 1; /* 11 */ + unsigned int fw_lldp_ablity : 1; /* 12 */ + unsigned int lldp_enabled : 1; /* 13 */ + unsigned int only_1g : 1; /* 14 */ + unsigned int force_down_en : 4; /* 15-18 */ + unsigned int force_link_supported : 1; /* 19 */ + unsigned int ports_is_sgmii_valid : 1; /* [20] */ + unsigned int lane0_is_sgmii : 1; /* [21] */ + unsigned int lane1_is_sgmii : 1; /* [22] */ + unsigned int lane2_is_sgmii : 1; /* [23] */ + unsigned int lane3_is_sgmii : 1; /* [24] */ + } e; + }; + +} _PACKED_ALIGN4; + +enum LOOPBACK_LEVEL { + LOOPBACK_DISABLE = 0, + LOOPBACK_MAC = 1, + LOOPBACK_PCS = 5, + LOOPBACK_EXTERNAL = 6, +}; +enum LOOPBACK_TYPE { + /* Tx->Rx */ + LOOPBACK_TYPE_LOCAL = 0x0, +}; + +enum LOOPBACK_FORCE_SPEED { + LOOPBACK_FORCE_SPEED_NONE = 0x0, + LOOPBACK_FORCE_SPEED_1GBS = 0x1, + LOOPBACK_FORCE_SPEED_10GBS = 0x2, + LOOPBACK_FORCE_SPEED_40_25GBS = 0x3, +}; + +enum PHY_INTERFACE { + PHY_INTERNAL_PHY = 0, + PHY_EXTERNAL_PHY_MDIO = 1, +}; + +/* Table 3-54. Get link status response (opcode: 0x0607) */ +struct link_stat_data { + char phy_type; + unsigned char speed; +#define LNK_STAT_SPEED_UNKNOWN 0 +#define LNK_STAT_SPEED_10 1 +#define LNK_STAT_SPEED_100 2 +#define LNK_STAT_SPEED_1000 3 +#define LNK_STAT_SPEED_10000 4 +#define LNK_STAT_SPEED_25000 5 +#define LNK_STAT_SPEED_40000 6 + /* 2 */ + char link_stat : 1; +#define LINK_UP 1 +#define LINK_DOWN 0 + char link_fault : 4; +#define LINK_LINK_FAULT BIT(0) +#define LINK_TX_FAULT BIT(1) +#define LINK_RX_FAULT BIT(2) +#define LINK_REMOTE_FAULT BIT(3) + char extern_link_stat : 1; + char media_available : 1; + char rev1 : 1; + /* 3:ignore */ + char an_completed : 1; + char lp_an_ablity : 1; + char parallel_detection_fault : 1; + char fec_enabled : 1; + char low_power_state : 1; + char link_pause_status : 2; + char qualified_odule : 1; + /* 4 */ + char phy_temp_alarm : 1; + char excessive_link_errors : 1; + char port_tx_suspended : 2; + char force_40G_enabled : 1; + char external_25G_phy_err_code : 3; +#define EXTERNAL_25G_PHY_NOT_PRESENT 1 +#define EXTERNAL_25G_PHY_NVM_CRC_ERR 2 +#define EXTERNAL_25G_PHY_MDIO_ACCESS_FAILED 6 +#define EXTERNAL_25G_PHY_INIT_SUCCED 7 + /* 5 */ + char loopback_enabled_status : 4; +#define LOOPBACK_DISABLE 0x0 +#define LOOPBACK_MAC 0x1 +#define LOOPBACK_SERDES 0x2 +#define LOOPBACK_PHY_INTERNAL 0x3 +#define LOOPBACK_PHY_EXTERNAL 0x4 + char loopback_type_status : 1; +#define LOCAL_LOOPBACK 0 /* tx->rx */ +#define FAR_END_LOOPBACK 0 /* rx->Tx */ + char rev3 : 1; + char external_dev_power_ability : 2; + /* 6-7 */ + short max_frame_sz; + /* 8 */ + char _25gb_kr_fec_enabled : 1; + char _25gb_rs_fec_enabled : 1; + char crc_enabled : 1; + char rev4 : 5; + /* 9 */ + int link_type; /* same as Phy type */ + char link_type_ext; +} _PACKED_ALIGN4; + +struct port_stat { + u8 phyid; + u8 duplex : 1; + u8 autoneg : 1; + u8 fec : 1; + u8 rev : 1; + u8 link_traing : 1; + u8 is_sgmii : 1; + u8 lldp_status : 1; + u32 speed; +} __attribute__((packed)); + +struct lane_stat_data { + u8 nr_lane; + u8 pci_gen : 4; + u8 pci_lanes : 4; + u8 pma_type; + u8 phy_type; + u16 linkup : 1; + u16 duplex : 1; + u16 autoneg : 1; + u16 fec : 1; + u16 an : 1; + u16 link_traing : 1; + u16 media_available : 1; + 
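/* when set, the lane runs SGMII; rnp_mbx_get_lane_stat() then uses phy_addr instead of the sfp status bits in the union below */ +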
u16 is_sgmii : 1; // + u16 link_fault : 4; +#define LINK_LINK_FAULT BIT(0) +#define LINK_TX_FAULT BIT(1) +#define LINK_RX_FAULT BIT(2) +#define LINK_REMOTE_FAULT BIT(3) + u16 is_backplane : 1; + u16 tp_mdx : 2; + union { + u8 phy_addr; + struct { + u8 mod_abs : 1; + u8 fault : 1; + u8 tx_dis : 1; + u8 los : 1; + } sfp; + }; + u8 sfp_connector; + u32 speed; + u32 si_main; + u32 si_pre; + u32 si_post; + u32 si_tx_boost; + u32 supported_link; + u32 phy_id; + u32 advertised_link; +} __attribute__((packed)); + +struct yt_phy_statistics { + u32 pkg_ib_valid; /* rx crc good and length 64-1518 */ + u32 pkg_ib_os_good; /* rx crc good and length >1518 */ + u32 pkg_ib_us_good; /* rx crc good and length <64 */ + u16 pkg_ib_err; /* rx crc wrong and length 64-1518 */ + u16 pkg_ib_os_bad; /* rx crc wrong and length >1518 */ + u16 pkg_ib_frag; /* rx crc wrong and length <64 */ + u16 pkg_ib_nosfd; /* rx sfd missed */ + u32 pkg_ob_valid; /* tx crc good and length 64-1518 */ + u32 pkg_ob_os_good; /* tx crc good and length >1518 */ + u32 pkg_ob_us_good; /* tx crc good and length <64 */ + u16 pkg_ob_err; /* tx crc wrong and length 64-1518 */ + u16 pkg_ob_os_bad; /* tx crc wrong and length >1518 */ + u16 pkg_ob_frag; /* tx crc wrong and length <64 */ + u16 pkg_ob_nosfd; /* tx sfd missed */ +} __attribute__((packed)); + +struct phy_statistics { + union { + struct yt_phy_statistics yt; + }; +} __attribute__((packed)); +/* == flags == */ +#define FLAGS_DD BIT(0) /* driver clear 0, FW must set 1 */ +#define FLAGS_CMP BIT(1) /* driver clear 0, FW mucst set */ +#define FLAGS_ERR BIT(2) +/* driver clear 0, FW must set only if it reporting an error */ +#define FLAGS_LB BIT(9) +#define FLAGS_RD BIT(10) /* set if additional buffer has command parameters */ +#define FLAGS_BUF BIT(12) /* set 1 on indirect command */ +#define FLAGS_SI BIT(13) /* not irq when command complete */ +#define FLAGS_EI BIT(14) /* interrupt on error */ +#define FLAGS_FE BIT(15) /* flush erro */ + +#ifndef SHM_DATA_MAX_BYTES +#define SHM_DATA_MAX_BYTES (64 - 2 * 4) +#endif + +#define MBX_REQ_HDR_LEN 24 +#define MBX_REPLYHDR_LEN 16 +#define MBX_REQ_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REQ_HDR_LEN) +#define MBX_REPLY_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REPLYHDR_LEN) + +/* TODO req is little endian. 
bigendian should be conserened */ + +struct mbx_fw_cmd_req { + unsigned short flags; /* 0-1 */ + unsigned short opcode; /* 2-3 enum LINK_ADM_CMD */ + unsigned short datalen; /* 4-5 */ + unsigned short ret_value; /* 6-7 */ + union { + struct { + unsigned int cookie_lo; /* 8-11 */ + unsigned int cookie_hi; /* 12-15 */ + }; + void *cookie; + }; + unsigned int reply_lo; /* 16-19 5dw */ + unsigned int reply_hi; /* 20-23 */ + /* === data === 7dw [24-64] */ + union { + char data[0]; + + struct { + unsigned int addr; + unsigned int bytes; + } r_reg; + + struct { + unsigned int addr; + unsigned int bytes; + unsigned int data[4]; + } w_reg; + + struct { + unsigned int lanes; + } ptp; + + struct { + int lane; + int up; + } ifup; + + struct { + int nr_lane; +#define LLDP_TX_ALL_LANES 0xFF + int op; +#define LLDP_TX_SET 0x0 +#define LLDP_TX_GET 0x1 + int enable; + } lldp_tx; + + struct { + int lane; + int status; + } ifinsmod; + + struct { + int lane; + int status; + } ifsuspuse; + + struct { + int nr_lane; + int status; + } ifforce; + + struct { + int nr_lane; + } get_lane_st; + + struct { + int nr_lane; + int func; +#define LANE_FUN_AN 0 +#define LANE_FUN_LINK_TRAING 1 +#define LANE_FUN_FEC 2 +#define LANE_FUN_SI 3 +#define LANE_FUN_SFP_TX_DISABLE 4 +#define LANE_FUN_PCI_LANE 5 +#define LANE_FUN_PRBS 6 +#define LANE_FUN_SPEED_CHANGE 7 + + int value0; + int value1; + int value2; + int value3; + } set_lane_fun; + + struct { + int flag; + int nr_lane; + } set_dump; + + struct { + int lane; + int enable; + } wol; + + struct { + unsigned int bytes; + unsigned int nr_lane; + unsigned int bin_phy_lo; + unsigned int bin_phy_hi; + } get_dump; + + struct { + unsigned int nr_lane; + int value; +#define LED_IDENTIFY_INACTIVE 0 +#define LED_IDENTIFY_ACTIVE 1 +#define LED_IDENTIFY_ON 2 +#define LED_IDENTIFY_OFF 3 + } led_set; + + struct { + unsigned int addr; + unsigned int data; + unsigned int mask; + } modify_reg; + + struct { + unsigned int adv_speed_mask; + unsigned int autoneg; + unsigned int speed; + unsigned int duplex; + int nr_lane; + unsigned int tp_mdix_ctrl; + } phy_link_set; + + struct { + unsigned int pause_mode; + int nr_lane; + } phy_pause_set; + + struct { + unsigned int nr_lane; + unsigned int sfp_adr; + unsigned int reg; + unsigned int cnt; + } sfp_read; + + struct { + unsigned int nr_lane; + unsigned int sfp_adr; + unsigned int reg; + unsigned int val; + } sfp_write; + + struct { + unsigned int nr_lane; /* 0-3 */ + } get_linkstat; + struct { + unsigned short changed_lanes; + unsigned short lane_status; + unsigned int port_st_magic; +#define SPEED_VALID_MAGIC 0xa4a6a8a9 + struct port_stat st[4]; + } link_stat; + + struct { + unsigned short enable_stat; + unsigned short event_mask; + } stat_event_mask; + + struct { /* set loopback */ + unsigned char loopback_level; + unsigned char loopback_type; + unsigned char loopback_force_speed; + + char loopback_force_speed_enable : 1; + } loopback; + + struct { + int cmd; + int arg0; + int req_bytes; + int reply_bytes; + int ddr_lo; + int ddr_hi; + } maintain; + + struct { /* set phy register */ + char phy_interface; + union { + char page_num; + char external_phy_addr; + }; + int phy_reg_addr; + int phy_w_data; + int reg_addr; + int w_data; + /* 1 = ignore page_num, use last QSFP */ + char recall_qsfp_page : 1; + /* page value */ + /* 0 = use page_num for QSFP */ + char nr_lane; + } set_phy_reg; + + struct { + int enable; + int ddr_phy_hi; + int ddr_phy_lo; + int bytes; + } ddr_csl; + + struct { + } get_phy_ablity; + + struct { + int lane_mask; + int 
pfvf_num; + } get_mac_addr; + + struct { + char phy_interface; + union { + char page_num; + char external_phy_addr; + }; + int phy_reg_addr; + char nr_lane; + } get_phy_reg; + + struct { + unsigned int nr_lane; + } phy_statistics; + + struct { + char paration; + unsigned int bytes; + unsigned int bin_phy_lo; + unsigned int bin_phy_hi; + } fw_update; + }; +} _PACKED_ALIGN4; + +/* firmware -> driver */ +struct mbx_fw_cmd_reply { + unsigned short flags; + /* fw must set: DD, CMP, Error(if error), copy value */ + /* from command: LB,RD,VFC,BUF,SI,EI,FE */ + unsigned short opcode; /* 2-3: copy from req */ + unsigned short error_code; /* 4-5: 0 if no error */ + unsigned short datalen; + /* 6-7: */ + union { + struct { + unsigned int cookie_lo; /* 8-11: */ + unsigned int cookie_hi; /* 12-15: */ + }; + void *cookie; + }; + /* ===== data ==== [16-64] */ + union { + char data[0]; + + struct version { + unsigned int major; + unsigned int sub; + unsigned int modify; + } version; + + struct { + unsigned int value[4]; + } r_reg; + + struct { + unsigned int new_value; + } modify_reg; + + struct get_temp { + int temp; + int volatage; + } get_temp; + + struct lldp_stat { + int enable_stat; + } lldp; + + struct { +#define MBX_SFP_READ_MAX_CNT 32 + char value[MBX_SFP_READ_MAX_CNT]; + } sfp_read; + + struct mac_addr { + int lanes; + struct _addr { + /* for macaddr:01:02:03:04:05:06 + * mac-hi=0x01020304 mac-lo=0x05060000 + */ + unsigned char mac[8]; + } addrs[4]; + u32 pcode; + } mac_addr; + + struct get_dump_reply { + int flags; + int version; + int bytes; + int data[4]; + } get_dump; + + struct lane_stat_data lanestat; + struct link_stat_data linkstat; + struct phy_abilities phy_abilities; + struct phy_statistics phy_statistics; + }; +} _PACKED_ALIGN4; + +static inline void build_lldp_ctrl_set(struct mbx_fw_cmd_req *req, int nr_lane, + int enable) +{ + req->flags = 0; + req->opcode = LLDP_TX_CTL; + req->datalen = sizeof(req->lldp_tx); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->lldp_tx.op = LLDP_TX_SET; + req->lldp_tx.nr_lane = nr_lane; + req->lldp_tx.enable = enable; +} + +static inline void build_lldp_ctrl_get(struct mbx_fw_cmd_req *req, int nr_lane, + void *cookie) +{ + req->flags = 0; + req->opcode = LLDP_TX_CTL; + req->datalen = sizeof(req->lldp_tx); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->lldp_tx.op = LLDP_TX_GET; + req->lldp_tx.nr_lane = nr_lane; +} + +static inline void build_maintain_req(struct mbx_fw_cmd_req *req, void *cookie, + int cmd, int arg0, int req_bytes, + int reply_bytes, u32 dma_phy_lo, + u32 dma_phy_hi) +{ + req->flags = 0; + req->opcode = FW_MAINTAIN; + req->datalen = sizeof(req->maintain); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->maintain.cmd = cmd; + req->maintain.arg0 = arg0; + req->maintain.req_bytes = req_bytes; + req->maintain.reply_bytes = reply_bytes; + req->maintain.ddr_lo = dma_phy_lo; + req->maintain.ddr_hi = dma_phy_hi; +} + +static inline void build_fw_update_req(struct mbx_fw_cmd_req *req, void *cookie, + int partition, u32 fw_bin_phy_lo, + u32 fw_bin_phy_hi, int fw_bytes) +{ + req->flags = 0; + req->opcode = FW_UPDATE; + req->datalen = sizeof(req->fw_update); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->fw_update.paration = partition; + req->fw_update.bytes = fw_bytes; + req->fw_update.bin_phy_lo = fw_bin_phy_lo; + req->fw_update.bin_phy_hi = fw_bin_phy_hi; +} + +static inline void build_reset_phy_req(struct mbx_fw_cmd_req *req, void *cookie) +{ + 
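+ /* RESET_PHY carries no payload: only the common request header is filled in */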
req->flags = 0; + req->opcode = RESET_PHY; + req->datalen = 0; + req->reply_lo = 0; + req->reply_hi = 0; + req->cookie = cookie; +} + +static inline void build_phy_abalities_req(struct mbx_fw_cmd_req *req, + void *cookie) +{ + req->flags = 0; + req->opcode = GET_PHY_ABALITY; + req->datalen = 0; + req->reply_lo = 0; + req->reply_hi = 0; + req->cookie = cookie; +} + +static inline void build_get_macaddress_req(struct mbx_fw_cmd_req *req, + int lane_mask, int pfvfnum, + void *cookie) +{ + req->flags = 0; + req->opcode = GET_MAC_ADDRESS; + req->datalen = sizeof(req->get_mac_addr); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_mac_addr.lane_mask = lane_mask; + req->get_mac_addr.pfvf_num = pfvfnum; +} + +static inline void build_version_req(struct mbx_fw_cmd_req *req, void *cookie) +{ + req->flags = 0; + req->opcode = GET_VERSION; + req->reply_lo = 0; + req->reply_hi = 0; + req->datalen = 0; + req->cookie = cookie; +} + +/* 7.10.11.8 Read egister admin command */ +static inline void build_readreg_req(struct mbx_fw_cmd_req *req, int reg_addr, + void *cookie) +{ + req->flags = 0; + req->opcode = READ_REG; + req->datalen = sizeof(req->r_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->r_reg.addr = reg_addr & ~(3); + req->r_reg.bytes = 4; +} + +static inline void mbx_fw_req_set_reply(struct mbx_fw_cmd_req *req, + dma_addr_t reply) +{ + u64 address = reply; + + req->reply_hi = (address >> 32); + req->reply_lo = (address) & 0xffffffff; +} + +/* 7.10.11.9 Write egister admin command */ +static inline void build_writereg_req(struct mbx_fw_cmd_req *req, void *cookie, + int reg_addr, int bytes, int value[4]) +{ + int i; + + req->flags = 0; + req->opcode = WRITE_REG; + req->datalen = sizeof(req->w_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->w_reg.addr = reg_addr & ~3; + req->w_reg.bytes = bytes; + for (i = 0; i < bytes / 4; i++) + req->w_reg.data[i] = value[i]; +} + +/* 7.10.11.10 modify egister admin command */ +static inline void build_modifyreg_req(struct mbx_fw_cmd_req *req, void *cookie, + int reg_addr, int value, + unsigned int mask) +{ + req->flags = 0; + req->opcode = MODIFY_REG; + req->datalen = sizeof(req->modify_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->modify_reg.addr = reg_addr; + req->modify_reg.data = value; + req->modify_reg.mask = mask; +} + +static inline void build_get_lane_status_req(struct mbx_fw_cmd_req *req, + int nr_lane, void *cookie) +{ + req->flags = 0; + req->opcode = GET_LANE_STATUS; + req->datalen = sizeof(req->get_lane_st); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_lane_st.nr_lane = nr_lane; +} + +static inline void build_get_link_status_req(struct mbx_fw_cmd_req *req, + int nr_lane, void *cookie) +{ + req->flags = 0; + req->opcode = GET_LINK_STATUS; + req->datalen = sizeof(req->get_linkstat); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->get_linkstat.nr_lane = nr_lane; +} + +static inline void build_get_temp(struct mbx_fw_cmd_req *req, void *cookie) +{ + req->flags = 0; + req->opcode = GET_TEMP; + req->datalen = 0; + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; +} +static inline void build_get_dump_req(struct mbx_fw_cmd_req *req, void *cookie, + int nr_lane, u32 fw_bin_phy_lo, + u32 fw_bin_phy_hi, int bytes) +{ + req->flags = 0; + req->opcode = GET_DUMP; + req->datalen = sizeof(req->get_dump); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + 
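+ /* payload: dump length in bytes plus the lo/hi halves of the DMA address the firmware writes the dump into */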
req->get_dump.bytes = bytes; + req->get_dump.nr_lane = nr_lane; + req->get_dump.bin_phy_lo = fw_bin_phy_lo; + req->get_dump.bin_phy_hi = fw_bin_phy_hi; +} + +static inline void build_set_dump(struct mbx_fw_cmd_req *req, int nr_lane, + int flag) +{ + req->flags = 0; + req->opcode = SET_DUMP; + req->datalen = sizeof(req->set_dump); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->set_dump.flag = flag; + req->set_dump.nr_lane = nr_lane; +} + +static inline void build_led_set(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int value, void *cookie) +{ + req->flags = 0; + req->opcode = LED_SET; + req->datalen = sizeof(req->led_set); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->led_set.nr_lane = nr_lane; + req->led_set.value = value; +} + +static inline void build_set_lane_fun(struct mbx_fw_cmd_req *req, int nr_lane, + int fun, int value0, int value1, + int value2, int value3) +{ + req->flags = 0; + req->opcode = SET_LANE_FUN; + req->datalen = sizeof(req->set_lane_fun); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->set_lane_fun.func = fun; + req->set_lane_fun.nr_lane = nr_lane; + req->set_lane_fun.value0 = value0; + req->set_lane_fun.value1 = value1; + req->set_lane_fun.value2 = value2; + req->set_lane_fun.value3 = value3; +} + +static inline void build_set_phy_reg(struct mbx_fw_cmd_req *req, void *cookie, + enum PHY_INTERFACE phy_inf, char nr_lane, + int reg, int w_data, int recall_qsfp_page) +{ + req->flags = 0; + req->opcode = SET_PHY_REG; + req->datalen = sizeof(req->set_phy_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->set_phy_reg.phy_interface = phy_inf; + req->set_phy_reg.nr_lane = nr_lane; + req->set_phy_reg.phy_reg_addr = reg; + req->set_phy_reg.phy_w_data = w_data; + + if (recall_qsfp_page) + req->set_phy_reg.recall_qsfp_page = 1; + else + req->set_phy_reg.recall_qsfp_page = 0; +} + +static inline void build_get_phy_reg(struct mbx_fw_cmd_req *req, void *cookie, + enum PHY_INTERFACE phy_inf, char nr_lane, + int reg) +{ + req->flags = 0; + req->opcode = GET_PHY_REG; + req->datalen = sizeof(req->get_phy_reg); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->get_phy_reg.phy_interface = phy_inf; + + req->get_phy_reg.nr_lane = nr_lane; + req->get_phy_reg.phy_reg_addr = reg; +} + +static inline void build_phy_pause_set(struct mbx_fw_cmd_req *req, + int pause_mode, int nr_lane) +{ + req->flags = 0; + req->opcode = PHY_PAUSE_SET; + req->datalen = sizeof(req->phy_pause_set); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->phy_pause_set.nr_lane = nr_lane; + req->phy_pause_set.pause_mode = pause_mode; +} + +static inline void build_phy_link_set(struct mbx_fw_cmd_req *req, + unsigned int adv, int nr_lane, + unsigned int autoneg, unsigned int speed, + unsigned int duplex, + unsigned int tp_mdix_ctrl) +{ + req->flags = 0; + req->opcode = PHY_LINK_SET; + req->datalen = sizeof(req->phy_link_set); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->phy_link_set.nr_lane = nr_lane; + req->phy_link_set.adv_speed_mask = adv; + req->phy_link_set.autoneg = autoneg; + req->phy_link_set.speed = speed; + req->phy_link_set.duplex = duplex; + req->phy_link_set.tp_mdix_ctrl = tp_mdix_ctrl; +} + +static inline void build_ifup_down(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int up) +{ + req->flags = 0; + req->opcode = IFUP_DOWN; + req->datalen = sizeof(req->ifup); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + 
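+ /* payload: lane index and the requested admin state (non-zero up brings the link up) */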
req->ifup.lane = nr_lane; + req->ifup.up = up; +} + +static inline void build_ifinsmod(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = DRIVER_INSMOD; + req->datalen = sizeof(req->ifinsmod); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifinsmod.lane = nr_lane; + req->ifinsmod.status = status; +} + +static inline void build_ifsuspuse(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = SYSTEM_SUSPUSE; + req->datalen = sizeof(req->ifsuspuse); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifinsmod.lane = nr_lane; + req->ifinsmod.status = status; +} + +static inline void build_ifforce(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = FORCE_LINK_ON_CLOSE; + req->datalen = sizeof(req->ifforce); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifforce.nr_lane = nr_lane; + req->ifforce.status = status; +} + +static inline void build_mbx_sfp_read(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int sfp_addr, + int reg, int cnt, void *cookie) +{ + req->flags = 0; + req->opcode = SFP_MODULE_READ; + req->datalen = sizeof(req->sfp_read); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->sfp_read.nr_lane = nr_lane; + req->sfp_read.sfp_adr = sfp_addr; + req->sfp_read.reg = reg; + ; + req->sfp_read.cnt = cnt; +} + +static inline void build_mbx_sfp_write(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int sfp_addr, + int reg, int v) +{ + req->flags = 0; + req->opcode = SFP_MODULE_WRITE; + req->datalen = sizeof(req->sfp_write); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->sfp_write.nr_lane = nr_lane; + req->sfp_write.sfp_adr = sfp_addr; + req->sfp_write.reg = reg; + req->sfp_write.val = v; +} + +static inline void build_mbx_wol_set(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, u32 mode) +{ + req->flags = 0; + req->opcode = SET_WOL; + req->datalen = sizeof(req->sfp_write); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->wol.lane = nr_lane; + req->wol.enable = mode; +} + +/* enum link_event_mask or */ +static inline void build_link_set_event_mask(struct mbx_fw_cmd_req *req, + unsigned short event_mask, + unsigned short enable, + void *cookie) +{ + req->flags = 0; + req->opcode = SET_EVENT_MASK; + req->datalen = sizeof(req->stat_event_mask); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->stat_event_mask.event_mask = event_mask; + req->stat_event_mask.enable_stat = enable; +} + +static inline void +build_link_set_loopback_req(struct mbx_fw_cmd_req *req, void *cookie, + enum LOOPBACK_LEVEL level, + enum LOOPBACK_FORCE_SPEED force_speed) +{ + req->flags = 0; + req->opcode = SET_LOOPBACK_MODE; + req->datalen = sizeof(req->loopback); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->loopback.loopback_level = level; + req->loopback.loopback_type = LOOPBACK_TYPE_LOCAL; + if (force_speed != LOOPBACK_FORCE_SPEED_NONE) { + req->loopback.loopback_force_speed = force_speed; + req->loopback.loopback_force_speed_enable = 1; + } +} + +static inline void build_ddr_csl(struct mbx_fw_cmd_req *req, void *cookie, + bool enable, dma_addr_t dma_phy, int bytes) +{ + req->flags = 0; + req->opcode = SET_DDR_CSL; + req->datalen = sizeof(req->ddr_csl); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->ddr_csl.enable = enable; + + if (enable) { + 
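+ /* when enabling, hand the firmware the buffer length and its DMA address split into hi/lo words */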
req->ddr_csl.bytes = bytes; + req->ddr_csl.ddr_phy_hi = (dma_phy >> 32); + req->ddr_csl.ddr_phy_lo = dma_phy & 0xffffffff; + } else { + req->ddr_csl.bytes = 0; + } +} + +/* =========== errcode======= */ +enum MBX_ERR { + MBX_OK = 0, + MBX_ERR_NO_PERM, + MBX_ERR_INVAL_OPCODE, + MBX_ERR_INVALID_PARAM, + MBX_ERR_INVALID_ADDR, + MBX_ERR_INVALID_LEN, + MBX_ERR_NODEV, + MBX_ERR_IO, +}; + +int rnp_fw_get_capability(struct rnp_hw *hw, struct phy_abilities *abil); +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mpe.c b/drivers/net/ethernet/mucse/rnp/rnp_mpe.c new file mode 100644 index 0000000000000..fc95b16e98eb2 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mpe.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include + +#include "rnp_common.h" +#include "rnp_mbx.h" +#include "rnp_mpe.h" +#define MPE_FW_BIN "n10c/n10-mpe.bin" +#define MPE_FW_DATA "n10c/n10-mpe-data.bin" +#define MPE_RPU_BIN "n10c/n10-rpu.bin" + +extern unsigned int mpe_src_port; +extern unsigned int mpe_pkt_version; +#define CFG_RPU_OFFSET 0x100000 +/* 4010_0000 broadcast addr */ +#define START_MPE_REG 0x00198700 +/* 4019_8700 start all mpe */ + +/* RV_CORE_STATUS: 4000_6000 */ +#define RV_CORE0_WORING_REG 0x6000 +#define RPU_ID 0x6060 +/* read-only rpu id */ + +/* RPU_REG */ +#define RV_BROADCAST_START_REG (0x106000) +#define RPU_DMA_START_REG (0x110000) +#define RPU_ENDIAN_REG (0x110010) +#define N10_START_REG (0x106000) + +/* MPE0_ICCM: 4020_0000H */ +#define CFG_MPE_ICCM(nr) (0x200000 + (nr) * 0x80000) +#define CFG_MPE_DCCM(nr) (0x220000 + (nr) * 0x80000) + +#define RPU_CM3_BASE 0x40000000 +#define RPU_SDRAM_BASE (0x60000000) +#define SDRAM_DEFAULT_VAL (0x88481c00) + +#define iowrite32_arrary(rpubase, offset, array, size) \ + do { \ + int i; \ + for (i = 0; i < size; i++) { \ + rnp_wr_reg(((char *)(rpubase)) + (offset) + i * 4, \ + (array)[i]); \ + } \ + } while (0) + +static void rnp_reset_mpe_and_rpu(struct rnp_hw *hw) +{ +#define SYSCTL_CRG_CTRL12 0x30007030 +#define RPU_RESET_BIT 9 + + /* reset rpu/mpe/pub */ + cm3_reg_write32(hw, SYSCTL_CRG_CTRL12, BIT(RPU_RESET_BIT + 16) | 0); + smp_mb(); + mdelay(150); + cm3_reg_write32(hw, SYSCTL_CRG_CTRL12, + BIT(RPU_RESET_BIT + 16) | BIT(RPU_RESET_BIT)); + smp_mb(); + mdelay(100); +} + +static void rnp_start_rpu(char *rpu_base, int do_start) +{ + int mpe_start_v = 0xff, rpu_start_v = 0x1; + + if (do_start == 0) { + mpe_start_v = 0; + rpu_start_v = 0; + } + rnp_wr_reg(rpu_base + START_MPE_REG, mpe_start_v); + + /* start all rpu-rv-core */ + rnp_wr_reg(rpu_base + RV_BROADCAST_START_REG, rpu_start_v); + /* start rpu */ + rnp_wr_reg(rpu_base + RPU_DMA_START_REG, rpu_start_v); + + smp_mb(); +} + +/* + @rpu_base: mapped(0x4000_0000) + @mpe_bin : required + @mpe_data: optional + @rpu_bin : optional +*/ +static int +rnp_download_and_start_rpu(struct rnp_hw *hw, char *rpu_base, + const unsigned int *mpe_bin, const int mpe_bin_sz, + const unsigned int *mpe_data, const int mpe_data_sz, + const unsigned int *rpu_bin, const int rpu_sz) +{ + int nr = 0; + + rnp_info("MPE: rpu:%d mpe:%d mpe-data:%d. 
Downloading...\n", rpu_sz, + mpe_bin_sz, mpe_data_sz); + + rnp_reset_mpe_and_rpu(hw); + + /* download rpu firmeware */ + if (rpu_sz) { + iowrite32_arrary(rpu_base, CFG_RPU_OFFSET + 0x4000, rpu_bin, + rpu_sz / 4); + } + + /* download firmware to 4 mpe-core: mpe0,mpe1,mpe2,mpe3 */ + for (nr = 0; nr < 4; nr++) { + iowrite32_arrary(rpu_base, CFG_MPE_ICCM(nr), mpe_bin, + mpe_bin_sz / 4); + if (mpe_data_sz) + iowrite32_arrary(rpu_base, CFG_MPE_DCCM(nr), mpe_data, + mpe_data_sz / 4); + } + smp_mb(); + + /* Enable MPE */ + if (mpe_src_port != 0) { + printk("%s %d\n", __func__, __LINE__); + rnp_wr_reg(rpu_base + 0x100000, mpe_pkt_version); + rnp_wr_reg(rpu_base + 0x100004, mpe_src_port); + } + + /* start mpe */ + rnp_wr_reg(rpu_base + RPU_ENDIAN_REG, 0xf); + smp_mb(); + rnp_start_rpu(rpu_base, 1); + + return 0; +} + +/* + *load fw bin from: /lib/firmware/ directory + */ +static const struct firmware *rnp_load_fw(struct device *dev, + const char *fw_name) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, fw_name, dev); + if (rc != 0) { + // dev_warn( dev, "Faild to requesting firmware file: %s, %d\n", + // fw_name, rc); + return NULL; + } + + return fw; +} + +int rnp_rpu_mpe_start(struct rnp_adapter *adapter) +{ + const struct firmware *mpe_bin = NULL, *mpe_data = NULL, + *rpu_bin = NULL; + struct rnp_hw *hw = &adapter->hw; + int rpu_version, err = 0; + // u32 val = 0; + + rpu_version = cm3_reg_read32(hw, RPU_CM3_BASE + RPU_ID); + dev_info(&adapter->pdev->dev, "rpu_version:0x%x\n", rpu_version); + + if (rpu_version != 0x20201125) { + dev_info(&adapter->pdev->dev, "rpu not enabled!\n"); + return -1; + } + + dev_info(&adapter->pdev->dev, "rpu_addr=%p\n", hw->rpu_addr); + if (hw->rpu_addr == NULL) { + return -EINVAL; + } + + mpe_bin = rnp_load_fw(&adapter->pdev->dev, MPE_FW_BIN); + if (!mpe_bin) { + dev_warn(&adapter->pdev->dev, "can't load mpe fw:%s\n", + MPE_FW_BIN); + goto quit; + } + mpe_data = rnp_load_fw(&adapter->pdev->dev, MPE_FW_DATA); + if (!mpe_data) { + dev_warn(&adapter->pdev->dev, "no %s, ignored\n", MPE_FW_DATA); + } + rpu_bin = rnp_load_fw(&adapter->pdev->dev, MPE_RPU_BIN); + if (!rpu_bin) { + dev_warn(&adapter->pdev->dev, "no %s, ignored\n", MPE_RPU_BIN); + } + + err = rnp_download_and_start_rpu( + hw, hw->rpu_addr, (unsigned int *)mpe_bin->data, mpe_bin->size, + mpe_data ? (unsigned int *)mpe_data->data : NULL, + mpe_data ? mpe_data->size : 0, + rpu_bin ? (unsigned int *)rpu_bin->data : NULL, + rpu_bin ? rpu_bin->size : 0); + if (err != 0) { + dev_warn(&adapter->pdev->dev, "can't start mpe and rpu\n"); + goto quit; + } + + adapter->rpu_inited = 1; + +quit: + if (rpu_bin) { + release_firmware(rpu_bin); + } + if (mpe_data) + release_firmware(mpe_data); + if (mpe_bin) + release_firmware(mpe_bin); + return 0; +} + +void rnp_rpu_mpe_stop(struct rnp_adapter *adapter) +{ + if (adapter->rpu_inited) { + rnp_start_rpu(adapter->hw.rpu_addr, 0); + rnp_reset_mpe_and_rpu(&adapter->hw); + } + + adapter->rpu_inited = 0; +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mpe.h b/drivers/net/ethernet/mucse/rnp/rnp_mpe.h new file mode 100644 index 0000000000000..d36fcb2a1b013 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_mpe.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef RNP_MPE_H +#define RNP_MPE_H + +#include "rnp.h" + +int rnp_rpu_mpe_start(struct rnp_adapter *adapter); +void rnp_rpu_mpe_stop(struct rnp_adapter *adapter); + +#endif //RNP_MPE_H diff --git a/drivers/net/ethernet/mucse/rnp/rnp_n10.c b/drivers/net/ethernet/mucse/rnp/rnp_n10.c new file mode 100644 index 0000000000000..3d626bc0f6553 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_n10.c @@ -0,0 +1,4812 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include + +#include "rnp.h" +#include "rnp_phy.h" +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" +#include "rnp_pcs.h" +#include "rnp_ethtool.h" +#include "rnp_sriov.h" + +#define RNP_N400_MAX_VF 8 +#define RNP_N400_RSS_TBL_NUM 128 +#define RNP_N400_RSS_TC_TBL_NUM 8 +#define RNP_N400_MAX_TX_QUEUES 8 +#define RNP_N400_MAX_RX_QUEUES 8 +#define RNP_N400_RAR_NCSI_RAR_ENTRIES 0 +#define RNP_N10_MAX_VF 64 +#define RNP_N10_RSS_TBL_NUM 128 +#define RNP_N10_RSS_TC_TBL_NUM 8 +#define RNP_N10_MAX_TX_QUEUES 128 +#define RNP_N10_MAX_RX_QUEUES 128 +#define RNP_N10_RAR_NCSI_RAR_ENTRIES 0 + +#if defined(NIC_VF_FXIED) || defined(VF_PROMISC_SUPPORT) +/* we use the last dmac to support vf promisc */ +#define RNP_N10_RAR_ENTRIES (127 - RNP_N10_RAR_NCSI_RAR_ENTRIES) +#else +#define RNP_N10_RAR_ENTRIES (128 - RNP_N10_RAR_NCSI_RAR_ENTRIES) +#endif + + +#define RNP_N10_MC_TBL_SIZE 128 +#define RNP_N10_VFT_TBL_SIZE 128 +#define RNP_N10_RX_PB_SIZE 512 +#ifndef RNP_N10_MSIX_VECTORS +#define RNP_N10_MSIX_VECTORS 64 +#endif +#define RNP_N400_MSIX_VECTORS 17 + +#define RNP10_MAX_LAYER2_FILTERS 16 +#define RNP10_MAX_TCAM_FILTERS 4096 +#define RNP10_MAX_TUPLE5_FILTERS 128 + + +/* setup queue speed limit to max_rate */ +static void rnp_dma_set_tx_maxrate_n10(struct rnp_dma_info *dma, u16 queue, + u32 max_rate) +{ +} + +/* setup mac with vf_num to veb table */ +static void rnp_dma_set_veb_mac_n10(struct rnp_dma_info *dma, u8 *mac, + u32 vfnum, u32 ring) +{ + u32 maclow, machi, ring_vfnum; + int port; + + maclow = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]; + machi = (mac[0] << 8) | mac[1]; + ring_vfnum = ring | ((0x80 | vfnum) << 8); + for (port = 0; port < 4; port++) { + dma_wr32(dma, RNP10_DMA_PORT_VBE_MAC_LO_TBL(port, vfnum), + maclow); + dma_wr32(dma, RNP10_DMA_PORT_VBE_MAC_HI_TBL(port, vfnum), + machi); + dma_wr32(dma, RNP10_DMA_PORT_VEB_VF_RING_TBL(port, vfnum), + ring_vfnum); + } +} + +/* setup vlan with vf_num to veb table */ +static void rnp_dma_set_veb_vlan_n10(struct rnp_dma_info *dma, u16 vlan, + u32 vfnum) +{ + int port; + + /* each vf can support only one vlan */ + for (port = 0; port < 4; port++) + dma_wr32(dma, RNP10_DMA_PORT_VEB_VID_TBL(port, vfnum), vlan); +} + +static void rnp_dma_clr_veb_all_n10(struct rnp_dma_info *dma) +{ + int port, i; + + for (port = 0; port < 4; port++) { + for (i = 0; i < VEB_TBL_CNTS; i++) { + dma_wr32(dma, RNP_DMA_PORT_VBE_MAC_LO_TBL(port, i), 0); + dma_wr32(dma, RNP_DMA_PORT_VBE_MAC_HI_TBL(port, i), 0); + dma_wr32(dma, RNP_DMA_PORT_VEB_VID_TBL(port, i), 0); + dma_wr32(dma, RNP_DMA_PORT_VEB_VF_RING_TBL(port, i), 0); + } + } +} + +static struct rnp_dma_operations dma_ops_n10 = { + .set_tx_maxrate = &rnp_dma_set_tx_maxrate_n10, + .set_veb_mac = &rnp_dma_set_veb_mac_n10, + .set_veb_vlan = &rnp_dma_set_veb_vlan_n10, + .clr_veb_all = &rnp_dma_clr_veb_all_n10, + +}; + +/** + * rnp_eth_set_rar_n10 - Set Rx address register + * @eth: pointer to eth structure + * @index: Receive address register to write + * @addr: Address to put into receive 
address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * @sriov_flag + * + * Puts an ethernet address into a receive address register. + **/ +static s32 rnp_eth_set_rar_n10(struct rnp_eth_info *eth, u32 index, u8 *addr, + bool enable_addr) +{ + u32 mcstctrl; + u32 rar_low, rar_high = 0; + u32 rar_entries = eth->num_rar_entries; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + /* Make sure we are using a valid rar index range */ + if (index >= (rar_entries + hw->ncsi_rar_entries)) { + rnp_err("RAR index %d is out of range.\n", index); + return RNP_ERR_INVALID_ARGUMENT; + } + + eth_dbg(eth, " RAR[%d] <= %pM. vmdq:%d enable:0x%x\n", index, addr); + + /* + * HW expects these in big endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[5] | ((u32)addr[4] << 8) | ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = eth_rd32(eth, RNP10_ETH_RAR_RH(index)); + rar_high &= ~(0x0000FFFF | RNP10_RAH_AV); + rar_high |= ((u32)addr[1] | ((u32)addr[0] << 8)); + + if (enable_addr) + rar_high |= RNP10_RAH_AV; + + eth_wr32(eth, RNP10_ETH_RAR_RL(index), rar_low); + eth_wr32(eth, RNP10_ETH_RAR_RH(index), rar_high); + + /* open unicast filter */ + /* we now not use unicast */ + /* but we must open this since dest-mac filter | unicast table */ + /* all packets up if close unicast table */ + mcstctrl = eth_rd32(eth, RNP10_ETH_DMAC_MCSTCTRL); + mcstctrl |= RNP10_MCSTCTRL_UNICASE_TBL_EN; + eth_wr32(eth, RNP10_ETH_DMAC_MCSTCTRL, mcstctrl); + + return 0; +} + +/** + * rnp_eth_clear_rar_n10 - Remove Rx address register + * @eth: pointer to eth structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +static s32 rnp_eth_clear_rar_n10(struct rnp_eth_info *eth, u32 index) +{ + u32 rar_high; + u32 rar_entries = eth->num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + eth_dbg(eth, "RAR index %d is out of range.\n", index); + return RNP_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. 
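+ * Clearing RAL and the low RAH address bits (plus the AV bit) disables the
+ * entry while leaving those extra RAH bits untouched.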
+ */ + rar_high = eth_rd32(eth, RNP10_ETH_RAR_RH(index)); + rar_high &= ~(0x0000FFFF | RNP10_RAH_AV); + + eth_wr32(eth, RNP10_ETH_RAR_RL(index), 0); + eth_wr32(eth, RNP10_ETH_RAR_RH(index), rar_high); + /* clear VMDq pool/queue selection for this RAR */ + eth->ops.clear_vmdq(eth, index, RNP_CLEAR_VMDQ_ALL); + + return 0; +} + +/** + * rnp_eth_set_vmdq_n10 - Associate a VMDq pool index with a rx address + * @eth: pointer to eth struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + * only mac->vf + **/ +static s32 rnp_eth_set_vmdq_n10(struct rnp_eth_info *eth, u32 rar, u32 vmdq) +{ + u32 rar_entries = eth->num_rar_entries; + struct rnp_hw *hw = (struct rnp_hw *)ð->back; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + eth_dbg(eth, "RAR index %d is out of range.\n", rar); + return RNP_ERR_INVALID_ARGUMENT; + } + /* n400 should use like this + * ---------- + * vf0 | vf1 | vf2 + * n400 4 | 8 | 12 + * n10 2 | 4 | 6 + * n10(1)0 | 2 | 4 + * not good here + */ + if (hw->hw_type == rnp_hw_n400) + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(rar), vmdq * 2); + else + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(rar), vmdq); + + return 0; +} + +/** + * rnp_eth_clear_vmdq_n10 - Disassociate a VMDq pool index from a rx address + * @eth: pointer to eth struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +static s32 rnp_eth_clear_vmdq_n10(struct rnp_eth_info *eth, u32 rar, u32 vmdq) +{ + u32 rar_entries = eth->num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + eth_dbg(eth, "RAR index %d is out of range.\n", rar); + return RNP_ERR_INVALID_ARGUMENT; + } + + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(rar), 0); + + return 0; +} + +static s32 rnp10_mta_vector(struct rnp_eth_info *eth, u8 *mc_addr) +{ + u32 vector = 0; + + switch (eth->mc_filter_type) { + case 0: /* use bits [36:47] of the address */ + vector = ((mc_addr[4] << 8) | (((u16)mc_addr[5]))); + break; + case 1: /* use bits [35:46] of the address */ + vector = ((mc_addr[4] << 7) | (((u16)mc_addr[5]) >> 1)); + break; + case 2: /* use bits [34:45] of the address */ + vector = ((mc_addr[4] << 6) | (((u16)mc_addr[5]) >> 2)); + break; + case 3: /* use bits [32:43] of the address */ + vector = ((mc_addr[4] << 5) | (((u16)mc_addr[5]) >> 3)); + break; + default: /* Invalid mc_filter_type */ + hw_dbg(hw, "MC filter type param set incorrectly\n"); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +static void rnp10_set_mta(struct rnp_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + struct rnp_eth_info *eth = &hw->eth; + + hw->addr_ctrl.mta_in_use++; + + vector = rnp10_mta_vector(eth, mc_addr); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. 
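+ * For example, vector 0xABC selects MTA register (0xABC >> 5) & 0x7F = 0x55
+ * and bit 0xABC & 0x1F = 0x1C within that register.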
+ */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw_dbg(hw, "\t\t%pM: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", mc_addr, + vector, vector_reg, vector_bit); + eth->mta_shadow[vector_reg] |= (1 << vector_bit); +} + +static void rnp10_set_vf_mta(struct rnp_hw *hw, u16 vector) +{ + u32 vector_bit; + u32 vector_reg; + struct rnp_eth_info *eth = &hw->eth; + + hw->addr_ctrl.mta_in_use++; + + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw_dbg(hw, "\t\t vf M: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", vector, + vector_reg, vector_bit); + eth->mta_shadow[vector_reg] |= (1 << vector_bit); +} + +static u8 *rnp_addr_list_itr(struct rnp_hw __maybe_unused *hw, u8 **mc_addr_ptr) +{ + struct netdev_hw_addr *mc_ptr; + u8 *addr = *mc_addr_ptr; + + mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); + if (mc_ptr->list.next) { + struct netdev_hw_addr *ha; + + ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); + *mc_addr_ptr = ha->addr; + } else + *mc_addr_ptr = NULL; + + return addr; +} + +/** + * rnp_update_mc_addr_list_n10 - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @netdev: pointer to net device structure + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +static s32 rnp_eth_update_mc_addr_list_n10(struct rnp_eth_info *eth, + struct net_device *netdev, + bool sriov_on) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + struct netdev_hw_addr *ha; + u32 i; + u32 v; + int addr_count = 0; + u8 *addr_list = NULL; + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + eth_dbg(eth, " Clearing MTA(multicast table)\n"); + + memset(ð->mta_shadow, 0, sizeof(eth->mta_shadow)); + + /* Update mta shadow */ + eth_dbg(eth, " Updating MTA..\n"); + + addr_count = netdev_mc_count(netdev); + + ha = list_first_entry(&netdev->mc.list, struct netdev_hw_addr, list); + addr_list = ha->addr; + for (i = 0; i < addr_count; i++) { + eth_dbg(eth, " Adding the multicast addresses:\n"); + rnp10_set_mta(hw, rnp_addr_list_itr(hw, &addr_list)); + } + + if (hw->ncsi_en) { + eth->ops.ncsi_set_mc_mta(eth); + } + + if (sriov_on) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + if (!test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo) { + struct vf_data_storage *vfinfo = + &adapter->vfinfo[i]; + int j; + + for (j = 0; + j < vfinfo->num_vf_mc_hashes; j++) + rnp10_set_vf_mta( + hw, + vfinfo->vf_mc_hashes[j]); + } + } + clear_bit(__RNP_USE_VFINFI, &adapter->state); + } + } + + /* Enable mta */ + for (i = 0; i < hw->eth.mcft_size; i++) { + if (hw->addr_ctrl.mta_in_use) + eth_wr32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(i), + eth->mta_shadow[i]); + } + + if (hw->addr_ctrl.mta_in_use > 0) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + if (!(adapter->flags & RNP_FLAG_SWITCH_LOOPBACK_EN)) { + v = eth_rd32(eth, RNP10_ETH_DMAC_MCSTCTRL); + eth_wr32(eth, RNP10_ETH_DMAC_MCSTCTRL, + v | RNP10_MCSTCTRL_MULTICASE_TBL_EN | + eth->mc_filter_type); + } + } + + eth_dbg(eth, " update MTA Done. 
mta_in_use:%d\n", + hw->addr_ctrl.mta_in_use); + return hw->addr_ctrl.mta_in_use; +} + +/* clean all mc addr */ +static void rnp_eth_clr_mc_addr_n10(struct rnp_eth_info *eth) +{ + int i; + + for (i = 0; i < eth->mcft_size; i++) + eth_wr32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(i), 0); +} + +/** + * rnp_eth_update_rss_key_n10 - Remove Rx address register + * @eth: pointer to eth structure + * @sriov_flag sriov status + * + * update rss key to eth regs + **/ +static void rnp_eth_update_rss_key_n10(struct rnp_eth_info *eth, bool sriov_flag) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + int i; + u8 *key_temp; + int key_len = RNP_RSS_KEY_SIZE; + u8 *key = hw->rss_key; + u32 *value; + + u32 iov_en = (sriov_flag) ? RNP10_IOV_ENABLED : 0; + + key_temp = kmalloc(key_len, GFP_KERNEL); + /* reoder the key */ + for (i = 0; i < key_len; i++) + *(key_temp + key_len - i - 1) = *(key + i); + + value = (u32 *)key_temp; + for (i = 0; i < key_len; i = i + 4) + eth_wr32(eth, RNP10_ETH_RSS_KEY + i, *(value + i / 4)); + + kfree(key_temp); + + /* open rss now */ + eth_wr32(eth, RNP10_ETH_RSS_CONTROL, + RNP10_ETH_ENABLE_RSS_ONLY | iov_en); +} + +/** + * rnp_eth_update_rss_table_n10 - Remove Rx address register + * @eth: pointer to eth structure + * + * update rss table to eth regs + **/ +static void rnp_eth_update_rss_table_n10(struct rnp_eth_info *eth) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u32 reta_entries = hw->rss_indir_tbl_num; + u32 tc_entries = hw->rss_tc_tbl_num; + int i; + + for (i = 0; i < tc_entries; i++) + eth_wr32(eth, RNP10_ETH_TC_IPH_OFFSET_TABLE(i), + hw->rss_tc_tbl[i]); + + for (i = 0; i < reta_entries; i++) + eth_wr32(eth, RNP10_ETH_RSS_INDIR_TBL(i), hw->rss_indir_tbl[i]); +} + +/** + * rnp_eth_set_vfta_n10 - Set VLAN filter table + * @eth: pointer to eth structure + * @vlan: VLAN id to write to VLAN filter + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +static s32 rnp_eth_set_vfta_n10(struct rnp_eth_info *eth, u32 vlan, bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 targetbit; + bool vfta_changed = false; + + /* todo in vf mode vlvf regester can be set according to vind*/ + if (vlan > 4095) + return RNP_ERR_PARAM; + + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + vfta = eth_rd32(eth, RNP10_VFTA(regindex)); + + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = true; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = true; + } + } + + if (vfta_changed) + eth_wr32(eth, RNP10_VFTA(regindex), vfta); + return 0; +} + +static void rnp_eth_clr_vfta_n10(struct rnp_eth_info *eth) +{ + u32 offset; + + for (offset = 0; offset < eth->vft_size; offset++) + eth_wr32(eth, RNP10_VFTA(offset), 0); +} + +/** + * rnp_eth_set_vlan_filter_n10 - Set VLAN filter table + * @eth: pointer to eth structure + * @status: on |off + * Turn on/off VLAN filter table. 
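+ * The global filter is toggled via bit 30 of RNP10_ETH_VLAN_FILTER_ENABLE.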
+ **/ +static void rnp_eth_set_vlan_filter_n10(struct rnp_eth_info *eth, bool status) +{ +#define ETH_VLAN_FILTER_BIT (30) + u32 value = eth_rd32(eth, RNP10_ETH_VLAN_FILTER_ENABLE); + + /* clear bit first */ + value &= (~(0x01 << ETH_VLAN_FILTER_BIT)); + if (status) + value |= (0x01 << ETH_VLAN_FILTER_BIT); + eth_wr32(eth, RNP10_ETH_VLAN_FILTER_ENABLE, value); +} + +static u16 rnp_layer2_pritologic_n10(u16 hw_id) +{ + return hw_id; +} + +static void rnp_eth_set_layer2_n10(struct rnp_eth_info *eth, + union rnp_atr_input *input, u16 pri_id, u8 queue, + bool prio_flag) +{ + u16 hw_id; + + hw_id = rnp_layer2_pritologic_n10(pri_id); + /* enable layer2 */ + eth_wr32(eth, RNP10_ETH_LAYER2_ETQF(hw_id), + (0x1 << 31) | (ntohs(input->layer2_formate.proto))); + + /* setup action */ + if (queue == RNP_FDIR_DROP_QUEUE) { + eth_wr32(eth, RNP10_ETH_LAYER2_ETQS(hw_id), (0x1 << 31)); + } else { + if (queue == ACTION_TO_MPE) + eth_wr32(eth, RNP10_ETH_LAYER2_ETQS(hw_id), + (0x1 << 29) | (MPE_PORT << 16)); + else + /* setup ring_number */ + eth_wr32(eth, RNP10_ETH_LAYER2_ETQS(hw_id), + (0x1 << 30) | (queue << 20)); + } +} + +static void rnp_eth_clr_layer2_n10(struct rnp_eth_info *eth, u16 pri_id) +{ + u16 hw_id; + + hw_id = rnp_layer2_pritologic_n10(pri_id); + eth_wr32(eth, RNP10_ETH_LAYER2_ETQF(hw_id), 0); +} + +static void rnp_eth_clr_all_layer2_n10(struct rnp_eth_info *eth) +{ + int i; +#define RNP10_MAX_LAYER2_FILTERS 16 + for (i = 0; i < RNP10_MAX_LAYER2_FILTERS; i++) + eth_wr32(eth, RNP10_ETH_LAYER2_ETQF(i), 0); +} + +static u16 rnp_tuple5_pritologic_n10(u16 hw_id) +{ + return hw_id; +} + +static u16 rnp_tuple5_pritologic_tcam_n10(u16 pri_id) +{ + int i; + int hw_id = 0; + int step = 32; + for (i = 0; i < pri_id; i++) { + hw_id += step; + if (hw_id > RNP10_MAX_TCAM_FILTERS) + hw_id = hw_id - RNP10_MAX_TCAM_FILTERS + 1; + } + + return hw_id; +} + +static void rnp_eth_set_tuple5_n10(struct rnp_eth_info *eth, + union rnp_atr_input *input, u16 pri_id, u8 queue, + bool prio_flag) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + +#define RNP10_SRC_IP_MASK BIT(0) +#define RNP10_DST_IP_MASK BIT(1) +#define RNP10_SRC_PORT_MASK BIT(2) +#define RNP10_DST_PORT_MASK BIT(3) +#define RNP10_L4_PROTO_MASK BIT(4) + + if (hw->fdir_mode != fdir_mode_tcam) { + u32 port = 0; + u8 mask_temp = 0; + u8 l4_proto_type = 0; + u16 hw_id; + + hw_id = rnp_tuple5_pritologic_n10(pri_id); + dbg("try to eable tuple 5 %x\n", hw_id); + if (input->formatted.src_ip[0] != 0) { + eth_wr32(eth, RNP10_ETH_TUPLE5_SAQF(hw_id), + htonl(input->formatted.src_ip[0])); + } else { + mask_temp |= RNP10_SRC_IP_MASK; + } + if (input->formatted.dst_ip[0] != 0) { + eth_wr32(eth, RNP10_ETH_TUPLE5_DAQF(hw_id), + htonl(input->formatted.dst_ip[0])); + } else + mask_temp |= RNP10_DST_IP_MASK; + if (input->formatted.src_port != 0) + port |= (htons(input->formatted.src_port)); + else + mask_temp |= RNP10_SRC_PORT_MASK; + if (input->formatted.dst_port != 0) + port |= (htons(input->formatted.dst_port) << 16); + else + mask_temp |= RNP10_DST_PORT_MASK; + + if (port != 0) + eth_wr32(eth, RNP10_ETH_TUPLE5_SDPQF(hw_id), port); + + switch (input->formatted.flow_type) { + case RNP_ATR_FLOW_TYPE_TCPV4: + l4_proto_type = IPPROTO_TCP; + break; + case RNP_ATR_FLOW_TYPE_UDPV4: + l4_proto_type = IPPROTO_UDP; + break; + case RNP_ATR_FLOW_TYPE_SCTPV4: + l4_proto_type = IPPROTO_SCTP; + break; + case RNP_ATR_FLOW_TYPE_IPV4: + l4_proto_type = input->formatted.inner_mac[0]; + break; + default: + l4_proto_type = 0; + } + + if (l4_proto_type == 0) + mask_temp |= 
RNP10_L4_PROTO_MASK; + + /* setup ftqf*/ + /* always set 0x3 */ + eth_wr32(eth, RNP10_ETH_TUPLE5_FTQF(hw_id), + (1 << 31) | (mask_temp << 25) | (l4_proto_type << 16) | + 0x3); + + /* setup action */ + if (queue == RNP_FDIR_DROP_QUEUE) { + eth_wr32(eth, RNP10_ETH_TUPLE5_POLICY(hw_id), + (0x1 << 31)); + } else { + if (queue == ACTION_TO_MPE) + eth_wr32(eth, RNP10_ETH_TUPLE5_POLICY(hw_id), + (0x1 << 29) | (MPE_PORT << 16)); + else + eth_wr32(eth, RNP10_ETH_TUPLE5_POLICY(hw_id), + ((0x1 << 30) | (queue << 20))); + } + + } else { + u32 port = 0; + u32 port_mask = 0; + u8 l4_proto_type = 0; + u8 l4_proto_mask = 0xff; + u32 action = 0; + u32 mark = 0; + u16 hw_id; + + hw_id = rnp_tuple5_pritologic_tcam_n10(pri_id); + eth_wr32(eth, RNP10_TCAM_MODE, 2); + dbg("try to eable tcam %d\n", hw_id); + if (input->formatted.src_ip[0] != 0) { + eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), + htonl(input->formatted.src_ip[0])); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), + htonl(input->formatted.src_ip_mask[0])); + + dbg("tcam src ip 0%x ---> 0x%x\n", + htonl(input->formatted.src_ip[0]), + RNP10_TCAM_SAQF(hw_id)); + dbg("tcam src ip mask 0%x ---> 0x%x\n", + htonl(input->formatted.src_ip_mask[0]), + RNP10_TCAM_SAQF_MASK(hw_id)); + } else { + eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), 0); + dbg("tcam src ip 0%x ---> 0x%x\n", 0, + RNP10_TCAM_SAQF(hw_id)); + dbg("tcam src ip mask 0%x ---> 0x%x\n", 0, + RNP10_TCAM_SAQF_MASK(hw_id)); + } + if (input->formatted.dst_ip[0] != 0) { + eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), + htonl(input->formatted.dst_ip[0])); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), + htonl(input->formatted.dst_ip_mask[0])); + dbg("tcam dst ip 0%x ---> 0x%x\n", + htonl(input->formatted.dst_ip[0]), + RNP10_TCAM_DAQF(hw_id)); + dbg("tcam dst ip mask 0%x ---> 0x%x\n", + htonl(input->formatted.dst_ip_mask[0]), + RNP10_TCAM_DAQF_MASK(hw_id)); + } else { + eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), 0); + dbg("tcam dst ip 0%x ---> 0x%x\n", 0, + RNP10_TCAM_DAQF(hw_id)); + dbg("tcam dst ip mask 0%x ---> 0x%x\n", 0, + RNP10_TCAM_DAQF_MASK(hw_id)); + } + if (input->formatted.src_port != 0) { + port |= (htons(input->formatted.src_port) << 16); + port_mask |= + (htons(input->formatted.src_port_mask) << 16); + + } + if (input->formatted.dst_port != 0) { + port |= (htons(input->formatted.dst_port)); + port_mask |= (htons(input->formatted.dst_port_mask)); + } + + /* setup src & dst port */ + if (port != 0) { + eth_wr32(eth, RNP10_TCAM_SDPQF(hw_id), port); + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(hw_id), port_mask); + + dbg("tcam port 0%x ---> 0x%x\n", port, + RNP10_TCAM_SDPQF(hw_id)); + dbg("tcam port mask 0%x ---> 0x%x\n", port_mask, + RNP10_TCAM_SDPQF_MASK(hw_id)); + } else { + eth_wr32(eth, RNP10_TCAM_SDPQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(hw_id), 0); + dbg("tcam port 0%x ---> 0x%x\n", port, + RNP10_TCAM_SDPQF(hw_id)); + dbg("tcam port mask 0%x ---> 0x%x\n", port_mask, + RNP10_TCAM_SDPQF_MASK(hw_id)); + } + + switch (input->formatted.flow_type) { + case RNP_ATR_FLOW_TYPE_TCPV4: + l4_proto_type = IPPROTO_TCP; + break; + case RNP_ATR_FLOW_TYPE_UDPV4: + l4_proto_type = IPPROTO_UDP; + break; + case RNP_ATR_FLOW_TYPE_SCTPV4: + l4_proto_type = IPPROTO_SCTP; + break; + case RNP_ATR_FLOW_TYPE_IPV4: + l4_proto_type = input->formatted.inner_mac[0]; + l4_proto_mask = input->formatted.inner_mac_mask[0]; + break; + default: + l4_proto_type = 0; + l4_proto_mask = 0; + } + + if (l4_proto_type != 0) { + action |= l4_proto_type; + mark 
|= l4_proto_mask; + } else { + } + + /* setup action */ + if (queue == RNP_FDIR_DROP_QUEUE) { + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), + (0x1 << 31) | action); + eth_wr32(eth, RNP10_TCAM_APQF_MASK(hw_id), mark); + dbg("tcam action 0%x ---> 0x%x\n", (0x1 << 31) | action, + RNP10_TCAM_APQF(hw_id)); + dbg("tcam action mask 0%x ---> 0x%x\n", mark, + RNP10_TCAM_APQF_MASK(hw_id)); + } else { + if (queue == ACTION_TO_MPE) { + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), + (0x1 << 29) | (MPE_PORT << 24) | + action); + } else { + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), + ((0x1 << 30) | (queue << 16) | + action)); + } + eth_wr32(eth, RNP10_TCAM_APQF_MASK(hw_id), mark); + + dbg("tcam action 0%x ---> 0x%x\n", + (0x1 << 30) | (queue << 16) | action, + RNP10_TCAM_APQF(hw_id)); + dbg("tcam action mask 0%x ---> 0x%x\n", mark, + RNP10_TCAM_APQF_MASK(hw_id)); + } + eth_wr32(eth, RNP10_TCAM_MODE, 1); + } +} + +static void rnp_eth_clr_tuple5_n10(struct rnp_eth_info *eth, u16 pri_id) +{ + u16 hw_id; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + if (hw->fdir_mode != fdir_mode_tcam) { + hw_id = rnp_tuple5_pritologic_n10(pri_id); + eth_wr32(eth, RNP10_ETH_TUPLE5_FTQF(hw_id), 0); + } else { + hw_id = rnp_tuple5_pritologic_tcam_n10(pri_id); + dbg("disable tcam tuple5 %d\n", hw_id); + /* earase tcam */ + eth_wr32(eth, RNP10_TCAM_MODE, 2); + eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SDPQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_APQF(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_APQF_MASK(hw_id), 0); + eth_wr32(eth, RNP10_TCAM_MODE, 1); + } +} + +static void rnp_eth_clr_all_tuple5_n10(struct rnp_eth_info *eth) +{ + int i; + + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + if (hw->fdir_mode != fdir_mode_tcam) { + for (i = 0; i < RNP10_MAX_TUPLE5_FILTERS; i++) + eth_wr32(eth, RNP10_ETH_TUPLE5_FTQF(i), 0); + eth_wr32(eth, RNP10_ETH_TCAM_EN, 0); + } else { + /* todo earase tcm */ + eth_wr32(eth, RNP10_ETH_TCAM_EN, 1); + eth_wr32(eth, RNP10_TOP_ETH_TCAM_CONFIG_ENABLE, 1); + eth_wr32(eth, RNP10_TCAM_MODE, 2); + /* dont't open tcam cache */ + eth_wr32(eth, RNP10_TCAM_CACHE_ENABLE, 0); + + for (i = 0; i < RNP10_MAX_TCAM_FILTERS; i++) { + eth_wr32(eth, RNP10_TCAM_SDPQF(i), 0); + eth_wr32(eth, RNP10_TCAM_DAQF(i), 0); + eth_wr32(eth, RNP10_TCAM_SAQF(i), 0); + eth_wr32(eth, RNP10_TCAM_APQF(i), 0); + + eth_wr32(eth, RNP10_TCAM_SDPQF_MASK(i), 0); + eth_wr32(eth, RNP10_TCAM_DAQF_MASK(i), 0); + eth_wr32(eth, RNP10_TCAM_SAQF_MASK(i), 0); + eth_wr32(eth, RNP10_TCAM_APQF_MASK(i), 0); + } + eth_wr32(eth, RNP10_TCAM_MODE, 1); + } +} + +static void rnp_eth_set_tcp_sync_n10(struct rnp_eth_info *eth, + int queue, bool flag, + bool prio) +{ + if (flag) + eth_wr32(eth, RNP10_ETH_SYNQF, (0x1 << 30) | (queue << 20)); + else + eth_wr32(eth, RNP10_ETH_SYNQF, 0); +} + +static void rnp_eth_set_min_max_packets_n10(struct rnp_eth_info *eth, int min, + int max) +{ + eth_wr32(eth, RNP10_ETH_DEFAULT_RX_MIN_LEN, min); + eth_wr32(eth, RNP10_ETH_DEFAULT_RX_MAX_LEN, max); +} + +static void rnp_eth_set_vlan_strip_n10(struct rnp_eth_info *eth, u16 queue, + bool enable) +{ + u32 reg = RNP10_ETH_VLAN_VME_REG(queue / 32); + u32 offset = queue % 32; + u32 data = eth_rd32(eth, reg); + + if (enable == true) + data |= (1 << offset); + else + data &= ~(1 << offset); + + eth_wr32(eth, reg, data); +} + +static void rnp_eth_set_vxlan_port_n10(struct 
rnp_eth_info *eth, u32 port) +{ + eth_wr32(eth, RNP10_ETH_VXLAN_PORT, port); +} + +static void rnp_eth_set_vxlan_mode_n10(struct rnp_eth_info *eth, bool inner) +{ + if (inner) + eth_wr32(eth, RNP10_ETH_WRAP_FIELD_TYPE, 1); + else + eth_wr32(eth, RNP10_ETH_WRAP_FIELD_TYPE, 0); +} + +static void rnp_eth_set_rx_hash_n10(struct rnp_eth_info *eth, bool status, + bool sriov_flag) +{ + u32 iov_en = (sriov_flag) ? RNP10_IOV_ENABLED : 0; + + if (status) { + eth_wr32(eth, RNP10_ETH_RSS_CONTROL, + RNP10_ETH_ENABLE_RSS_ONLY | iov_en); + } else { + eth_wr32(eth, RNP10_ETH_RSS_CONTROL, + RNP10_ETH_DISABLE_RSS | iov_en); + } +} + +static s32 rnp_eth_set_fc_mode_n10(struct rnp_eth_info *eth) +{ + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + s32 ret_val = 0; + int i; + + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & rnp_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { + hw_dbg(hw, + "Invalid water mark configuration\n"); + ret_val = RNP_ERR_INVALID_LINK_SETTINGS; + goto out; + } + } + } + + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & rnp_fc_tx_pause)) { + if (hw->fc.high_water[i]) { + eth_wr32(eth, RNP10_ETH_HIGH_WATER(i), + hw->fc.high_water[i]); + } + if (hw->fc.low_water[i]) { + eth_wr32(eth, RNP10_ETH_LOW_WATER(i), + hw->fc.low_water[i]); + } + } + } +out: + return ret_val; +} + +static void rnp_eth_set_vf_vlan_mode_n10(struct rnp_eth_info *eth, u16 vlan, + int vf, bool enable) +{ + struct rnp_hw *hw = (struct rnp_hw *)ð->back; + u32 value = vlan; + if (enable) + value |= BIT(31); + + eth_wr32(eth, RNP10_VLVF(vf), value); + + /* todo, should consider mutiple queue */ + if (hw->hw_type == rnp_hw_n400) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + eth_wr32(eth, RNP10_VLVF_TABLE(vf), (vf + 1) * 2); + else + eth_wr32(eth, RNP10_VLVF_TABLE(vf), vf * 2); + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + eth_wr32(eth, RNP10_VLVF_TABLE(vf), vf + 1); + else + eth_wr32(eth, RNP10_VLVF_TABLE(vf), vf); + } +} + +static int __get_ncsi_shm_info(struct rnp_hw *hw, + struct ncsi_shm_info *ncsi_shm) +{ + int i; + int *ptr = (int *)ncsi_shm; + int rbytes = round_up(sizeof(*ncsi_shm), 4); + + memset(ncsi_shm, 0, sizeof(*ncsi_shm)); + for (i = 0; i < (rbytes / 4); i++) + ptr[i] = rd32(hw, hw->ncsi_vf_cpu_shm_pf_base + 4 * i); + + return (ncsi_shm->valid & RNP_NCSI_SHM_VALID_MASK) == + RNP_NCSI_SHM_VALID; +} + +static void rnp_ncsi_set_uc_addr_n10(struct rnp_eth_info *eth) +{ + struct ncsi_shm_info ncsi_shm; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u8 mac[ETH_ALEN]; + + if (!hw->ncsi_en) + return; + if (__get_ncsi_shm_info(hw, &ncsi_shm)) { + if (ncsi_shm.valid & RNP_MC_VALID) { + mac[0] = ncsi_shm.uc.uc_addr_lo & 0xff; + mac[1] = (ncsi_shm.uc.uc_addr_lo >> 8) & 0xff; + mac[2] = (ncsi_shm.uc.uc_addr_lo >> 16) & 0xff; + mac[3] = (ncsi_shm.uc.uc_addr_lo >> 24) & 0xff; + mac[4] = ncsi_shm.uc.uc_addr_hi & 0xff; + mac[5] = (ncsi_shm.uc.uc_addr_hi >> 8) & 0xff; + if (is_valid_ether_addr(mac)) + eth->ops.set_rar(eth, hw->num_rar_entries, mac, + true); + } + } +} + +static void rnp_ncsi_set_mc_mta_n10(struct rnp_eth_info *eth) +{ + struct ncsi_shm_info ncsi_shm; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u8 i; + u8 mac[ETH_ALEN]; + + if (!hw->ncsi_en) + return; + if (__get_ncsi_shm_info(hw, &ncsi_shm)) { + if (ncsi_shm.valid & RNP_MC_VALID) { + for (i = 0; i < RNP_NCSI_MC_COUNT; i++) { + mac[0] = ncsi_shm.mc[i].mc_addr_lo & 0xff; + mac[1] = 
(ncsi_shm.mc[i].mc_addr_lo >> 8) & + 0xff; + mac[2] = (ncsi_shm.mc[i].mc_addr_lo >> 16) & + 0xff; + mac[3] = (ncsi_shm.mc[i].mc_addr_lo >> 24) & + 0xff; + mac[4] = ncsi_shm.mc[i].mc_addr_hi & 0xff; + mac[5] = (ncsi_shm.mc[i].mc_addr_hi >> 8) & + 0xff; + if (is_multicast_ether_addr(mac) && + !is_zero_ether_addr(mac)) { + rnp10_set_mta(hw, mac); + } + } + } + } +} + +static void rnp_ncsi_set_vfta_n10(struct rnp_eth_info *eth) +{ + struct ncsi_shm_info ncsi_shm; + struct rnp_hw *hw = (struct rnp_hw *)eth->back; + + if (!hw->ncsi_en) + return; + if (__get_ncsi_shm_info(hw, &ncsi_shm)) { + if (ncsi_shm.valid & RNP_VLAN_VALID) + hw->ops.set_vlan_filter(hw, ncsi_shm.ncsi_vlan, true, + false); + } +} + +static struct rnp_eth_operations eth_ops_n10 = { + .set_rar = &rnp_eth_set_rar_n10, + .clear_rar = &rnp_eth_clear_rar_n10, + .set_vmdq = &rnp_eth_set_vmdq_n10, + .clear_vmdq = &rnp_eth_clear_vmdq_n10, + .update_mc_addr_list = &rnp_eth_update_mc_addr_list_n10, + .clr_mc_addr = &rnp_eth_clr_mc_addr_n10, + /* store rss info to eth */ + .set_rss_key = &rnp_eth_update_rss_key_n10, + .set_rss_table = &rnp_eth_update_rss_table_n10, + .set_vfta = &rnp_eth_set_vfta_n10, + .clr_vfta = &rnp_eth_clr_vfta_n10, + .set_vlan_filter = &rnp_eth_set_vlan_filter_n10, + /* ncsi */ + .ncsi_set_vfta = &rnp_ncsi_set_vfta_n10, + .ncsi_set_uc_addr = &rnp_ncsi_set_uc_addr_n10, + .ncsi_set_mc_mta = &rnp_ncsi_set_mc_mta_n10, + .set_layer2_remapping = &rnp_eth_set_layer2_n10, + .clr_layer2_remapping = &rnp_eth_clr_layer2_n10, + .clr_all_layer2_remapping = &rnp_eth_clr_all_layer2_n10, + .set_tuple5_remapping = &rnp_eth_set_tuple5_n10, + .clr_tuple5_remapping = &rnp_eth_clr_tuple5_n10, + .clr_all_tuple5_remapping = &rnp_eth_clr_all_tuple5_n10, + .set_tcp_sync_remapping = &rnp_eth_set_tcp_sync_n10, + .set_min_max_packet = &rnp_eth_set_min_max_packets_n10, + .set_vlan_strip = &rnp_eth_set_vlan_strip_n10, + .set_vxlan_port = &rnp_eth_set_vxlan_port_n10, + .set_vxlan_mode = &rnp_eth_set_vxlan_mode_n10, + .set_rx_hash = &rnp_eth_set_rx_hash_n10, + .set_fc_mode = &rnp_eth_set_fc_mode_n10, + .set_vf_vlan_mode = &rnp_eth_set_vf_vlan_mode_n10, +}; + +/** + * rnp_init_hw_n10 - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +static s32 rnp_init_hw_ops_n10(struct rnp_hw *hw) +{ + s32 status = 0; + + /* Reset the hardware */ + status = hw->ops.reset_hw(hw); + + /* Start the HW */ + if (status == 0) + status = hw->ops.start_hw(hw); + + return status; +} + +static s32 rnp_get_permtion_mac_addr_n10(struct rnp_hw *hw, u8 *mac_addr) +{ + if (rnp_fw_get_macaddr(hw, hw->pfvfnum, mac_addr, hw->nr_lane)) { + dbg("generate ramdom macaddress...\n"); + eth_random_addr(mac_addr); + } + + hw->mac.mac_flags |= RNP_FLAGS_INIT_MAC_ADDRESS; + dbg("%s mac:%pM\n", __func__, mac_addr); + + return 0; +} + +static s32 rnp_reset_hw_ops_n10(struct rnp_hw *hw) +{ + int i; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + + /* Call adapter stop to disable tx/rx and clear interrupts */ + dma_wr32(dma, RNP_DMA_AXI_EN, 0); + + + /* if not ncsi or hw not support 'control nic_reset', driver control it */ + if (hw->ncsi_en && hw->fw_version >= 0x00060000) { + /* fw will do 
nic-reset. to reduct ncsi bmc ping pkg lose */ + } else { +#define N10_NIC_RESET 0 + wr32(hw, RNP10_TOP_NIC_REST_N, N10_NIC_RESET); + /* + * we need this + */ + wmb(); + wr32(hw, RNP10_TOP_NIC_REST_N, ~N10_NIC_RESET); + } + + rnp_mbx_fw_reset_phy(hw); + /* should set all tx-start to 1 */ + for (i = 0; i < RNP_N10_MAX_TX_QUEUES; i++) + dma_ring_wr32(dma, RING_OFFSET(i) + RNP_DMA_TX_START, 1); + + wr32(hw, RNP10_TOP_ETH_BUG_40G_PATCH, 1); + /* set 2046 --> 0x18070 */ + eth_wr32(eth, RNP10_ETH_RX_PROGFULL_THRESH_PORT, DROP_ALL_THRESH); + + /* tcam not reset */ + eth->ops.clr_all_tuple5_remapping(eth); + + /* Store the permanent mac address */ + if (!(hw->mac.mac_flags & RNP_FLAGS_INIT_MAC_ADDRESS)) { + rnp_get_permtion_mac_addr_n10(hw, hw->mac.perm_addr); + memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); + } + + hw->ops.init_rx_addrs(hw); + + /* open vxlan default */ +#define VXLAN_HW_ENABLE (1) + eth_wr32(eth, RNP10_ETH_TUNNEL_MOD, VXLAN_HW_ENABLE); + for (i = 0; i < dma->max_tx_queues; i++) + rnp_wr_reg(hw->ring_msix_base + RING_VECTOR(i), 0); + + if (hw->phy_type == PHY_TYPE_SGMII) { + u16 pause_bits = 0; + u32 value; + + if (hw->fc.requested_mode == PAUSE_AUTO) { + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } else { + if ((hw->fc.requested_mode & PAUSE_TX) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + pause_bits |= ASYM_PAUSE; + + } else if ((!(hw->fc.requested_mode & PAUSE_TX)) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + } else + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } + rnp_mbx_phy_read(hw, 4, &value); + value &= ~0xC00; + value |= pause_bits; + rnp_mbx_phy_write(hw, 4, value); + } + + return 0; +} + +static s32 rnp_start_hw_ops_n10(struct rnp_hw *hw) +{ + s32 ret_val = 0; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_dma_info *dma = &hw->dma; + + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, + INNER_L4_BIT | PKT_LEN_ERR | HDR_LEN_ERR); + eth_wr32(eth, RNP10_ETH_BYPASS, 0); + eth_wr32(eth, RNP10_ETH_DEFAULT_RX_RING, 0); + + /* DMA common Registers */ + dma_wr32(dma, RNP_DMA_CONFIG, DMA_VEB_BYPASS); + + /* enable-dma-axi */ + dma_wr32(dma, RNP_DMA_AXI_EN, (RX_AXI_RW_EN | TX_AXI_RW_EN)); + + return ret_val; +} + +/* set n10 min/max packet according to new_mtu + * we support mtu + 14 + 4 * 3 as max packet len*/ +static void rnp_set_mtu_hw_ops_n10(struct rnp_hw *hw, int new_mtu) +{ + struct rnp_eth_info *eth = &hw->eth; + + int min = 60; + int max = new_mtu + ETH_HLEN + ETH_FCS_LEN * 3; + + hw->min_length_current = min; + hw->max_length_current = max; + + eth->ops.set_min_max_packet(eth, min, max); +} + +/* setup n10 vlan filter status */ +static void rnp_set_vlan_filter_en_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_eth_info *eth = &hw->eth; + eth->ops.set_vlan_filter(eth, status); +} + +/* set vlan to n10 vlan filter table & veb */ +/* pf setup call */ +static void rnp_set_vlan_filter_hw_ops_n10(struct rnp_hw *hw, u16 vid, + bool enable, bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_dma_info *dma = &hw->dma; + u32 vfnum = hw->max_vfs - 1; + + /* setup n10 eth vlan table */ + eth->ops.set_vfta(eth, vid, enable); + + /* setup veb */ + /* only ctags setup veb if in sriov and not stags */ + if (vid && sriov_flag) { + if (enable) { + dma->ops.set_veb_vlan(dma, vid, vfnum); + } else { + dma->ops.set_veb_vlan(dma, 0, vfnum); + } + } +} + +static void rnp_set_vf_vlan_filter_hw_ops_n10(struct rnp_hw *hw, u16 vid, + int vf, bool enable, + bool veb_only) +{ + struct rnp_dma_info *dma = &hw->dma; + + if (!veb_only) { + /* call set vfta without veb 
setup */ + hw->ops.set_vlan_filter(hw, vid, enable, false); + + } else { + if (enable) { + dma->ops.set_veb_vlan(dma, vid, vf); + } else { + dma->ops.set_veb_vlan(dma, 0, vf); + } + } +} + +static void rnp_clr_vlan_veb_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_dma_info *dma = &hw->dma; + u32 vfnum = hw->vfnum; + + dma->ops.set_veb_vlan(dma, 0, vfnum); +} + +/* setup n10 vlan strip status */ +static void rnp_set_vlan_strip_hw_ops_n10(struct rnp_hw *hw, u16 queue, + bool strip) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_vlan_strip(eth, queue, strip); +} + +/* update new n10 mac */ +static void rnp_set_mac_hw_ops_n10(struct rnp_hw *hw, u8 *mac, bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_mac_info *mac_info = &hw->mac; + /* use this queue index to setup veb */ + /* now pf use queu 0 /1 + * vfnum is the last vfnum */ + int queue = hw->veb_ring; + int vfnum = hw->vfnum; + + eth->ops.set_rar(eth, 0, mac, true); + if (sriov_flag) { + eth->ops.set_vmdq(eth, 0, queue / hw->sriov_ring_limit); + dma->ops.set_veb_mac(dma, mac, vfnum, queue); + } + + mac_info->ops.set_mac(mac_info, mac, 0); +} + +/** + * rnp_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. + * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int rnp_write_uc_addr_list_n10(struct rnp_hw *hw, + struct net_device *netdev, + bool sriov_flag) +{ + unsigned int rar_entries = hw->num_rar_entries - 1; + u32 vfnum = hw->vfnum; + struct rnp_eth_info *eth = &hw->eth; + int count = 0; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + vfnum = 0; + /* In SR-IOV mode significantly less RAR entries are available */ + if (sriov_flag) + rar_entries = hw->max_pf_macvlans - 1; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { + struct netdev_hw_addr *ha; + + hw_dbg(hw, "%s: rar_entries:%d, uc_count:%d\n", __func__, + hw->num_rar_entries, netdev_uc_count(netdev)); + + /* return error if we do not support writing to RAR table */ + if (!eth->ops.set_rar) + return -ENOMEM; + + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + /* that's ok */ + eth->ops.set_rar(eth, rar_entries, ha->addr, + RNP10_RAH_AV); + if (sriov_flag) + eth->ops.set_vmdq(eth, rar_entries, vfnum); + + rar_entries--; + + count++; + } + } + /* write the addresses in reverse order to avoid write combining */ + + hw_dbg(hw, "%s: Clearing RAR[1 - %d]\n", __func__, rar_entries); + for (; rar_entries > 0; rar_entries--) + eth->ops.clear_rar(eth, rar_entries); + + if (hw->ncsi_en) + eth->ops.ncsi_set_uc_addr(eth); + + return count; +} + +void check_vf_promisc(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int i; + + hw->vf_promisc_mode = 0; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].promisc_mode) { + hw->vf_promisc_mode = 1; + hw->vf_promisc_num = i; + break; + } + } +} + +static void rnp_set_rx_mode_hw_ops_n10(struct rnp_hw *hw, + struct net_device *netdev, + bool sriov_flag) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 fctrl; + netdev_features_t features = netdev->features; + int count; + struct rnp_eth_info *eth = &hw->eth; + + hw_dbg(hw, "%s\n", __func__); + + /* broadcast always bypass */ + fctrl = 
eth_rd32(eth, RNP10_ETH_DMAC_FCTRL) | RNP10_FCTRL_BPE; + /* clear the bits we are changing the status of */ + fctrl &= ~(RNP10_FCTRL_UPE | RNP10_FCTRL_MPE); + /* promisc mode */ +#ifdef VF_PROMISC_SUPPORT + check_vf_promisc(adapter); + if ((netdev->flags & IFF_PROMISC) || (!hw->vf_promisc_mode)) { +#else + if (netdev->flags & IFF_PROMISC) { +#endif + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (RNP10_FCTRL_UPE | RNP10_FCTRL_MPE); + /* disable hardware filter vlans in promisc mode */ + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + } else { + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= RNP10_FCTRL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + /* we always update vf multicast info */ + count = eth->ops.update_mc_addr_list(eth, netdev, true); + if (count < 0) { + fctrl |= RNP10_FCTRL_MPE; + } else if (count) { + + } + } + hw->addr_ctrl.user_set_promisc = false; + } + + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + if (rnp_write_uc_addr_list_n10(hw, netdev, sriov_flag) < 0) { + fctrl |= RNP10_FCTRL_UPE; + } + + eth_wr32(eth, RNP10_ETH_DMAC_FCTRL, fctrl); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + eth->ops.set_vlan_filter(eth, true); + else + eth->ops.set_vlan_filter(eth, false); + + if ((hw->addr_ctrl.user_set_promisc == true) || + (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR)) { + /* set pkt_len_err and hdr_len_err default to 1 */ + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, + INNER_L4_BIT | PKT_LEN_ERR | HDR_LEN_ERR); + } else { + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, INNER_L4_BIT); + } + + hw->ops.set_mtu(hw, netdev->mtu); +} + +/* setup an rar with vfnum */ +static void rnp_set_rar_with_vf_hw_ops_n10(struct rnp_hw *hw, u8 *mac, int idx, + u32 vfnum, bool enable) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_rar(eth, idx, mac, enable); + eth->ops.set_vmdq(eth, idx, vfnum); +} + +static void rnp_clr_rar_hw_ops_n10(struct rnp_hw *hw, int idx) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clear_rar(eth, idx); +} + +static void rnp_clr_rar_all_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + unsigned int rar_entries = hw->num_rar_entries - 1; + int i; + + for (i = 0; i < rar_entries; i++) + eth->ops.clear_rar(eth, rar_entries); +} + +static void rnp_set_fcs_mode_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_mac_info *mac = &hw->mac; + + mac->ops.set_mac_fcs(mac, status); +} + +static void rnp_set_vxlan_port_hw_ops_n10(struct rnp_hw *hw, u32 port) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_vxlan_port(eth, port); +} + +static void rnp_set_vxlan_mode_hw_ops_n10(struct rnp_hw *hw, bool inner) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_vxlan_mode(eth, inner); +} + +static void rnp_set_mac_rx_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_eth_info *eth = &hw->eth; + + if (status) + eth_wr32(eth, RNP10_ETH_RX_PROGFULL_THRESH_PORT, + RECEIVE_ALL_THRESH); + else + eth_wr32(eth, RNP10_ETH_RX_PROGFULL_THRESH_PORT, + DROP_ALL_THRESH); + + mac->ops.set_mac_rx(mac, status); +} + +static void rnp_set_sriov_status_hw_ops_n10(struct rnp_hw *hw, bool status) +{ + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + u32 v; + + if (status) { + 
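/* SR-IOV on: clear the VEB bypass bit in DMA_CONFIG and set the IOV enable bit in MRQC */ +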
dma_wr32(dma, RNP_DMA_CONFIG, + dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS)); + v = eth_rd32(eth, RNP10_MRQC_IOV_EN); + v |= RNP10_IOV_ENABLED; + eth_wr32(eth, RNP10_MRQC_IOV_EN, v); + } else { + v = eth_rd32(eth, RNP10_MRQC_IOV_EN); + v &= ~(RNP10_IOV_ENABLED); + eth_wr32(eth, RNP10_MRQC_IOV_EN, v); + dma->ops.clr_veb_all(dma); + } + +#if defined(NIC_VF_FXIED) || defined(VF_PROMISC_SUPPORT) + /* we setup default to pf */ + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(127), hw->default_vf_num); + /* if pf or vf in promisc mode set promisc to that vf*/ + if (hw->vf_promisc_mode) { + int fix_vf_num; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + fix_vf_num = (hw->vf_promisc_num + 1) * hw->sriov_ring_limit / 2; + else + fix_vf_num = (hw->vf_promisc_num) * hw->sriov_ring_limit / 2; + + eth_wr32(eth, RNP10_VM_DMAC_MPSAR_RING(127), fix_vf_num); + } +#endif +} + +static void rnp_set_sriov_vf_mc_hw_ops_n10(struct rnp_hw *hw, u16 mc_addr) +{ + struct rnp_eth_info *eth = &hw->eth; + u32 vector_bit; + u32 vector_reg; + u32 mta_reg; + + vector_reg = (mc_addr >> 5) & 0x7F; + vector_bit = mc_addr & 0x1F; + mta_reg = eth_rd32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(vector_reg)); + mta_reg |= (1 << vector_bit); + eth_wr32(eth, RNP10_ETH_MULTICAST_HASH_TABLE(vector_reg), mta_reg); +} + +static void rnp_update_sriov_info_hw_ops_n10(struct rnp_hw *hw) +{ +} + +static void rnp_set_pause_mode_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_eth_info *eth = &hw->eth; + + mac->ops.set_fc_mode(mac); + eth->ops.set_fc_mode(eth); +} + +static void rnp_get_pause_mode_hw_ops_n10(struct rnp_hw *hw) +{ + u32 value_r5; + + if (hw->phy_type != PHY_TYPE_SGMII) { + if ((hw->fc.requested_mode & PAUSE_TX) && + (hw->fc.requested_mode & PAUSE_RX)) { + hw->fc.current_mode = rnp_fc_full; + } else if (hw->fc.requested_mode & PAUSE_TX) { + hw->fc.current_mode = rnp_fc_tx_pause; + } else if (hw->fc.requested_mode & PAUSE_RX) { + hw->fc.current_mode = rnp_fc_rx_pause; + } else { + hw->fc.current_mode = rnp_fc_none; + } + return; + } + + /* we get pause mode from phy reg */ + rnp_mbx_phy_read(hw, 5, &value_r5); + if (!hw->link) { + /* if link is not up ,fc is null */ + hw->fc.current_mode = rnp_fc_none; + } else { + if (hw->fc.requested_mode == PAUSE_AUTO) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_full; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else + hw->fc.current_mode = rnp_fc_none; + + } else if ((hw->fc.requested_mode & PAUSE_TX) && + (hw->fc.requested_mode & PAUSE_RX)) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_full; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else + hw->fc.current_mode = rnp_fc_none; + + } else if (hw->fc.requested_mode & PAUSE_TX) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_tx_pause; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_none; + else + hw->fc.current_mode = rnp_fc_none; + + } else if (hw->fc.requested_mode & PAUSE_RX) { + if (value_r5 & SYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else if (value_r5 & ASYM_PAUSE) + hw->fc.current_mode = rnp_fc_rx_pause; + else + hw->fc.current_mode = rnp_fc_none; + + } else { + hw->fc.current_mode = rnp_fc_none; + } + } +} + +static void rnp_update_hw_info_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + u32 data; + /* 1 enable eth filter */ + 
eth_wr32(eth, RNP10_HOST_FILTER_EN, 1); + /* 2 open redir en */ + eth_wr32(eth, RNP10_REDIR_EN, 1); + + /* 3 open sctp checksum and other checksum */ + if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM) + eth_wr32(eth, RNP10_ETH_SCTP_CHECKSUM_EN, 1); + + /* 4 mark multicast as broadcast */ + dma_wr32(dma, RNP_VEB_MAC_MASK_LO, 0xffffffff); + dma_wr32(dma, RNP_VEB_MAC_MASK_HI, 0xfeff); + /* 5 setup dma split */ + + data = dma_rd32(dma, RNP_DMA_CONFIG); + data &= (0x00000ffff); +#ifdef FT_PADDING +#define PADDING_BIT 8 + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) + SET_BIT(PADDING_BIT, data); +#endif + /* in this mode we fixed dma split */ + /* if PAGE_SIZE */ +#define RX_MAX_DWORD (96) + data |= (((hw->dma_split_size) >> 4) << 16); + dma_wr32(dma, RNP_DMA_CONFIG, data); + /* 6 open vxlan inner match? */ + /* 7 setup tcp sync remapping */ + /* n10 does not support prio */ + if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) { + hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue, + true, false); + } else { + hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue, + false, false); + } +} + +static void rnp_update_hw_rx_drop_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int i; + struct rnp_ring *ring; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + if (adapter->rx_drop_status & BIT(i)) { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, + adapter->drop_time); + } else { + ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0); + } + } +} + +static void rnp_set_rx_hash_hw_ops_n10(struct rnp_hw *hw, bool status, + bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_rx_hash(eth, status, sriov_flag); +} + +/* setup mac to rar 0 + * clean vmdq + * clean mc addr */ +static s32 rnp_init_rx_addrs_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + u32 i; + u32 rar_entries = eth->num_rar_entries; + u32 v; + + hw_dbg(hw, "init_rx_addrs:rar_entries:%d, mac.addr:%pM\n", rar_entries, + hw->mac.addr); + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (!is_valid_ether_addr(hw->mac.addr)) { + /* Get the MAC address from the RAR0 for later reference */ + memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN); + hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); + } else { + /* Setup the receive address. */ + hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); + hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); + eth->ops.set_rar(eth, 0, hw->mac.addr, true); + + /* clear VMDq pool/queue selection for RAR 0 */ + eth->ops.clear_vmdq(eth, 0, RNP_CLEAR_VMDQ_ALL); + } + hw->addr_ctrl.overflow_promisc = 0; + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. 
*/ + hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + eth->ops.clear_rar(eth, i); + } + if (hw->ncsi_en) + eth->ops.ncsi_set_uc_addr(eth); + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + v = eth_rd32(eth, RNP10_ETH_DMAC_MCSTCTRL); + v &= (~0x3); + v |= eth->mc_filter_type; + eth_wr32(eth, RNP10_ETH_DMAC_MCSTCTRL, v); + + hw_dbg(hw, " Clearing MTA\n"); + eth->ops.clr_mc_addr(eth); + if (hw->ncsi_en) { + eth->ops.ncsi_set_mc_mta(eth); + eth->ops.ncsi_set_vfta(eth); + } + + return 0; +} + +static void rnp_clr_vfta_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_vfta(eth); +} + +static void rnp_set_txvlan_mode_hw_ops_n10(struct rnp_hw *hw, bool cvlan) +{ + struct rnp_mac_info *mac = &hw->mac; + if (cvlan) { + mac_wr32(mac, RNP10_MAC_TX_VLAN_TAG, 0x4000000); + mac_wr32(mac, RNP10_MAC_TX_VLAN_MODE, 0x100000); + mac_wr32(mac, RNP10_MAC_INNER_VLAN_INCL, 0x100000); + } else { + mac_wr32(mac, RNP10_MAC_TX_VLAN_TAG, 0xc600000); + mac_wr32(mac, RNP10_MAC_TX_VLAN_MODE, 0x180000); + mac_wr32(mac, RNP10_MAC_INNER_VLAN_INCL, 0x100000); + } +} + +static void rnp_set_rss_key_hw_ops_n10(struct rnp_hw *hw, bool sriov_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int key_len = RNP_RSS_KEY_SIZE; + + memcpy(hw->rss_key, adapter->rss_key, key_len); + + eth->ops.set_rss_key(eth, sriov_flag); +} + +static void rnp_set_rss_table_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_rss_table(eth); +} + +static void rnp_set_mbx_link_event_hw_ops_n10(struct rnp_hw *hw, int enable) +{ + rnp_mbx_link_event_enable(hw, enable); +} + +static void rnp_set_mbx_ifup_hw_ops_n10(struct rnp_hw *hw, int enable) +{ + rnp_mbx_ifup_down(hw, enable); + + if (hw->phy_type == PHY_TYPE_10G_TP) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + /* first call reset an */ + if (enable) { + hw->ops.setup_link(hw, hw->phy.autoneg_advertised, + hw->autoneg, adapter->speed, + hw->duplex); + } + } +} + +/** + * rnp_check_mac_link_n10 - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 rnp_check_mac_link_hw_ops_n10(struct rnp_hw *hw, rnp_link_speed *speed, + bool *link_up, bool *duplex, + bool link_up_wait_to_complete) +{ + + if (hw->speed == 10) { + *speed = RNP_LINK_SPEED_10_FULL; + } else if (hw->speed == 100) { + *speed = RNP_LINK_SPEED_100_FULL; + } else if (hw->speed == 1000) { + *speed = RNP_LINK_SPEED_1GB_FULL; + } else if (hw->speed == 10000) { + *speed = RNP_LINK_SPEED_10GB_FULL; + } else if (hw->speed == 25000) { + *speed = RNP_LINK_SPEED_25GB_FULL; + } else if (hw->speed == 40000) { + *speed = RNP_LINK_SPEED_40GB_FULL; + } else { + *speed = RNP_LINK_SPEED_UNKNOWN; + } + + *link_up = hw->link; + *duplex = 1; + + return 0; +} + +static s32 rnp_setup_mac_link_hw_ops_n10(struct rnp_hw *hw, u32 adv, u32 autoneg, + u32 speed, u32 duplex) +{ + struct rnp_adapter *adpt = hw->back; + u32 value = 0; + u32 value_r4 = 0; + u32 value_r9 = 0; + + rnp_logd(LOG_PHY, + "%s setup phy: phy_addr=%d speed=%d duplex=%d autoneg=%d " + "is_backplane=%d is_sgmii=%d\n", + __func__, adpt->phy_addr, speed, duplex, autoneg, + hw->is_backplane, hw->is_sgmii); + + if (hw->is_backplane) { + /* 
Backplane type, support AN, unsupport set speed */ + return rnp_set_lane_fun(hw, LANE_FUN_AN, autoneg, 0, 0, 0); + } + + /* TODO: Not support fiber */ + if ((!hw->is_sgmii) && (hw->phy_type != PHY_TYPE_10G_TP)) { + if (hw->force_10g_1g_speed_ablity) { + return rnp_mbx_force_speed(hw, speed); + } else { + return 0; + } + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + rnp_mbx_phy_read(hw, PHY_826x_MDIX, &value); + + value &= ~(BIT(8) | BIT(9)); + /* Options: 0: Auto (default) 1: MDI mode 2: MDI-X mode */ + switch (hw->phy.mdix) { + case 1: + value |= BIT(8)|BIT(9); + break; + case 2: + value |= BIT(9); + break; + case 0: + default: + break; + } + rnp_mbx_phy_write(hw, PHY_826x_MDIX, value); + + if (!autoneg) { + rnp_mbx_phy_read(hw, PHY_826x_SPEED, &value); + value &= (~(BIT(13) | BIT(6) | BIT(5) | BIT(4) | + BIT(3) | BIT(2))); + + switch (speed) { + case RNP_LINK_SPEED_10GB_FULL: + value |= BIT(13) | BIT(6); + break; + case RNP_LINK_SPEED_1GB_FULL: + case RNP_LINK_SPEED_1GB_HALF: + value |= BIT(6); + ; + break; + case RNP_LINK_SPEED_100_FULL: + case RNP_LINK_SPEED_100_HALF: + value |= BIT(13); + break; + case RNP_LINK_SPEED_10_FULL: + case RNP_LINK_SPEED_10_HALF: + value = 0; + break; + default: + hw_dbg(hw, "unknown speed = 0x%x.\n", speed); + break; + } + rnp_mbx_phy_write(hw, PHY_826x_SPEED, value); + rnp_mbx_phy_read(hw, PHY_826x_DUPLEX, &value); + value &= (~BIT(8)); + if (duplex) + value |= BIT(8); + rnp_mbx_phy_write(hw, PHY_826x_DUPLEX, value); + rnp_mbx_phy_read(hw, PHY_826x_AN, &value); + value &= (~BIT(12)); + rnp_mbx_phy_write(hw, PHY_826x_AN, value); + } else { + rnp_mbx_phy_read(hw, PHY_826x_ADV, &value); + + value &= (~(BIT(5) | BIT(6) | BIT(7) | BIT(8) | + BIT(10) | BIT(11))); + + if (adv & RNP_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_100_FULL; + value |= BIT(8); + } + if (adv & RNP_LINK_SPEED_100_HALF) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_100_FULL; + value |= BIT(7); + } + + value |= BIT(10) | BIT(11); + /* BIT10 fc BIT11 asyfc */ + rnp_mbx_phy_write(hw, PHY_826x_ADV, value); + + rnp_mbx_phy_read(hw, PHY_826x_GBASE_ADV, &value); + value &= (~(BIT(7) | BIT(8) | BIT(12))); + + /* bit 7 2.5G bit 8 5G */ + if (adv & RNP_LINK_SPEED_10GB_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_10GB_FULL; + value |= BIT(12); + } + rnp_mbx_phy_write(hw, PHY_826x_GBASE_ADV, value); + rnp_mbx_phy_read(hw, PHY_826x_GBASE_ADV_2, &value); + value &= 0x00ff; + if (adv & RNP_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_1GB_FULL; + value |= BIT(9); + } + if (adv & RNP_LINK_SPEED_1GB_HALF) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_1GB_HALF; + value |= BIT(8); + } + rnp_mbx_phy_write(hw, PHY_826x_GBASE_ADV_2, value); + rnp_mbx_phy_read(hw, PHY_826x_AN, &value); + value |= BIT(12) | BIT(9); + rnp_mbx_phy_write(hw, PHY_826x_AN, value); + } + + return 0; + } + + /* Set MDI/MDIX mode */ + rnp_mbx_phy_read(hw, RNP_YT8531_PHY_SPEC_CTRL, &value); + value &= ~RNP_YT8531_PHY_SPEC_CTRL_MDIX_CFG_MASK; + /* Options: 0: Auto (default) 1: MDI mode 2: MDI-X mode */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + value |= RNP_YT8531_PHY_SPEC_CTRL_FORCE_MDIX; + break; + case 0: + default: + value |= RNP_YT8531_PHY_SPEC_CTRL_AUTO_MDI_MDIX; + break; + } + rnp_mbx_phy_write(hw, RNP_YT8531_PHY_SPEC_CTRL, value); + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. 
+ */ + hw->phy.autoneg_advertised = speed; + + if (!autoneg) { + switch (speed) { + case RNP_LINK_SPEED_1GB_FULL: + case RNP_LINK_SPEED_1GB_HALF: + value = RNP_MDI_PHY_SPEED_SELECT1; + speed = RNP_LINK_SPEED_1GB_FULL; + goto out; + break; + case RNP_LINK_SPEED_100_FULL: + case RNP_LINK_SPEED_100_HALF: + value = RNP_MDI_PHY_SPEED_SELECT0; + break; + case RNP_LINK_SPEED_10_FULL: + case RNP_LINK_SPEED_10_HALF: + value = 0; + break; + default: + value = RNP_MDI_PHY_SPEED_SELECT0 | + RNP_MDI_PHY_SPEED_SELECT1; + hw_dbg(hw, "unknown speed = 0x%x.\n", speed); + break; + } + /* duplex full */ + if (duplex) + value |= RNP_MDI_PHY_DUPLEX; + value |= 0x8000; + rnp_mbx_phy_write(hw, 0x0, value); + goto skip_an; + } + + /* start_an */ + value_r4 = 0x1E0; + value_r9 = 0x300; + /* disable 100/10base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x4, &value); + value &= ~value_r4; + rnp_mbx_phy_write(hw, 0x4, value); + + /* disable 1000base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x9, &value); + value &= ~value_r9; + rnp_mbx_phy_write(hw, 0x9, value); + + value_r4 = 0x0; + value_r9 = 0x0; + + if (adv & RNP_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_1GB_FULL; + value_r9 |= 0x200; + } + if (adv & RNP_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_100_FULL; + value_r4 |= 0x100; + } + if (adv & RNP_LINK_SPEED_10_FULL) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_10_FULL; + value_r4 |= 0x40; + } + + if (adv & RNP_LINK_SPEED_1GB_HALF) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_1GB_HALF; + value_r9 |= 0x100; + } + if (adv & RNP_LINK_SPEED_100_HALF) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_100_HALF; + value_r4 |= 0x80; + } + if (adv & RNP_LINK_SPEED_10_HALF) { + hw->phy.autoneg_advertised |= RNP_LINK_SPEED_10_HALF; + value_r4 |= 0x20; + } + + /* enable 1000base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x9, &value); + value |= value_r9; + rnp_mbx_phy_write(hw, 0x9, value); + + /* enable 100/10base-T Self-negotiation ability */ + rnp_mbx_phy_read(hw, 0x4, &value); + value |= value_r4; + rnp_mbx_phy_write(hw, 0x4, value); + + /* software reset to make the above configuration take effect*/ + rnp_mbx_phy_read(hw, 0x0, &value); + value |= 0x9200; + rnp_mbx_phy_write(hw, 0x0, value); +skip_an: + /* power on in UTP mode */ + rnp_mbx_phy_read(hw, 0x0, &value); + value &= ~0x800; + rnp_mbx_phy_write(hw, 0x0, value); + +out: + return 0; +} + +static void rnp_clean_link_hw_ops_n10(struct rnp_hw *hw) +{ + hw->link = 0; +} + +static void rnp_set_layer2_hw_ops_n10(struct rnp_hw *hw, + union rnp_atr_input *input, u16 pri_id, + u8 queue, bool prio_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_layer2_remapping(eth, input, pri_id, queue, prio_flag); +} + +static void rnp_clr_layer2_hw_ops_n10(struct rnp_hw *hw, u16 pri_id) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_layer2_remapping(eth, pri_id); +} + +static void rnp_clr_all_layer2_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_all_layer2_remapping(eth); +} + +static void rnp_clr_all_tuple5_hw_ops_n10(struct rnp_hw *hw) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_all_tuple5_remapping(eth); +} + +static void rnp_set_tcp_sync_hw_ops_n10(struct rnp_hw *hw, int queue, bool flag, + bool prio) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_tcp_sync_remapping(eth, queue, flag, prio); +} + +static void rnp_update_msix_count_hw_ops_n10(struct rnp_hw *hw, int msix_count) +{ + int 
msix_count_new; + struct rnp_mac_info *mac = &hw->mac; + + msix_count_new = clamp_t(int, msix_count, 2, RNP_N10_MSIX_VECTORS); + + mac->max_msix_vectors = msix_count_new; + hw->max_msix_vectors = msix_count_new; +} + +static void rnp_set_tuple5_hw_ops_n10(struct rnp_hw *hw, + union rnp_atr_input *input, u16 pri_id, + u8 queue, bool prio_flag) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.set_tuple5_remapping(eth, input, pri_id, queue, prio_flag); +} + +static void rnp_clr_tuple5_hw_ops_n10(struct rnp_hw *hw, u16 pri_id) +{ + struct rnp_eth_info *eth = &hw->eth; + + eth->ops.clr_tuple5_remapping(eth, pri_id); +} + +static void rnp_update_hw_status_hw_ops_n10(struct rnp_hw *hw, + struct rnp_hw_stats *hw_stats, + struct net_device_stats *net_stats) +{ + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mac_info *mac = &hw->mac; + int port; + + hw_stats->dma_to_dma = + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_0) + + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_1) + + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_2) + + dma_rd32(dma, RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_3); + + hw_stats->dma_to_switch = dma_rd32(dma, RNP_DMA_STATS_DMA_TO_SWITCH); + hw_stats->mac_to_dma = dma_rd32(dma, RNP_DMA_STATS_MAC_TO_DMA); + + net_stats->rx_crc_errors = 0; + net_stats->rx_errors = 0; + + for (port = 0; port < 4; port++) { + /* we use Hardware stats? */ + net_stats->rx_crc_errors += + eth_rd32(eth, RNP10_RXTRANS_CRC_ERR_PKTS(port)); + net_stats->rx_errors += + eth_rd32(eth, RNP10_RXTRANS_WDT_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_CODE_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_CRC_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_SLEN_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_GLEN_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_IPH_ERR_PKTS(port)) + + eth_rd32(eth, RNP10_RXTRANS_LEN_ERR_PKTS(port)); + } + hw_stats->invalid_dropped_packets = + eth_rd32(eth, RNP10_ETH_INVALID_DROP_PKTS); + hw_stats->rx_capabity_lost = + eth_rd32(eth, RNP10_RXTRANS_DROP(0)) + + eth_rd32(eth, RNP10_RXTRANS_CUT_ERR_PKTS(0)); + hw_stats->filter_dropped_packets = + eth_rd32(eth, RNP10_ETH_FILTER_DROP_PKTS); + hw_stats->host_l2_match_drop = + eth_rd32(eth, RNP10_ETH_HOST_L2_DROP_PKTS); + hw_stats->redir_input_match_drop = + eth_rd32(eth, RNP10_ETH_REDIR_INPUT_MATCH_DROP_PKTS); + hw_stats->redir_etype_match_drop = + eth_rd32(eth, RNP10_ETH_ETYPE_DROP_PKTS); + hw_stats->redir_tcp_syn_match_drop = + eth_rd32(eth, RNP10_ETH_TCP_SYN_DROP_PKTS); + hw_stats->redir_tuple5_match_drop = + eth_rd32(eth, RNP10_ETH_REDIR_TUPLE5_DROP_PKTS); + hw_stats->redir_tcam_match_drop = + eth_rd32(eth, RNP10_ETH_REDIR_TCAM_DROP_PKTS); + hw_stats->bmc_dropped_packets = + eth_rd32(eth, RNP10_ETH_DECAP_BMC_DROP_NUM); + hw_stats->switch_dropped_packets = + eth_rd32(eth, RNP10_ETH_DECAP_SWITCH_DROP_NUM); + hw_stats->mac_rx_broadcast = + mac_rd32(mac, RNP10_MAC_STATS_BROADCAST_LOW); + hw_stats->mac_rx_broadcast += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_BROADCAST_HIGH) << 32); + hw_stats->mac_rx_multicast = + mac_rd32(mac, RNP10_MAC_STATS_MULTICAST_LOW); + hw_stats->mac_rx_multicast += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_MULTICAST_HIGH) << 32); + hw_stats->mac_rx_pause_count = + mac_rd32(mac, RNP10_MAC_STATS_RX_PAUSE_COUNT_LOW); + hw_stats->mac_rx_pause_count += + ((u64)mac_rd32(mac, RNP10_MAC_STATS_RX_PAUSE_COUNT_HIGH) << 32); + hw_stats->mac_tx_pause_count = + mac_rd32(mac, RNP10_MAC_STATS_TX_PAUSE_COUNT_LOW); + hw_stats->mac_tx_pause_count += + ((u64)mac_rd32(mac, 
RNP10_MAC_STATS_TX_PAUSE_COUNT_HIGH) << 32); +} + +enum n10_priv_bits { + n10_mac_loopback = 0, + n10_switch_loopback = 1, + n10_veb_enable = 4, + n10_padding_enable = 8, + n10_padding_debug_enable = 0x10, +}; + +static const char rnp10_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define RNP10_MAC_LOOPBACK BIT(0) +#define RNP10_SWITCH_LOOPBACK BIT(1) +#define RNP10_VEB_ENABLE BIT(2) +#define RNP10_FT_PADDING BIT(3) +#define RNP10_PADDING_DEBUG BIT(4) +#define RNP10_PTP_FEATURE BIT(5) +#define RNP10_SIMULATE_DOWN BIT(6) +#define RNP10_VXLAN_INNER_MATCH BIT(7) +#define RNP10_STAG_ENABLE BIT(8) +#define RNP10_REC_HDR_LEN_ERR BIT(9) +#define RNP10_SRIOV_VLAN_MODE BIT(10) +#define RNP10_REMAP_MODE BIT(11) +#define RNP10_LLDP_EN_STAT BIT(12) +#define RNP10_FORCE_CLOSE BIT(13) + "mac_loopback", + "switch_loopback", + "veb_enable", + "pcie_patch", + "padding_debug", + "ptp_performance_debug", + "simulate_link_down", + "vxlan_inner_match", + "stag_enable", + "mask_len_err", + "sriov_vlan_mode", + "remap_mode1", + "lldp_en", + "link_down_on_close", +}; + +#define RNP10_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnp10_priv_flags_strings) + +const struct rnp_stats rnp10_gstrings_net_stats[] = { + RNP_NETDEV_STAT(rx_packets), + RNP_NETDEV_STAT(tx_packets), + RNP_NETDEV_STAT(rx_bytes), + RNP_NETDEV_STAT(tx_bytes), + RNP_NETDEV_STAT(rx_errors), + RNP_NETDEV_STAT(tx_errors), + RNP_NETDEV_STAT(rx_dropped), + RNP_NETDEV_STAT(tx_dropped), + RNP_NETDEV_STAT(multicast), + RNP_NETDEV_STAT(collisions), + RNP_NETDEV_STAT(rx_over_errors), + RNP_NETDEV_STAT(rx_crc_errors), + RNP_NETDEV_STAT(rx_frame_errors), + RNP_NETDEV_STAT(rx_fifo_errors), + RNP_NETDEV_STAT(rx_missed_errors), + RNP_NETDEV_STAT(tx_aborted_errors), + RNP_NETDEV_STAT(tx_carrier_errors), + RNP_NETDEV_STAT(tx_fifo_errors), + RNP_NETDEV_STAT(tx_heartbeat_errors), +}; + +#define RNP10_GLOBAL_STATS_LEN ARRAY_SIZE(rnp10_gstrings_net_stats) + +static struct rnp_stats rnp10_hwstrings_stats[] = { + RNP_HW_STAT("dma_to_mac", hw_stats.dma_to_dma), + RNP_HW_STAT("dma_to_switch", hw_stats.dma_to_switch), + RNP_HW_STAT("eth_to_dma", hw_stats.mac_to_dma), + RNP_HW_STAT("vlan_add_cnt", hw_stats.vlan_add_cnt), + RNP_HW_STAT("vlan_strip_cnt", hw_stats.vlan_strip_cnt), + RNP_HW_STAT("invalid_dropped_packets", + hw_stats.invalid_dropped_packets), + RNP_HW_STAT("rx_capabity_drop", hw_stats.rx_capabity_lost), + RNP_HW_STAT("filter_dropped_packets", hw_stats.filter_dropped_packets), + RNP_HW_STAT("host_l2_match_drop", hw_stats.host_l2_match_drop), + RNP_HW_STAT("redir_input_match_drop", hw_stats.redir_input_match_drop), + RNP_HW_STAT("redir_etype_match_drop", hw_stats.redir_etype_match_drop), + RNP_HW_STAT("redir_tcp_syn_match_drop", + hw_stats.redir_tcp_syn_match_drop), + RNP_HW_STAT("redir_tuple5_match_drop", + hw_stats.redir_tuple5_match_drop), + RNP_HW_STAT("redir_tcam_match_drop", hw_stats.redir_tcam_match_drop), + RNP_HW_STAT("bmc_dropped_packets", hw_stats.bmc_dropped_packets), + RNP_HW_STAT("switch_dropped_packets", hw_stats.switch_dropped_packets), + RNP_HW_STAT("rx_csum_offload_errors", hw_csum_rx_error), + RNP_HW_STAT("rx_csum_offload_good", hw_csum_rx_good), + RNP_HW_STAT("rx_broadcast_count", hw_stats.mac_rx_broadcast), + RNP_HW_STAT("rx_multicast_count", hw_stats.mac_rx_multicast), + RNP_HW_STAT("mac_rx_pause_count", hw_stats.mac_rx_pause_count), + RNP_HW_STAT("mac_tx_pause_count", hw_stats.mac_tx_pause_count), +}; + +#define RNP10_HWSTRINGS_STATS_LEN ARRAY_SIZE(rnp10_hwstrings_stats) + +#define RNP10_STATS_LEN \ + (RNP10_GLOBAL_STATS_LEN + RNP10_HWSTRINGS_STATS_LEN + \ 
+ RNP_QUEUE_STATS_LEN) + +static const char rnp10_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define RNP10_TEST_LEN (sizeof(rnp10_gstrings_test) / ETH_GSTRING_LEN) + +static int rnp10_get_regs_len(struct net_device *netdev) +{ +#define RNP10_REGS_LEN 1 + return RNP10_REGS_LEN * sizeof(u32); +} + +#define ADVERTISED_MASK_10G \ + (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full | \ + SUPPORTED_10000baseKR_Full) + +#define SUPPORTED_MASK_40G \ + (SUPPORTED_40000baseKR4_Full | SUPPORTED_40000baseCR4_Full | \ + SUPPORTED_40000baseSR4_Full | SUPPORTED_40000baseLR4_Full) + +#define ADVERTISED_MASK_40G \ + (SUPPORTED_40000baseKR4_Full | SUPPORTED_40000baseCR4_Full | \ + SUPPORTED_40000baseSR4_Full | SUPPORTED_40000baseLR4_Full) + +#define SUPPORTED_10000baseT 0 + +static int rnp_set_autoneg_adv_from_hw(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks) +{ + u32 value_r0 = 0, value_r4 = 0, value_r9 = 0; + u32 value_r20, value_r412; + + /* Read autoneg state from phy */ + if (hw->phy_type == PHY_TYPE_SGMII) { + rnp_mbx_phy_read(hw, 0x0, &value_r0); + /* Not support AN, return directly */ + if (!(value_r0 & BIT(12))) + return 0; + + rnp_mbx_phy_read(hw, 0x4, &value_r4); + rnp_mbx_phy_read(hw, 0x9, &value_r9); + if (value_r4 & 0x100) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + if (value_r4 & 0x80) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Half); + if (value_r4 & 0x40) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Full); + if (value_r4 & 0x20) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Half); + if (value_r9 & 0x200) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + if (value_r9 & 0x100) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Half); + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD(7) | 0x0), &value_r0); + + if (!(value_r0 & BIT(12))) + return 0; + + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD(7) | 0x20), &value_r20); + + if (value_r20 & BIT(12)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD_VEND2 | 0xa412), + &value_r412); + + if (value_r412 & BIT(8)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + if (value_r412 & BIT(9)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + } + + return 0; +} + +/** + * rnp_phy_type_to_ethtool - convert the phy_types to ethtool link modes + * @adapter: adapter struct with hw->phy_type + * @ks: ethtool link ksettings struct to fill out + * + **/ +static void rnp_phy_type_to_ethtool(struct rnp_adapter *adapter, + struct ethtool_link_ksettings *ks) +{ + struct rnp_hw *hw = &adapter->hw; + u32 supported_link = hw->supported_link; + u8 phy_type = hw->phy_type; + + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + + if (phy_type == PHY_TYPE_NONE) { + if (supported_link & RNP_LINK_SPEED_10GB_FULL) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + 
ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseER_Full); + } + + if (((supported_link & RNP_LINK_SPEED_10GB_FULL) || + (supported_link & RNP_LINK_SPEED_1GB_FULL))) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + } + } + if (phy_type == PHY_TYPE_SGMII) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Half); + + rnp_set_autoneg_adv_from_hw(hw, ks); + } + + if (phy_type == PHY_TYPE_10G_TP) { + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + rnp_set_autoneg_adv_from_hw(hw, ks); + } + + if (rnp_fw_is_old_ethtool(hw) && + (supported_link & RNP_LINK_SPEED_40GB_FULL)) { + supported_link |= RNP_SFP_MODE_40G_CR4 | RNP_SFP_MODE_40G_SR4 | + PHY_TYPE_40G_BASE_LR4; + } + + if (supported_link & RNP_SFP_MODE_40G_CR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + } + if (supported_link & RNP_SFP_MODE_40G_SR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + } + if (supported_link & RNP_SFP_MODE_40G_LR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); + } + + /* add 25G support here */ + if (supported_link & RNP_SFP_25G_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseSR_Full); + } + if (supported_link & RNP_SFP_25G_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + if (supported_link & RNP_SFP_25G_CR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + if (hw->is_backplane) { + if (phy_type == PHY_TYPE_40G_BASE_KR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + } + if (phy_type == PHY_TYPE_10G_BASE_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode( + ks, advertising, 10000baseKR_Full); + } + } + + if (supported_link & RNP_SFP_MODE_1G_LX || + supported_link & RNP_SFP_MODE_1G_SX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + } + } + + if (phy_type == PHY_TYPE_1G_BASE_KX) { + if 
(hw->is_backplane) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode( + ks, advertising, 1000baseKX_Full); + } + + if ((supported_link & RNP_SFP_MODE_1G_T) || + (supported_link & RNP_LINK_SPEED_1GB_FULL)) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + if (supported_link & RNP_LINK_SPEED_1GB_FULL) + ethtool_link_ksettings_add_link_mode( + ks, advertising, 1000baseT_Full); + } + } + /* need to add new 10G PHY types */ + if (phy_type == PHY_TYPE_10G_BASE_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + } + if (phy_type == PHY_TYPE_10G_BASE_ER) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseER_Full); + } + if (phy_type == PHY_TYPE_10G_BASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + if (supported_link & RNP_LINK_SPEED_10GB_FULL) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } + if (hw->force_speed_stat == FORCE_SPEED_STAT_10G) { + ethtool_link_ksettings_del_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_del_link_mode(ks, advertising, + 1000baseT_Full); + + ethtool_link_ksettings_del_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_del_link_mode(ks, advertising, + 1000baseX_Full); + + if (phy_type == PHY_TYPE_1G_BASE_KX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } + } +} +/** + * rnp_get_settings_link_up - Get Link settings for when link is up + * @hw: hw structure + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + **/ +static void rnp_get_settings_link_up(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct ethtool_link_ksettings cap_ksettings; + + /* Initialize supported and advertised settings based on phy settings */ + switch (hw->phy_type) { + case PHY_TYPE_40G_BASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + break; + + case PHY_TYPE_40G_BASE_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + break; + case PHY_TYPE_40G_BASE_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); + break; + case PHY_TYPE_10G_BASE_SR: + case PHY_TYPE_10G_BASE_LR: + case PHY_TYPE_10G_BASE_ER: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + 
ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseER_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseER_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (hw->speed == SPEED_10000) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + break; + case PHY_TYPE_1G_BASE_KX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + if (!!hw->is_backplane) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + } + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; + + case PHY_TYPE_SGMII: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10baseT_Half); + break; + + case PHY_TYPE_40G_BASE_KR4: + case PHY_TYPE_10G_BASE_KR: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, 
advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + break; + case PHY_TYPE_10G_TP: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; + + default: + /* if we got here and link is up something bad is afoot + */ + netdev_info(netdev, + "WARNING: Link is up but PHY type 0x%x is not " + "recognized, or incorrect cable is in use\n", + hw->phy_type); + } + + /* Now that we've worked out everything that could be supported by the + * current PHY type, get what is supported by the NVM and intersect + * them to get what is truly supported + */ + memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings)); + rnp_phy_type_to_ethtool(adapter, &cap_ksettings); + ethtool_intersect_link_masks(ks, &cap_ksettings); + + /* Set speed and duplex */ + ks->base.speed = adapter->speed; + ks->base.duplex = hw->duplex; +} + +/** + * rnp_get_settings_link_down - Get the Link settings when link is down + * @hw: hw structure + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + **/ +static void rnp_get_settings_link_down(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + /* link is down and the driver needs to fall back on + * supported phy types to figure out what info to display + */ + rnp_phy_type_to_ethtool(adapter, ks); + + /* With no link speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; + + if ((hw->phy_type == PHY_TYPE_SGMII) || + (hw->phy_type == PHY_TYPE_10G_TP)) { + ks->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + ks->base.eth_tp_mdix_ctrl = hw->tp_mdix_ctrl; + } +} + +/** + * rnp_set_autoneg_state_from_hw - Set the autoneg state from hardware + * @hw: hw structure + * @ks: ethtool ksettings to fill in + * + * Set the autoneg state from hardware, like PHY + **/ +static int rnp_set_autoneg_state_from_hw(struct rnp_hw *hw, + struct ethtool_link_ksettings *ks) +{ + int ret; + struct rnp_adapter *adapter = hw->back; + + ks->base.autoneg = (adapter->an ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + /* Read autoneg state from phy */ + if (hw->phy_type == PHY_TYPE_SGMII) { + u32 value_r0 = 0; + ret = rnp_mbx_phy_read(hw, 0x0, &value_r0); + if (ret) + return -1; + + ks->base.autoneg = (value_r0 & BIT(12)) ? AUTONEG_ENABLE : + AUTONEG_DISABLE; + } + if (hw->phy_type == PHY_TYPE_10G_TP) { + u32 value_r0 = 0; + + rnp_mbx_phy_read(hw, PHY_826x_AN, &value_r0); + + ks->base.autoneg = (value_r0 & BIT(12)) ? 
AUTONEG_ENABLE : + AUTONEG_DISABLE; + if (value_r0) + adapter->an = 1; + } + + return 0; +} + +static int rnp_get_phy_mdix_from_hw(struct rnp_hw *hw) +{ + int ret; + int rmmd_reg = 0; + u32 value_r17 = 0; + + if (hw->phy_type == PHY_TYPE_SGMII) { + ret = rnp_mbx_phy_read(hw, 0x11, &value_r17); + if (ret) + return -1; + hw->phy.is_mdix = !!(value_r17 & 0x0040); + } + if (hw->phy_type == PHY_TYPE_10G_TP) { + rmmd_reg = (1 << 30) | (0x1f << 16) | (0xa430 & 0xffff); + ret = rnp_mbx_phy_read(hw, rmmd_reg, &value_r17); + if (ret) + return -1; + hw->phy.is_mdix = !!(value_r17 & 0x0200); + } + + return 0; +} + +__maybe_unused static bool fiber_unsupport(u32 supported_link, u8 phy_type) +{ + if ((phy_type == PHY_TYPE_10G_BASE_KR) || + (phy_type == PHY_TYPE_10G_BASE_SR) || + (phy_type == PHY_TYPE_10G_BASE_LR) || + (phy_type == PHY_TYPE_10G_BASE_ER)) { + if (!(supported_link & RNP_LINK_SPEED_10GB_FULL)) + return true; + } + + if ((phy_type == PHY_TYPE_40G_BASE_KR4) || + (phy_type == PHY_TYPE_40G_BASE_SR4) || + (phy_type == PHY_TYPE_40G_BASE_CR4) || + (phy_type == PHY_TYPE_40G_BASE_LR4)) { + if (!(supported_link & + (RNP_LINK_SPEED_40GB_FULL | RNP_LINK_SPEED_25GB_FULL))) + return true; + } + + if (phy_type == PHY_TYPE_1G_BASE_KX) { + if (!(supported_link & RNP_LINK_SPEED_1GB_FULL)) + return true; + } + + return false; +} + +static int rnp10_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + bool link_up; + int err; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + /* update hw from firmware */ + err = rnp_mbx_get_lane_stat(hw); + if (err /*|| fiber_unsupport(hw->supported_link, hw->phy_type)*/) { + /* + when force 1G speed and plugin in 10G-AOC, should not return + -1 + */ + return -1; + } + + /* update hw->phy.media_type by hw->phy_type */ + switch (hw->phy_type) { + case PHY_TYPE_NONE: + hw->phy.media_type = rnp_media_type_unknown; + break; + case PHY_TYPE_1G_BASE_KX: + if (hw->is_backplane) { + hw->phy.media_type = rnp_media_type_backplane; + } else if (hw->is_sgmii) { + hw->phy.media_type = rnp_media_type_copper; + } else { + if ((hw->supported_link & RNP_LINK_SPEED_1GB_FULL) || + (hw->supported_link & RNP_SFP_MODE_1G_LX)) { + hw->phy.media_type = rnp_media_type_fiber; + } else { + hw->phy.media_type = rnp_media_type_unknown; + } + } + break; + case PHY_TYPE_SGMII: + case PHY_TYPE_10G_TP: + hw->phy.media_type = rnp_media_type_copper; + ks->base.phy_address = adapter->phy_addr; + break; + case PHY_TYPE_10G_BASE_KR: + case PHY_TYPE_25G_BASE_KR: + case PHY_TYPE_40G_BASE_KR4: + hw->phy.media_type = rnp_media_type_backplane; + break; + case PHY_TYPE_10G_BASE_SR: + case PHY_TYPE_40G_BASE_SR4: + case PHY_TYPE_40G_BASE_CR4: + case PHY_TYPE_40G_BASE_LR4: + case PHY_TYPE_10G_BASE_LR: + case PHY_TYPE_10G_BASE_ER: + hw->phy.media_type = rnp_media_type_fiber; + break; + default: + hw->phy.media_type = rnp_media_type_unknown; + break; + } + + if (hw->supported_link & RNP_SFP_CONNECTOR_DAC) { + hw->phy.media_type = rnp_media_type_da; + } + + if ((hw->supported_link & RNP_SFP_TO_SGMII) || + (hw->supported_link & RNP_SFP_MODE_1G_T)) { + hw->phy.media_type = rnp_media_type_copper; + } + + /* Check Whether there is media on port */ + if (hw->phy.media_type == rnp_media_type_fiber) { + /* If adapter->sfp.mod_abs is 0, there is no media on port. 
*/ + if (!adapter->sfp.mod_abs) { + hw->phy.media_type = rnp_media_type_unknown; + hw->phy_type = PHY_TYPE_NONE; + } + } + + /* Now set the settings that don't rely on link being up/down */ + /* Set autoneg settings */ + rnp_set_autoneg_state_from_hw(hw, ks); + + link_up = hw->link; + if (link_up) + rnp_get_settings_link_up(hw, ks, netdev); + else + rnp_get_settings_link_down(hw, ks, netdev); + + /* Set media type settings */ + switch (hw->phy.media_type) { + case rnp_media_type_backplane: + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Backplane); + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ks->base.port = PORT_NONE; + break; + case rnp_media_type_copper: + ethtool_link_ksettings_add_link_mode(ks, supported, TP); + ethtool_link_ksettings_add_link_mode(ks, advertising, TP); + if (PHY_TYPE_SGMII == hw->phy_type) + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + if (AUTONEG_ENABLE == ks->base.autoneg) + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + else + ethtool_link_ksettings_del_link_mode(ks, advertising, + Autoneg); + ks->base.port = PORT_TP; + break; + case rnp_media_type_da: + case rnp_media_type_cx4: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_DA; + break; + case rnp_media_type_fiber: + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_FIBRE; + break; + case rnp_media_type_unknown: + default: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ks->base.port = PORT_OTHER; + break; + } + + if (hw->force_speed_stat != FORCE_SPEED_STAT_DISABLED) { + ethtool_link_ksettings_del_link_mode(ks, advertising, Autoneg); + } + + /* Set flow control settings */ + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause); + + /* should get pause from hw if 10G-TP */ + switch (hw->fc.requested_mode) { + case rnp_fc_full: + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + break; + case rnp_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + break; + case rnp_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Asym_Pause); + break; + default: + ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_del_link_mode(ks, advertising, + Asym_Pause); + break; + } + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if ((hw->phy_type == PHY_TYPE_SGMII) || + (hw->phy_type == PHY_TYPE_10G_TP)) { + if (rnp_get_phy_mdix_from_hw(hw)) { + ks->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + } else { + ks->base.eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : + ETH_TP_MDI; + } + } else { + ks->base.eth_tp_mdix = hw->tp_mdx; + } + + if (hw->phy.mdix == AUTO_ALL_MODES) + ks->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ks->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + rnp_logd(LOG_ETHTOOL, + "%s %s set link: speed=%d port=%d duplex=%d autoneg=%d " + "phy_address=%d, media_type=%d hw->phy_type:%d\n", + __func__, netdev->name, ks->base.speed, ks->base.port, + ks->base.duplex, ks->base.autoneg, ks->base.phy_address, + hw->phy.media_type, hw->phy_type); + return 0; +} + +static int rnp10_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct ethtool_link_ksettings safe_ks; + struct ethtool_link_ksettings copy_ks; + bool autoneg_changed = false, duplex_changed = false; + int timeout = 50; + int err = 0; + u8 autoneg; + u32 advertising_link_speed, speed = 0; + + /* copy the ksettings to copy_ks to avoid modifying the origin */ + memcpy(©_ks, ks, sizeof(struct ethtool_link_ksettings)); + + /* save autoneg out of ksettings */ + + autoneg = copy_ks.base.autoneg; + rnp_logd(LOG_ETHTOOL, + "%s %s set link: speed=%d port=%d duplex=%d autoneg=%d " + "phy_address=%d\n", + __func__, netdev->name, copy_ks.base.speed, copy_ks.base.port, + copy_ks.base.duplex, copy_ks.base.autoneg, + copy_ks.base.phy_address); + + /* get our own copy of the bits to check against */ + memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + safe_ks.base.cmd = copy_ks.base.cmd; + safe_ks.base.link_mode_masks_nwords = + copy_ks.base.link_mode_masks_nwords; + + if (rnp10_get_link_ksettings(netdev, &safe_ks)) { + /* return err */ + return 0; + } + /* Get link modes supported by hardware and check against modes + * requested by user. Return an error if unsupported mode was set. + */ + /* if autoneg is off, this is not error ? 
*/ + if (!bitmap_subset(copy_ks.link_modes.advertising, + safe_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + return -EINVAL; + } + /* set autoneg back to what it currently is */ + copy_ks.base.autoneg = safe_ks.base.autoneg; + + memset(&advertising_link_speed, 0, sizeof(u32)); + + /* Check autoneg */ + if (autoneg == AUTONEG_ENABLE) { + /* If autoneg was not already enabled */ + if (!(adapter->an)) { + /* If autoneg is not supported, return error */ + if (!ethtool_link_ksettings_test_link_mode( + &safe_ks, supported, Autoneg)) { + netdev_info( + netdev, + "Autoneg not supported on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is allowed to change */ + autoneg_changed = true; + } + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10baseT_Full)) + advertising_link_speed |= RNP_LINK_SPEED_10_FULL; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Full)) + advertising_link_speed |= RNP_LINK_SPEED_100_FULL; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + advertising_link_speed |= RNP_LINK_SPEED_1GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10baseT_Half)) + advertising_link_speed |= RNP_LINK_SPEED_10_HALF; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Half)) + advertising_link_speed |= RNP_LINK_SPEED_100_HALF; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Half)) + advertising_link_speed |= RNP_LINK_SPEED_1GB_HALF; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKX4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseCR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseLR_Full)) + advertising_link_speed |= RNP_LINK_SPEED_10GB_FULL; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseKR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseCR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseSR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseLR4_Full)) + advertising_link_speed |= RNP_LINK_SPEED_40GB_FULL; + + if (advertising_link_speed) { + hw->phy.autoneg_advertised = advertising_link_speed; + } else { + if (hw->force_speed_stat == + FORCE_SPEED_STAT_DISABLED) { + netdev_info(netdev, + "advertising_link_speed is 0\n"); + err = -EINVAL; + goto done; + } + } + + hw->advertised_link = advertising_link_speed; + if (hw->is_sgmii && hw->autoneg == false) + autoneg_changed = true; + hw->autoneg = true; + } else { + /* If autoneg is currently enabled */ + if (adapter->an) { + /* If autoneg is supported 10GBASE_T is the only PHY + * that can disable it, so otherwise return error + */ + if (ethtool_link_ksettings_test_link_mode( + &safe_ks, supported, Autoneg) && + hw->phy.media_type != rnp_media_type_copper) { + netdev_info( + netdev, + "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } + /* Autoneg is allowed to change */ + autoneg_changed = true; + } + /* if 10G -TP, not support close an */ + if (hw->phy_type == PHY_TYPE_10G_TP) 
{ + netdev_info(netdev, + "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } + + /* Only allow one speed at a time when autoneg is AUTONEG_DISABLE. */ + switch (ks->base.speed) { + case SPEED_10: + speed = RNP_LINK_SPEED_10_FULL; + break; + case SPEED_100: + speed = RNP_LINK_SPEED_100_FULL; + break; + case SPEED_1000: + speed = RNP_LINK_SPEED_1GB_FULL; + break; + case SPEED_10000: + speed = RNP_LINK_SPEED_10GB_FULL; + break; + default: + netdev_info(netdev, "unsupported speed\n"); + err = -EINVAL; + goto done; + } + + hw->autoneg = false; + } + + hw->phy.autoneg_advertised = RNP_LINK_SPEED_UNKNOWN; + /* If speed didn't get set, set it to what it currently is. + * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. + */ + + if (hw->is_sgmii) { + hw->duplex = ks->base.duplex; + duplex_changed = true; + } + + if (hw->phy_type == PHY_TYPE_10G_TP) { + hw->duplex = ks->base.duplex; + duplex_changed = true; + } + /* this sets the link speed and restarts auto-neg */ + while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (copy_ks.base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (copy_ks.base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = copy_ks.base.eth_tp_mdix_ctrl; + } + + hw->mac.autotry_restart = true; + /* set speed */ + err = hw->ops.setup_link(hw, advertising_link_speed, hw->autoneg, speed, + hw->duplex); + if (err) + e_info(probe, "setup link failed with code %d\n", err); + + clear_bit(__RNP_IN_SFP_INIT, &adapter->state); +done: + return err; +} + +static void rnp10_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + strncpy(drvinfo->driver, rnp_driver_name, sizeof(drvinfo->driver)); + snprintf(drvinfo->version, sizeof(drvinfo->version), "%s", + rnp_driver_version); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d.%d", ((unsigned char *)&(hw->fw_version))[3], + ((unsigned char *)&(hw->fw_version))[2], + ((unsigned char *)&(hw->fw_version))[1], + ((unsigned char *)&(hw->fw_version))[0]); + + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = RNP10_STATS_LEN; + drvinfo->testinfo_len = RNP10_TEST_LEN; + drvinfo->regdump_len = rnp10_get_regs_len(netdev); + drvinfo->n_priv_flags = RNP10_PRIV_FLAGS_STR_LEN; +} + +static void rnp10_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u32 *regs_buff = p; + int i; + + memset(p, 0, RNP10_REGS_LEN * sizeof(u32)); + + for (i = 0; i < RNP10_REGS_LEN; i++) + regs_buff[i] = rd32(hw, i * 4); +} + +static int rnp_nway_reset(struct net_device *netdev) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + netdev_info(netdev, "NIC Link is Down\n"); + rnp_down(adapter); + msleep(10); + rnp_up(adapter); + return 0; +} + +/** + * rnpm_device_supports_autoneg_fc - Check if phy supports autoneg flow + * control + * @hw: pointer to hardware structure + * + * There are several phys that do not support autoneg flow control. This + * function check the device id to see if the associated phy supports + * autoneg flow control. 
+ **/ +static bool rnp_device_supports_autoneg_fc(struct rnp_hw *hw) +{ + bool supported = false; + + switch (hw->phy.media_type) { + case rnp_media_type_fiber: + break; + case rnp_media_type_backplane: + break; + case rnp_media_type_copper: + /* only some copper devices support flow control autoneg */ + supported = true; + break; + default: + break; + } + + return supported; +} + +static void rnp10_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + /* we don't support autoneg */ + if (rnp_device_supports_autoneg_fc(hw) && !hw->fc.disable_fc_autoneg) + pause->autoneg = 1; + else + pause->autoneg = 0; + if (hw->fc.current_mode == rnp_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == rnp_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == rnp_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int rnp10_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_fc_info fc = hw->fc; + + /* we not support change in dcb mode */ + if (adapter->flags & RNP_FLAG_DCB_ENABLED) + return -EINVAL; + + /* we not support autoneg mode */ + if ((pause->autoneg == AUTONEG_ENABLE) && + !rnp_device_supports_autoneg_fc(hw)) + return -EINVAL; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + fc.requested_mode &= (~(PAUSE_TX | PAUSE_RX)); + if (pause->autoneg) { + fc.requested_mode |= PAUSE_AUTO; + } else { + if (pause->tx_pause) + fc.requested_mode |= PAUSE_TX; + if (pause->rx_pause) + fc.requested_mode |= PAUSE_RX; + } + + if (hw->phy_type == PHY_TYPE_SGMII) { + u16 pause_bits = 0; + u32 value; + u32 value_r0; + + if (hw->fc.requested_mode == PAUSE_AUTO) { + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } else { + if ((hw->fc.requested_mode & PAUSE_TX) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + pause_bits |= ASYM_PAUSE; + + } else if ((!(hw->fc.requested_mode & PAUSE_TX)) && + (!(hw->fc.requested_mode & PAUSE_RX))) { + } else + pause_bits |= ASYM_PAUSE | SYM_PAUSE; + } + rnp_mbx_phy_read(hw, 4, &value); + value &= ~0xC00; + value |= pause_bits; + rnp_mbx_phy_write(hw, 4, value); + + if (hw->autoneg) { + rnp_mbx_phy_read(hw, 0, &value_r0); + value_r0 |= BIT(9); + rnp_mbx_phy_write(hw, 0, value_r0); + } + } + + /* if the thing changed then we'll update and use new autoneg */ + if (memcmp(&fc, &hw->fc, sizeof(struct rnp_fc_info))) { + /* to tell all vf new pause status */ + hw->fc = fc; + rnp_msg_post_status(adapter, PF_PAUSE_STATUS); + if (netif_running(netdev)) + rnp_reinit_locked(adapter); + else + rnp_reset(adapter); + } + + return 0; +} + +static void rnp10_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; + int i; + struct rnp_ring *ring; + u32 dma_ch; + + switch (stringset) { + /* maybe we don't support test? 
*/ + case ETH_SS_TEST: + for (i = 0; i < RNP10_TEST_LEN; i++) { + memcpy(data, rnp10_gstrings_test[i], ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + case ETH_SS_STATS: + for (i = 0; i < RNP10_GLOBAL_STATS_LEN; i++) { + memcpy(p, rnp10_gstrings_net_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < RNP10_HWSTRINGS_STATS_LEN; i++) { + memcpy(p, rnp10_hwstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < RNP_NUM_TX_QUEUES; i++) { + /* ==== tx ======== */ + ring = adapter->tx_ring[i]; + dma_ch = ring->rnp_queue_idx; + sprintf(p, "---\n queue%u_tx_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_restart", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_busy", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_done_old", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_clean_desc", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_poll_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_irq_more", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_hw_head", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_hw_tail", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_sw_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_sw_next_to_use", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_send_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_send_bytes_to_hw", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_todo_update", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_send_done_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_added_vlan_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_irq_miss", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_equal_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_clean_times", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_tx_clean_count", i); + p += ETH_GSTRING_LEN; + + /* ==== rx ======== */ + ring = adapter->rx_ring[i]; + dma_ch = ring->rnp_queue_idx; + sprintf(p, "queue%u_rx_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_bytes", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_driver_drop_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_rsc", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_rsc_flush", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_non_eop_descs", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_alloc_page_failed", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_alloc_buff_failed", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_alloc_page", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_csum_offload_errs", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_csum_offload_good", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_poll_again_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_rm_vlan_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_hw_head", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_hw_tail", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_sw_next_to_use", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_sw_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_next_to_clean", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_irq_miss", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_equal_count", i); + p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_clean_times", i); + 
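As an aside on the per-queue string table being filled in above: each name occupies one fixed ETH_GSTRING_LEN slot and the cursor advances one slot per name, so the ordering here must stay in lockstep with the per-queue values written by rnp10_get_ethtool_stats(). A minimal standalone sketch of that copy-and-advance pattern (plain userspace C, helper name hypothetical, not part of the patch):

#include <stdio.h>

#define ETH_GSTRING_LEN 32	/* same fixed slot size ethtool uses */

/* hypothetical helper: format one stat name into its slot, return the next slot */
static char *emit_stat_name(char *p, const char *fmt, unsigned int q)
{
	snprintf(p, ETH_GSTRING_LEN, fmt, q);
	return p + ETH_GSTRING_LEN;
}

int main(void)
{
	char table[2 * ETH_GSTRING_LEN] = { 0 };
	char *p = table;

	p = emit_stat_name(p, "queue%u_rx_clean_times", 0);
	p = emit_stat_name(p, "queue%u_rx_clean_count", 0);
	printf("%s / %s\n", table, table + ETH_GSTRING_LEN);
	return 0;
}

Recent kernels also provide ethtool_sprintf(&data, fmt, ...), which performs the same format-then-advance step internally and would shorten the long sprintf()/p += ETH_GSTRING_LEN runs in this function.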
p += ETH_GSTRING_LEN; + sprintf(p, "queue%u_rx_clean_count", i); + p += ETH_GSTRING_LEN; + } + + break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, rnp10_priv_flags_strings, + RNP10_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); + break; + } +} + +static int rnp10_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + /* now we don't support test */ + case ETH_SS_TEST: + return RNP10_TEST_LEN; + case ETH_SS_STATS: + return RNP10_STATS_LEN; + case ETH_SS_PRIV_FLAGS: + return RNP10_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } +} + +static u32 rnp10_get_priv_flags(struct net_device *netdev) +{ + struct rnp_adapter *adapter = (struct rnp_adapter *)netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) + priv_flags |= RNP10_MAC_LOOPBACK; + if (adapter->priv_flags & RNP_PRIV_FLAG_SWITCH_LOOPBACK) + priv_flags |= RNP10_SWITCH_LOOPBACK; + if (adapter->priv_flags & RNP_PRIV_FLAG_VEB_ENABLE) + priv_flags |= RNP10_VEB_ENABLE; + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) + priv_flags |= RNP10_FT_PADDING; + if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG) + priv_flags |= RNP10_PADDING_DEBUG; + if (adapter->priv_flags & RNP_PRIV_FLAG_PTP_DEBUG) + priv_flags |= RNP10_PTP_FEATURE; + if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN) + priv_flags |= RNP10_SIMULATE_DOWN; + if (adapter->priv_flags & RNP_PRIV_FLAG_VXLAN_INNER_MATCH) + priv_flags |= RNP10_VXLAN_INNER_MATCH; + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + priv_flags |= RNP10_STAG_ENABLE; + if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) + priv_flags |= RNP10_REC_HDR_LEN_ERR; + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) + priv_flags |= RNP10_SRIOV_VLAN_MODE; + if (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_MODE) + priv_flags |= RNP10_REMAP_MODE; + if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP_EN_STAT) + priv_flags |= RNP10_LLDP_EN_STAT; + if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) + priv_flags |= RNP10_FORCE_CLOSE; + + return priv_flags; +} + +static int rnp10_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct rnp_adapter *adapter = (struct rnp_adapter *)netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + u32 data_old; + u32 data_new; + + data_old = dma_rd32(dma, RNP_DMA_CONFIG); + data_new = data_old; + + if (priv_flags & RNP10_MAC_LOOPBACK) { + SET_BIT(n10_mac_loopback, data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_MAC_LOOPBACK; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_MAC_LOOPBACK); + CLR_BIT(n10_mac_loopback, data_new); + } + + if (priv_flags & RNP10_LLDP_EN_STAT) { + if (rnp_mbx_lldp_port_enable(hw, true) == 0) { + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + } else { + rnp_err("%s: set lldp enable faild!\n", + adapter->netdev->name); + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + } + } else if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP_EN_STAT) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + rnp_mbx_lldp_port_enable(hw, false); + } + + if (priv_flags & RNP10_SWITCH_LOOPBACK) { + SET_BIT(n10_switch_loopback, data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_SWITCH_LOOPBACK; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_SWITCH_LOOPBACK) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_SWITCH_LOOPBACK); + CLR_BIT(n10_switch_loopback, data_new); + } + + if (priv_flags & RNP10_VEB_ENABLE) { + SET_BIT(n10_veb_enable, 
data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_VEB_ENABLE; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_VEB_ENABLE) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_VEB_ENABLE); + CLR_BIT(n10_veb_enable, data_new); + } + + if (priv_flags & RNP10_FT_PADDING) { + SET_BIT(n10_padding_enable, data_new); + adapter->priv_flags |= RNP_PRIV_FLAG_FT_PADDING; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_FT_PADDING); + CLR_BIT(n10_padding_enable, data_new); + } + + if (priv_flags & RNP10_PADDING_DEBUG) + adapter->priv_flags |= RNP_PRIV_FLAG_PADDING_DEBUG; + else if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG) + adapter->priv_flags &= (~RNP_PRIV_FLAG_PADDING_DEBUG); + + if (priv_flags & RNP10_PTP_FEATURE) { + adapter->priv_flags |= RNP_PRIV_FLAG_PTP_DEBUG; + adapter->flags2 |= ~RNP_FLAG2_PTP_ENABLED; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_PTP_DEBUG) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_PTP_DEBUG); + adapter->flags2 &= (~RNP_FLAG2_PTP_ENABLED); + } + + if (priv_flags & RNP10_SIMULATE_DOWN) { + adapter->priv_flags |= RNP_PRIV_FLAG_SIMUATE_DOWN; + /* set check link again */ + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + } else if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_SIMUATE_DOWN); + /* set check link again */ + adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE; + } + + if (priv_flags & RNP10_VXLAN_INNER_MATCH) { + adapter->priv_flags |= RNP_PRIV_FLAG_VXLAN_INNER_MATCH; + hw->ops.set_vxlan_mode(hw, true); + } else if (adapter->priv_flags & RNP_PRIV_FLAG_VXLAN_INNER_MATCH) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_VXLAN_INNER_MATCH); + hw->ops.set_vxlan_mode(hw, false); + } + + if (priv_flags & RNP10_STAG_ENABLE) + adapter->flags2 |= RNP_FLAG2_VLAN_STAGS_ENABLED; + else + adapter->flags2 &= (~RNP_FLAG2_VLAN_STAGS_ENABLED); + + if (priv_flags & RNP10_REC_HDR_LEN_ERR) { + adapter->priv_flags |= RNP_PRIV_FLAG_REC_HDR_LEN_ERR; + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, + INNER_L4_BIT | PKT_LEN_ERR | HDR_LEN_ERR); + + } else if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) { + adapter->priv_flags &= (~RNP_PRIV_FLAG_REC_HDR_LEN_ERR); + eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, INNER_L4_BIT); + } + + if (priv_flags & RNP10_REMAP_MODE) + adapter->priv_flags |= RNP_PRIV_FLAG_REMAP_MODE; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_REMAP_MODE); + + if (priv_flags & RNP10_SRIOV_VLAN_MODE) { + int i; + + adapter->priv_flags |= RNP_PRIV_FLAG_SRIOV_VLAN_MODE; + if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE)) + goto skip_setup_vf_vlan; + /* should setup vlvf table */ + for (i = 0; i < adapter->num_vfs; i++) { + if (hw->ops.set_vf_vlan_mode) { + if (adapter->vfinfo[i].vf_vlan) + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[i].vf_vlan, + i, true); + + if (adapter->vfinfo[i].pf_vlan) + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[i].pf_vlan, + i, true); + } + } + + } else if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) { + int i; + adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE); + /* should clean vlvf table */ + for (i = 0; i < hw->max_vfs; i++) { + if (hw->ops.set_vf_vlan_mode) + hw->ops.set_vf_vlan_mode(hw, 0, i, false); + } + } + + if (hw->force_link_supported) { + if (priv_flags & RNP10_FORCE_CLOSE) { + if (!(adapter->priv_flags & + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + adapter->priv_flags |= + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + if (hw->ops.driver_status) { + hw->ops.driver_status( + hw, true, + rnp_driver_force_control_mac); + } + } + } else { + 
if (adapter->priv_flags & + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) { + adapter->priv_flags &= + (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + if (hw->ops.driver_status) { + hw->ops.driver_status( + hw, false, + rnp_driver_force_control_mac); + } + } + } + } else { + if (priv_flags & RNP10_FORCE_CLOSE) + rnp_err("%s: firmware not support set `link_down_on_close` private flag\n", + adapter->netdev->name); + } + +skip_setup_vf_vlan: + + dbg("data new is %x\n", data_new); + if (data_old != data_new) + dma_wr32(dma, RNP_DMA_CONFIG, data_new); + /* if ft_padding changed */ + if (CHK_BIT(n10_padding_enable, data_old) != + CHK_BIT(n10_padding_enable, data_new)) { + rnp_msg_post_status(adapter, PF_FT_PADDING_STATUS); + } + + return 0; +} + +static void rnp10_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct net_device_stats *net_stats = &netdev->stats; + struct rnp_ring *ring; + int i, j; + char *p = NULL; + + rnp_update_stats(adapter); + + for (i = 0; i < RNP10_GLOBAL_STATS_LEN; i++) { + p = (char *)net_stats + rnp10_gstrings_net_stats[i].stat_offset; + data[i] = (rnp10_gstrings_net_stats[i].sizeof_stat == + sizeof(u64)) ? + *(u64 *)p : + *(u32 *)p; + } + for (j = 0; j < RNP10_HWSTRINGS_STATS_LEN; j++, i++) { + p = (char *)adapter + rnp10_hwstrings_stats[j].stat_offset; + data[i] = + (rnp10_hwstrings_stats[j].sizeof_stat == sizeof(u64)) ? + *(u64 *)p : + *(u32 *)p; + } + + BUG_ON(RNP_NUM_TX_QUEUES != RNP_NUM_RX_QUEUES); + + for (j = 0; j < RNP_NUM_TX_QUEUES; j++) { + int idx; + /* tx-ring */ + ring = adapter->tx_ring[j]; + if (!ring) { + /* tx */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + /* rx */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + continue; + } + idx = ring->rnp_queue_idx; + + data[i++] = ring->stats.packets; + data[i++] = ring->stats.bytes; + data[i++] = ring->tx_stats.restart_queue; + data[i++] = ring->tx_stats.tx_busy; + data[i++] = ring->tx_stats.tx_done_old; + data[i++] = ring->tx_stats.clean_desc; + data[i++] = ring->tx_stats.poll_count; + data[i++] = ring->tx_stats.irq_more_count; + + /* rnp_tx_queue_ring_stat */ + data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD); + data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL); + data[i++] = ring->next_to_clean; + data[i++] = ring->next_to_use; + data[i++] = ring->tx_stats.send_bytes; + data[i++] = ring->tx_stats.send_bytes_to_hw; + data[i++] = ring->tx_stats.todo_update; + data[i++] = ring->tx_stats.send_done_bytes; + data[i++] = ring->tx_stats.vlan_add; + if (ring->tx_stats.tx_next_to_clean == -1) + data[i++] = ring->count; + else + data[i++] = ring->tx_stats.tx_next_to_clean; + data[i++] = ring->tx_stats.tx_irq_miss; + data[i++] = ring->tx_stats.tx_equal_count; + data[i++] = ring->tx_stats.tx_clean_times; + data[i++] = ring->tx_stats.tx_clean_count; + + /* rx-ring */ + ring = 
adapter->rx_ring[j]; + if (!ring) { + /* rx */ + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + continue; + } + idx = ring->rnp_queue_idx; + data[i++] = ring->stats.packets; + data[i++] = ring->stats.bytes; + + data[i++] = ring->rx_stats.driver_drop_packets; + data[i++] = ring->rx_stats.rsc_count; + data[i++] = ring->rx_stats.rsc_flush; + data[i++] = ring->rx_stats.non_eop_descs; + data[i++] = ring->rx_stats.alloc_rx_page_failed; + data[i++] = ring->rx_stats.alloc_rx_buff_failed; + data[i++] = ring->rx_stats.alloc_rx_page; + data[i++] = ring->rx_stats.csum_err; + data[i++] = ring->rx_stats.csum_good; + data[i++] = ring->rx_stats.poll_again_count; + data[i++] = ring->rx_stats.vlan_remove; + + /* rnp_rx_queue_ring_stat */ + data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD); + data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_TAIL); + data[i++] = ring->next_to_use; + data[i++] = ring->next_to_clean; + if (ring->rx_stats.rx_next_to_clean == -1) + data[i++] = ring->count; + else + data[i++] = ring->rx_stats.rx_next_to_clean; + data[i++] = ring->rx_stats.rx_irq_miss; + data[i++] = ring->rx_stats.rx_equal_count; + data[i++] = ring->rx_stats.rx_clean_times; + data[i++] = ring->rx_stats.rx_clean_count; + } +} + +/* n10 ethtool_ops ops here */ +static const struct ethtool_ops rnp10_ethtool_ops = { + + .get_link_ksettings = rnp10_get_link_ksettings, + .set_link_ksettings = rnp10_set_link_ksettings, + .get_drvinfo = rnp10_get_drvinfo, + .get_regs_len = rnp10_get_regs_len, + .get_regs = rnp10_get_regs, + .get_wol = rnp_get_wol, + .set_wol = rnp_set_wol, + .nway_reset = rnp_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = rnp_get_ringparam, + .set_ringparam = rnp_set_ringparam, + .get_pauseparam = rnp10_get_pauseparam, + .set_pauseparam = rnp10_set_pauseparam, + .get_msglevel = rnp_get_msglevel, + .set_msglevel = rnp_set_msglevel, + .get_fecparam = rnp_get_fecparam, + .set_fecparam = rnp_set_fecparam, + .self_test = rnp_diag_test, + .get_strings = rnp10_get_strings, + .set_phys_id = rnp_set_phys_id, + .get_sset_count = rnp10_get_sset_count, + .get_priv_flags = rnp10_get_priv_flags, + .set_priv_flags = rnp10_set_priv_flags, + .get_ethtool_stats = rnp10_get_ethtool_stats, + .get_coalesce = rnp_get_coalesce, + .set_coalesce = rnp_set_coalesce, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_MAX_FRAMES, + .get_rxnfc = rnp_get_rxnfc, + .set_rxnfc = rnp_set_rxnfc, + .get_channels = rnp_get_channels, + .set_channels = rnp_set_channels, + .get_module_info = rnp_get_module_info, + .get_module_eeprom = rnp_get_module_eeprom, + .get_ts_info = rnp_get_ts_info, + .get_rxfh_indir_size = rnp_rss_indir_size, + .get_rxfh_key_size = rnp_get_rxfh_key_size, + .get_rxfh = rnp_get_rxfh, + .set_rxfh = rnp_set_rxfh, + .get_dump_flag = rnp_get_dump_flag, + .get_dump_data = rnp_get_dump_data, + .set_dump = rnp_set_dump, + .flash_device = rnp_flash_device, +}; + +static void rnp_set_ethtool_hw_ops_n10(struct net_device *netdev) +{ + netdev->ethtool_ops = &rnp10_ethtool_ops; +} + +/** + * rnp_get_thermal_sensor_data_hw_ops_n10 - Gathers thermal sensor data + * @hw: pointer to hardware structure + * Returns the thermal 
sensor data structure + **/ +static s32 rnp_get_thermal_sensor_data_hw_ops_n10(struct rnp_hw *hw) +{ + int voltage = 0; + struct rnp_thermal_sensor_data *data = &hw->thermal_sensor_data; + + data->sensor[0].temp = rnp_mbx_get_temp(hw, &voltage); + + return 0; +} + +/** + * rnp_init_thermal_sensor_thresh_hw_ops_n10 - Inits thermal sensor thresholds + * @hw: pointer to hardware structure + * Inits the thermal sensor thresholds according to the NVM map + * and save off the threshold and location values into mac.thermal_sensor_data + **/ +static s32 rnp_init_thermal_sensor_thresh_hw_ops_n10(struct rnp_hw *hw) +{ + u8 i; + struct rnp_thermal_sensor_data *data = &hw->thermal_sensor_data; + + for (i = 0; i < RNP_MAX_SENSORS; i++) { + data->sensor[i].location = i + 1; + data->sensor[i].caution_thresh = 90; + data->sensor[i].max_op_thresh = 100; + } + + return 0; +} + +static s32 rnp_phy_read_reg_hw_ops_n10(struct rnp_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status = 0; + u32 data = 0; + + status = rnp_mbx_phy_read(hw, reg_addr, &data); + *phy_data = data & 0xffff; + + return status; +} + +static s32 rnp_phy_write_reg_hw_ops_n10(struct rnp_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status = 0; + + status = rnp_mbx_phy_write(hw, reg_addr, (u32)phy_data); + + return status; +} + +static void rnp_set_vf_vlan_mode_hw_ops_n10(struct rnp_hw *hw, u16 vlan, int vf, + bool enable) +{ + struct rnp_eth_info *eth = &hw->eth; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) + eth->ops.set_vf_vlan_mode(eth, vlan, vf, enable); +} + +static void rnp_driver_status_hw_ops_n10(struct rnp_hw *hw, bool enable, int mode) +{ + switch (mode) { + case rnp_driver_insmod: + rnp_mbx_ifinsmod(hw, enable); + break; + case rnp_driver_suspuse: + rnp_mbx_ifsuspuse(hw, enable); + break; + case rnp_driver_force_control_mac: + rnp_mbx_ifforce_control_mac(hw, enable); + + break; + } +} + +static struct rnp_hw_operations hw_ops_n10 = { + .init_hw = &rnp_init_hw_ops_n10, + .reset_hw = &rnp_reset_hw_ops_n10, + .start_hw = &rnp_start_hw_ops_n10, + .set_mtu = &rnp_set_mtu_hw_ops_n10, + .set_vlan_filter_en = &rnp_set_vlan_filter_en_hw_ops_n10, + .set_vlan_filter = &rnp_set_vlan_filter_hw_ops_n10, + .set_vf_vlan_filter = &rnp_set_vf_vlan_filter_hw_ops_n10, + .set_vlan_strip = &rnp_set_vlan_strip_hw_ops_n10, + .set_mac = &rnp_set_mac_hw_ops_n10, + .set_rx_mode = &rnp_set_rx_mode_hw_ops_n10, + .set_rar_with_vf = &rnp_set_rar_with_vf_hw_ops_n10, + .clr_rar = &rnp_clr_rar_hw_ops_n10, + .clr_rar_all = &rnp_clr_rar_all_hw_ops_n10, + .clr_vlan_veb = &rnp_clr_vlan_veb_hw_ops_n10, + .set_txvlan_mode = &rnp_set_txvlan_mode_hw_ops_n10, + .set_fcs_mode = &rnp_set_fcs_mode_hw_ops_n10, + .set_vxlan_port = &rnp_set_vxlan_port_hw_ops_n10, + .set_vxlan_mode = &rnp_set_vxlan_mode_hw_ops_n10, + .set_mac_rx = &rnp_set_mac_rx_hw_ops_n10, + .set_rx_hash = &rnp_set_rx_hash_hw_ops_n10, + .set_pause_mode = &rnp_set_pause_mode_hw_ops_n10, + .get_pause_mode = &rnp_get_pause_mode_hw_ops_n10, + .update_hw_info = &rnp_update_hw_info_hw_ops_n10, + .update_rx_drop = &rnp_update_hw_rx_drop_hw_ops_n10, + .update_sriov_info = &rnp_update_sriov_info_hw_ops_n10, + .set_sriov_status = &rnp_set_sriov_status_hw_ops_n10, + .set_sriov_vf_mc = &rnp_set_sriov_vf_mc_hw_ops_n10, + .init_rx_addrs = &rnp_init_rx_addrs_hw_ops_n10, + .clr_vfta = &rnp_clr_vfta_hw_ops_n10, + .set_rss_key = &rnp_set_rss_key_hw_ops_n10, + .set_rss_table = &rnp_set_rss_table_hw_ops_n10, + 
.update_hw_status = &rnp_update_hw_status_hw_ops_n10, + .set_mbx_link_event = &rnp_set_mbx_link_event_hw_ops_n10, + .set_mbx_ifup = &rnp_set_mbx_ifup_hw_ops_n10, + .check_link = &rnp_check_mac_link_hw_ops_n10, + .setup_link = &rnp_setup_mac_link_hw_ops_n10, + .clean_link = &rnp_clean_link_hw_ops_n10, + .set_layer2_remapping = &rnp_set_layer2_hw_ops_n10, + .clr_layer2_remapping = &rnp_clr_layer2_hw_ops_n10, + .clr_all_layer2_remapping = &rnp_clr_all_layer2_hw_ops_n10, + .set_tuple5_remapping = &rnp_set_tuple5_hw_ops_n10, + .clr_tuple5_remapping = &rnp_clr_tuple5_hw_ops_n10, + .clr_all_tuple5_remapping = &rnp_clr_all_tuple5_hw_ops_n10, + .set_tcp_sync_remapping = &rnp_set_tcp_sync_hw_ops_n10, + .update_msix_count = &rnp_update_msix_count_hw_ops_n10, + .get_thermal_sensor_data = &rnp_get_thermal_sensor_data_hw_ops_n10, + .init_thermal_sensor_thresh = + &rnp_init_thermal_sensor_thresh_hw_ops_n10, + .setup_ethtool = &rnp_set_ethtool_hw_ops_n10, + .phy_read_reg = &rnp_phy_read_reg_hw_ops_n10, + .phy_write_reg = &rnp_phy_write_reg_hw_ops_n10, + .set_vf_vlan_mode = &rnp_set_vf_vlan_mode_hw_ops_n10, + .driver_status = &rnp_driver_status_hw_ops_n10, +}; + +static void rnp_mac_set_rx_n10(struct rnp_mac_info *mac, bool status) +{ + struct rnp_hw *hw = (struct rnp_hw *)mac->back; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + + u32 value = 0; + u32 count = 0; + + if (status) { + do { + mac_wr32(mac, RNP10_MAC_RX_CFG, + mac_rd32(mac, RNP10_MAC_RX_CFG) | 0x01); + usleep_range(100, 200); + value = mac_rd32(mac, RNP10_MAC_RX_CFG); + count++; + if (count > 1000) { + printk("setup rx on timeout\n"); + break; + } + } while (!(value & 0x01)); + + if (adapter->flags & RNP_FLAG_SWITCH_LOOPBACK_EN) { + mac_wr32(mac, RNP10_MAC_PKT_FLT, BIT(31) | BIT(0)); + eth_wr32(&hw->eth, RNP10_ETH_DMAC_MCSTCTRL, 0x0); + } else { + do { + mac_wr32(mac, RNP10_MAC_RX_CFG, + mac_rd32(mac, RNP10_MAC_RX_CFG) & + (~0x400)); + usleep_range(100, 200); + value = mac_rd32(mac, RNP_MAC_RX_CFG); + count++; + if (count > 1000) { + printk("setup rx off timeout\n"); + break; + } + } while (value & 0x400); + if (hw->ncsi_en) + mac_wr32(mac, RNP10_MAC_PKT_FLT, 0x80000001); + else + mac_wr32(mac, RNP10_MAC_PKT_FLT, 0x00000001); + } + } else { + do { + mac_wr32(mac, RNP10_MAC_RX_CFG, + mac_rd32(mac, RNP10_MAC_RX_CFG) | 0x400); + usleep_range(100, 200); + value = mac_rd32(mac, RNP10_MAC_RX_CFG); + count++; + if (count > 1000) { + printk("setup rx on timeout\n"); + break; + } + } while (!(value & 0x400)); + mac_wr32(mac, RNP10_MAC_PKT_FLT, 0x0); + } +} + +static void rnp_mac_fcs_n10(struct rnp_mac_info *mac, bool status) +{ + u32 value; + +#define FCS_MASK (0x6) + value = mac_rd32(mac, RNP10_MAC_RX_CFG); + if (status) { + value &= (~FCS_MASK); + + } else { + value |= FCS_MASK; + } + + mac_wr32(mac, RNP10_MAC_RX_CFG, value); +} + +/** + * rnp_fc_mode_n10 - Enable flow control + * @hw: pointer to hardware structure + * + * Enable flow control according to the current settings. + **/ +static s32 rnp_mac_fc_mode_n10(struct rnp_mac_info *mac) +{ + struct rnp_hw *hw = (struct rnp_hw *)mac->back; + s32 ret_val = 0; + u32 reg; + u32 rxctl_reg, txctl_reg[RNP_MAX_TRAFFIC_CLASS]; + int i; + + /* + * Validate the water mark configuration for packet buffer 0. Zero + * water marks indicate that the packet buffer was not configured + * and the watermarks for packet buffer 0 should always be configured. 
+ */ + if (!hw->fc.pause_time) { + ret_val = RNP_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Disable any previous flow control settings */ + rxctl_reg = mac_rd32(mac, RNP10_MAC_RX_FLOW_CTRL); + rxctl_reg &= (~RNP10_RX_FLOW_ENABLE_MASK); + + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + txctl_reg[i] = mac_rd32(mac, RNP10_MAC_Q0_TX_FLOW_CTRL(i)); + txctl_reg[i] &= (~RNP10_TX_FLOW_ENABLE_MASK); + } + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case rnp_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case rnp_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + rxctl_reg |= (RNP10_RX_FLOW_ENABLE_MASK); + break; + case rnp_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) + txctl_reg[i] |= (RNP10_TX_FLOW_ENABLE_MASK); + break; + case rnp_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + rxctl_reg |= (RNP10_RX_FLOW_ENABLE_MASK); + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) + txctl_reg[i] |= (RNP10_TX_FLOW_ENABLE_MASK); + break; + default: + hw_dbg(hw, "Flow control param set incorrectly\n"); + ret_val = RNP_ERR_CONFIG; + goto out; + } + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time; + for (i = 0; i < (RNP_MAX_TRAFFIC_CLASS); i++) + txctl_reg[i] |= (reg << 16); + + /* Set 802.3x based flow control settings. 
*/ + mac_wr32(mac, RNP10_MAC_RX_FLOW_CTRL, rxctl_reg); + for (i = 0; i < (RNP_MAX_TRAFFIC_CLASS); i++) + mac_wr32(mac, RNP10_MAC_Q0_TX_FLOW_CTRL(i), txctl_reg[i]); +out: + return ret_val; +} + +static void rnp_mac_set_mac_n10(struct rnp_mac_info *mac, u8 *addr, int index) +{ + u32 rar_low, rar_high = 0; + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + + rar_high = RNP_RAH_AV | ((u32)addr[4] | (u32)addr[5] << 8); + + mac_wr32(mac, RNP10_MAC_UNICAST_HIGH(index), rar_high); + mac_wr32(mac, RNP10_MAC_UNICAST_LOW(index), rar_low); +} + +static struct rnp_mac_operations mac_ops_n10 = { + .set_mac_rx = &rnp_mac_set_rx_n10, + .set_mac_fcs = &rnp_mac_fcs_n10, + .set_fc_mode = &rnp_mac_fc_mode_n10, + .set_mac = &rnp_mac_set_mac_n10, +}; + +static s32 rnp_get_invariants_n10(struct rnp_hw *hw) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mbx_info *mbx = &hw->mbx; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int i; + + /* setup dma info */ + dma->dma_base_addr = hw->hw_addr; + dma->dma_ring_addr = hw->hw_addr + RNP10_RING_BASE; + dma->max_tx_queues = RNP_N10_MAX_TX_QUEUES; + dma->max_rx_queues = RNP_N10_MAX_RX_QUEUES; + dma->back = hw; + memcpy(&hw->dma.ops, &dma_ops_n10, sizeof(hw->dma.ops)); + + /* setup eth info */ + memcpy(&hw->eth.ops, ð_ops_n10, sizeof(hw->eth.ops)); + + eth->eth_base_addr = hw->hw_addr + RNP10_ETH_BASE; + printk(" eth_base is %p\n", eth->eth_base_addr); + eth->back = hw; + eth->mc_filter_type = 0; + eth->mcft_size = RNP_N10_MC_TBL_SIZE; + eth->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + eth->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + eth->num_rar_entries = RNP_N10_RAR_ENTRIES; + + eth->max_rx_queues = RNP_N10_MAX_RX_QUEUES; + eth->max_tx_queues = RNP_N10_MAX_TX_QUEUES; + + /* setup mac info */ + memcpy(&hw->mac.ops, &mac_ops_n10, sizeof(hw->mac.ops)); + mac->mac_addr = hw->hw_addr + RNP10_MAC_BASE; + mac->back = hw; + mac->mac_type = mac_dwc_xlg; + /* move this to eth todo */ + mac->mc_filter_type = 0; + mac->mcft_size = RNP_N10_MC_TBL_SIZE; + mac->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + mac->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + mac->num_rar_entries = RNP_N10_RAR_ENTRIES; + mac->max_rx_queues = RNP_N10_MAX_RX_QUEUES; + mac->max_tx_queues = RNP_N10_MAX_TX_QUEUES; + mac->max_msix_vectors = RNP_N10_MSIX_VECTORS; + if (!hw->axi_mhz) + hw->usecstocount = 500; + else + hw->usecstocount = hw->axi_mhz; + + /* set up hw feature */ + hw->feature_flags |= + RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM | + RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO | + RNP_NET_FEATURE_TX_UDP_TUNNEL | RNP_NET_FEATURE_VLAN_FILTER | + RNP_NET_FEATURE_VLAN_OFFLOAD | + RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_TCAM | + RNP_NET_FEATURE_RX_HASH | RNP_NET_FEATURE_RX_FCS; + /* maybe supported future*/ + /* setup some fdir resource */ + hw->min_length = RNP_MIN_MTU; + hw->max_length = RNP_MAX_JUMBO_FRAME_SIZE; + hw->max_msix_vectors = RNP_N10_MSIX_VECTORS; + if (hw->eco) + hw->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + hw->num_rar_entries = RNP_N10_RAR_ENTRIES; + hw->fdir_mode = fdir_mode_tuple5; + hw->max_vfs = RNP_N10_MAX_VF; + hw->max_vfs_noari = 3; + hw->sriov_ring_limit = 2; + /* some user only want 1 queue for each vf */ + hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N10; + hw->wol_supported = WAKE_MAGIC; + /* ncsi */ + hw->ncsi_vf_cpu_shm_pf_base = RNP_VF_CPU_SHM_BASE_NR62; + 
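Returning briefly to rnp_mac_set_mac_n10() earlier in this hunk: it packs the six MAC-address bytes into two 32-bit unicast registers, low bytes first, with a valid bit set in the high word. A standalone sketch of that packing (userspace C; the RNP_RAH_AV value below is assumed purely so the printed result is concrete, the driver's actual constant may differ):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define RNP_RAH_AV 0x80000000U	/* assumed "address valid" bit for this sketch */

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t rar_low, rar_high;

	/* bytes 0-3, least significant byte first, into the low register */
	rar_low = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
		  ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
	/* bytes 4-5 plus the valid bit into the high register */
	rar_high = RNP_RAH_AV | (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

	printf("low=0x%08" PRIx32 " high=0x%08" PRIx32 "\n", rar_low, rar_high);
	/* prints: low=0x33221100 high=0x80005544 */
	return 0;
}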
hw->ncsi_mc_count = RNP_NCSI_MC_COUNT; + hw->ncsi_vlan_count = RNP_NCSI_VLAN_COUNT; + /* we suppose 1536 */ + hw->dma_split_size = 1536; + if (hw->fdir_mode == fdir_mode_tcam) { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TCAM_FILTERS - 1; + } else { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TUPLE5_FILTERS - 1; + } + + hw->default_rx_queue = 0; + hw->rss_indir_tbl_num = RNP_N10_RSS_TBL_NUM; + hw->rss_tc_tbl_num = RNP_N10_RSS_TC_TBL_NUM; + /* vf use the last vfnum */ + hw->vfnum = RNP_N10_MAX_VF - 1; + hw->feature_flags |= RNP_NET_FEATURE_VF_FIXED; + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->veb_ring = 0; + else + hw->veb_ring = RNP_N10_MAX_RX_QUEUES; + + memcpy(&hw->ops, &hw_ops_n10, sizeof(hw->ops)); + /* PHY */ + /* setup pcs */ + memcpy(&hw->pcs.ops, &pcs_ops_generic, sizeof(hw->pcs.ops)); + mbx->mbx_feature |= MBX_FEATURE_WRITE_DELAY; + mbx->vf2pf_mbox_vec_base = 0xa5100; + mbx->cpu2pf_mbox_vec = 0xa5300; + mbx->pf_vf_shm_base = 0xa6000; + mbx->mbx_mem_size = 64; + mbx->pf2vf_mbox_ctrl_base = 0xa7100; + mbx->pf_vf_mbox_mask_lo = 0xa7200; + mbx->pf_vf_mbox_mask_hi = 0xa7300; + mbx->cpu_pf_shm_base = 0xaa000; + mbx->pf2cpu_mbox_ctrl = 0xaa100; + mbx->cpu_pf_mbox_mask = 0xaa300; + adapter->drop_time = 100; + hw->fc.requested_mode = PAUSE_TX | PAUSE_RX; + hw->fc.pause_time = RNP_DEFAULT_FCPAUSE; + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + hw->fc.high_water[i] = RNP10_DEFAULT_HIGH_WATER; + hw->fc.low_water[i] = RNP10_DEFAULT_LOW_WATER; + } +#ifdef FIX_MAC_PADDIN + adapter->priv_flags |= RNP_PRIV_FLAG_TX_PADDING; + +#endif + return 0; +} + +struct rnp_info rnp_n10_info = { + .one_pf_with_two_dma = false, + .total_queue_pair_cnts = RNP_N10_MAX_TX_QUEUES, + .adapter_cnt = 1, + .rss_type = rnp_rss_n10, + .hw_type = rnp_hw_n10, + .get_invariants = &rnp_get_invariants_n10, + .mac_ops = &mac_ops_n10, + .eeprom_ops = NULL, + .mbx_ops = &mbx_ops_generic, + .pcs_ops = &pcs_ops_generic, +}; + +static s32 rnp_get_invariants_n400(struct rnp_hw *hw) +{ + struct rnp_mac_info *mac = &hw->mac; + struct rnp_dma_info *dma = &hw->dma; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mbx_info *mbx = &hw->mbx; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + int i; + /* setup dma info */ + dma->dma_base_addr = hw->hw_addr; + dma->dma_ring_addr = hw->hw_addr + RNP10_RING_BASE; + dma->max_tx_queues = RNP_N400_MAX_TX_QUEUES; + dma->max_rx_queues = RNP_N400_MAX_RX_QUEUES; + dma->back = hw; + memcpy(&hw->dma.ops, &dma_ops_n10, sizeof(hw->dma.ops)); + + /* setup eth info */ + memcpy(&hw->eth.ops, ð_ops_n10, sizeof(hw->eth.ops)); + eth->eth_base_addr = hw->hw_addr + RNP10_ETH_BASE; + eth->back = hw; + eth->mc_filter_type = 0; + eth->mcft_size = RNP_N10_MC_TBL_SIZE; + eth->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + eth->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + eth->num_rar_entries = RNP_N10_RAR_ENTRIES; + eth->max_rx_queues = RNP_N400_MAX_RX_QUEUES; + eth->max_tx_queues = RNP_N400_MAX_TX_QUEUES; + + /* setup mac info */ + memcpy(&hw->mac.ops, &mac_ops_n10, sizeof(hw->mac.ops)); + mac->mac_addr = hw->hw_addr + RNP10_MAC_BASE; + mac->back = hw; + mac->mac_type = mac_dwc_xlg; + /* move this to eth todo */ + mac->mc_filter_type = 0; + mac->mcft_size = RNP_N10_MC_TBL_SIZE; + mac->vft_size = RNP_N10_VFT_TBL_SIZE; + if (hw->eco) + mac->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + mac->num_rar_entries = RNP_N10_RAR_ENTRIES; + mac->max_rx_queues = RNP_N400_MAX_RX_QUEUES; + 
mac->max_tx_queues = RNP_N400_MAX_TX_QUEUES; + mac->max_msix_vectors = RNP_N400_MSIX_VECTORS; + if (!hw->axi_mhz) + hw->usecstocount = 125; + else + hw->usecstocount = hw->axi_mhz; + /* set up hw feature */ + hw->feature_flags |= + RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM | + RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO | + RNP_NET_FEATURE_TX_UDP_TUNNEL | RNP_NET_FEATURE_VLAN_FILTER | + RNP_NET_FEATURE_VLAN_OFFLOAD | + RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_TCAM | + RNP_NET_FEATURE_RX_HASH | RNP_NET_FEATURE_RX_FCS; + /* setup some fdir resource */ + hw->min_length = RNP_MIN_MTU; + hw->max_length = RNP_MAX_JUMBO_FRAME_SIZE; + hw->max_msix_vectors = RNP_N400_MSIX_VECTORS; + if (hw->eco) + hw->num_rar_entries = RNP_N10_RAR_ENTRIES - 1; + else + hw->num_rar_entries = RNP_N10_RAR_ENTRIES; + hw->fdir_mode = fdir_mode_tuple5; + hw->max_vfs = RNP_N400_MAX_VF; + hw->max_vfs_noari = 3; + /* n400 only use 1 ring for each vf */ + hw->sriov_ring_limit = 1; + hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N10; + /* ncsi */ + hw->ncsi_vf_cpu_shm_pf_base = RNP_VF_CPU_SHM_BASE_NR62; + hw->ncsi_mc_count = RNP_NCSI_MC_COUNT; + hw->ncsi_vlan_count = RNP_NCSI_VLAN_COUNT; + + if (hw->fdir_mode == fdir_mode_tcam) { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TCAM_FILTERS - 1; + } else { + hw->layer2_count = RNP10_MAX_LAYER2_FILTERS - 1; + hw->tuple5_count = RNP10_MAX_TUPLE5_FILTERS - 1; + } + + hw->default_rx_queue = 0; + hw->rss_indir_tbl_num = RNP_N10_RSS_TBL_NUM; + hw->rss_tc_tbl_num = RNP_N10_RSS_TC_TBL_NUM; + /* vf use the last vfnum */ + hw->vfnum = RNP_N400_MAX_VF - 1; + + /* n400 should fix_vf_bug */ + hw->feature_flags |= RNP_NET_FEATURE_VF_FIXED; + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + hw->veb_ring = 0; + hw->default_vf_num = 0; + } else { + hw->veb_ring = RNP_N400_MAX_RX_QUEUES; + hw->default_vf_num = RNP_N10_MAX_VF - 1; + } + + memcpy(&hw->ops, &hw_ops_n10, sizeof(hw->ops)); + /* setup pcs */ + memcpy(&hw->pcs.ops, &pcs_ops_generic, sizeof(hw->pcs.ops)); + mbx->mbx_feature |= MBX_FEATURE_WRITE_DELAY; + mbx->vf2pf_mbox_vec_base = 0xa5100; + mbx->cpu2pf_mbox_vec = 0xa5300; + mbx->pf_vf_shm_base = 0xa6000; + mbx->mbx_mem_size = 64; + mbx->pf2vf_mbox_ctrl_base = 0xa7100; + mbx->pf_vf_mbox_mask_lo = 0xa7200; + mbx->pf_vf_mbox_mask_hi = 0xa7300; + mbx->cpu_pf_shm_base = 0xaa000; + mbx->pf2cpu_mbox_ctrl = 0xaa100; + mbx->cpu_pf_mbox_mask = 0xaa300; + + adapter->drop_time = 100; + /* initialization default pause flow */ + hw->fc.requested_mode |= PAUSE_AUTO; + hw->fc.pause_time = RNP_DEFAULT_FCPAUSE; + for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) { + hw->fc.high_water[i] = RNP10_DEFAULT_HIGH_WATER; + hw->fc.low_water[i] = RNP10_DEFAULT_LOW_WATER; + } + + hw->autoneg = 1; + hw->tp_mdix_ctrl = ETH_TP_MDI_AUTO; + + return 0; +} + +struct rnp_info rnp_n400_info = { + .one_pf_with_two_dma = false, + .total_queue_pair_cnts = RNP_N400_MAX_TX_QUEUES, + .adapter_cnt = 1, + .rss_type = rnp_rss_n10, + .hw_type = rnp_hw_n400, + .get_invariants = &rnp_get_invariants_n400, + .mac_ops = &mac_ops_n10, + .eeprom_ops = NULL, + .mbx_ops = &mbx_ops_generic, + .pcs_ops = &pcs_ops_generic, +}; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_param.c b/drivers/net/ethernet/mucse/rnp/rnp_param.c new file mode 100644 index 0000000000000..550974280cb76 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_param.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include + +#include "rnp.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define RNP_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ +#define XSTRINGIFY(bar) STRINGIFY(bar) + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define RNP_PARAM_INIT \ + { \ + [0 ... RNP_MAX_NIC] = OPTION_UNSET \ + } + +#define RNP_PARAM(X, desc) \ + static int X[RNP_MAX_NIC + 1] = RNP_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); +/* IntMode (Interrupt Mode) + * + * Valid Range: 0-2 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 2 + */ +RNP_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " + "default 2"); +#define RNP_INT_LEGACY 0 +#define RNP_INT_MSI 1 +#define RNP_INT_MSIX 2 + +#ifdef CONFIG_PCI_IOV +/* max_vfs - SR I/O Virtualization + * + * Valid Range: 0-63 for n10 + * Valid Range: 0-7 for n400/n10 + * - 0 Disables SR-IOV + * - 1-x - enables SR-IOV and sets the number of VFs enabled + * + * Default Value: 0 + */ + +RNP_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " + "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " + "this many VFs"); + +/* SRIOV_Mode (SRIOV Mode) + * + * Valid Range: 0-1 + * - 0 - Legacy Interrupt + * - 1 - MSI Interrupt + * - 2 - MSI-X Interrupt(s) + * + * Default Value: 0 + */ +RNP_PARAM(SRIOV_Mode, "Change SRIOV Mode (0=MAC_MODE, 1=VLAN_MODE), " + "default 0"); +#define RNP_SRIOV_MAC_MODE 0 +#define RNP_SRIOV_VLAN_MODE 1 +#endif + +/* pf_msix_counts_set - Limit max msix counts + * + * Valid Range: 2-63 for n10 + * Valid Range: 2-7 for n400/n10 + * + * Default Value: 0 (un-limit) + */ +RNP_PARAM(pf_msix_counts_set, "Number of Max MSIX Count: (default un-limit)"); +#define RNP_INT_MIN 2 + +struct rnp_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + const char *msg; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct rnp_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int rnp_validate_option(struct net_device *netdev, unsigned int *value, + struct rnp_option *opt) +{ + if (*value == OPTION_UNSET) { + netdev_info(netdev, "Invalid %s specified (%d), %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_info(netdev, "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_info(netdev, "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) || + *value == opt->def) { + if (opt->msg) + netdev_info(netdev, "%s set to %d, %s\n", + opt->name, *value, opt->msg); + else + netdev_info(netdev, "%s set to %d\n", opt->name, + *value); + return 0; + } + break; + case list_option: { + int i; + + for (i = 0; i < opt->arg.l.nr; i++) { + const struct rnp_opt_list *ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_info(netdev, "%s\n", 
ent->str); + return 0; + } + } + } break; + default: + BUG(); + } + + netdev_info(netdev, "Invalid %s specified (%d), %s\n", opt->name, + *value, opt->err); + *value = opt->def; + return -1; +} + +#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) +#define PSTR_LEN 10 + +/** + * rnp_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void rnp_check_options(struct rnp_adapter *adapter) +{ + int bd = adapter->bd_number; + u32 *aflags = &adapter->flags; + + if (bd >= RNP_MAX_NIC) { + netdev_notice(adapter->netdev, + "Warning: no configuration for board #%d\n", bd); + netdev_notice(adapter->netdev, + "Using defaults for all values\n"); + } + + /* try to setup new irq mode */ + { /* Interrupt Mode */ + unsigned int int_mode; + static struct rnp_option opt = { + .type = range_option, + .name = "Interrupt Mode", + .err = "using default of " __MODULE_STRING( + RNP_INT_MSIX), + .def = RNP_INT_MSIX, + .arg = { .r = { .min = RNP_INT_LEGACY, + .max = RNP_INT_MSIX } } + }; + + if (num_IntMode > bd) { + int_mode = IntMode[bd]; + if (int_mode == OPTION_UNSET) + int_mode = RNP_INT_MSIX; + rnp_validate_option(adapter->netdev, &int_mode, &opt); + switch (int_mode) { + case RNP_INT_MSIX: + if (!(*aflags & RNP_FLAG_MSIX_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI-X setting; " + "support unavailable\n"); + } else + adapter->irq_mode = irq_mode_msix; + break; + case RNP_INT_MSI: + if (!(*aflags & RNP_FLAG_MSI_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI setting; " + "support unavailable\n"); + } else + adapter->irq_mode = irq_mode_msi; + + break; + case RNP_INT_LEGACY: + if (!(*aflags & RNP_FLAG_LEGACY_CAPABLE)) { + netdev_info(adapter->netdev, + "Ignoring MSI setting; " + "support unavailable\n"); + } else + adapter->irq_mode = irq_mode_legency; + + break; + } + } else { + /* default settings */ + /* msix -> msi -> Legacy */ + if (*aflags & RNP_FLAG_MSIX_CAPABLE) + adapter->irq_mode = irq_mode_msix; + else if (*aflags & RNP_FLAG_MSI_CAPABLE) + adapter->irq_mode = irq_mode_msi; + else + adapter->irq_mode = irq_mode_legency; + } + } + +#ifdef CONFIG_PCI_IOV + { /* Single Root I/O Virtualization (SR-IOV) */ + struct rnp_hw *hw = &adapter->hw; + static struct rnp_option opt = { + .type = range_option, + .name = "I/O Virtualization (IOV)", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED, + .arg = { .r = { .min = OPTION_DISABLED, + .max = OPTION_DISABLED } } + }; + + opt.arg.r.max = hw->max_vfs; + if (num_max_vfs > bd) { + unsigned int vfs = max_vfs[bd]; + + if (rnp_validate_option(adapter->netdev, &vfs, &opt)) { + vfs = 0; + DPRINTK(PROBE, INFO, + "max_vfs out of range " + "Disabling SR-IOV.\n"); + } + + adapter->num_vfs = vfs; + + if (vfs) + *aflags |= RNP_FLAG_SRIOV_ENABLED; + else + *aflags &= ~RNP_FLAG_SRIOV_ENABLED; + } else { + if (opt.def == OPTION_DISABLED) { + adapter->num_vfs = 0; + *aflags &= ~RNP_FLAG_SRIOV_ENABLED; + } else { + adapter->num_vfs = opt.def; + *aflags |= RNP_FLAG_SRIOV_ENABLED; + } + } + } + + { /* Sriov Mode */ + unsigned int sriov_mode; + static struct rnp_option opt = { + .type = range_option, + .name = "SRIOV Mode", + .err = "using default of " __MODULE_STRING( + RNP_SRIOV_MAC_MODE), + .def = RNP_SRIOV_MAC_MODE, + .arg = { .r = { .min = 
RNP_SRIOV_MAC_MODE, + .max = RNP_SRIOV_VLAN_MODE } } + }; + + if (num_SRIOV_Mode > bd) { + sriov_mode = SRIOV_Mode[bd]; + if (sriov_mode == OPTION_UNSET) + sriov_mode = RNP_SRIOV_MAC_MODE; + rnp_validate_option(adapter->netdev, &sriov_mode, &opt); + + if (sriov_mode == RNP_SRIOV_VLAN_MODE) + adapter->priv_flags |= + RNP_PRIV_FLAG_SRIOV_VLAN_MODE; + + } else { + /* default settings */ + adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE); + } + } +#endif /* CONFIG_PCI_IOV */ + + { /* max msix count setup */ + int pf_msix_counts; + struct rnp_hw *hw = &adapter->hw; + static struct rnp_option opt = { + .type = range_option, + .name = "Limit Msix Count", + .err = "using default of Un-limit", + .def = OPTION_DISABLED, + .arg = { .r = { .min = RNP_INT_MIN, + .max = RNP_INT_MIN } } + }; + + opt.arg.r.max = hw->max_msix_vectors; + if (num_pf_msix_counts_set > bd) { + pf_msix_counts = pf_msix_counts_set[bd]; + if (pf_msix_counts == OPTION_DISABLED) + pf_msix_counts = 0; + rnp_validate_option(adapter->netdev, &pf_msix_counts, + &opt); + + if (pf_msix_counts) { + if (hw->ops.update_msix_count) + hw->ops.update_msix_count( + hw, pf_msix_counts); + } + } + } +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_pcs.c b/drivers/net/ethernet/mucse/rnp/rnp_pcs.c new file mode 100644 index 0000000000000..e84879c43722d --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_pcs.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include "rnp_pcs.h" +#include "rnp_regs.h" +#include "rnp_common.h" + +static u32 rnp_read_pcs(struct rnp_hw *hw, int num, u32 addr) +{ + u32 reg_hi, reg_lo; + u32 value; + + reg_hi = addr >> 8; + reg_lo = (addr & 0xff) << 2; + wr32(hw, RNP_PCS_BASE(num) + (0xff << 2), reg_hi); + value = rd32(hw, RNP_PCS_BASE(num) + reg_lo); + return value; +} + +static void rnp_write_pcs(struct rnp_hw *hw, int num, u32 addr, u32 value) +{ + u32 reg_hi, reg_lo; + + reg_hi = addr >> 8; + reg_lo = (addr & 0xff) << 2; + wr32(hw, RNP_PCS_BASE(num) + (0xff << 2), reg_hi); + wr32(hw, RNP_PCS_BASE(num) + reg_lo, value); +} + +struct rnp_pcs_operations pcs_ops_generic = { + .read = rnp_read_pcs, + .write = rnp_write_pcs, +}; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_pcs.h b/drivers/net/ethernet/mucse/rnp/rnp_pcs.h new file mode 100644 index 0000000000000..d79d947cc31dc --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_pcs.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef _RNP_PCS_H_ +#define _RNP_PCS_H_ + +extern struct rnp_pcs_operations pcs_ops_generic; + +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_phy.h b/drivers/net/ethernet/mucse/rnp/rnp_phy.h new file mode 100644 index 0000000000000..6c0df9e098f21 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_phy.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef _RNP_PHY_H_ +#define _RNP_PHY_H_ + +#include "rnp_type.h" +#define RNP_I2C_EEPROM_DEV_ADDR 0xA0 +#define RNP_I2C_EEPROM_DEV_ADDR2 0xA2 + +#define RNP_YT8531_PHY_SPEC_CTRL 0x10 +#define RNP_YT8531_PHY_SPEC_CTRL_FORCE_MDIX 0x0020 +#define RNP_YT8531_PHY_SPEC_CTRL_AUTO_MDI_MDIX 0x0060 +#define RNP_YT8531_PHY_SPEC_CTRL_MDIX_CFG_MASK 0x0060 + +/* EEPROM byte offsets */ +#define SFF_MODULE_ID_OFFSET 0x00 +#define SFF_DIAG_SUPPORT_OFFSET 0x5c +#define SFF_MODULE_ID_SFP 0x3 +#define SFF_MODULE_ID_QSFP 0xc +#define SFF_MODULE_ID_QSFP_PLUS 0xd +#define SFF_MODULE_ID_QSFP28 0x11 + +/* Bitmasks */ +#define RNP_SFF_DA_PASSIVE_CABLE 0x4 +#define RNP_SFF_DA_ACTIVE_CABLE 0x8 +#define RNP_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define RNP_SFF_1GBASESX_CAPABLE 0x1 +#define RNP_SFF_1GBASELX_CAPABLE 0x2 +#define RNP_SFF_1GBASET_CAPABLE 0x8 +#define RNP_SFF_10GBASESR_CAPABLE 0x10 +#define RNP_SFF_10GBASELR_CAPABLE 0x20 +#define RNP_SFF_ADDRESSING_MODE 0x4 +#define RNP_I2C_EEPROM_READ_MASK 0x100 +#define RNP_I2C_EEPROM_STATUS_MASK 0x3 +#define RNP_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define RNP_I2C_EEPROM_STATUS_PASS 0x1 +#define RNP_I2C_EEPROM_STATUS_FAIL 0x2 +#define RNP_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +/* Flow control defines */ +#define RNP_TAF_SYM_PAUSE 0x400 +#define RNP_TAF_ASM_PAUSE 0x800 + +/* Bit-shift macros */ +#define RNP_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define RNP_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define RNP_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define RNP_SFF_VENDOR_OUI_TYCO 0x00407600 +#define RNP_SFF_VENDOR_OUI_FTL 0x00906500 +#define RNP_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define RNP_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define RNP_I2C_T_HD_STA 4 +#define RNP_I2C_T_LOW 5 +#define RNP_I2C_T_HIGH 4 +#define RNP_I2C_T_SU_STA 5 +#define RNP_I2C_T_HD_DATA 5 +#define RNP_I2C_T_SU_DATA 1 +#define RNP_I2C_T_RISE 1 +#define RNP_I2C_T_FALL 1 +#define RNP_I2C_T_SU_STO 4 +#define RNP_I2C_T_BUF 5 + +#define RNP_TN_LASI_STATUS_REG 0x9005 +#define RNP_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +/* SFP+ SFF-8472 Compliance code */ +#define RNP_SFF_SFF_8472_UNSUP 0x00 +#endif /* _RNP_PHY_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ptp.c b/drivers/net/ethernet/mucse/rnp/rnp_ptp.c new file mode 100644 index 0000000000000..d3b1130605a2a --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ptp.c @@ -0,0 +1,688 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_regs.h" +#include "rnp_ptp.h" + +/* PTP and HW Timer ops */ +static void config_hw_tstamping(void __iomem *ioaddr, u32 data) +{ + writel(data, ioaddr + PTP_TCR); +} + +static void config_sub_second_increment(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc) +{ + u32 value = readl(ioaddr + PTP_TCR); + unsigned long data; + u32 reg_value; + + /* For GMAC3.x, 4.x versions, in "fine adjustement mode" set sub-second + * increment to twice the number of nanoseconds of a clock cycle. + * The calculation of the default_addend value by the caller will set it + * to mid-range = 2^31 when the remainder of this division is zero, + * which will make the accumulator overflow once every 2 ptp_clock + * cycles, adding twice the number of nanoseconds of a clock cycle : + * 2000000000ULL / ptp_clock. 
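+ * As a worked example (assuming the 50 MHz default clk_ptp_rate that
+ * rnp_ptp_register() programs and fine update mode): the sub-second
+ * increment becomes 2000000000 / 50000000 = 40 ns, the caller computes
+ * 1000000000 / 40 = 25000000, and default_addend = 25000000 * 2^32 /
+ * 50000000 = 2^31, i.e. exactly the mid-range value described above.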
+ */ + if (value & RNP_PTP_TCR_TSCFUPDT) + data = (2000000000ULL / ptp_clock); + else + data = (1000000000ULL / ptp_clock); + + /* 0.465ns accuracy */ + if (!(value & RNP_PTP_TCR_TSCTRLSSR)) + data = (data * 1000) / 465; + + data &= RNP_PTP_SSIR_SSINC_MASK; + + reg_value = data; + if (gmac4) + reg_value <<= RNP_PTP_SSIR_SSINC_SHIFT; + + writel(reg_value, ioaddr + PTP_SSIR); + + if (ssinc) + *ssinc = data; +} + +static int config_addend(void __iomem *ioaddr, u32 addend) +{ + u32 value; + int limit; + + writel(addend, ioaddr + PTP_TAR); + /* issue command to update the addend value */ + value = readl(ioaddr + PTP_TCR); + value |= RNP_PTP_TCR_TSADDREG; + writel(value, ioaddr + PTP_TCR); + + /* wait for present addend update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSADDREG)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) +{ + int limit; + u32 value; + + writel(sec, ioaddr + PTP_STSUR); + writel(nsec, ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= RNP_PTP_TCR_TSINIT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time initialize to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSINIT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +static void get_systime(void __iomem *ioaddr, u64 *systime) +{ + u64 ns; + + /* Get the TSSS value */ + ns = readl(ioaddr + PTP_STNSR); + /* Get the TSS and convert sec time value to nanosecond */ + ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; + + if (systime) + *systime = ns; +} + +static void config_mac_interrupt_enable(void __iomem *ioaddr, bool on) +{ + rnp_wr_reg(ioaddr + RNP_MAC_INTERRUPT_ENABLE, on); +} + +static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, int add_sub, + int gmac4) +{ + u32 value; + int limit; + + if (add_sub) { + /* If the new sec value needs to be subtracted with + * the system time, then MAC_STSUR reg should be + * programmed with (2^32 – ) + */ + if (gmac4) + sec = -sec; + + value = readl(ioaddr + PTP_TCR); + if (value & RNP_PTP_TCR_TSCTRLSSR) + nsec = (RNP_PTP_DIGITAL_ROLLOVER_MODE - nsec); + else + nsec = (RNP_PTP_BINARY_ROLLOVER_MODE - nsec); + } + + writel(sec, ioaddr + PTP_STSUR); + value = (add_sub << RNP_PTP_STNSUR_ADDSUB_SHIFT) | nsec; + writel(value, ioaddr + PTP_STNSUR); + + /* issue command to initialize the system time value */ + value = readl(ioaddr + PTP_TCR); + value |= RNP_PTP_TCR_TSUPDT; + writel(value, ioaddr + PTP_TCR); + + /* wait for present system time adjust/update to complete */ + limit = 10; + while (limit--) { + if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSUPDT)) + break; + mdelay(10); + } + if (limit < 0) + return -EBUSY; + + return 0; +} + +const struct rnp_hwtimestamp mac_ptp = { + .config_hw_tstamping = config_hw_tstamping, + .config_mac_irq_enable = config_mac_interrupt_enable, + .init_systime = init_systime, + .config_sub_second_increment = config_sub_second_increment, + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, +}; + +static int rnp_ptp_adjfreq(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u32 addend; + + if (pf == NULL) { + printk(KERN_DEBUG "adapter_of contail is null\n"); + return 0; + } + + 
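/* scaled_ppm expresses the requested frequency offset in parts per
+ * million with a 16-bit binary fractional part (2^16 units equal 1 ppm);
+ * adjust_by_scaled_ppm() scales the nominal pf->default_addend by that
+ * ratio, and the result is programmed below through config_addend().
+ */
+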
addend = adjust_by_scaled_ppm(pf->default_addend, scaled_ppm); + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->config_addend(pf->ptp_addr, addend); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} + +static int rnp_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u32 sec, nsec; + u32 quotient, reminder; + int neg_adj = 0; + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + if (delta == 0) + return 0; + + quotient = div_u64_rem(delta, 1000000000ULL, &reminder); + sec = quotient; + nsec = reminder; + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->adjust_systime(pf->ptp_addr, sec, nsec, neg_adj, + pf->gmac4); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} + +static int rnp_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + u64 ns = 0; + + spin_lock_irqsave(&pf->ptp_lock, flags); + + pf->hwts_ops->get_systime(pf->ptp_addr, &ns); + + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int rnp_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct rnp_adapter *pf = + container_of(ptp, struct rnp_adapter, ptp_clock_ops); + unsigned long flags; + + spin_lock_irqsave(&pf->ptp_lock, flags); + pf->hwts_ops->init_systime(pf->ptp_addr, ts->tv_sec, ts->tv_nsec); + spin_unlock_irqrestore(&pf->ptp_lock, flags); + + return 0; +} + +static int rnp_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + /*TODO add support for enable the option 1588 feature PPS Auxiliary */ + return -EOPNOTSUPP; +} + +int rnp_ptp_get_ts_config(struct rnp_adapter *pf, struct ifreq *ifr) +{ + struct hwtstamp_config *config = &pf->tstamp_config; + + return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? 
-EFAULT : + 0; +} + +static int rnp_ptp_setup_ptp(struct rnp_adapter *pf, u32 value) +{ + u32 sec_inc = 0; + u64 temp = 0; + struct timespec64 now; + + /* For now just use external clock (the kernel-system clock) */ + /* 1.Mask the Timestamp Trigger interrupt */ + /* 2.enable time stamping */ + /* 2.1 clear all bytes about time ctrl reg*/ + + pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, value); + /* 3.Program the PTPclock frequency */ + /* program Sub Second Increment reg + * we use kernel-system clock + */ + pf->hwts_ops->config_sub_second_increment( + pf->ptp_addr, pf->clk_ptp_rate, pf->gmac4, &sec_inc); + /* 4.If use fine correction approach then, + * Program MAC_Timestamp_Addend register + */ + if (sec_inc == 0) { + printk(KERN_DEBUG "%s:%d sec_inc is zero, this is a bug\n", + __func__, __LINE__); + return -EFAULT; + } + temp = div_u64(1000000000ULL, sec_inc); + /* Store sub second increment and flags for later use */ + pf->sub_second_inc = sec_inc; + pf->systime_flags = value; + /* calculate the default addend value: + * formula is : + * addend = (2^32)/freq_div_ratio; + * where, freq_div_ratio = 1e9ns/sec_inc + */ + temp = (u64)(temp << 32); + + if (pf->clk_ptp_rate == 0) { + pf->clk_ptp_rate = 1000; + printk(KERN_DEBUG "%s:%d clk_ptp_rate is zero\n", __func__, + __LINE__); + } + + pf->default_addend = div_u64(temp, pf->clk_ptp_rate); + + pf->hwts_ops->config_addend(pf->ptp_addr, pf->default_addend); + /* 5.Poll wait for the TCR Update Addend Register*/ + /* 6.enable Fine Update method */ + /* 7.program the second and nanosecond register*/ + /*TODO If we need to enable one-step timestamp */ + + /* initialize system time */ + ktime_get_real_ts64(&now); + + /* lower 32 bits of tv_sec are safe until y2106 */ + pf->hwts_ops->init_systime(pf->ptp_addr, (u32)now.tv_sec, now.tv_nsec); + + return 0; +} + +int rnp_ptp_set_ts_config(struct rnp_adapter *pf, struct ifreq *ifr) +{ + struct hwtstamp_config config; + u32 ptp_v2 = 0; + u32 tstamp_all = 0; + u32 ptp_over_ipv4_udp = 0; + u32 ptp_over_ipv6_udp = 0; + u32 ptp_over_ethernet = 0; + u32 snap_type_sel = 0; + u32 ts_master_en = 0; + u32 ts_event_en = 0; + u32 value = 0; + s32 ret = -1; + + if (!(pf->flags2 & RNP_FLAG2_PTP_ENABLED)) { + pci_alert(pf->pdev, "No support for HW time stamping\n"); + pf->ptp_tx_en = 0; + pf->ptp_rx_en = 0; + + return -EOPNOTSUPP; + } + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + netdev_info(pf->netdev, + "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", + __func__, config.flags, config.tx_type, config.rx_filter); + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + if (config.tx_type != HWTSTAMP_TX_OFF && + config.tx_type != HWTSTAMP_TX_ON) + return -ERANGE; + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* time stamp no incoming packet at all */ + config.rx_filter = HWTSTAMP_FILTER_NONE; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* PTP v1, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* 'mac' hardware can support Sync, Pdelay_Req and + * Pdelay_Resp by setting bit14 and bits17/16 to 01 + * This leaves Delay_Req timestamps out.
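+ * (here SNAPTYPSEL_1 is set while TSEVNTENA stays clear, so per the
+ * note in rnp_ptp.h the hardware actually snapshots Sync, Follow_Up,
+ * Delay_Req, Delay_Resp and the Pdelay messages).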
+ * Enable all events *and* general purpose message + * timestamping + */ + snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + /* PTP v1, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; + /* take time stamp for SYNC messages only */ + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + /* PTP v1, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; + /* take time stamp for Delay_Req messages only */ + ts_master_en = RNP_PTP_TCR_TSMSTRENA; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + /* PTP v2, UDP, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + + /* take time stamp for all event messages */ + snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* PTP v2, UDP, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* PTP v2, UDP, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = RNP_PTP_TCR_TSMSTRENA; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_EVENT: + /* PTP v2/802.AS1 any layer, any kind of event packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = RNP_PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* PTP v2/802.AS1, any layer, Sync packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for SYNC messages only */ + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = RNP_PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* PTP v2/802.AS1, any layer, Delay_req packet */ + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + ptp_v2 = RNP_PTP_TCR_TSVER2ENA; + /* take time stamp for Delay_Req messages only */ + ts_master_en = RNP_PTP_TCR_TSMSTRENA; + ts_event_en = RNP_PTP_TCR_TSEVNTENA; + + ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA; + ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA; + ptp_over_ethernet = RNP_PTP_TCR_TSIPENA; + break; + + case HWTSTAMP_FILTER_NTP_ALL: + case HWTSTAMP_FILTER_ALL: + /* time stamp any incoming packet */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_all = RNP_PTP_TCR_TSENALL; + break; + + default: + return -ERANGE; + } + + pf->ptp_rx_en = ((config.rx_filter 
== HWTSTAMP_FILTER_NONE) ? 0 : 1); + pf->ptp_tx_en = config.tx_type == HWTSTAMP_TX_ON; + + netdev_info( + pf->netdev, + "ptp config rx filter 0x%.2x tx_type 0x%.2x rx_en[%d] tx_en[%d]\n", + config.rx_filter, config.tx_type, pf->ptp_rx_en, pf->ptp_tx_en); + if (!pf->ptp_rx_en && !pf->ptp_tx_en) + /*rx and tx is not use hardware ts so clear the ptp register */ + pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, 0); + else { + value = (RNP_PTP_TCR_TSENA | RNP_PTP_TCR_TSCFUPDT | + RNP_PTP_TCR_TSCTRLSSR | tstamp_all | ptp_v2 | + ptp_over_ethernet | ptp_over_ipv6_udp | + ptp_over_ipv4_udp | ts_master_en | snap_type_sel); + + ret = rnp_ptp_setup_ptp(pf, value); + if (ret < 0) + return ret; + } + pf->ptp_config_value = value; + memcpy(&pf->tstamp_config, &config, sizeof(config)); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : + 0; +} + +/* structure describing a PTP hardware clock */ +static struct ptp_clock_info rnp_ptp_clock_ops = { + .owner = THIS_MODULE, + .name = "rnp ptp", + .max_adj = 50000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + /* will be overwritten in stmmac_ptp_register */ + .n_pins = 0, + /* should be 0 if not set */ + .adjfine = rnp_ptp_adjfreq, + .adjtime = rnp_ptp_adjtime, + .gettime64 = rnp_ptp_gettime, + .settime64 = rnp_ptp_settime, + .enable = rnp_ptp_feature_enable, +}; + +int rnp_ptp_register(struct rnp_adapter *pf) +{ + pf->hwts_ops = &mac_ptp; + + pf->ptp_tx_en = 0; + pf->ptp_rx_en = 0; + + spin_lock_init(&pf->ptp_lock); + pf->flags2 |= RNP_FLAG2_PTP_ENABLED; + pf->ptp_clock_ops = rnp_ptp_clock_ops; + + /* default mac clock rate is 50Mhz */ + pf->clk_ptp_rate = 50000000; + if (pf->pdev == NULL) + printk(KERN_DEBUG "pdev dev is null\n"); + + pf->ptp_clock = ptp_clock_register(&pf->ptp_clock_ops, &pf->pdev->dev); + if (pf->ptp_clock == NULL) + pci_err(pf->pdev, "ptp clock register failed\n"); + + if (IS_ERR(pf->ptp_clock)) { + pci_err(pf->pdev, "ptp_clock_register failed\n"); + pf->ptp_clock = NULL; + } else { + pci_info(pf->pdev, "registered PTP clock\n"); + } + + return 0; +} + +void rnp_ptp_unregister(struct rnp_adapter *pf) +{ + /*1. stop the ptp module*/ + if (pf->ptp_clock) { + ptp_clock_unregister(pf->ptp_clock); + pf->ptp_clock = NULL; + pr_debug("Removed PTP HW clock successfully on %s\n", + "rnp_ptp"); + } +} + +void rnp_tx_hwtstamp_work(struct work_struct *work) +{ + struct rnp_adapter *adapter = + container_of(work, struct rnp_adapter, tx_hwtstamp_work); + void __iomem *ioaddr = adapter->hw.hw_addr; + + /* 1. read port belone timestatmp status reg */ + /* 2. status enabled read nsec and sec reg*/ + /* 3. 
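clear the hardware status and report the timestamp to the stack via skb_tstamp_tx()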
*/ + u64 nanosec = 0, sec = 0; + + if (!adapter->ptp_tx_skb) { + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + return; + } + + if (rnp_rd_reg(ioaddr + RNP_ETH_PTP_TX_TSVALUE_STATUS(0)) & 0x01) { + struct sk_buff *skb = adapter->ptp_tx_skb; + struct skb_shared_hwtstamps shhwtstamps; + u64 txstmp = 0; + /* read and add nsec, sec turn to nsec*/ + + nanosec = rnp_rd_reg(ioaddr + RNP_ETH_PTP_TX_LTIMES(0)); + sec = rnp_rd_reg(ioaddr + RNP_ETH_PTP_TX_HTIMES(0)); + /* when we read the timestamp finish need to notice the hardware + * that the timestamp need to update via set tx_hwts_clear-reg + * from high to low + */ + rnp_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0), + PTP_GET_TX_HWTS_FINISH); + rnp_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0), + PTP_GET_TX_HWTS_UPDATE); + + txstmp = nanosec & PTP_HWTX_TIME_VALUE_MASK; + txstmp += (sec & PTP_HWTX_TIME_VALUE_MASK) * 1000000000ULL; + + /* Clear the global tx_hwtstamp_skb pointer and force writes + * prior to notifying the stack of a Tx timestamp. + */ + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(txstmp); + adapter->ptp_tx_skb = NULL; + /* force write prior to skb_tstamp_tx + * because the xmit will re used the point to store ptp skb + */ + wmb(); + + skb_tstamp_tx(skb, &shhwtstamps); + dev_consume_skb_any(skb); + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + } else if (time_after(jiffies, + adapter->tx_hwtstamp_start + + adapter->tx_timeout_factor * HZ)) { + /* this function will mark the skb drop*/ + if (adapter->ptp_tx_skb) + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state); + netdev_warn(adapter->netdev, "clearing Tx timestamp hang\n"); + } else { + /* reschedule to check later */ + schedule_work(&adapter->tx_hwtstamp_work); + } +} + +void rnp_ptp_get_rx_hwstamp(struct rnp_adapter *adapter, + union rnp_rx_desc *desc, struct sk_buff *skb) +{ + u64 ns = 0; + u64 tsvalueh = 0, tsvaluel = 0; + struct skb_shared_hwtstamps *hwtstamps = NULL; + + if (!skb || !adapter->ptp_rx_en) { + netdev_dbg(adapter->netdev, + "hwstamp skb is null or " + "rx_en iszero %u\n", + adapter->ptp_rx_en); + return; + } + + if (likely(!((desc->wb.cmd) & RNP_RXD_STAT_PTP))) + return; + hwtstamps = skb_hwtstamps(skb); + /* because of rx hwstamp store before the mac head + * skb->head and skb->data is point to same location when call alloc_skb + * so we must move 16 bytes the skb->data to the mac head location + * but for the head point if we need move the skb->head need to be diss + */ + /* low8bytes is null high8bytes is timestamp + * high32bit is seconds low32bits is nanoseconds + */ + skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE, &tsvalueh, + RNP_RX_SEC_SIZE); + skb_copy_from_linear_data_offset(skb, + RNP_RX_TIME_RESERVE + RNP_RX_SEC_SIZE, + &tsvaluel, RNP_RX_NANOSEC_SIZE); + skb_pull(skb, RNP_RX_HWTS_OFFSET); + tsvalueh = ntohl(tsvalueh); + tsvaluel = ntohl(tsvaluel); + + ns = tsvaluel & RNP_RX_NSEC_MASK; + ns += ((tsvalueh & RNP_RX_SEC_MASK) * 1000000000ULL); + + netdev_dbg(adapter->netdev, + "ptp get hardware ts-sec %llu ts-nanosec %llu\n", tsvalueh, + tsvaluel); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +void rnp_ptp_reset(struct rnp_adapter *adapter) +{ + rnp_ptp_setup_ptp(adapter, adapter->ptp_config_value); +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ptp.h b/drivers/net/ethernet/mucse/rnp/rnp_ptp.h new file mode 100644 index 0000000000000..a62e8128f0a49 --- 
/dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_ptp.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. */ + +#ifndef __RNP_PTP_H__ +#define __RNP_PTP_H__ + +struct rnp_hwtimestamp { + void (*config_hw_tstamping)(void __iomem *ioaddr, u32 data); + void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc); + void (*config_mac_irq_enable)(void __iomem *ioaddr, bool on); + int (*init_systime)(void __iomem *ioaddr, u32 sec, u32 nsec); + int (*config_addend)(void __iomem *ioaddr, u32 addend); + int (*adjust_systime)(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4); + void (*get_systime)(void __iomem *ioaddr, u64 *systime); +}; +/* IEEE 1588 PTP register offsets */ +#define PTP_TCR 0x00 /* Timestamp Control Reg */ +#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */ +#define PTP_STSR 0x08 /* System Time – Seconds Regr */ +#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */ +#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ +#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ +#define PTP_TAR 0x18 /* Timestamp Addend Reg */ + +#define RNP_PTP_STNSUR_ADDSUB_SHIFT 31 +#define RNP_PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ +#define RNP_PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */ + +/* PTP Timestamp control register defines */ +#define RNP_PTP_TCR_TSENA BIT(0) /*Timestamp Enable*/ +#define RNP_PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */ +#define RNP_PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */ +#define RNP_PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */ +#define RNP_PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */ +#define RNP_PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */ +#define RNP_PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */ +#define RNP_PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */ +#define RNP_PTP_TCR_TSVER2ENA \ + BIT(10) /* Enable PTP packet Processing for Version 2 Format */ +#define RNP_PTP_TCR_TSIPENA \ + BIT(11) /* Enable Processing of PTP over Ethernet Frames */ +#define RNP_PTP_TCR_TSIPV6ENA \ + BIT(12) /* Enable Processing of PTP Frames Sent over IPv6-UDP */ +#define RNP_PTP_TCR_TSIPV4ENA \ + BIT(13) /* Enable Processing of PTP Frames Sent over IPv4-UDP */ +#define RNP_PTP_TCR_TSEVNTENA \ + BIT(14) /* Enable Timestamp Snapshot for Event Messages */ +#define RNP_PTP_TCR_TSMSTRENA \ + BIT(15) /* Enable Snapshot for Messages Relevant to Master */ +/* Note 802.1 AS Is work Over Ethernet FramesC_Sub_Second_Incremen + * and Normal PTP Is work Oveer UDP + */ + +/* Select PTP packets for Taking Snapshots + * On mac specifically: + * Enable SYNC, Pdelay_Req, Pdelay_Resp when TSEVNTENA is enabled. 
+ * or + * Enable SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp, + * Pdelay_Resp_Follow_Up if TSEVNTENA is disabled + */ +#define RNP_PTP_TCR_SNAPTYPSEL_1 BIT(16) +#define RNP_PTP_TCR_TSENMACADDR \ + BIT(18) /* Enable MAC address for PTP Frame Filtering */ +#define RNP_PTP_TCR_ESTI \ + BIT(20) /* External System Time Input Or MAC Internal Clock*/ +#define RNP_PTP_TCR_AV8021ASMEN BIT(28) /* AV802.1 AS Mode Enable*/ +/* Sub Second increament define */ +#define RNP_PTP_SSIR_SSINC_MASK (0xff) /* Sub-second increment value */ +#define RNP_PTP_SSIR_SSINC_SHIFT (16) /* Sub-second increment offset */ + +#define RNP_MAC_TXTSC BIT(15) /* TX timestamp reg is fill complete */ +#define RNP_MAC_TXTSSTSLO GENMASK(30, 0) /*nano second avalid value */ + +#define RNP_RX_SEC_MASK GENMASK(30, 0) +#define RNP_RX_NSEC_MASK GENMASK(30, 0) +#define RNP_RX_TIME_RESERVE (8) +#define RNP_RX_SEC_SIZE (4) +#define RNP_RX_NANOSEC_SIZE (4) +#define RNP_RX_HWTS_OFFSET \ + (RNP_RX_SEC_SIZE + RNP_RX_NANOSEC_SIZE + RNP_RX_TIME_RESERVE) + +#define PTP_HWTX_TIME_VALUE_MASK GENMASK(31, 0) +#define PTP_GET_TX_HWTS_FINISH (1) +#define PTP_GET_TX_HWTS_UPDATE (0) +/* hardware ts can't so fake ts from the software clock */ +#define DEBUG_PTP_HARD_SOFTWAY + +int rnp_ptp_get_ts_config(struct rnp_adapter *pf, struct ifreq *ifr); +int rnp_ptp_set_ts_config(struct rnp_adapter *pf, struct ifreq *ifr); +int rnp_ptp_register(struct rnp_adapter *pf); +void rnp_ptp_unregister(struct rnp_adapter *pf); + +void rnp_ptp_get_rx_hwstamp(struct rnp_adapter *pf, union rnp_rx_desc *desc, + struct sk_buff *skb); +void rnp_tx_hwtstamp_work(struct work_struct *work); +void rnp_ptp_reset(struct rnp_adapter *adapter); +#endif diff --git a/drivers/net/ethernet/mucse/rnp/rnp_regs.h b/drivers/net/ethernet/mucse/rnp/rnp_regs.h new file mode 100644 index 0000000000000..6fa8fb1ef83af --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_regs.h @@ -0,0 +1,820 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#ifndef RNP_REGS_H +#define RNP_REGS_H + +/* BAR4 memory */ +/* ------------------------------------------*/ +/* module | size | start | end */ +/* DMA | 64KB | 0_0000H | 0_FFFFH */ +/* ETH | 64KB | 1_0000H | 1_FFFFH */ +/* REG | 64KB | 3_0000H | 3_FFFFH */ +/* SERDES | 128KB | 4_0000H | 5_FFFFH */ +/* XLMAC | 256KB | 6_0000H | 9_FFFFH */ +/* MSIX | 64KB | A_0000H | A_FFFFH */ +/* SWITCH | 64KB | B_0000H | B_FFFFH */ +/* TCAM | 256KB | C_0000H | F_FFFFH */ +/* ------------------------------------------*/ + +/* ==================== RNP-DMA Global Registers ==================== */ +/* n10 */ +#define RNP10_RING_BASE (0x8000) +/* n20 */ +#define RNP20_RING_BASE (0x8000) +#define RING_OFFSET(queue_idx) (0x100 * (queue_idx)) +#define RNP_DMA_VERSION (0x0000) +#define RNP_DMA_CONFIG (0x0004) +#define RNP_DMA_AXI_READY (0x0014) +#define DMA_MAC_LOOPBACK (1 << 0) +#define DMA_SWITCH_LOOPBACK (1 << 1) +#define DMA_VEB_BYPASS (1 << 4) +#define DMA_AXI_ORDER (1 << 5) +#define DMA_RX_PADDING (1 << 8) +#define DMA_MAP_MODE(n) (n << 12) +#define DMA_RX_FRAGMENT_BYTES(n) (((n) / 16) << 16) +#define RNP_DMA_STATUS (0x0008) +#define RNP_DMA_RX_DATA_PROG_FULL_THRESH (0x00a0) +#define DMA_RING_NUM (0xff << 24) +#define RC_CONTROL_HW (0x01) +#define RC_CONTROL_PHY_DRIVER (0x02) +#define RC_JUMP_STATUS (0x04) +#define RC_PHY_LINK_DONE (0x08) +#define RC_LINK_CHANGE (0x10) +#define RNP_DMA_DUMY (0x000c) +#define RNP_DMA_RX_START (0x10) +#define RNP_DMA_RX_READY (0x14) +#define RNP_DMA_TX_START (0x18) +#define RNP_DMA_TX_READY (0x1c) +#define RNP_DMA_INT_STAT (0x20) +#define RNP_DMA_INT_MASK (0x24) +#define TX_INT_MASK (1 << 1) +#define RX_INT_MASK (1 << 0) +#define RNP_DMA_INT_CLR (0x28) +#define RNP_DMA_INT_TRIG (0x2c) +#define RNP_DMA_AXI_EN (0x0010) +#define RX_AXI_RW_EN (0x03 << 0) +#define TX_AXI_RW_EN (0x03 << 2) +#define RNP_DMA_AXI_STAT (0x0014) +#define RNP_VEB_MAC_MASK_LO (0x0020) +#define RNP_VEB_MAC_MASK_HI (0x0024) +#define RNP_VEB_VLAN_MASK (0x0028) +#define DEBUG_PROBE_NUM 16 +#define RNP_DMA_DEBUG_PROBE_LO_REG(n) (0x0100 + 0x08 * (n)) +#define RNP_DMA_DEBUG_PROBE_HI_REG(n) (0x0100 + 0x08 * (n)) +#define DEBUG_CNT_NUM 76 +#define RNP_DMA_DEBUG_CNT(n) (0x0200 + 0x04 * (n)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_0 (RNP_DMA_DEBUG_CNT(17)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_1 (RNP_DMA_DEBUG_CNT(18)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_2 (RNP_DMA_DEBUG_CNT(19)) +#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_3 (RNP_DMA_DEBUG_CNT(20)) +#define RNP_DMA_STATS_DMA_TO_SWITCH (RNP_DMA_DEBUG_CNT(21)) +#define RNP_DMA_STATS_MAC_TO_DMA (RNP_DMA_DEBUG_CNT(22)) +#define RNP_DMA_STATS_SWITCH_TO_DMA (RNP_DMA_DEBUG_CNT(23)) +#define RNP_PCI_WR_TO_HOST (RNP_DMA_DEBUG_CNT(34)) +/* RX-Queue Registers */ +#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI (0x30) +#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO (0x34) +#define RNP_DMA_REG_RX_DESC_BUF_LEN (0x38) +#define RNP_DMA_REG_RX_DESC_BUF_HEAD (0x3c) +#define RNP_DMA_REG_RX_DESC_BUF_TAIL (0x40) +#define RNP_DMA_REG_RX_DESC_FETCH_CTRL (0x44) +#define RNP_DMA_REG_RX_INT_DELAY_TIMER (0x48) +#define RNP_DMA_REG_RX_INT_DELAY_PKTCNT (0x4c) +#define RNP_DMA_REG_RX_ARB_DEF_LVL (0x50) +#define PCI_DMA_REG_RX_DESC_TIMEOUT_TH (0x54) +#define PCI_DMA_REG_RX_SCATTER_LENGTH (0x58) +/* TX-Queue Registers */ +#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI (0x60) +#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO (0x64) +#define RNP_DMA_REG_TX_DESC_BUF_LEN (0x68) +#define RNP_DMA_REG_TX_DESC_BUF_HEAD (0x6c) +#define RNP_DMA_REG_TX_DESC_BUF_TAIL (0x70) +#define 
RNP_DMA_REG_TX_DESC_FETCH_CTRL (0x74) +#define RNP_DMA_REG_TX_INT_DELAY_TIMER (0x78) +#define RNP_DMA_REG_TX_INT_DELAY_PKTCNT (0x7c) +#define RNP_DMA_REG_TX_ARB_DEF_LVL (0x80) +#define RNP_DMA_REG_TX_FLOW_CTRL_TH (0x84) +#define RNP_DMA_REG_TX_FLOW_CTRL_TM (0x88) +#define RNP_DMA_PKT_FIFO_DATA_PROG_FULL_THRESH (0x0098) +#define VEB_TBL_CNTS 64 +#define RNP_DMA_PORT_VBE_MAC_LO_TBL(port, vf) \ + (0x80A0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_PORT_VBE_MAC_HI_TBL(port, vf) \ + (0x80B0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_PORT_VEB_VID_TBL(port, vf) (0x80C0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_PORT_VEB_VF_RING_TBL(port, vf) \ + (0x80D0 + 4 * (port) + 0x100 * (vf)) +#define RNP_DMA_STATS_MAC_TO_MAC (0x1b0) +#define RNP_DMA_STATS_SWITCH_TO_SWITCH (0x1a4) + +/* ==================== RNP-ETH Global Registers ==================== */ +#define RNP_ETH_BASE (0x10000) +#define RNP10_ETH_BASE (0x10000) +#define RNP10_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP10_RAH_AV 0x80000000 +#define RNP10_ETH_RAR_RL(n) (0xa000 + 0x04 * n) +#define RNP10_ETH_RAR_RH(n) (0xa400 + 0x04 * n) +#define RNP10_ETH_DMAC_FCTRL (0x9110) +#define RNP10_ETH_DMAC_MCSTCTRL (0x9114) +#define RNP10_MCSTCTRL_MULTICASE_TBL_EN (1 << 2) +#define RNP10_MCSTCTRL_UNICASE_TBL_EN (1 << 3) +#define RNP10_VM_DMAC_MPSAR_RING(entry) \ + (0xb400 + (4 * (entry))) +#define RNP10_ETH_MULTICAST_HASH_TABLE(n) (0xac00 + 0x04 * n) +#define RNP10_ETH_LAYER2_ETQF(n) (0x9200 + 0x04 * (n)) +#define RNP10_ETH_LAYER2_ETQS(n) (0x9240 + 0x04 * (n)) + +/* ==================== RNP10-TCAM Global Registers ==================== */ +#define RNP10_TCAM_BASE (0xc0000 - RNP10_ETH_BASE) +#define RNP10_TCAM_SDPQF(n) \ + (RNP10_TCAM_BASE + 0x00 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_DAQF(n) \ + (RNP10_TCAM_BASE + 0x04 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_SAQF(n) \ + (RNP10_TCAM_BASE + 0x08 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_APQF(n) \ + (RNP10_TCAM_BASE + 0x0c + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_ETH_TCAM_EN (0x8024) +#define RNP10_TCAM_SDPQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x20 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_DAQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x24 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_SAQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x28 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_APQF_MASK(n) \ + (RNP10_TCAM_BASE + 0x2c + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP10_TCAM_MODE (RNP10_TCAM_BASE + 0x20000) +#define RNP10_TCAM_CACHE_ENABLE (RNP10_TCAM_BASE + 0x20004) +#define RNP10_TCAM_CACHE_ADDR_CLR (RNP10_TCAM_BASE + 0x20008) +#define RNP10_TCAM_CACHE_REQ_CLR (RNP10_TCAM_BASE + 0x2000c) +#define RNP10_TOP_ETH_TCAM_CONFIG_ENABLE (0x30000 - RNP10_ETH_BASE + 0x8050) +#define RNP10_VEB_TBL_CNTS 64 +#define RNP10_DMA_PORT_VBE_MAC_LO_TBL(port, vf) \ + (0x80A0 + 4 * (port) + 0x100 * (vf)) +#define RNP10_DMA_PORT_VBE_MAC_HI_TBL(port, vf) \ + (0x80B0 + 4 * (port) + 0x100 * (vf)) +#define RNP10_DMA_PORT_VEB_VID_TBL(port, vf) \ + (0x80C0 + 4 * (port) + 0x100 * (vf)) +#define RNP10_DMA_PORT_VEB_VF_RING_TBL(port, vf) \ + (0x80D0 + 4 * (port) + 0x100 * (vf)) +/* + * [3:0]: + * 4'b0000:RSS disable + * 4'b0001:RSS only + * 4'b0100:DCB and RSS--8*16 + * 4'b1010:POOLS and RSS--32*4 + * [3] :virtual enable + * [16]:ipv4_hash_tcp_enable + * [17]:ipv4_hash_enable + * [20]:ipv6_hash_enable + * [21]:ipv6_hash_tcp_enable + * [22]:ipv4_hash_udp_enable + * [23]:ipv6_hash_udp_enable + * [24]:ipv4_hash_sctp_enable + * [25]:ipv6_hash_sctp_enable + */ +#define 
RNP10_ETH_RSS_CONTROL (0x92a0) +#define RNP10_IOV_ENABLED (1 << 3) +#define RNP10_ETH_RSS_KEY (0x92d0) +#define RNP10_ETH_TC_IPH_OFFSET_TABLE(n) (0xe800 + 0x04 * (n)) +#define RNP10_ETH_RSS_INDIR_TBL(n) (0xe000 + 0x04 * (n)) +#define RNP10_ETH_VLAN_FILTER_TABLE(n) (0xb000 + 0x04 * (n)) +#define RNP10_VFTA RNP10_ETH_VLAN_FILTER_TABLE +#define RNP10_VLVF(idx) (0xb600 + 4 * (idx)) +#define RNP10_VLVF_TABLE(idx) (0xb700 + 4 * (idx)) +#define RNP10_ETH_TUPLE5_SAQF(n) (0xc000 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_DAQF(n) (0xc400 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_SDPQF(n) (0xc800 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_FTQF(n) (0xcc00 + 0x04 * (n)) +#define RNP10_ETH_TUPLE5_POLICY(n) (0xd000 + 0x04 * (n)) +#define RNP10_ETH_VLAN_FILTER_ENABLE (0x9118) +#define RNP10_ETH_DEFAULT_RX_MIN_LEN (0x80f0) +#define RNP10_ETH_DEFAULT_RX_MAX_LEN (0x80f4) +#define RNP10_ETH_VLAN_VME_REG(n) (0x8040 + 0x04 * (n)) +#define RNP10_ETH_VXLAN_PORT (0x8010) +#define RNP10_FCTRL_BPE BIT(10) +#define RNP10_FCTRL_UPE BIT(9) +#define RNP10_FCTRL_MPE BIT(8) +#define RNP10_HOST_FILTER_EN (0x801c) +#define RNP10_REDIR_EN (0x8030) +#define RNP10_ETH_SCTP_CHECKSUM_EN (0x8038) +#define RNP10_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP10_ETH_DISABLE_RSS (0) +#define RNP10_COMM_REG0 0x30000 +#define RNP10_TOP_NIC_CONFIG (RNP10_COMM_REG0 + 0x0004) +#define RNP10_TOP_NIC_REST_N (RNP10_COMM_REG0 + 0x0010) +#define RNP10_TOP_ETH_BUG_40G_PATCH (RNP10_COMM_REG0 + 0x801c) +#define RNP10_TOP_MAC_OUI (RNP10_COMM_REG0 + 0xc004) +#define RNP10_TOP_MAC_SN (RNP10_COMM_REG0 + 0xc008) +#define RNP10_ETH_TUNNEL_MOD (0x8004) +#define INNER_L4_BIT BIT(6) +#define PKT_LEN_ERR (2) +#define HDR_LEN_ERR (1) +#define RNP10_ETH_ERR_MASK_VECTOR (0x8060) +#define RNP10_ETH_BYPASS (0x8000) +#define RNP10_ETH_DEFAULT_RX_RING (0x806c) +#define DROP_ALL_THRESH (2046) +#define RECEIVE_ALL_THRESH (0x270) +#define RNP10_ETH_RX_PROGFULL_THRESH_PORT (0x8070) +#define RNP10_ETH_HIGH_WATER(n) (0x80c0 + n * (0x08)) +#define RNP10_ETH_LOW_WATER(n) (0x80c4 + n * (0x08)) +#define RNP10_ETH_WRAP_FIELD_TYPE (0x805c) +#define RNP10_MRQC_IOV_EN (0x92a0) +#define RNP10_ETH_SYNQF (0x9290) +#define RNP10_ETH_SYNQF_PRIORITY (0x9294) +#define RNP10_RXTRANS_DROP(port) (0x8904 + 0x40 * (port)) +#define RNP10_RXTRANS_WDT_ERR_PKTS(port) (0x8908 + 0x40 * (port)) +#define RNP10_RXTRANS_CODE_ERR_PKTS(port) (0x890c + 0x40 * (port)) +#define RNP10_RXTRANS_CRC_ERR_PKTS(port) (0x8910 + 0x40 * (port)) +#define RNP10_RXTRANS_SLEN_ERR_PKTS(port) (0x8914 + 0x40 * (port)) +#define RNP10_RXTRANS_GLEN_ERR_PKTS(port) (0x8918 + 0x40 * (port)) +#define RNP10_RXTRANS_IPH_ERR_PKTS(port) (0x891c + 0x40 * (port)) +#define RNP10_RXTRANS_CSUM_ERR_PKTS(port) (0x8920 + 0x40 * (port)) +#define RNP10_RXTRANS_LEN_ERR_PKTS(port) (0x8924 + 0x40 * (port)) +#define RNP10_RXTRANS_CUT_ERR_PKTS(port) (0x8928 + 0x40 * (port)) +#define RNP10_ETH_DECAP_PKT_DROP_NUM(port) (0x82e8 + 0x04 * (port)) +#define RNP10_ETH_INVALID_DROP_PKTS RNP10_ETH_DECAP_PKT_DROP_NUM(0) +#define RNP10_ETH_FILTER_DROP_PKTS RNP10_ETH_DECAP_PKT_DROP_NUM(1) +#define RNP10_ETH_RX_DEBUG(n) (0x8400 + 0x04 * (n)) +#define RNP10_ETH_RX_FC_DEBUG0_NUM RNP10_ETH_RX_DEBUG(0) +#define RNP10_ETH_RX_FC_DEBUG1_NUM RNP10_ETH_RX_DEBUG(1) +#define RNP10_ETH_RX_DIS_DEBUG0_NUM RNP10_ETH_RX_DEBUG(2) +#define RNP10_ETH_RX_DIS_DEBUG1_NUM RNP10_ETH_RX_DEBUG(3) +#define RNP10_ETH_HOST_L2_DROP_PKTS RNP10_ETH_RX_DEBUG(4) +#define RNP10_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP10_ETH_RX_DEBUG(5) +#define RNP10_ETH_ETYPE_DROP_PKTS RNP10_ETH_RX_DEBUG(6) +#define 
RNP10_ETH_TCP_SYN_DROP_PKTS RNP10_ETH_RX_DEBUG(7) +#define RNP10_ETH_REDIR_TUPLE5_DROP_PKTS RNP10_ETH_RX_DEBUG(8) +#define RNP10_ETH_REDIR_TCAM_DROP_PKTS RNP10_ETH_RX_DEBUG(9) +#define RNP10_MAC_STATS_BROADCAST_LOW (0x0918) +#define RNP10_MAC_STATS_BROADCAST_HIGH (0x091c) +#define RNP10_MAC_STATS_MULTICAST_LOW (0x0920) +#define RNP10_MAC_STATS_MULTICAST_HIGH (0x0924) +#define RNP10_MAC_STATS_RX_PAUSE_COUNT_LOW (0x0988) +#define RNP10_MAC_STATS_RX_PAUSE_COUNT_HIGH (0x098C) +#define RNP10_MAC_STATS_TX_PAUSE_COUNT_LOW (0x0894) +#define RNP10_MAC_STATS_TX_PAUSE_COUNT_HIGH (0x898) +#define RNP10_ETH_DECAP_BMC_DROP_NUM (0x82f4) +#define RNP10_ETH_DECAP_SWITCH_DROP_NUM (0x82f8) +#define RNP10_VLVF(idx) (0xb600 + 4 * (idx)) +#define WATCHDOG_TIMER_ERROR BIT(0) +#define RUN_FRAME_ERROR BIT(1) +#define GAINT_FRAME_ERROR BIT(2) +#define LATE_COLLISION_ERROR BIT(3) +#define GMII_ERROR BIT(4) +#define DRIBBLING_BIT_ERROR BIT(5) +#define CRC_ERROR BIT(6) +#define LENGTH_ERROR BIT(8) +#define DA_FILTER_ERROR BIT(9) +#define SA_FILTER_ERROR BIT(10) + +/* ================================================================== */ +#define ETH_ERR_SCTP (1 << 4) +#define ETH_ERR_L4 (1 << 3) +#define ETH_ERR_L3 (1 << 2) +#define ETH_ERR_PKT_LEN_ERR (1 << 1) +#define ETH_ERR_HDR_LEN_ERR (1 << 0) +#define ETH_IGNORE_ALL_ERR \ + (ETH_ERR_SCTP | ETH_ERR_L4 | ETH_ERR_L3 | ETH_ERR_PKT_LEN_ERR | \ + ETH_ERR_HDR_LEN_ERR) +#define VM_DMAC_TBL_SZ 128 +#define RNP_ETH_ENABLE_RSS_ONLY (0x3f30001) +#define RNP_ETH_DISABLE_RSS (0) +#define RNP_ETH_TX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x0060 + 0x08 * (n)) +#define RNP_ETH_TX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x0064 + 0x08 * (n)) +#define RNP_ETH_EMAC_DMA_PROFULL_THRESH (RNP_ETH_BASE + 0x0080) +#define RNP_ETH_EMAC_DMA_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0084) +#define RNP_ETH_EMAC_SW_PROFULL_THRESH (RNP_ETH_BASE + 0x0088) +#define RNP_ETH_EMAC_SW_PROEMPTY_THRESH (RNP_ETH_BASE + 0x008c) +#define RNP_ETH_EMAC_BMC_TX_PROFULL_THRESH (RNP_ETH_BASE + 0x0090) +#define RNP_ETH_EMAC_BMC_TX_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0094) +#define RNP_ETH_CNT_PKT_EMAC_TX(n) (RNP_ETH_BASE + 0x00a0 + 0x04 * (n)) +#define RNP_ETH_CNT_PKT_PECL_TX(n) (RNP_ETH_BASE + 0x00b0 + 0x04 * (n)) +#define RNP_ETH_STATUS_TX_FLOWCTRL(n) (RNP_ETH_BASE + 0x00c0 + 0x04 * (n)) +#define RNP_ETH_VERSION_FLOWWCTRL (RNP_ETH_BASE + 0x00d0) +#define RNP_ETH_CFG_ETH_MAC (RNP_ETH_BASE + 0x00d4) +#define RNP_ETH_SCA_TX_CS(port) (RNP_ETH_BASE + 0x0100 + 0x08 * (port)) +#define RNP_ETH_SCA_TX_NS(port) (RNP_ETH_BASE + 0x0104 + 0x08 * (port)) +#define RNP_ETH_TXTRANS_CS(port) (RNP_ETH_BASE + 0x0120 + 0x08 * (port)) +#define RNP_ETH_TXTRANS_NS(port) (RNP_ETH_BASE + 0x0124 + 0x08 * (port)) +#define RNP_ETH_1TO4_INST0_IN_PKTS (RNP_ETH_BASE + 0x0200) +#define RNP_ETH_1TO4_INST1_IN_PKTS (RNP_ETH_BASE + 0x0204) +#define RNP_ETH_1TO4_INST2_IN_PKTS (RNP_ETH_BASE + 0x0208) +#define RNP_ETH_1TO4_INST3_IN_PKTS (RNP_ETH_BASE + 0x020c) +#define RNP_ETH_IN_0_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0210 + 0x10 * (port)) +#define RNP_ETH_IN_1_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0214 + 0x10 * (port)) +#define RNP_ETH_IN_2_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0218 + 0x10 * (port)) +#define RNP_ETH_IN_3_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x021c + 0x10 * (port)) +#define RNP_ETH_EMAC_TX_TO_PHY_PKTS(port) (RNP_ETH_BASE + 0x0250 + 4 * (port)) +#define RNP_ETH_TXTRANS_PTP_PKT_NUM(port) (RNP_ETH_BASE + 0x0260 + 4 * (port)) +#define RNP_ETH_TX_DEBUG(n) (RNP_ETH_BASE + 0x0300 + 0x04 * (n)) +#define RNP_ETH_PTP_TX_STATUS(n) (RNP_ETH_BASE + 0x0400) +#define 
RNP_ETH_PTP_TX_HTIMES(n) (RNP_ETH_BASE + 0x0404) +#define RNP_ETH_PTP_TX_LTIMES(n) (RNP_ETH_BASE + 0x0408) +#define RNP_ETH_PTP_TX_TSVALUE_STATUS(n) (RNP_ETH_BASE + 0x040c) +#define RNP_ETH_PTP_TX_CLEAR(n) (RNP_ETH_BASE + 0x0410) +#define RNP_ETH_MAC_SPEED_PORT(n) (RNP_ETH_BASE + 0x0450 + 0x04 * (n)) +#define RNP_ETH_MAC_LOOPBACK_MODE_PORT(n) (RNP_ETH_BASE + 0x0460 + 0x04 * (n)) +#define RNP_ETH_EXCEPT_DROP_PROC (RNP_ETH_BASE + 0x0470) +#define RNP_ETH_IPP (RNP_ETH_BASE + 0x8000) +#define RNP_ETH_BYPASS (RNP_ETH_BASE + 0x8000) +#define RNP_ETH_TUNNEL_MOD (RNP_ETH_BASE + 0x8004) +#define RNP_ETH_LOOPBACK_EN (RNP_ETH_BASE + 0x8008) +#define RNP_FIFO_CTRL_MODE (RNP_ETH_BASE + 0x800c) +#define RNP_ETH_VXLAN_PORT (RNP_ETH_BASE + 0x8010) +#define RNP_ETH_NVGRE_PORT (RNP_ETH_BASE + 0x8014) +#define RNP_ETH_RDMA_PORT (RNP_ETH_BASE + 0x8018) +#define RNP_HOST_FILTER_EN (RNP_ETH_BASE + 0x801c) +#define RNP_MNG_FILTER_EN (RNP_ETH_BASE + 0x8020) +#define RNP_ETH_TCAM_EN (RNP_ETH_BASE + 0x8024) +#define RNP_CONGEST_DROP_EN (RNP_ETH_BASE + 0x8028) +#define RNP_REDIR_EN (RNP_ETH_BASE + 0x8030) +#define RNP_ETH_SCTP_CHECKSUM_EN (RNP_ETH_BASE + 0x8038) +#define RNP_ETH_ARP_FUNC_EN (RNP_ETH_BASE + 0x803c) +#define RNP_ETH_VLAN_VME_REG(n) (RNP_ETH_BASE + 0x8040 + 0x04 * (n)) +#define RNP_ETH_CVLAN_RM_EN (RNP_ETH_BASE + 0x8050) +#define RNP_ETH_VLAN_RM_TYPE (RNP_ETH_BASE + 0x8054) +#define RNP_ETH_WRAP_FIELD_TYPE (RNP_ETH_BASE + 0x805c) +#define RNP_ETH_ERR_MASK_VECTOR (RNP_ETH_BASE + 0x8060) +#define RNP_ETH_DEFAULT_RX_RING (RNP_ETH_BASE + 0x806c) +#define RNP_ETH_RX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x8070 + 0x08 * (n)) +#define RNP_ETH_RX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x8074 + 0x08 * (n)) +#define RNP_ETH_EMAC_GAT_PROGFULL_THRESH (RNP_ETH_BASE + 0x8090) +#define RNP_ETH_EMAC_GAT_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x8094) +#define RNP_ETH_EMAC_PARSE_PROGFULL_THRESH (RNP_ETH_BASE + 0x8098) +#define RNP_ETH_EMAC_PARSE_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x809c) +#define RNP_ETH_FC_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a0) +#define RNP_ETH_FC_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80a4) +#define RNP_ETH_DIS_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a8) +#define RNP_ETH_DIS_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80ac) +#define RNP_ETH_COV_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b0) +#define RNP_ETH_COV_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80b4) +#define RNP_ETH_BMC_RX_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b8) +#define RNP_ETH_BMC_RX_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80bc) +#define RNP_ETH_HIGH_WATER(n) (RNP_ETH_BASE + 0x80c0 + n * (0x08)) +#define RNP_ETH_LOW_WATER(n) (RNP_ETH_BASE + 0x80c4 + n * (0x08)) +#define RNP_ETH_DEFAULT_RX_MIN_LEN (RNP_ETH_BASE + 0x80f0) +#define RNP_ETH_DEFAULT_RX_MAX_LEN (RNP_ETH_BASE + 0x80f4) +#define RNP_ETH_PTP_EVENT_PORT (RNP_ETH_BASE + 0x80f8) +#define RNP_ETH_PTP_GENER_PORT_REG (RNP_ETH_BASE + 0x80fc) +#define RNP_ETH_RX_TRANS_CS_PORT(n) (RNP_ETH_BASE + 0x8100 + 0x08 * (n)) +#define RNP_ETH_RX_TRANS_NS_PORT(n) (RNP_ETH_BASE + 0x8104 + 0x08 * (n)) +#define RNP_ETH_GAT_RX_CS (RNP_ETH_BASE + 0x8120) +#define RNP_ETH_GAT_RX_NS (RNP_ETH_BASE + 0x8124) +#define RNP_ETH_EMAC_PIP_CS (RNP_ETH_BASE + 0x8128) +#define RNP_ETH_EMAC_PIP_NS (RNP_ETH_BASE + 0x812c) +#define RNP_ETH_EMAC_FC_CS (RNP_ETH_BASE + 0x8138) +#define RNP_ETH_EMAC_FC_NS (RNP_ETH_BASE + 0x813c) +#define RNP_ETH_EMAC_DIS_CS (RNP_ETH_BASE + 0x8140) +#define RNP_ETH_EMAC_DIS_NS (RNP_ETH_BASE + 0x8144) +#define RNP_ETH_HOST_L2_FILTER_CS (RNP_ETH_BASE + 0x8150) +#define RNP_ETH_HOST_L2_FILTER_NS (RNP_ETH_BASE + 0x8154) +#define 
RNP_ETH_EMAC_DECAP_CS (RNP_ETH_BASE + 0x8158) +#define RNP_ETH_EMAC_DECAP_NS (RNP_ETH_BASE + 0x815c) +#define RNP_ETH_PFC_CONFIG_PROT(n) (RNP_ETH_BASE + 0x8180 + n * (0x04)) +#define RNP_ETH_RX_PKT_NUM(port) (RNP_ETH_BASE + 0x8220 + 0x04 * (port)) +#define RNP_ETH_RX_DROP_PKT_NUM(port) (RNP_ETH_BASE + 0x8230 + 0x04 * (port)) +#define RNP_ETH_TOTAL_GAT_RX_PKT_NUM (RNP_ETH_BASE + 0x8240) +#define RNP_ETH_PKT_ARP_REQ_NUM (RNP_ETH_BASE + 0x8250) +#define RNP_ETH_PKT_ARP_RESPONSE_NUM (RNP_ETH_BASE + 0x8254) +#define RNP_ETH_ICMP_NUM (RNP_ETH_BASE + 0x8258) +#define RNP_ETH_PKT_UDP_NUM (RNP_ETH_BASE + 0x825c) +#define RNP_ETH_PKT_TCP_NUM (RNP_ETH_BASE + 0x8260) +#define RNP_ETH_PKT_ESP_NUM (RNP_ETH_BASE + 0x8264) +#define RNP_ETH_PKT_GRE_NUM (RNP_ETH_BASE + 0x8268) +#define RNP_ETH_PKT_SCTP_NUM (RNP_ETH_BASE + 0x826c) +#define RNP_ETH_PKT_TCPSYN_NUM (RNP_ETH_BASE + 0x8270) +#define RNP_ETH_PKT_VXLAN_NUM (RNP_ETH_BASE + 0x8274) +#define RNP_ETH_PKT_NVGRE_NUM (RNP_ETH_BASE + 0x8278) +#define RNP_ETH_PKT_FRAGMENT_NUM (RNP_ETH_BASE + 0x827c) +#define RNP_ETH_PKT_LAYER1_VLAN_NUM (RNP_ETH_BASE + 0x8280) +#define RNP_ETH_PKT_LAYER2_VLAN_NUM (RNP_ETH_BASE + 0x8284) +#define RNP_ETH_PKT_IPV4_NUM (RNP_ETH_BASE + 0x8288) +#define RNP_ETH_PKT_IPV6_NUM (RNP_ETH_BASE + 0x828c) +#define RNP_ETH_PKT_INGRESS_NUM (RNP_ETH_BASE + 0x8290) +#define RNP_ETH_PKT_EGRESS_NUM (RNP_ETH_BASE + 0x8294) +#define RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM (RNP_ETH_BASE + 0x8298) +#define RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM (RNP_ETH_BASE + 0x829c) +#define RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a0) +#define RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a4) +#define RNP_ETH_PKT_SCTP_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a8) +#define RNP_ETH_PKT_VLAN_ERR_NUM (RNP_ETH_BASE + 0x82ac) +#define RNP_ETH_PKT_RDMA_NUM (RNP_ETH_BASE + 0x82b0) +#define RNP_ETH_PKT_ARP_AUTO_RESPONSE_NUM (RNP_ETH_BASE + 0x82b4) +#define RNP_ETH_PKT_ICMPV6_NUM (RNP_ETH_BASE + 0x82b8) +#define RNP_ETH_PKT_IPV6_EXTEND_NUM (RNP_ETH_BASE + 0x82bc) +#define RNP_ETH_PKT_802_3_NUM (RNP_ETH_BASE + 0x82c0) +#define RNP_ETH_PKT_EXCEPT_SHORT_NUM (RNP_ETH_BASE + 0x82c4) +#define RNP_ETH_PKT_PTP_NUM (RNP_ETH_BASE + 0x82c8) +#define RNP_ETH_DECAP_PKT_IN_NUM (RNP_ETH_BASE + 0x82d0) +#define RNP_ETH_DECAP_PKT_OUT_NUM (RNP_ETH_BASE + 0x82d4) +#define RNP_ETH_DECAP_DMAC_OUT_NUM (RNP_ETH_BASE + 0x82d8) +#define RNP_ETH_DECAP_BMC_OUT_NUM (RNP_ETH_BASE + 0x82dc) +#define RNP_ETH_DECAP_SW_OUT_NUM (RNP_ETH_BASE + 0x82e0) +#define RNP_ETH_DECAP_MIRROR_OUT_NUM (RNP_ETH_BASE + 0x82e4) +#define RNP_ETH_DECAP_PKT_DROP_NUM(port) (RNP_ETH_BASE + 0x82e8 + 0x04 * (port)) +#define RNP_ETH_INVALID_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(0) +#define RNP_ETH_FILTER_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(1) +#define RNP_ETH_DECAP_DMAC_DROP_NUM (RNP_ETH_BASE + 0x82f0) +#define RNP_ETH_DECAP_BMC_DROP_NUM (RNP_ETH_BASE + 0x82f4) +#define RNP_ETH_DECAP_SWITCH_DROP_NUM (RNP_ETH_BASE + 0x82f8) +#define RNP_ETH_DECAP_RM_VLAN_NUM (RNP_ETH_BASE + 0x82fc) +#define RNP_ETH_RX_FC_PKT_IN_NUM (RNP_ETH_BASE + 0x8300) +#define RNP_ETH_RX_FC_PKT_OUT_NUM (RNP_ETH_BASE + 0x8304) +#define RNP_ETH_RX_FC_PKT_DROP0_NUM (RNP_ETH_BASE + 0x8308) +#define RNP_ETH_RX_FC_PKT_DROP1_NUM (RNP_ETH_BASE + 0x830c) +#define RNP_ETH_RING_FC_STATUS0 (RNP_ETH_BASE + 0x8310) +#define RNP_ETH_RING_FC_STATUS1 (RNP_ETH_BASE + 0x8314) +#define RNP_ETH_RING_FC_STATUS2 (RNP_ETH_BASE + 0x8318) +#define RNP_ETH_RING_FC_STATUS3 (RNP_ETH_BASE + 0x831c) +#define RNP_ETH_RX_DEBUG(n) (RNP_ETH_BASE + 0x8400 + 0x04 * (n)) +#define RNP_ETH_RX_FC_DEBUG0_NUM 
RNP_ETH_RX_DEBUG(0) +#define RNP_ETH_RX_FC_DEBUG1_NUM RNP_ETH_RX_DEBUG(1) +#define RNP_ETH_RX_DIS_DEBUG0_NUM RNP_ETH_RX_DEBUG(2) +#define RNP_ETH_RX_DIS_DEBUG1_NUM RNP_ETH_RX_DEBUG(3) +#define RNP_ETH_HOST_L2_DROP_PKTS RNP_ETH_RX_DEBUG(4) +#define RNP_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP_ETH_RX_DEBUG(5) +#define RNP_ETH_ETYPE_DROP_PKTS RNP_ETH_RX_DEBUG(6) +#define RNP_ETH_TCP_SYN_DROP_PKTS RNP_ETH_RX_DEBUG(7) +#define RNP_ETH_REDIR_TUPLE5_DROP_PKTS RNP_ETH_RX_DEBUG(8) +#define RNP_ETH_REDIR_TCAM_DROP_PKTS RNP_ETH_RX_DEBUG(9) +#define RNP_ETH_VMARK_TC(n) (RNP_ETH_BASE + 0x8500 + 0x04 * (n)) +#define RNP_RING_FC_ENABLE (RNP_ETH_BASE + 0x8520) +#define RNP_SELECT_RING_EN(n) (RNP_ETH_BASE + 0x8524 + (0x4 * n)) +#define RNP_TC_FC_SW_EN (RNP_ETH_BASE + 0x8534) +#define RNP_ETH_LOCAL_DIP(n) (RNP_ETH_BASE + 0x8600 + 0x04 * (n)) +#define RNP_ETH_LOCAL_DMAC_H(n) (RNP_ETH_BASE + 0x8700 + 0x04 * (n)) +#define RNP_ETH_LOCAL_DMAC_L(n) (RNP_ETH_BASE + 0x8800 + 0x04 * (n)) +/* Rx Ring Flow Control */ +#define RNP_RXTRANS_RX_PKTS(port) (RNP_ETH_BASE + 0x8900 + 0x40 * (port)) +#define RNP_RXTRANS_DROP_PKTS(port) (RNP_ETH_BASE + 0x8904 + 0x40 * (port)) +#define RNP_RXTRANS_WDT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8908 + 0x40 * (port)) +#define RNP_RXTRANS_CODE_ERR_PKTS(port) (RNP_ETH_BASE + 0x890c + 0x40 * (port)) +#define RNP_RXTRANS_CRC_ERR_PKTS(port) (RNP_ETH_BASE + 0x8910 + 0x40 * (port)) +#define RNP_RXTRANS_SLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8914 + 0x40 * (port)) +#define RNP_RXTRANS_GLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8918 + 0x40 * (port)) +#define RNP_RXTRANS_IPH_ERR_PKTS(port) (RNP_ETH_BASE + 0x891c + 0x40 * (port)) +#define RNP_RXTRANS_CSUM_ERR_PKTS(port) (RNP_ETH_BASE + 0x8920 + 0x40 * (port)) +#define RNP_RXTRANS_LEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8924 + 0x40 * (port)) +#define RNP_RXTRANS_CUT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8928 + 0x40 * (port)) +#define RNP_RXTRANS_EXCEPT_BYTES(port) (RNP_ETH_BASE + 0x892c + 0x40 * (port)) +#define RNP_RXTRANS_G1600_BYTES_PKTS(port) \ + (RNP_ETH_BASE + 0x8930 + 0x40 * (port)) +#define RNP_RX_RING_MAXRATE(n) (RNP_ETH_BASE + 0x8a00 + (0x4 * n)) +#define RNP_ETH_RX_PROGFULL_RTRN(n) (RNP_ETH_BASE + 0x8c00 + 0x04 * (n)) +#define RNP_ETH_CNT_PKT_EMAC_RX(n) (RNP_ETH_BASE + 0x8c10 + 0x04 * (n)) +#define RNP_ETH_CNT_PKT_PECL_RX(n) (RNP_ETH_BASE + 0x8c20 + 0x04 * (n)) +#define RNP_ETH_STATUS_RX_FLOWCTRL(n) (RNP_ETH_BASE + 0x8c30 + 0x04 * (n)) +#define RNP_ETH_DMAC_FCTRL (RNP_ETH_BASE + 0x9110) +#define RNP_ETH_DMAC_MCSTCTRL (RNP_ETH_BASE + 0x9114) +#define RNP_MCSTCTRL_MULTICASE_TBL_EN (1 << 2) +#define RNP_MCSTCTRL_UNICASE_TBL_EN (1 << 3) +#define RNP_MCSTCTRL_DMAC_47 0x00 +#define RNP_MCSTCTRL_DMAC_46 0x01 +#define RNP_MCSTCTRL_DMAC_45 0x02 +#define RNP_MCSTCTRL_DMAC_43 0x03 +#define RNP_ETH_VLAN_FILTER_ENABLE (RNP_ETH_BASE + 0x9118) +#define RNP_ETH_INPORT_POLICY_VAL (RNP_ETH_BASE + 0x91d0) +#define RNP_ETH_INPORT_POLICY_REG(n) (RNP_ETH_BASE + 0x91e0 + 0x04 * (n)) +#define ETH_LAYER2_NUM (16) +#define RNP_ETH_LAYER2_ETQF(n) (RNP_ETH_BASE + 0x9200 + 0x04 * (n)) +#define RNP_ETH_LAYER2_ETQS(n) (RNP_ETH_BASE + 0x9240 + 0x04 * (n)) +#define RNP_ETH_LAYER2_ETQS_DEFAULT (RNP_ETH_BASE + 0x9280) +#define RNP_ETH_ETQF_DEFAULT (RNP_ETH_BASE + 0x9284) +#define RNP_ETH_SYNQF (RNP_ETH_BASE + 0x9290) +#define RNP_ETH_SYNQF_PRIORITY (RNP_ETH_BASE + 0x9294) +/* + * [3:0]: + * 4'b0000:RSS disable + * 4'b0001:RSS only + * 4'b0100:DCB and RSS--8*16 + * 4'b1010:POOLS and RSS--32*4 + * [3] :virtual enable + * [16]:ipv4_hash_tcp_enable + * [17]:ipv4_hash_enable + * [20]:ipv6_hash_enable 
+ * [21]:ipv6_hash_tcp_enable + * [22]:ipv4_hash_udp_enable + * [23]:ipv6_hash_udp_enable + * [24]:ipv4_hash_sctp_enable + * [25]:ipv6_hash_sctp_enable + */ +#define RNP_ETH_RSS_CONTROL (RNP_ETH_BASE + 0x92a0) +#define RNP_MRQC_IOV_EN (RNP_ETH_BASE + 0x92a0) +#define RNP_IOV_ENABLED (1 << 3) +#define RNP_ETH_RSS_KEY (RNP_ETH_BASE + 0x92d0) +#define RNP_ETH_RAR_RL(n) (RNP_ETH_BASE + 0xa000 + 0x04 * n) +#define RNP_ETH_RAR_RH(n) (RNP_ETH_BASE + 0xa400 + 0x04 * n) +#define RNP_ETH_UTA(n) (RNP_ETH_BASE + 0xa800 + 0x04 * n) +#define RNP_ETH_MULTICAST_HASH_TABLE(n) (RNP_ETH_BASE + 0xac00 + 0x04 * n) +#define RNP_MTA(n) RNP_ETH_MULTICAST_HASH_TABLE(n) +#define RNP_ETH_VLAN_FILTER_TABLE(n) (RNP_ETH_BASE + 0xb000 + 0x04 * (n)) +#define RNP_VFTA RNP_ETH_VLAN_FILTER_TABLE +#define RNP_FCTRL_MULTICASE_BYPASS (1 << 8) +#define RNP_FCTRL_UNICASE_BYPASS (1 << 9) +#define RNP_FCTRL_BROADCAST_BYPASS (1 << 10) +#define RNP_ETH_ETYPE_TABLE(n) (RNP_ETH_BASE + 0xb300 + 0x04 * (n)) +#define RNP_VM_DMAC_MPSAR_RING(entry) \ + (RNP_ETH_BASE + 0xb400 + (4 * (entry))) +#define RNP_VLVF(idx) (RNP_ETH_BASE + 0xb600 + 4 * (idx)) +#define RNP_VLVFB(idx) (RNP_ETH_BASE + 0xb700 + 4 * (idx)) +#define RNP_VM_TUNNEL_PFVLVF_L(n) (RNP_ETH_BASE + 0xb800 + 0x04 * (n)) +#define RNP_VM_TUNNEL_PFVLVF_H(n) (RNP_ETH_BASE + 0xb900 + 0x04 * (n)) +/* 5 tuple */ +#define ETH_TUPLE5_NUM 128 +#define RNP_ETH_TUPLE5_SAQF(n) (RNP_ETH_BASE + 0xc000 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_DAQF(n) (RNP_ETH_BASE + 0xc400 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_SDPQF(n) (RNP_ETH_BASE + 0xc800 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_FTQF(n) (RNP_ETH_BASE + 0xcc00 + 0x04 * (n)) +#define RNP_ETH_TUPLE5_POLICY(n) (RNP_ETH_BASE + 0xd000 + 0x04 * (n)) +#define RNP_ETH_RSS_INDIR_TBL(p, n) \ + (RNP_ETH_BASE + 0xe000 + 0x04 * (n) + 0x200 * (p)) +#define RNP_ETH_TC_IPH_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe800 + 0x04 * (n)) +#define RNP_ETH_TC_VLAN_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe820 + 0x04 * (n)) +#define RNP_ETH_TC_PORT_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe840 + 0x04 * (n)) +#define RNP_REDIR_RING_MASK (RNP_ETH_BASE + 0xe860) +#define RNP_ETH_RSS_MODE (0x6fe00) +#define RNP_ETH_RSS_INDIR_TBL_UV3P(n) (0x6ff00 + 0x04 * (n)) +/* ================================================================== */ + +/* ==================== RNP-REG Global Registers ==================== */ +#define RNP_COMM_REG0 0x30000 +#define RNP_TOP_NIC_VERSION (RNP_COMM_REG0 + 0x0000) +#define RNP_TOP_NIC_CONFIG (RNP_COMM_REG0 + 0x0004) +#define RNP_TOP_NIC_STAT (RNP_COMM_REG0 + 0x0008) +#define RNP_TOP_NIC_DUMMY (RNP_COMM_REG0 + 0x000c) +#define RNP_TOP_NIC_REST_N (RNP_COMM_REG0 + 0x0010) +#define NIC_RESET 0 +#define RNP_TOP_DMA_MEM_SLP (RNP_COMM_REG0 + 0x4004) +#define RNP_TOP_DMA_MEM_SD (RNP_COMM_REG0 + 0x4008) +#define RNP_TOP_ETH_TIMESTAMP_SEL (RNP_COMM_REG0 + 0x8010) +#define RNP_TOP_ETH_MAC_CLK_SEL (RNP_COMM_REG0 + 0x8014) +#define RNP_TOP_ETH_INF_ETH_STATUS (RNP_COMM_REG0 + 0x8018) +#define RNP_TOP_ETH_BUG_40G_PATCH (RNP_COMM_REG0 + 0x801c) +#define RNP_TOP_ETH_PWR_PORT_NUM (4) +#define RNP_TOP_ETH_PWR_CLAMP_CTRL_PORT(n) (RNP_COMM_REG0 + 0x8020 + 0xc * (n)) +#define RNP_TOP_ETH_PWR_ISOLATE_PORT(n) (RNP_COMM_REG0 + 0x8024 + 0xc * (n)) +#define RNP_TOP_ETH_PWR_DOWN_PORT(n) (RNP_COMM_REG0 + 0x8028 + 0xc * (n)) +#define RNP_TOP_ETH_TCAM_CONFIG_ENABLE (RNP_COMM_REG0 + 0x8050) +#define RNP_TOP_ETH_SLIP (RNP_COMM_REG0 + 0x8060) +#define RNP_TOP_ETH_SHUT_DOWN (RNP_COMM_REG0 + 0x8064) +#define RNP_TOP_ETH_OVS_SLIP (RNP_COMM_REG0 + 0x8068) +#define RNP_TOP_ETH_OVS_SHUT_DOWN (RNP_COMM_REG0 + 
0x806c) +#define RNP_FC_PORT_ENABLE (RNP_COMM_REG0 + 0x9004) +#define RNP_FC_PORT_PRIO_MAP(n) (RNP_COMM_REG0 + 0x9008 + (0x04 * n)) +#define RNP_FC_EN_CONF_AVAILABLE (RNP_COMM_REG0 + 0x9018) +#define RNP_FC_UNCTAGS_MAP_OFFSET (16) +#define RNP_TOP_MAC_OUI (RNP_COMM_REG0 + 0xc004) +#define RNP_TOP_MAC_SN (RNP_COMM_REG0 + 0xc008) +/* ================================================================== */ + +/* ==================== RNP-SERDES Global Registers ================= */ + +#define RNP_SERDES (0x40000) +#define RNP_PCS_OFFSET (0x1000) + +#define RNP_PCS_BASE(i) (RNP_SERDES + RNP_PCS_OFFSET * i) +#define RNP_PCS_1G_OR_10G BIT(13) +#define RNP_PCS_SPPEED_MASK (0x1c) +#define RNP_PCS_SPPEED_10G (0x0) +#define RNP_PCS_SPPEED_40G (0xc) +#define RNP_PCS_LINK_SPEED (0x30000) +#define RNP_PCS_LINKUP BIT(2) +#define RNP_PCS_LINK_STATUS (0x30001) + +/* ================================================================== */ + +/* ==================== RNP-MAC Global Registers ==================== */ +#define RNP10_MAC_BASE (0x60000) +#define RNP_XLMAC (0x60000) +#define RNP10_MAC_TX_CFG (0x0000) +#define RNP10_MAC_RX_CFG (0x0004) +#define RNP_IPC_MASK_XLGMAC BIT(9) +#define RNP_RX_ALL BIT(31) +#define RNP_RX_ALL_MUL BIT(4) +#define RNP10_MAC_PKT_FLT (0x0008) +#define RNP10_MAC_LPI_CTRL (0x00d0) +#define RNP10_MAC_Q0_TX_FLOW_CTRL(i) (0x0070 + 0x04 * (i)) +#define RNP10_MAC_RX_FLOW_CTRL (0x0090) +#define RNP10_TX_FLOW_ENABLE_MASK (0x2) +#define RNP10_RX_FLOW_ENABLE_MASK (0x1) +#define RNP10_MAC_TX_VLAN_TAG (0x0050) +#define RNP10_MAC_TX_VLAN_MODE (0x0060) +#define RNP10_MAC_INNER_VLAN_INCL (0x0064) +#define RNP10_MAC_UNICAST_LOW(i) (0x304 + i * 0x08) +#define RNP10_MAC_UNICAST_HIGH(i) (0x300 + i * 0x08) +#define RNP_MODE_NO_SA_INSER (0x0) +#define RNP_SARC_OFFSET (28) +#define RNP_TWOKPE_MASK BIT(27) +#define RNP_SFTERR_MASK BIT(26) +#define RNP_CST_MASK BIT(25) +#define RNP_TC_MASK BIT(24) +#define RNP_WD_MASK BIT(23) +#define RNP_JD_MASK BIT(22) +#define RNP_BE_MASK BIT(21) +#define RNP_JE_MASK BIT(20) +#define RNP_IFG_96 (0x00) +#define RNP_IFG_OFFSET (17) +#define RNP_DCRS_MASK BIT(16) +#define RNP_PS_MASK BIT(15) +#define RNP_FES_MASK BIT(14) +#define RNP_DO_MASK BIT(13) +#define RNP_LM_MASK BIT(12) +#define RNP_DM_MASK BIT(11) +#define RNP_IPC_MASK BIT(10) +#define RNP_DR_MASK BIT(9) +#define RNP_LUD_MASK BIT(8) +#define RNP_ACS_MASK BIT(7) +#define RNP_BL_MODE (0x00) +#define RNP_BL_OFFSET (5) +#define RNP_DC_MASK BIT(4) +#define RNP_TE_MASK BIT(3) +#define RNP_RE_MASK BIT(2) +#define RNP_PRELEN_MODE (0) +#define GMAC_CONTROL 0x00000000 /* Configuration */ +#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ +#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ +#define GMAC_MII_ADDR 0x00000010 /* MII Address */ +#define GMAC_MII_DATA 0x00000014 /* MII Data */ +#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ +#define GMAC_PMT 0x0000002c + +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +#define GMAC_VTHM_MASK BIT(19) +#define GMAC_ESVL_MASK BIT(18) +#define GMAC_VTIM_MASK BIT(17) +#define GMAC_ETV_MASK BIT(16) +#define GMAC_VLAN_TAG_CTRL 0x0000001c +#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */ +#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */ +#define GMAC_CONTROL_FES 0x00004000 /* Speed 
0:10 1:100 */ +#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ +#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ +#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ +#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ +#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */ +#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ +#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ +/* GMAC Frame Filter defines */ +#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */ +#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define GMAC_FRAME_FILTER_VLAN 0x00010000 /* vlan filter open */ +#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ +/* GMII ADDR defines */ +#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ +/* GMAC FLOW CTRL defines */ +#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define GMAC_FLOW_CTRL_PT_SHIFT 16 +#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ +#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/ +#define GMAC_MANAGEMENT_RX_UNDERSIZE (0x01a4) +#define RNP_MAC_TX_CFG (RNP_XLMAC + 0x0000) +#define RNP_MAC_RX_CFG (RNP_XLMAC + 0x0004) +#define RNP_MAC_PKT_FLT (RNP_XLMAC + 0x0008) +#define RNP_MAC_LPI_CTRL (RNP_XLMAC + 0x00d0) +#define RNP_MAC_TX_VLAN_TAG (RNP_XLMAC + 0x0050) +#define RNP_MAC_TX_VLAN_MODE (RNP_XLMAC + 0x0060) +#define RNP_MAC_INNER_VLAN_INCL (RNP_XLMAC + 0x0064) +#define RNP_MAC_Q0_TX_FLOW_CTRL(i) (RNP_XLMAC + 0x0070 + 0x04 * (i)) +#define RNP_MAC_RX_FLOW_CTRL (RNP_XLMAC + 0x0090) +#define RNP_MAC_HW_FEATURE (RNP_XLMAC + 0x0120) +/*1588 */ +#define RNP_MAC_TS_CTRL (RNP_XLMAC + 0X0d00) +#define RNP_MAC_SUB_SECOND_INCREMENT (RNP_XLMAC + 0x0d04) +#define RNP_MAC_SYS_TIME_SEC_CFG (RNP_XLMAC + 0x0d08) +#define RNP_MAC_SYS_TIME_NANOSEC_CFG (RNP_XLMAC + 0x0d0c) +#define RNP_MAC_SYS_TIME_SEC_UPDATE (RNP_XLMAC + 0x0d10) +#define RNP_MAC_SYS_TIME_NANOSEC_UPDATE (RNP_XLMAC + 0x0d14) +#define RNP_MAC_TS_ADDEND (RNP_XLMAC + 0x0d18) +#define RNP_MAC_TS_STATS (RNP_XLMAC + 0x0d20) +#define RNP_MAC_INTERRUPT_ENABLE (RNP_XLMAC + 0x00b4) +#define RNP_MAC_STATS_BROADCAST_LOW (RNP_XLMAC + 0x0918) +#define RNP_MAC_STATS_BROADCAST_HIGH (RNP_XLMAC + 0x091c) +#define RNP_MAC_STATS_MULTICAST_LOW (RNP_XLMAC + 0x0920) +#define RNP_MAC_STATS_MULTICAST_HIGH (RNP_XLMAC + 0x0924) +#define RNP_TX_FLOW_ENABLE_MASK (0x2) +#define RNP_RX_FLOW_ENABLE_MASK (0x1) +/* ================================================================== */ + +/* ==================== RNP-MSIX Global Registers ==================== */ +//==== Ring-MSIX Registers (MSI-X_module_design.docs) === +#define RING_VECTOR(n) (0x04 * (n)) + +/* ================================================================== */ + +/* ==================== RNP-SWITCH Global Registers ================= */ +#define RNP_SWITCH_BASE 0xB0000 + +#define RNP_SWITCH_RULE_INGS(port, n) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x1000 + 0x04 * (n)) +#define RNP_SWITCH_RULE_INGS_RPU_NP(port) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x1014) +#define RNP_SWITCH_RULE_INGS_RPU_SWITCH(port) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x1018) +#define RNP_SWITCH_RULE_INGS_SEC(port) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x101c) +#define RNP_SWITCH_RULE_INGS_EXFPGA(port) \ + (RNP_SWITCH_BASE + 0x24 * (port) + 0x1020) +#define RNP_SWITCH_CNT_EGRESS_PKT(port) (RNP_SWITCH_BASE + 0x10db + 0x04 * (n)) +#define RNP_SWITCH_CNT_INGRESS_PKT(port) (RNP_SWITCH_BASE + 0x10f0 + 0x04 * (n)) +#define RNP_SWITCH_RPUUP_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1108) +#define RNP_SWITCH_RPUDN_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x110c) +#define RNP_SWITCH_MAC0_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1110) +#define RNP_SWITCH_MAC1_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1114) +#define RNP_SWITCH_DMA0_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x1118) +#define RNP_SWITCH_DMA1_DATA_PROG_FULL_THRESH (RNP_SWITCH_BASE + 0x111c) +#define RNP_SWITCH_REG1_INGRESS_STATUS(port) \ + (RNP_SWITCH_BASE + 0x1120 + 0x08 * (port)) +#define RNP_SWITCH_REG2_INGRESS_STATUS(port) \ + (RNP_SWITCH_BASE + 0x1124 + 0x08 * (port)) +#define RNP_SWITCH_REG_STATUS_ROBIN(port) \ + (RNP_SWITCH_BASE + 0x1150 + 0x04 * (port)) +#define RNP_SWITCH_REG_EGRESS_STATUS(port) \ + (RNP_SWITCH_BASE + 0x1168 + 0x04 * (port)) +#define RNP_SWITCH_INFO_FIFO_DMA_TX(n) (RNP_SWITCH_BASE + 0x1198 + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_DMA_RX(n) (RNP_SWITCH_BASE + 0x119c + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_MAC_TX(n) (RNP_SWITCH_BASE + 0x11a8 + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_MAC_RX(n) (RNP_SWITCH_BASE + 0x11ac + 0x08 * 
(n)) +#define RNP_SWITCH_INFO_FIFO_RPUUP_RX(n) (RNP_SWITCH_BASE + 0x11bc + 0x08 * (n)) +#define RNP_SWITCH_INFO_FIFO_RPUDN_RX(n) (RNP_SWITCH_BASE + 0x11c0 + 0x08 * (n)) +#define RNP_SWITCH_EN_SOFT_RESET (RNP_SWITCH_BASE + 0xf000) +#define RNP_SWITCH_SOFT_RESET (RNP_SWITCH_BASE + 0xf004) +#define RNP_SWITCH_CLR_INGS_ERR (RNP_SWITCH_BASE + 0xf008) +#define RNP_SWITCH_ERR_CODE_INGS(port) \ + (RNP_SWITCH_BASE + 0xf010 + 0x04 * (port)) +#define RNP_SWITCH_MEM_SD (RNP_SWITCH_BASE + 0xf028) +#define RNP_SWITCH_MEM_SLP (RNP_SWITCH_BASE + 0xf02c) +#define RNP_SWITCH_EN_INVALID_DPORT_DROP_O (RNP_SWITCH_BASE + 0xf030) + +/* ================================================================== */ + +/* ==================== RNP-TCAM Global Registers ==================== */ +#define RNP_TCAM_BASE (0xc0000) +#define RNP_TCAM_SDPQF(n) \ + (RNP_TCAM_BASE + 0x00 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_DAQF(n) \ + (RNP_TCAM_BASE + 0x04 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_SAQF(n) \ + (RNP_TCAM_BASE + 0x08 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_APQF(n) \ + (RNP_TCAM_BASE + 0x0c + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_SDPQF_MASK(n) \ + (RNP_TCAM_BASE + 0x20 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_DAQF_MASK(n) \ + (RNP_TCAM_BASE + 0x24 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_SAQF_MASK(n) \ + (RNP_TCAM_BASE + 0x28 + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_APQF_MASK(n) \ + (RNP_TCAM_BASE + 0x2c + 0x40 * (n / 2) + 0x10 * (n % 2)) +#define RNP_TCAM_MODE (RNP_TCAM_BASE + 0x20000) +#define RNP_TCAM_CACHE_ENABLE (RNP_TCAM_BASE + 0x20004) +#define RNP_TCAM_CACHE_ADDR_CLR (RNP_TCAM_BASE + 0x20008) +#define RNP_TCAM_CACHE_REQ_CLR (RNP_TCAM_BASE + 0x2000c) + +/* ================================================================== */ + +/* ==================== OTHER Global Registers ==================== */ +/* ===== PF-VF Functions ==== */ +#define VF_NUM_REG 0xa3000 +/* 8bit: 7:vf_actiove 6:fun0/fun1 [5:0]:vf_num */ +#define VF_NUM(vfnum, fun) ((1 << 7) | (((fun) & 0x1) << 6) | ((vfnum) & 0x3f)) +#define PF_BIT 6 +#define PF_NUM(fun) (((fun) & 0x1) << 6) +#define IS_VF(vfnum) (((vfnum) & (1 << 7)) ? 1 : 0) + +/* PFC Flow Control*/ +enum NIC_MODE { + MODE_NIC_MODE_2PORT_40G = 0, + MODE_NIC_MODE_2PORT_10G = 1, + MODE_NIC_MODE_4PORT_10G = 2, + MODE_NIC_MODE_8PORT_10G = 3, +}; + +/* ================================================================== */ + +#endif /* RNP_REGS_H */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sriov.c b/drivers/net/ethernet/mucse/rnp/rnp_sriov.c new file mode 100644 index 0000000000000..5dccdf8b569f6 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_sriov.c @@ -0,0 +1,1737 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_type.h" +#include "rnp_sriov.h" + +int rnp_msg_post_status_signle(struct rnp_adapter *adapter, + enum PF_STATUS status, int vf); +#ifdef CONFIG_PCI_IOV +static int __rnp_enable_sriov(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int num_vf_macvlans, i, num_vebvlans; + struct vf_macvlans *mv_list; + struct vf_vebvlans *vv_list = NULL; + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + /* sriov and dcb cannot open together */ + /* reset numtc */ + adapter->flags &= (~RNP_FLAG_DCB_ENABLED); + netdev_reset_tc(adapter->netdev); + + e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= RNP_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + adapter->ring_feature[RING_F_VMDQ].offset = 0; + else + adapter->ring_feature[RING_F_VMDQ].offset = hw->max_vfs - 1; + + num_vf_macvlans = hw->num_rar_entries - + (hw->max_pf_macvlans + 1 + adapter->num_vfs); + num_vebvlans = hw->num_vebvlan_entries; + + adapter->mv_list = mv_list = kcalloc( + num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL); + if (num_vebvlans) + hw->vv_list = vv_list = kcalloc( + num_vebvlans, sizeof(struct vf_vebvlans), GFP_KERNEL); + + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { + mv_list->vf = -1; + mv_list->free = true; + mv_list->rar_entry = hw->mac.num_rar_entries - + (i + adapter->num_vfs + 1); + list_add(&mv_list->l, &adapter->vf_mvs.l); + mv_list++; + } + } + + if (vv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&hw->vf_vas.l); + for (i = 0; i < num_vebvlans; i++) { + vv_list->vid = -1; + vv_list->vid = 0; + vv_list->free = true; + vv_list->veb_entry = i; + list_add(&vv_list->l, &hw->vf_vas.l); + vv_list++; + } + } + + adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB; + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + + hw->ops.set_sriov_status(hw, true); + adapter->vfinfo = kcalloc(adapter->num_vfs, + sizeof(struct vf_data_storage), GFP_KERNEL); + if (adapter->vfinfo) { + /* limit trafffic classes based on VFs enabled */ + /* TODO analyze VF need support pfc or traffic classes */ + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = hw->sriov_ring_limit; + + /* Disable RSC when in SR-IOV mode */ + adapter->flags2 &= + ~(RNP_FLAG2_RSC_CAPABLE | RNP_FLAG2_RSC_ENABLED); + + adapter->flags |= RNP_FLAG_SRIOV_ENABLED; + + /* enable spoof checking for all VFs */ + return 0; + } + + /* open flags at last to avoid null call adapter->vfinfo */ + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + return -ENOMEM; +} + +void rnp_enable_sriov_true(struct rnp_adapter *adapter) +{ + int err = 0; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return; + + adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE; + + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + printk("Failed to enable PCI sriov: %d num %d\n", err, + adapter->num_vfs); + printk("We cannot handle this error\n"); + } + + adapter->flags |= RNP_FLAG_VF_INIT_DONE; +} + +/* Note this function is called when the user wants to enable SR-IOV + 
* VFs using the now deprecated module parameter + * never used + */ +void rnp_enable_sriov(struct rnp_adapter *adapter) +{ + int pre_existing_vfs = 0; + struct rnp_hw *hw = &adapter->hw; + + pre_existing_vfs = pci_num_vf(adapter->pdev); + if (!pre_existing_vfs && !adapter->num_vfs) + return; + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + if (!pre_existing_vfs) + dev_warn( + &adapter->pdev->dev, + "Enabling SR-IOV VFs using the module parameter is deprecated " + "- please use the pci sysfs interface.\n"); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + /* If there are pre-existing VFs then we have to force + * use of that many - over ride any module parameter value. + * This may result from the user unloading the PF driver + * while VFs were assigned to guest VMs or because the VFs + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { + adapter->num_vfs = pre_existing_vfs; + dev_warn( + &adapter->pdev->dev, + "Virtual Functions already enabled for this device - Please " + "reload all VF drivers to avoid spoofed packet errors\n"); + } else { + int i; + /* + * The n10 supports up to 64 VFs per physical function + * but this implementation limits allocation to 126 so that + * basic networking resources are still available to the + * physical function. If the user requests greater than + * 64 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = + min_t(unsigned int, adapter->num_vfs, hw->max_vfs - 1); + + /* should first alloc memory for sriov */ + if (__rnp_enable_sriov(adapter)) { + e_err(probe, "Failed to alloc memory for sriov\n"); + adapter->num_vfs = 0; + } + + for (i = 0; i < adapter->num_vfs; i++) + rnp_vf_configuration(adapter->pdev, (i | 0x10000000)); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + } +} + +static bool rnp_vfs_are_assigned(struct rnp_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; + unsigned int dev_id = RNP_DEV_ID_N10_PF0_VF_N; + unsigned int vendor_id = PCI_VENDOR_ID_MUCSE; + + switch (adapter->pdev->device) { + case RNP_DEV_ID_N10_PF0: + case RNP_DEV_ID_N10_PF1: + vendor_id = 0x1dab; + if (rnp_is_pf1(&adapter->hw)) + dev_id = RNP_DEV_ID_N10_PF1_VF; + else + dev_id = RNP_DEV_ID_N10_PF0_VF; + break; + case PCI_DEVICE_ID_N10_PF0: + case PCI_DEVICE_ID_N10_PF1: + vendor_id = PCI_VENDOR_ID_MUCSE; + if (rnp_is_pf1(&adapter->hw)) + dev_id = RNP_DEV_ID_N10_PF1_VF_N; + else + dev_id = RNP_DEV_ID_N10_PF0_VF_N; + } + + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(vendor_id, dev_id, NULL); + while (vfdev) { + /* if we don't own it we don't care */ + if (vfdev->is_virtfn && vfdev->physfn == pdev) { + /* if it is assigned we cannot release it */ + if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + return true; + } + + vfdev = pci_get_device(vendor_id, dev_id, vfdev); + } + + return false; +} +#endif /* #ifdef CONFIG_PCI_IOV */ + +int rnp_disable_sriov(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int rss; + int time = 0; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return 0; + + adapter->num_vfs = 0; + adapter->flags &= ~RNP_FLAG_SRIOV_ENABLED; + adapter->flags &= ~RNP_FLAG_SRIOV_INIT_DONE; + adapter->flags &= ~RNP_FLAG_VF_INIT_DONE; + adapter->vlan_count = 0; + msleep(100); + + /* only do if not ncsi card */ + if (!hw->ncsi_en) + hw->ops.set_mac_rx(hw, false); + + hw->ops.set_sriov_status(hw, false); + + /* set num VFs to 0 to prevent 
access to vfinfo */ + while (test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) { + msleep(100); + time++; + + if (time > 100) { + printk("wait flags timeout\n"); + break; + } + } + if (time < 100) + clear_bit(__RNP_USE_VFINFI, &adapter->state); + + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + if (hw->vv_list) { + kfree(hw->vv_list); + hw->vv_list = NULL; + } + + if (adapter->mv_list) { + kfree(adapter->mv_list); + adapter->mv_list = NULL; + } + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + /* if SR-IOV is already disabled then there is nothing to do */ + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); +#ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (rnp_vfs_are_assigned(adapter)) { + e_dev_warn( + "Unloading driver while VFs are assigned - VFs will not be " + "deallocated\n"); + return -EPERM; + } + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); +#endif + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + + /* set default pool back to 0 */ + + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) + adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED; + adapter->ring_feature[RING_F_VMDQ].offset = 0; + + rss = min_t(int, adapter->max_ring_pair_counts, num_online_cpus()); + + rss = min_t(int, rss, + hw->mac.max_msix_vectors - adapter->num_other_vectors); + + adapter->ring_feature[RING_F_RSS].limit = rss; + + /* take a breather then clean up driver data */ + msleep(100); + + dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags); + return 0; +} + +#ifdef CONFIG_PCI_IOV +static bool check_ari_mode(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + + return bus->self && bus->self->ari_enabled; +} +#endif + +static int rnp_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + struct rnp_adapter *adapter = pci_get_drvdata(dev); + struct rnp_hw *hw = &adapter->hw; + int err = 0; + int i; + int pre_existing_vfs = pci_num_vf(dev); + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = rnp_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + goto out; + + /* maybe bug, if add 1 vlan, then open sriov */ + if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) { + if (adapter->vlan_count > hw->max_vfs - 1) { + dev_err(&adapter->pdev->dev, + "vlans is too much, delete less than %d vlans\n", + hw->max_vfs - 1); + + err = -EOPNOTSUPP; + goto err_out; + } + + } else if (adapter->vlan_count > 1) { + dev_err(&adapter->pdev->dev, + "only 1 vlan in sriov mode, delete other vlans\n"); + dev_err(&adapter->pdev->dev, "please delete all vlans first\n"); + + err = -EOPNOTSUPP; + goto err_out; + } + + adapter->vlan_count = 0; + if (err) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be + * 64 we limit the actual number that can be allocated to 63 so + * that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. 
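+ * With ARI enabled the check just below allows at most
+ * 128 / max(sriov_ring_limit, 2) - 1 VFs (63 when each VF owns two
+ * rings); without ARI the cap is hw->max_vfs_noari.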
+ */ + + if (check_ari_mode(dev)) { + int temp = hw->sriov_ring_limit; + + if (temp == 1) + temp = 2; + + + if (num_vfs > (128 / temp - 1)) { + err = -EPERM; + goto err_out; + } + } else { + if (num_vfs > hw->max_vfs_noari) { + err = -EPERM; + goto err_out; + } + } + + adapter->num_vfs = num_vfs; + err = __rnp_enable_sriov(adapter); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + rnp_vf_configuration(dev, (i | 0x10000000)); + /* we should reinit pf first */ + dbg("flags:0x%x\n", adapter->flags); + if (hw->ops.clr_rar_all) + hw->ops.clr_rar_all(hw); + + rnp_sriov_reinit(adapter); + + adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE; + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d num %d\n", err, + num_vfs); + rnp_disable_sriov(adapter); + rnp_sriov_reinit(adapter); + goto err_out; + } + adapter->flags |= RNP_FLAG_VF_INIT_DONE; + +out: + return num_vfs; + +err_out: + return err; +#endif + return 0; +} + +static int rnp_pci_sriov_disable(struct pci_dev *dev) +{ + struct rnp_adapter *adapter = pci_get_drvdata(dev); + int err; + u32 current_flags = adapter->flags; + + err = rnp_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ + if (!err && current_flags != adapter->flags) { + /* rnp_disable_sriov() doesn't clear VMDQ flag */ + adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED; +#ifdef CONFIG_PCI_IOV + rnp_sriov_reinit(adapter); +#endif + } + + return err; +} + +static int rnp_set_vf_multicasts(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + int entries = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT; + u16 *hash_list = (u16 *)&msgbuf[1]; + struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + struct rnp_hw *hw = &adapter->hw; + int i; + + /* only so many hash values supported */ + entries = min(entries, RNP_MAX_VF_MC_ENTRIES); + + /* + * salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + + /* + * VFs are limited to using the MTA hash table for their multicast + * addresses + */ + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + /* fixed mode */ + hw->ops.set_sriov_vf_mc(hw, vfinfo->vf_mc_hashes[i]); + } + + return 0; +} + +void rnp_restore_vf_macs(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + int vf; + u8 *mac_addr; + int rar_entry; + int fix_vf_num = 0; + + for (vf = 0; vf < adapter->num_vfs; vf++) { + mac_addr = adapter->vfinfo[vf].vf_mac_addresses; + rar_entry = hw->mac.num_rar_entries - (vf + 1); + /* setup to the hw */ + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, + fix_vf_num, true); + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1, + true); + else + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf, + true); + } + } +} + +void rnp_restore_vf_macvlans(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + struct list_head *pos; + struct vf_macvlans *entry; + int fix_vf_num = 0; + + hw_dbg(hw, "%s Staring..\n", __func__); + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (!entry->free) { + hw_dbg(hw, " vf:%d 
MACVLAN: RAR[%d] <= %pM\n", + entry->vf, entry->rar_entry, entry->vf_macvlan); + + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (entry->vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (entry->vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + fix_vf_num, true); + + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, + entry->rar_entry, + entry->vf + 1, true); + } else { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, + entry->rar_entry, + entry->vf, true); + } + } + } + } + hw_dbg(hw, "%s Done\n", __func__); +} + +void rnp_restore_vf_multicasts(struct rnp_adapter *adapter) +{ + /* Restore any VF macvlans */ + rnp_restore_vf_macvlans(adapter); +} + +static int rnp_set_vf_vlan(struct rnp_adapter *adapter, int add, int vid, + u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int true_handle = 1; + int i; + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + /* should check other vf */ + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + /* if other vf use this vlan, don't true remove */ + if (!add) { + /* check equal pf_vlan */ + if (vid == adapter->vf_vlan) + true_handle = 0; + if (!test_and_set_bit(__RNP_USE_VFINFI, + &adapter->state)) { + for (i = 0; i < adapter->num_vfs; i++) { + /* check if other vf_vlan still valid */ + if ((i != vf) && + (vid == adapter->vfinfo[i].vf_vlan)) + true_handle = 0; + /* check if other pf_vlan still valid */ + if ((i != vf) && + (vid == adapter->vfinfo[i].pf_vlan)) + true_handle = 0; + } + clear_bit(__RNP_USE_VFINFI, &adapter->state); + } + } + } + if (true_handle) + hw->ops.set_vf_vlan_filter(hw, vid, vf, (bool)add, false); + + return 0; +} + +static s32 rnp_set_vf_lpe(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + return 0; +} + +static inline void rnp_vf_reset_event(struct rnp_adapter *adapter, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + int i; + + /* reset multicast table array for vf */ + adapter->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + rnp_set_rx_mode(adapter->netdev); + + /* clear this rar_entry */ + hw->ops.clr_rar(hw, rar_entry); + + /* reset VF api back to unknown */ + adapter->vfinfo[vf].vf_api = 0; + for (i = 0; i < RNP_MAX_VF_MC_ENTRIES; i++) + adapter->vfinfo[vf].vf_mc_hashes[i] = 0; + adapter->vfinfo[vf].vf_vlan = 0; + adapter->vfinfo[vf].vlan_count = 0; +} + +static int rnp_set_vf_mac(struct rnp_adapter *adapter, int vf, + unsigned char *mac_addr) +{ + struct rnp_hw *hw = &adapter->hw; + int fix_vf_num = 0; + /* this rar_entry may be cofict with mac vlan with pf */ + int rar_entry = hw->mac.num_rar_entries - (vf + 1); + + memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); + + /* setup to the hw */ + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, + fix_vf_num, true); + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1, true); + else + hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf, true); + } + + return 0; +} + +static int rnp_set_vf_macvlan(struct rnp_adapter *adapter, int vf, int index, + unsigned char *mac_addr) +{ + 
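+	/* Release any RAR entries this VF already owns in the PF-managed
+	 * macvlan list; for a non-zero index, then claim a free entry and
+	 * program the new filter into the hardware.
+	 */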
struct rnp_hw *hw = &adapter->hw; + struct list_head *pos; + struct vf_macvlans *entry; + int fix_vf_num = 0; + /* index = 0 , only earase */ + /* index = 1 , earase and then set */ + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + entry->is_macvlan = false; + hw->ops.clr_rar(hw, entry->rar_entry); + } + } + } + + /* + * If index was zero then we were asked to clear the uc list + * for the VF. We're done. + */ + if (!index) + return 0; + + entry = NULL; + + list_for_each(pos, &adapter->vf_mvs.l) { + entry = list_entry(pos, struct vf_macvlans, l); + if (entry->free) + break; + } + + /* + * If we traversed the entire list and didn't find a free entry + * then we're out of space on the RAR table. Also entry may + * be NULL because the original memory allocation for the list + * failed, which is not fatal but does mean we can't support + * VF requests for MACVLAN because we couldn't allocate + * memory for the list management required. + */ + if (!entry || !entry->free) + return -ENOSPC; + + entry->free = false; + entry->is_macvlan = true; + entry->vf = vf; + memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); + if (hw->sriov_ring_limit > 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (entry->vf + 1) * hw->sriov_ring_limit / 2; + } else { + fix_vf_num = (entry->vf) * hw->sriov_ring_limit / 2; + } + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + fix_vf_num, true); + + } else { + + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + entry->vf + 1, true); + } else { + hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry, + entry->vf, true); + } + } + + return 0; +} + +int rnp_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) +{ + unsigned char vf_mac_addr[6]; + struct rnp_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); + + bool enable = ((event_mask & 0x10000000U) != 0); + + if (enable) { + eth_zero_addr(vf_mac_addr); + memcpy(vf_mac_addr, adapter->hw.mac.perm_addr, 6); + vf_mac_addr[5] = vf_mac_addr[5] + (0x80 | vfn); + vf_mac_addr[4] = vf_mac_addr[4] + (pdev->devfn); + + memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); + } + + return 0; +} + +static int rnp_vf_reset_msg(struct rnp_adapter *adapter, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 msgbuf[RNP_VF_PERMADDR_MSG_LEN]; + u8 *addr = (u8 *)(&msgbuf[1]); + + /* reset the filters for the device */ + rnp_vf_reset_event(adapter, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + rnp_set_vf_mac(adapter, vf, vf_mac); + + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + + /* Enable counting of spoofed packets in the SSVPC register */ + /* reply to reset with ack and vf mac address */ + msgbuf[0] = RNP_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= RNP_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= RNP_VT_MSGTYPE_NACK; + dev_warn( + &adapter->pdev->dev, + "VF %d has no MAC address assigned, you may have to assign " + "one manually\n", + vf); + } + + /* + * Piggyback the multicast filter type so VF can compute the + * correct vectors + */ + msgbuf[RNP_VF_MC_TYPE_WORD] = 0; + /* setup link status , pause mode, ft padding mode */ + /* pause mode */ + 
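+	/* RNP_VF_MC_TYPE_WORD layout: bits 23:16 carry fc.current_mode,
+	 * bit 8 the FT-padding flag and bits 1:0 the multicast filter
+	 * type read back from RNP_ETH_DMAC_MCSTCTRL.
+	 */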
msgbuf[RNP_VF_MC_TYPE_WORD] |= (0xff & hw->fc.current_mode) << 16; + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) + msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x01 << 8); + else + msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x00 << 8); + /* mc_type */ + msgbuf[RNP_VF_MC_TYPE_WORD] |= rd32(hw, RNP_ETH_DMAC_MCSTCTRL) & 0x03; + msgbuf[RNP_VF_DMA_VERSION_WORD] = rd32(hw, RNP_DMA_VERSION); + msgbuf[RNP_VF_VLAN_WORD] = adapter->vfinfo[vf].pf_vlan; + /* fixme tx fetch to be added here */ + msgbuf[RNP_VF_PHY_TYPE_WORD] = (hw->mac_type << 16) | hw->phy_type; + msgbuf[RNP_VF_FW_VERSION_WORD] = (hw->fw_version); + if (adapter->vfinfo[vf].link_state == rnp_link_state_auto) { + msgbuf[RNP_VF_LINK_STATUS_WORD] = + (adapter->link_up ? RNP_PF_LINK_UP : 0) | + adapter->link_speed; + } else if (adapter->vfinfo[vf].link_state == rnp_link_state_on) { + msgbuf[RNP_VF_LINK_STATUS_WORD] = RNP_PF_LINK_UP | + adapter->link_speed; + } else { + msgbuf[RNP_VF_LINK_STATUS_WORD] = 0; + } + + msgbuf[RNP_VF_AXI_MHZ] = hw->usecstocount; + /* we start from 0 */ + msgbuf[RNP_VF_FEATURE] = 0; + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + msgbuf[RNP_VF_FEATURE] |= PF_FEATRURE_VLAN_FILTER; + if (hw->ncsi_en) + msgbuf[RNP_VF_FEATURE] |= PF_NCSI_EN; + + /* now vf maybe has no irq handler if it is the first reset*/ + rnp_write_mbx(hw, msgbuf, RNP_VF_PERMADDR_MSG_LEN, vf); + + return 0; +} + +static int rnp_get_vf_mac_addr(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u8 *mac = ((u8 *)(&msgbuf[1])); + + memcpy(mac, adapter->vfinfo[vf].vf_mac_addresses, 6); + + return 0; +} + +/* vf call setup a new mac */ +static int rnp_set_vf_mac_addr(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + + if (adapter->vfinfo[vf].pf_set_mac && + memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, ETH_ALEN)) { + e_warn(drv, + "VF %d attempted to override administratively set MAC address\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + rnp_set_vf_mac(adapter, vf, new_mac); + + return 0; +} + +static int rnp_set_vf_vlan_msg(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = ((msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT); + int vid = (msgbuf[1] & RNP_VLVF_VLANID_MASK); + int err; + + if (adapter->vfinfo[vf].pf_vlan) { + e_warn(drv, + "VF %d attempted to override administratively set VLAN " + "configuration\n" + "Reload the VF driver to resume operations\n", + vf); + return -1; + } + /* only allow 1 vlan for each vf */ + if ((add) && (adapter->vfinfo[vf].vlan_count)) { + e_warn(drv, "VF %d attempted to set more than 1 vlan", vf); + e_warn(drv, " vlan now %d, try to set %d\n", + adapter->vfinfo[vf].vf_vlan, vid); + return -1; + } + + /* vlan 0 has no work todo */ + if (!vid) + return 0; + if (add) { + adapter->vfinfo[vf].vlan_count++; + adapter->vfinfo[vf].vf_vlan = vid; + } else if (adapter->vfinfo[vf].vlan_count) { + adapter->vfinfo[vf].vf_vlan = 0; + adapter->vfinfo[vf].vlan_count--; + } + + err = rnp_set_vf_vlan(adapter, add, vid, vf); + + return err; +} + +static int rnp_set_vf_vlan_strip_msg(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + int vlan_strip_on = !!(msgbuf[1] >> 31); + int queue_cnt = msgbuf[1] & 0xffff; + int err = 0, i; + + vf_dbg("strip_on:%d queeu_cnt:%d, %d %d\n", vlan_strip_on, queue_cnt, + msgbuf[2], msgbuf[3]); + + for (i = 0; i < queue_cnt; i++) { + if 
(vlan_strip_on) + hw->ops.set_vlan_strip(hw, msgbuf[2 + i], true); + else + hw->ops.set_vlan_strip(hw, msgbuf[2 + i], false); + } + + return err; +} + +static int rnp_set_vf_macvlan_msg(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + u8 *new_mac = ((u8 *)(&msgbuf[1])); + int index = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT; + int err; + + if (adapter->vfinfo[vf].pf_set_mac && index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); + return -1; + } + + /* An non-zero index indicates the VF is setting a filter */ + if (index) { + if (!is_valid_ether_addr(new_mac)) { + e_warn(drv, "VF %d attempted to set invalid mac\n", vf); + return -1; + } + } + + err = rnp_set_vf_macvlan(adapter, vf, index, new_mac); + if (err == -ENOSPC) + e_warn(drv, + "VF %d has requested a MACVLAN filter but there is no space for " + "it\n", + vf); + + return err < 0; + + return 0; +} + +static int rnp_negotiate_vf_api(struct rnp_adapter *adapter, u32 *msgbuf, + u32 vf) +{ + adapter->vfinfo[vf].vf_api = 0; + + return 0; +} + +static int rnp_get_vf_reg(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u32 reg = msgbuf[1]; + + msgbuf[1] = rd32(&adapter->hw, reg); + + return 0; +} + +static int rnp_set_vf_mtu(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct net_device *netdev = adapter->netdev; + if (msgbuf[1] > netdev->mtu) { + e_dev_warn( + "vf %d try to change %d mtu to %d (large than pf limit)\n", + vf, netdev->mtu, msgbuf[1]); + return -1; + } else + return 0; +} + + +static int rnp_set_vf_promisc(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int i; + int ret = 0; + struct rnp_hw *hw = &adapter->hw; + + if (msgbuf[1]) { + /* check if other vf in promisc */ + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[vf].promisc_mode) { + printk("vf %d already in promisc\n", vf); + ret = -1; + break; + } + } + /* if no vf in promisc mode */ + adapter->vfinfo[vf].promisc_mode = true; + hw->ops.set_rx_mode(hw, adapter->netdev, true); + hw->ops.set_sriov_status(hw, true); + + } else { + adapter->vfinfo[vf].promisc_mode = false; + hw->ops.set_rx_mode(hw, adapter->netdev, true); + hw->ops.set_sriov_status(hw, true); + } + return ret; +} + +static int rnp_get_vf_mtu(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct net_device *netdev = adapter->netdev; + msgbuf[1] = netdev->mtu; + return 0; +} + +static int rnp_get_vf_fw(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + + msgbuf[1] = hw->fw_version; + + return 0; +} + +static int rnp_get_vf_link(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + if (adapter->vfinfo[vf].link_state == rnp_link_state_auto) { + msgbuf[1] = (adapter->link_up ? 
RNP_PF_LINK_UP : 0) | + adapter->link_speed; + } else if (adapter->vfinfo[vf].link_state == rnp_link_state_on) { + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + + } else { + msgbuf[1] = 0; + } + return 0; +} + +static int rnp_get_vf_dma_frag(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + /* we fixed 1536 bytes */ + msgbuf[1] = 1536; + return 0; +} + +static int rnp_get_vf_queues(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + + msgbuf[RNP_VF_TX_QUEUES] = hw->sriov_ring_limit; + msgbuf[RNP_VF_RX_QUEUES] = hw->sriov_ring_limit; + msgbuf[RNP_VF_TRANS_VLAN] = adapter->vfinfo[vf].pf_vlan; + msgbuf[RNP_VF_DEF_QUEUE] = 0; + if (hw->hw_type == rnp_hw_n400) { + /* n400, we use + * vf0 use ring4 + * vf1 use ring8 + */ + msgbuf[RNP_VF_QUEUE_START] = vf * 4 + 4; + + } else if ((hw->hw_type == rnp_hw_n10) && (hw->sriov_ring_limit == 1)) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + msgbuf[RNP_VF_QUEUE_START] = vf * 2 + 2; + else + msgbuf[RNP_VF_QUEUE_START] = vf * 2; + + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit + + hw->sriov_ring_limit; + else + msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit; + } + msgbuf[RNP_VF_QUEUE_DEPTH] = (adapter->tx_ring_item_count << 16) | + adapter->rx_ring_item_count; + + return 0; +} + +static int rnp_rcv_msg_from_vf(struct rnp_adapter *adapter, u32 vf) +{ + u32 mbx_size = RNP_VFMAILBOX_SIZE; + u32 msgbuf[RNP_VFMAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + s32 retval; + + vf_dbg("msg from vf:%d\n", vf); + + retval = rnp_read_mbx(hw, msgbuf, mbx_size, vf); + if (retval) { + pr_err("Error receiving message from VF\n"); + return retval; + } + vf_dbg("msg[0]=0x%08x\n", msgbuf[0]); + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (RNP_VT_MSGTYPE_ACK | RNP_VT_MSGTYPE_NACK)) + return retval; + + /* flush the ack before we write any messages back */ + /* clear vf_num */ + msgbuf[0] &= (~RNP_VF_MASK); + + /* this is a vf reset irq */ + if ((msgbuf[0] & RNP_MAIL_CMD_MASK) == RNP_VF_RESET) { + vf_dbg("vf %d up\n", vf); + return rnp_vf_reset_msg(adapter, vf); + } + + /* + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. 
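	 * Requests that arrive before then are simply answered with
	 * RNP_VT_MSGTYPE_NACK.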
+ */ + if (!adapter->vfinfo[vf].clear_to_send) { + vf_dbg("wait vf clear to send\n"); + msgbuf[0] |= RNP_VT_MSGTYPE_NACK; + rnp_write_mbx(hw, msgbuf, 1, vf); + return retval; + } + + switch ((msgbuf[0] & RNP_MAIL_CMD_MASK)) { + case RNP_VF_SET_MAC_ADDR: + retval = rnp_set_vf_mac_addr(adapter, msgbuf, vf); + break; + case RNP_VF_SET_MULTICAST: + retval = rnp_set_vf_multicasts(adapter, msgbuf, vf); + break; + case RNP_VF_SET_VLAN: + retval = rnp_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case RNP_VF_SET_VLAN_STRIP: + retval = rnp_set_vf_vlan_strip_msg(adapter, msgbuf, vf); + break; + case RNP_VF_SET_LPE: + retval = rnp_set_vf_lpe(adapter, msgbuf, vf); + break; + case RNP_VF_GET_MACADDR: + retval = rnp_get_vf_mac_addr(adapter, msgbuf, vf); + break; + case RNP_VF_SET_MACVLAN: + retval = rnp_set_vf_macvlan_msg(adapter, msgbuf, vf); + break; + case RNP_VF_API_NEGOTIATE: + retval = rnp_negotiate_vf_api(adapter, msgbuf, vf); + break; + case RNP_VF_GET_QUEUES: + retval = rnp_get_vf_queues(adapter, msgbuf, vf); + break; + case RNP_VF_REG_RD: + retval = rnp_get_vf_reg(adapter, msgbuf, vf); + break; + case RNP_VF_GET_MTU: + retval = rnp_get_vf_mtu(adapter, msgbuf, vf); + break; + case RNP_VF_SET_MTU: + retval = rnp_set_vf_mtu(adapter, msgbuf, vf); + break; + case RNP_VF_GET_FW: + retval = rnp_get_vf_fw(adapter, msgbuf, vf); + break; + case RNP_VF_GET_LINK: + retval = rnp_get_vf_link(adapter, msgbuf, vf); + break; + case RNP_PF_REMOVE: + vf_dbg("vf %d removed\n", vf); + adapter->vfinfo[vf].clear_to_send = false; + retval = 1; + break; + case RNP_VF_RESET_PF: + adapter->flags2 |= RNP_FLAG2_RESET_PF; + retval = 1; + break; + case RNP_VF_GET_DMA_FRAG: + retval = rnp_get_vf_dma_frag(adapter, msgbuf, vf); + + break; + case RNP_VF_SET_PROMISCE: + retval = rnp_set_vf_promisc(adapter, msgbuf, vf); + break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = RNP_ERR_MBX; + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= RNP_VT_MSGTYPE_NACK; + else + msgbuf[0] |= RNP_VT_MSGTYPE_ACK; + + /* write vf_num */ + msgbuf[0] |= (vf << 21); + + msgbuf[0] |= RNP_VT_MSGTYPE_CTS; + + if ((msgbuf[0] & RNP_MAIL_CMD_MASK) != RNP_PF_REMOVE) + rnp_write_mbx(hw, msgbuf, mbx_size, vf); + + return retval; +} + +static void rnp_rcv_ack_from_vf(struct rnp_adapter *adapter, u32 vf) +{ + struct rnp_hw *hw = &adapter->hw; + u32 msg = RNP_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!adapter->vfinfo[vf].clear_to_send) + rnp_write_mbx(hw, &msg, 1, vf); +} + +void rnp_msg_task(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + u32 vf; + + rnp_fw_msg_handler(adapter); + + if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE)) + return; + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + + /* check flag */ + if (test_and_set_bit(__VF_MBX_USED, + &adapter->vfinfo[vf].status)) { + adapter->miss_time++; + e_info(drv, "we missed some irqs %d\n", vf); + continue; + } + + /* process any messages pending */ + if (!rnp_check_for_msg(hw, vf)) + rnp_rcv_msg_from_vf(adapter, vf); + + /* process any acks */ + if (!rnp_check_for_ack(hw, vf)) + rnp_rcv_ack_from_vf(adapter, vf); + clear_bit(__VF_MBX_USED, &adapter->vfinfo[vf].status); + } +} + +static int rnp_msg_post_status_signle_link(struct rnp_adapter *adapter, int vf, + int link_state) +{ + u32 msgbuf[RNP_VFMAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + struct rnp_mbx_info *mbx = &hw->mbx; + msgbuf[0] = RNP_PF_SET_LINK | (vf << 
RNP_VNUM_OFFSET); + + switch (link_state) { + case rnp_link_state_on: + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + break; + case rnp_link_state_off: + msgbuf[1] = 0; + break; + case rnp_link_state_auto: + if (adapter->link_up) { + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + } else { + msgbuf[1] = 0; + } + break; + } + return mbx->ops.write(hw, msgbuf, 2, vf); +} + +int rnp_msg_post_status_signle(struct rnp_adapter *adapter, + enum PF_STATUS status, int vf) +{ + u32 msgbuf[RNP_VFMAILBOX_SIZE]; + struct rnp_hw *hw = &adapter->hw; + struct rnp_mbx_info *mbx = &hw->mbx; + switch (status) { + case PF_FCS_STATUS: + msgbuf[0] = RNP_PF_SET_FCS | (vf << RNP_VNUM_OFFSET); + if (adapter->netdev->features & NETIF_F_RXFCS) + msgbuf[1] = 1; + else + msgbuf[1] = 0; + break; + case PF_PAUSE_STATUS: + msgbuf[0] = RNP_PF_SET_PAUSE | (vf << RNP_VNUM_OFFSET); + msgbuf[1] = hw->fc.requested_mode; + break; + case PF_FT_PADDING_STATUS: + msgbuf[0] = RNP_PF_SET_FT_PADDING | (vf << RNP_VNUM_OFFSET); + if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) { + msgbuf[1] = 1; + } else { + msgbuf[1] = 0; + } + + break; + case PF_VLAN_FILTER_STATUS: + msgbuf[0] = RNP_PF_SET_VLAN_FILTER | (vf << RNP_VNUM_OFFSET); + if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + msgbuf[1] = 1; + } else { + msgbuf[1] = 0; + } + + break; + case PF_SET_VLAN_STATUS: + msgbuf[0] = RNP_PF_SET_VLAN | (vf << RNP_VNUM_OFFSET); + + msgbuf[1] = adapter->vfinfo[vf].pf_vlan; + break; + case PF_SET_LINK_STATUS: + if (adapter->vfinfo[vf].link_state != rnp_link_state_auto) + return 0; + /* only update link state if in auto mode */ + msgbuf[0] = RNP_PF_SET_LINK | (vf << RNP_VNUM_OFFSET); + if (adapter->link_up) { + msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed; + } else { + msgbuf[1] = 0; + } + break; + case PF_SET_MTU: + msgbuf[0] = RNP_PF_SET_MTU | (vf << RNP_VNUM_OFFSET); + msgbuf[1] = adapter->netdev->mtu; + break; + case PF_SET_RESET: + msgbuf[0] = RNP_PF_SET_RESET | (vf << RNP_VNUM_OFFSET); + msgbuf[1] = 0; + + break; + } + + return mbx->ops.write(hw, msgbuf, 2, vf); +} + +/* try to send mailbox to all active vf */ +int rnp_msg_post_status(struct rnp_adapter *adapter, enum PF_STATUS status) +{ + u32 vf; + int err = 0; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + /* broadcast */ + for (vf = 0; vf < adapter->num_vfs; vf++) { + if (adapter->vfinfo[vf].clear_to_send) { + if (!test_bit(__RNP_IN_IRQ, &adapter->state)) { + if (test_and_set_bit(__VF_MBX_USED, + &adapter->vfinfo[vf].status)) { + adapter->miss_time++; + printk("send \n"); + return -1; + } + err |= rnp_msg_post_status_signle( + adapter, status, vf); + // clear flags + clear_bit(__VF_MBX_USED, + &adapter->vfinfo[vf].status); + } + } + } + } + return err; +} + +void rnp_disable_tx_rx(struct rnp_adapter *adapter) +{ +} + +void rnp_ping_all_vfs(struct rnp_adapter *adapter) +{ + struct rnp_hw *hw = &adapter->hw; + u32 ping; + int i; + + for (i = 0; i < adapter->num_vfs; i++) { + ping = RNP_PF_CONTROL_PRING_MSG; + /* only send to active vf */ + ping |= RNP_VT_MSGTYPE_CTS; + rnp_write_mbx(hw, &ping, 1, i); + } +} + +int rnp_get_vf_ringnum(struct rnp_hw *hw, int vf, int num) +{ + int fix_vf_num; + + if (hw->sriov_ring_limit >= 2) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * hw->sriov_ring_limit + num; + } else { + fix_vf_num = (vf) * hw->sriov_ring_limit + num; + } + } else { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { + fix_vf_num = (vf + 1) * 2 + num; + } else { + fix_vf_num = (vf) * 2 + num; + } + + + } + + 
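+	/* The resulting ring index is vf * stride + num, with a stride of
+	 * sriov_ring_limit (at least two rings per VF) and a shift of one
+	 * VF slot when RNP_NET_FEATURE_VF_FIXED is set, consistent with the
+	 * VMDq offset chosen in __rnp_enable_sriov().
+	 */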
return fix_vf_num; +} + +int rnp_setup_ring_maxrate(struct rnp_adapter *adapter, int ring, u64 max_rate) +{ + struct rnp_hw *hw = &adapter->hw; + struct rnp_dma_info *dma = &hw->dma; + int samples_1sec = adapter->hw.usecstocount * 1000000; + + dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TM, + samples_1sec); + dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TH, + max_rate); + return 0; +} + +static int rnp_disable_port_vlan(struct rnp_adapter *adapter, int vf) +{ + struct rnp_hw *hw = &adapter->hw; + int err; + + err = rnp_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan, vf); + + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) { + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[vf].pf_vlan, vf + 1, + false); + else + hw->ops.set_vf_vlan_mode( + hw, adapter->vfinfo[vf].pf_vlan, vf, + false); + } + } + adapter->vfinfo[vf].pf_vlan = 0; + adapter->vfinfo[vf].pf_qos = 0; + /* clear veb */ + hw->ops.set_vf_vlan_filter(hw, 0, vf, false, true); + + return err; +} + +static int rnp_enable_port_vlan(struct rnp_adapter *adapter, int vf, u16 vlan, + u8 qos) +{ + struct rnp_hw *hw = &adapter->hw; + int err; + + err = rnp_set_vf_vlan(adapter, true, vlan, vf); + if (err) + goto out; + + adapter->vfinfo[vf].pf_vlan = vlan; + adapter->vfinfo[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__RNP_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + hw->ops.set_vf_vlan_filter(hw, vlan, vf, true, true); + + /* if in sriov vlan mode should setup pfvlvf table */ + if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) { + if (hw->ops.set_vf_vlan_mode) { + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + hw->ops.set_vf_vlan_mode(hw, vlan, vf + 1, + true); + else + hw->ops.set_vf_vlan_mode(hw, vlan, vf, true); + } + } +out: + return err; +} + +int rnp_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + int err = 0; + struct rnp_adapter *adapter = netdev_priv(netdev); + + /* VLAN IDs accepted range 0-4094 */ + if (vf < 0 || vf >= adapter->num_vfs || vlan > VLAN_VID_MASK - 1 || + qos > 7) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + if (vlan || qos) { + /* + * Check if there is already a port VLAN set, if so + * we have to delete the old one first before we + * can set the new one. The usage model had + * previously assumed the user would delete the + * old port VLAN before setting a new one but this + * is not necessarily the case. 
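	 * A VLAN configured by the VF itself blocks the change as well;
	 * the VF has to remove its own VLAN before the PF can set a
	 * port VLAN here.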
+ */ + if (adapter->vfinfo[vf].vf_vlan) { + dev_err(&adapter->pdev->dev, + "vf set vlan before, delete it before add new\n"); + err = -EINVAL; + goto out; + } + if (adapter->vfinfo[vf].pf_vlan) + err = rnp_disable_port_vlan(adapter, vf); + if (err) + goto out; + err = rnp_enable_port_vlan(adapter, vf, vlan, qos); + + } else { + /* if only vf set vlan */ + if ((adapter->vfinfo[vf].pf_vlan == 0) && + (adapter->vfinfo[vf].vf_vlan)) { + dev_err(&adapter->pdev->dev, + "pf cannot delete vm vlan(ip link add)\n"); + err = -EINVAL; + } + /* if not set vlan before, nothing todo */ + if (adapter->vfinfo[vf].pf_vlan == 0) + return 0; + + err = rnp_disable_port_vlan(adapter, vf); + } + /* send mbx to vf */ + rnp_msg_post_status_signle(adapter, PF_SET_VLAN_STATUS, vf); +out: + return err; +} + +#ifdef CONFIG_PCI_IOV +int rnp_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (vf < 0 || vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + + return 0; +} +#else +inline int rnp_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + return 0; +} +#endif /* CONFIG_PCI_IOV */ + +int rnp_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (vf < 0 || vf >= adapter->num_vfs) + return -EINVAL; + + /* nothing to do */ + if (adapter->vfinfo[vf].trusted == setting) + return 0; + + adapter->vfinfo[vf].trusted = setting; + + /* reset VF to reconfigure features */ + e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); + + return 0; +} + +int rnp_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (vf < 0 || vf >= adapter->num_vfs) { + dev_err(&adapter->pdev->dev, + "NDO set VF link - invalid VF identifier %d\n", vf); + ret = -EINVAL; + goto out; + } + + switch (state) { + case IFLA_VF_LINK_STATE_ENABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state %d \n", vf, state); + adapter->vfinfo[vf].link_state = rnp_link_state_on; + rnp_msg_post_status_signle_link(adapter, vf, rnp_link_state_on); + break; + case IFLA_VF_LINK_STATE_DISABLE: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state disable\n", vf); + adapter->vfinfo[vf].link_state = rnp_link_state_off; + rnp_msg_post_status_signle_link(adapter, vf, + rnp_link_state_off); + break; + case IFLA_VF_LINK_STATE_AUTO: + dev_info(&adapter->pdev->dev, + "NDO set VF %d link state auto\n", vf); + adapter->vfinfo[vf].link_state = rnp_link_state_auto; + rnp_msg_post_status_signle_link(adapter, vf, + rnp_link_state_auto); + break; + default: + dev_info(&adapter->pdev->dev, + "NDO set VF %d - invalid link state %d\n", vf, state); + ret = -EINVAL; + } +out: + return ret; +} + +int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf, + int __always_unused min_tx_rate, int max_tx_rate) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + /* limit vf ring rate */ + int ring_max_rate; + int vf_ring; + int link_speed = 0; + u64 real_rate = 0; + int i; + + if (vf >= hw->max_vfs - 1) + return -EINVAL; + + switch (adapter->link_speed) { + case RNP_LINK_SPEED_40GB_FULL: + link_speed = 40000; + break; + case RNP_LINK_SPEED_25GB_FULL: + link_speed = 25000; + break; + case RNP_LINK_SPEED_10GB_FULL: + link_speed = 10000; + break; + case RNP_LINK_SPEED_1GB_FULL: + link_speed = 1000; + break; + case 
RNP_LINK_SPEED_100_FULL: + link_speed = 100; + break; + } + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) + return -EINVAL; + + adapter->vfinfo[vf].tx_rate = max_tx_rate; + + ring_max_rate = max_tx_rate / hw->sriov_ring_limit; + + if (max_tx_rate && (ring_max_rate == 0)) + return -EINVAL; + + real_rate = (ring_max_rate * 1024 * 128); + + for (i = 0; i < hw->sriov_ring_limit; i++) { + vf_ring = rnp_get_vf_ringnum(hw, vf, i); + rnp_setup_ring_maxrate(adapter, vf_ring, real_rate); + } + return 0; +} + +int rnp_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) + return -EINVAL; + adapter->vfinfo[vf].pf_set_mac = true; + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); + dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" + " change effective."); + if (test_bit(__RNP_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF MAC address has been set," + " but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before" + " attempting to use the VF device.\n"); + } + rnp_set_vf_mac(adapter, vf, mac); + rnp_msg_post_status_signle(adapter, PF_SET_RESET, vf); + + return 0; +} + +int rnp_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi) +{ + struct rnp_adapter *adapter = netdev_priv(netdev); + + if (vf >= adapter->num_vfs) + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); + ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; + ivi->min_tx_rate = 0; + + if (adapter->vfinfo[vf].pf_vlan) + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + else + ivi->vlan = adapter->vfinfo[vf].vf_vlan; + + ivi->qos = adapter->vfinfo[vf].pf_qos; + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; + switch (adapter->vfinfo[vf].link_state) { + case rnp_link_state_on: + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + break; + case rnp_link_state_off: + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + break; + case rnp_link_state_auto: + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + break; + default: + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + } + ivi->trusted = adapter->vfinfo[vf].trusted; + + return 0; +} + +int rnp_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + vf_dbg("\n\n !!!! %s:%d num_vfs:%d\n", __func__, __LINE__, num_vfs); + if (num_vfs == 0) + return rnp_pci_sriov_disable(dev); + else + return rnp_pci_sriov_enable(dev, num_vfs); +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sriov.h b/drivers/net/ethernet/mucse/rnp/rnp_sriov.h new file mode 100644 index 0000000000000..37f7adf7d1bd0 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_sriov.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
 */ + +#ifndef _RNP_SRIOV_H_ +#define _RNP_SRIOV_H_ + +void rnp_restore_vf_multicasts(struct rnp_adapter *adapter); +void rnp_restore_vf_macvlans(struct rnp_adapter *adapter); + +void rnp_restore_vf_macs(struct rnp_adapter *adapter); +void rnp_msg_task(struct rnp_adapter *adapter); +int rnp_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); +void rnp_disable_tx_rx(struct rnp_adapter *adapter); +void rnp_ping_all_vfs(struct rnp_adapter *adapter); +int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf, + int __always_unused min_tx_rate, int max_tx_rate); +int rnp_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac); +int rnp_msg_post_status(struct rnp_adapter *adapter, enum PF_STATUS status); + +int rnp_setup_ring_maxrate(struct rnp_adapter *adapter, int ring, u64 max_rate); +int rnp_get_vf_ringnum(struct rnp_hw *hw, int vf, int num); +int rnp_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int rnp_ndo_get_vf_config(struct net_device *netdev, int vf, + struct ifla_vf_info *ivi); +void rnp_check_vf_rate_limit(struct rnp_adapter *adapter); +int rnp_disable_sriov(struct rnp_adapter *adapter); +#ifdef CONFIG_PCI_IOV +void rnp_enable_sriov_true(struct rnp_adapter *adapter); +void rnp_enable_sriov(struct rnp_adapter *adapter); +#endif +int rnp_pci_sriov_configure(struct pci_dev *dev, int num_vfs); +int rnp_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto); +int rnp_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state); +int rnp_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); +#endif /* _RNP_SRIOV_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c b/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c new file mode 100644 index 0000000000000..0af15e7d992d0 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c @@ -0,0 +1,2239 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2024 Mucse Corporation. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rnp.h" +#include "rnp_common.h" +#include "rnp_type.h" + +#include "rnp_mbx.h" +#include "rnp_mbx_fw.h" + +#define PHY_EXT_REG_FLAG 0x80000000 + +struct maintain_req { + int magic; +#define MAINTAIN_MAGIC 0xa6a7a8a9 + + int cmd; + int arg0; + int req_data_bytes; + int reply_bytes; + char data[0]; +} __attribute__((packed)); + +struct ucfg_mac_sn { + unsigned char macaddr[64]; + unsigned char sn[32]; + int magic; +#define MAC_SN_MAGIC 0x87654321 + char rev[52]; + unsigned char pn[32]; +} __attribute__((packed, aligned(4))); + +static int print_desc(char *buf, void *data, int len) +{ + u8 *ptr = (u8 *)data; + int ret = 0; + int i = 0; + + for (i = 0; i < len; i++) + ret += sprintf(buf + ret, "%02x ", *(ptr + i)); + + return ret; +} + +#ifdef RNP_HWMON +static ssize_t rnp_hwmon_show_location(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + + return snprintf(buf, PAGE_SIZE, "loc%u\n", rnp_attr->sensor->location); +} + +static ssize_t rnp_hwmon_show_name(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "rnp\n"); +} + +static ssize_t rnp_hwmon_show_temp(struct device __always_unused *dev, + struct device_attribute *attr, char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + unsigned int value; + + /* reset the temp field */ + rnp_attr->hw->ops.get_thermal_sensor_data(rnp_attr->hw); + + value = rnp_attr->sensor->temp; + /* display millidegree */ + value *= 1000; + + return snprintf(buf, PAGE_SIZE, "%u\n", value); +} + +static ssize_t rnp_hwmon_show_cautionthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + unsigned int value = rnp_attr->sensor->caution_thresh; + /* display millidegree */ + value *= 1000; + + return snprintf(buf, PAGE_SIZE, "%u\n", value); +} + +static ssize_t rnp_hwmon_show_maxopthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) +{ + struct hwmon_attr *rnp_attr = + container_of(attr, struct hwmon_attr, dev_attr); + unsigned int value = rnp_attr->sensor->max_op_thresh; + + /* display millidegree */ + value *= 1000; + + return snprintf(buf, PAGE_SIZE, "%u\n", value); +} + +/** + * rnp_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. 
+ * @adapter: pointer to the adapter structure + * @offset: offset in the eeprom sensor data table + * @type: type of sensor data to display + * + * For each file we want in hwmon's sysfs interface we need a + * device_attribute This is included in our hwmon_attr struct that contains + * the references to the data structures we need to get the data to display + */ +static int rnp_add_hwmon_attr(struct rnp_adapter *adapter, unsigned int offset, + int type) +{ + unsigned int n_attr; + struct hwmon_attr *rnp_attr; + + n_attr = adapter->rnp_hwmon_buff->n_hwmon; + rnp_attr = &adapter->rnp_hwmon_buff->hwmon_list[n_attr]; + + switch (type) { + case RNP_HWMON_TYPE_LOC: + rnp_attr->dev_attr.show = rnp_hwmon_show_location; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_label", + offset + 1); + break; + case RNP_HWMON_TYPE_NAME: + rnp_attr->dev_attr.show = rnp_hwmon_show_name; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "name"); + break; + case RNP_HWMON_TYPE_TEMP: + rnp_attr->dev_attr.show = rnp_hwmon_show_temp; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_input", + offset + 1); + break; + case RNP_HWMON_TYPE_CAUTION: + rnp_attr->dev_attr.show = rnp_hwmon_show_cautionthresh; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_max", + offset + 1); + break; + case RNP_HWMON_TYPE_MAX: + rnp_attr->dev_attr.show = rnp_hwmon_show_maxopthresh; + snprintf(rnp_attr->name, sizeof(rnp_attr->name), "temp%u_crit", + offset + 1); + break; + default: + return -EPERM; + } + + /* These always the same regardless of type */ + rnp_attr->sensor = &adapter->hw.thermal_sensor_data.sensor[offset]; + rnp_attr->hw = &adapter->hw; + rnp_attr->dev_attr.store = NULL; + rnp_attr->dev_attr.attr.mode = 0444; + rnp_attr->dev_attr.attr.name = rnp_attr->name; + sysfs_attr_init(&rnp_attr->dev_attr.attr); + adapter->rnp_hwmon_buff->attrs[n_attr] = &rnp_attr->dev_attr.attr; + ++adapter->rnp_hwmon_buff->n_hwmon; + + return 0; +} +#endif /* RNP_HWMON */ + +#define to_net_device(n) container_of(n, struct net_device, dev) +static ssize_t maintain_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, + size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int rbytes = count; + + if (adapter->maintain_buf == NULL) + return 0; + + if (off + count > adapter->maintain_buf_len) + rbytes = adapter->maintain_buf_len - off; + + memcpy(buf, adapter->maintain_buf + off, rbytes); + + if ((off + rbytes) >= adapter->maintain_buf_len) { + kfree(adapter->maintain_buf); + adapter->maintain_buf = NULL; + adapter->maintain_buf_len = 0; + } + + return rbytes; +} + +static ssize_t maintain_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t off, + size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct maintain_req *req; + void *dma_buf = NULL; + dma_addr_t dma_phy; + int bytes; + + if (off == 0) { + if (count < sizeof(*req)) { + return -EINVAL; + } + req = (struct maintain_req *)buf; + if (req->magic != MAINTAIN_MAGIC) { + return -EINVAL; + } + bytes = max_t(int, req->req_data_bytes, req->reply_bytes); + bytes += sizeof(*req); + + /* free no readed buf */ + if (adapter->maintain_buf) { + kfree(adapter->maintain_buf); + adapter->maintain_buf = NULL; + 
adapter->maintain_buf_len = 0; + } + + dma_buf = dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, + GFP_ATOMIC); + if (!dma_buf) { + netdev_err(netdev, "%s: no memory:%d!", __func__, + bytes); + return -ENOMEM; + } + + adapter->maintain_dma_buf = dma_buf; + adapter->maintain_dma_phy = dma_phy; + adapter->maintain_dma_size = bytes; + adapter->maintain_in_bytes = req->req_data_bytes + sizeof(*req); + + memcpy(dma_buf + off, buf, count); + + if (count < adapter->maintain_in_bytes) + return count; + } + + dma_buf = adapter->maintain_dma_buf; + dma_phy = adapter->maintain_dma_phy; + req = (struct maintain_req *)dma_buf; + + memcpy(dma_buf + off, buf, count); + + /* all data got, send req */ + if ((off + count) >= adapter->maintain_in_bytes) { + int reply_bytes = req->reply_bytes; + err = rnp_maintain_req(hw, req->cmd, req->arg0, + req->req_data_bytes, req->reply_bytes, + dma_phy); + if (err != 0) { + goto err_quit; + } + /* copy data for read */ + if (reply_bytes > 0) { + adapter->maintain_buf_len = reply_bytes; + adapter->maintain_buf = + kmalloc(adapter->maintain_buf_len, GFP_KERNEL); + if (!adapter->maintain_buf) { + netdev_err(netdev, + "No Memory for maintain buf:%d\n", + adapter->maintain_buf_len); + err = -ENOMEM; + + goto err_quit; + } + memcpy(adapter->maintain_buf, dma_buf, reply_bytes); + } + + if (dma_buf) { + dma_free_coherent(&hw->pdev->dev, + adapter->maintain_dma_size, dma_buf, + dma_phy); + } + adapter->maintain_dma_buf = NULL; + } + + return count; +err_quit: + if (dma_buf) { + dma_free_coherent(&hw->pdev->dev, adapter->maintain_dma_size, + dma_buf, dma_phy); + adapter->maintain_dma_buf = NULL; + } + return err; +} + +static BIN_ATTR(maintain, (S_IWUSR | S_IRUGO), maintain_read, maintain_write, + 1 * 1024 * 1024); + +static ssize_t show_version_info(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + ret += sprintf(buf + ret, "driver :%s-%x\n", + rnp_driver_version, hw->pcode); + ret += sprintf(buf + ret, "fw :%d.%d.%d.%d 0x%08x\n", ((char *)&(hw->fw_version))[3], + ((char *)&(hw->fw_version))[2], ((char *)&(hw->fw_version))[1], + ((char *)&(hw->fw_version))[0], hw->bd_uid); + + return ret; +} + +static ssize_t show_ring_sriov_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + ret += sprintf(buf + ret, "now sriov ring num is %d\n", hw->sriov_ring_limit); + + return ret; +} + +static ssize_t store_ring_sriov_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 sriov_ring_num; + + if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { + printk("should close sriov first\n"); + return -EINVAL; + } + + if (0 != kstrtou32(buf, 0, &sriov_ring_num)) + return -EINVAL; + /* should check tx_ring_num is valid */ + if ((sriov_ring_num != 0) && (sriov_ring_num <= 32)) { + hw->sriov_ring_limit = sriov_ring_num; + } else { + ret = -EINVAL; + } + + return ret; +} + +static ssize_t show_rx_desc_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = 
netdev_priv(netdev); + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + u32 rx_desc_num = adapter->sysfs_rx_desc_num; + struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + int ret = 0; + union rnp_rx_desc *desc; + + if (test_bit(__RNP_DOWN, &adapter->state)) { + ret += sprintf(buf + ret, "port not up \n"); + return ret; + } + + desc = RNP_RX_DESC(ring, rx_desc_num); + ret += sprintf(buf + ret, "rx ring %d desc %d:\n", rx_ring_num, + rx_desc_num); + ret += print_desc(buf + ret, desc, sizeof(*desc)); + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t store_rx_desc_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + u32 rx_desc_num = adapter->sysfs_rx_desc_num; + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + + if (0 != kstrtou32(buf, 0, &rx_desc_num)) + return -EINVAL; + /* should check tx_ring_num is valid */ + if (rx_desc_num < ring->count) { + adapter->sysfs_rx_desc_num = rx_desc_num; + } else { + ret = -EINVAL; + } + + return ret; +} + +static ssize_t show_tcp_sync_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) + ret += sprintf( + buf + ret, "tcp sync remap on queue %d prio %s\n", + adapter->tcp_sync_queue, + (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO) ? + "NO" : + "OFF"); + else + ret += sprintf(buf + ret, "tcp sync remap off\n"); + + return ret; +} + +static ssize_t store_tcp_sync_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 tcp_sync_queue; + + if (0 != kstrtou32(buf, 0, &tcp_sync_queue)) + return -EINVAL; + + if (tcp_sync_queue < adapter->num_rx_queues) { + adapter->tcp_sync_queue = tcp_sync_queue; + adapter->priv_flags |= RNP_PRIV_FLAG_TCP_SYNC; + + if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO) + hw->ops.set_tcp_sync_remapping( + hw, adapter->tcp_sync_queue, true, true); + else + hw->ops.set_tcp_sync_remapping( + hw, adapter->tcp_sync_queue, true, false); + + } else { + adapter->priv_flags &= ~RNP_PRIV_FLAG_TCP_SYNC; + + hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue, + false, false); + } + + return ret; +} + +static ssize_t show_rx_skip_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN) { + ret += sprintf(buf + ret, "rx skip bytes: %d\n", + 16 * (adapter->priv_skip_count + 1)); + } else { + ret += sprintf(buf + ret, "rx skip off\n"); + } + + return ret; +} + +static ssize_t store_rx_skip_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 rx_skip_count; + + if (0 != kstrtou32(buf, 0, &rx_skip_count)) + return -EINVAL; + + if ((rx_skip_count > 0) && (rx_skip_count < 17)) { + adapter->priv_skip_count = rx_skip_count 
- 1; + adapter->priv_flags |= RNP_PRIV_FLAG_RX_SKIP_EN; + hw->ops.set_rx_skip(hw, adapter->priv_skip_count, true); + + } else { + adapter->priv_flags &= ~RNP_PRIV_FLAG_RX_SKIP_EN; + + hw->ops.set_rx_skip(hw, adapter->priv_skip_count, false); + + return -EINVAL; + } + + return ret; +} + +static ssize_t show_rx_drop_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + ret += sprintf(buf + ret, "rx_drop_status %llx\n", + adapter->rx_drop_status); + + return ret; +} + +static ssize_t store_rx_drop_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u64 rx_drop_status; + + if (0 != kstrtou64(buf, 0, &rx_drop_status)) + return -EINVAL; + + adapter->rx_drop_status = rx_drop_status; + + hw->ops.update_rx_drop(hw); + + return ret; +} + +static ssize_t show_outer_vlan_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN) + ret += sprintf(buf + ret, "double vlan on\n"); + else + ret += sprintf(buf + ret, "double vlan off\n"); + + switch (adapter->outer_vlan_type) { + case outer_vlan_type_88a8: + ret += sprintf(buf + ret, "outer vlan 0x88a8\n"); + + break; + case outer_vlan_type_9100: + ret += sprintf(buf + ret, "outer vlan 0x9100\n"); + + break; + case outer_vlan_type_9200: + ret += sprintf(buf + ret, "outer vlan 0x9200\n"); + + break; + default: + ret += sprintf(buf + ret, "outer vlan error\n"); + break; + } + return ret; +} + +static ssize_t store_outer_vlan_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 outer_vlan_type; + + if (0 != kstrtou32(buf, 0, &outer_vlan_type)) + return -EINVAL; + /* should check tx_ring_num is valid */ + if (outer_vlan_type < outer_vlan_type_max) { + adapter->outer_vlan_type = outer_vlan_type; + } else + ret = -EINVAL; + /* should update to hw */ + if (hw->ops.set_outer_vlan_type) + hw->ops.set_outer_vlan_type(hw, outer_vlan_type); + + return ret; +} + +static ssize_t show_tx_stags_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) + ret += sprintf(buf + ret, "tx stags on\n"); + else + ret += sprintf(buf + ret, "tx stags off\n"); + + ret += sprintf(buf + ret, "vid 0x%x\n", adapter->stags_vid); + + return ret; +} + +static ssize_t store_tx_stags_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + int ret = count; + u16 tx_stags; + + if (0 != kstrtou16(buf, 0, &tx_stags)) + return -EINVAL; + if (tx_stags < VLAN_N_VID) { + adapter->stags_vid = tx_stags; + } else + ret = -EINVAL; + /* should update vlan filter */ + 
eth->ops.set_vfta(eth, adapter->stags_vid, true); + + return ret; +} + +static ssize_t show_tx_desc_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + u32 tx_desc_num = adapter->sysfs_tx_desc_num; + struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + int ret = 0; + struct rnp_tx_desc *desc; + + if (test_bit(__RNP_DOWN, &adapter->state)) { + ret += sprintf(buf + ret, "port not up \n"); + return ret; + } + + desc = RNP_TX_DESC(ring, tx_desc_num); + ret += sprintf(buf + ret, "tx ring %d desc %d:\n", tx_ring_num, + tx_desc_num); + ret += print_desc(buf + ret, desc, sizeof(*desc)); + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t store_tx_desc_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + u32 tx_desc_num = adapter->sysfs_tx_desc_num; + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + + if (0 != kstrtou32(buf, 0, &tx_desc_num)) + return -EINVAL; + if (tx_desc_num < ring->count) + adapter->sysfs_tx_desc_num = tx_desc_num; + else + ret = -EINVAL; + + return ret; +} + +static ssize_t show_para_info(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + struct rnp_eth_info *eth = &hw->eth; + struct rnp_mac_info *mac = &hw->mac; + + ret += sprintf(buf + ret, "nsi_en:%d\n", hw->ncsi_en); + ret += sprintf( + buf + ret, + "eth: \n\tmc_filter_type:%u, mcft_size:%u, vft_size:%u, " + "num_rar_entries:%u,\n" + "\trar_highwater:%u, rx_pb_size:%u, max_tx_queues:%u, " + "max_rx_queues:%u, \n" + "\treg_off:%u, orig_autoc:%u, cached_autoc:%u, orig_autoc2:%u\n", + eth->mc_filter_type, eth->mcft_size, eth->vft_size, + eth->num_rar_entries, eth->rar_highwater, eth->rx_pb_size, + eth->max_tx_queues, eth->max_rx_queues, eth->reg_off, + eth->orig_autoc, eth->cached_autoc, eth->orig_autoc2); + + ret += sprintf( + buf + ret, + "mac:\n\t" + "mc_filter_type:%u mcft_size:%u vft_size:%u num_rar_entries:%u \n" + "\trar_highwater:%u rx_pb_size:%u max_tx_queues:%u max_rx_queues:%u \n" + "\treg_off:%u orig_autoc:%u cached_autoc:%u orig_autoc2:%u " + "orig_link_settings_stored:%u \n" + "\tautotry_restart:%u mac_flags:%u\n", + mac->mc_filter_type, mac->mcft_size, mac->vft_size, + mac->num_rar_entries, mac->rar_highwater, mac->rx_pb_size, + mac->max_tx_queues, mac->max_rx_queues, mac->reg_off, + mac->orig_autoc, mac->cached_autoc, mac->orig_autoc2, + mac->orig_link_settings_stored, mac->autotry_restart, + mac->mac_flags); + + return ret; +} + +static ssize_t show_rx_ring_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + int ret = 0; + union rnp_rx_desc *rx_desc; + + ret += sprintf(buf + ret, "queue %d info:\n", rx_ring_num); + ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use); + ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean); + rx_desc = RNP_RX_DESC(ring, ring->next_to_clean); + 
ret += sprintf(buf + ret, "next_to_clean desc: "); + ret += print_desc(buf + ret, rx_desc, sizeof(*rx_desc)); + ret += sprintf(buf + ret, "\n"); + + return ret; +} + +static ssize_t store_rx_ring_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + + u32 rx_ring_num = adapter->sysfs_rx_ring_num; + + if (0 != kstrtou32(buf, 0, &rx_ring_num)) + return -EINVAL; + if (rx_ring_num < adapter->num_rx_queues) + adapter->sysfs_rx_ring_num = rx_ring_num; + else + ret = -EINVAL; + + return ret; +} + +static ssize_t show_tx_ring_info(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + int ret = 0; + struct rnp_tx_buffer *tx_buffer; + struct rnp_tx_desc *eop_desc; + + ret += sprintf(buf + ret, "queue %d info:\n", tx_ring_num); + ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use); + ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean); + + tx_buffer = &ring->tx_buffer_info[ring->next_to_clean]; + eop_desc = tx_buffer->next_to_watch; + /* if have watch desc */ + if (eop_desc) { + ret += sprintf(buf + ret, "next_to_watch:\n"); + ret += print_desc(buf + ret, eop_desc, sizeof(*eop_desc)); + ret += sprintf(buf + ret, "\n"); + } else { + ret += sprintf(buf + ret, "no next_to_watch data\n"); + } + + return ret; +} + +static ssize_t store_tx_ring_info(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = count; + + u32 tx_ring_num = adapter->sysfs_tx_ring_num; + + if (0 != kstrtou32(buf, 0, &tx_ring_num)) + return -EINVAL; + if (tx_ring_num < adapter->num_tx_queues) + adapter->sysfs_tx_ring_num = tx_ring_num; + else + ret = -EINVAL; + + return ret; +} + +static ssize_t show_tx_counter(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 val = 0; + int i, ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + ret += sprintf(buf + ret, "tx counters\n"); + for (i = 0; i < 4; i++) { + ret += sprintf(buf + ret, "ring%d-tx:\n", i); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_LEN); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "len:", + RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_LEN, + val); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_HEAD); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "head:", + RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_HEAD, + val); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_TAIL); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "tail:", + RNP10_RING_BASE + RING_OFFSET(i) + + RNP_DMA_REG_TX_DESC_BUF_TAIL, + val); + } + + ret += sprintf(buf + ret, "to_1to4_p1:\n"); + + val = rd32(hw, RNP_ETH_1TO4_INST0_IN_PKTS); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "emac_in:", RNP_ETH_1TO4_INST0_IN_PKTS, val); + + val = rd32(hw, RNP_ETH_IN_0_TX_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "emac_send:", RNP_ETH_IN_0_TX_PKT_NUM(0), val); + + ret 
+= sprintf(buf + ret, "to_1to4_p2:\n"); + + val = rd32(hw, RNP_ETH_IN_1_TX_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "sop_pkt:", RNP_ETH_IN_1_TX_PKT_NUM(0), val); + + val = rd32(hw, RNP_ETH_IN_2_TX_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "eop_pkt:", RNP_ETH_IN_2_TX_PKT_NUM(0), val); + + val = rd32(hw, RNP_ETH_IN_3_TX_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "send_terr:", RNP_ETH_IN_3_TX_PKT_NUM(0), val); + + ret += sprintf(buf + ret, "to_tx_trans(phy):\n"); + + val = rd32(hw, RNP_ETH_EMAC_TX_TO_PHY_PKTS(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "in:", RNP_ETH_EMAC_TX_TO_PHY_PKTS(0), val); + + val = rd32(hw, RNP_ETH_TXTRANS_PTP_PKT_NUM(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "out:", RNP_ETH_TXTRANS_PTP_PKT_NUM(0), val); + + ret += sprintf(buf + ret, "mac:\n"); + + val = rd32(hw, 0x60000); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "mac-tx-cfg:", 0x60000, val); + + val = rd32(hw, 0x1081c); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "mac-tx:", 0x1081c, + val); + + val = rd32(hw, 0x1087c); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "underflow_err:", 0x1087c, val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "port0_txtrans_sop:", RNP_ETH_TX_DEBUG(0), val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(4)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "port0_txtrans_eop:", RNP_ETH_TX_DEBUG(4), val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(13)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "tx_empty:", RNP_ETH_TX_DEBUG(13), val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(14)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", + "tx_prog_full:", RNP_ETH_TX_DEBUG(14), val); + + val = rd32(hw, RNP_ETH_TX_DEBUG(15)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", + "tx_full:", RNP_ETH_TX_DEBUG(15), val); + + return ret; +} + +static DEVICE_ATTR(tx_counter, S_IRUGO | S_IWUSR, show_tx_counter, NULL); + +static ssize_t show_rx_counter(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 val = 0, port = 0; + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + ret += sprintf(buf + ret, "rx counters\n"); + for (port = 0; port < 4; port++) { + ret += sprintf(buf + ret, "emac_rx_trans (port:%d):\n", port); + + val = rd32(hw, RNP_XLMAC + 0x900 + port * 0x10000); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "mac-pkts:", RNP_XLMAC + 0x900 + port * 0x10000, + val); + + val = rd32(hw, RNP_RXTRANS_RX_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "pkts:", RNP_RXTRANS_RX_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_DROP_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "drop:", RNP_RXTRANS_DROP_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_WDT_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "wdt_err:", RNP_RXTRANS_WDT_ERR_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_CODE_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "code_err:", RNP_RXTRANS_CODE_ERR_PKTS(port), + val); + + val = rd32(hw, RNP_RXTRANS_CRC_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "crc_err:", RNP_RXTRANS_CRC_ERR_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_SLEN_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "slen_err:", RNP_RXTRANS_SLEN_ERR_PKTS(port), + val); + + val = 
rd32(hw, RNP_RXTRANS_GLEN_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "glen_err:", RNP_RXTRANS_GLEN_ERR_PKTS(port), + val); + + val = rd32(hw, RNP_RXTRANS_IPH_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "iph_err:", RNP_RXTRANS_IPH_ERR_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_CSUM_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "csum_err:", RNP_RXTRANS_CSUM_ERR_PKTS(port), + val); + + val = rd32(hw, RNP_RXTRANS_LEN_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "len_err:", RNP_RXTRANS_LEN_ERR_PKTS(port), val); + + val = rd32(hw, RNP_RXTRANS_CUT_ERR_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "trans_cut_err:", RNP_RXTRANS_CUT_ERR_PKTS(port), + val); + + val = rd32(hw, RNP_RXTRANS_EXCEPT_BYTES(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "expt_byte_err:", RNP_RXTRANS_EXCEPT_BYTES(port), + val); + + val = rd32(hw, RNP_RXTRANS_G1600_BYTES_PKTS(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + ">1600Byte:", RNP_RXTRANS_G1600_BYTES_PKTS(port), + val); + } + + ret += sprintf(buf + ret, "gather:\n"); + val = rd32(hw, RNP_ETH_TOTAL_GAT_RX_PKT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "total_in_pkts:", RNP_ETH_TOTAL_GAT_RX_PKT_NUM, val); + + port = 0; + val = rd32(hw, RNP_ETH_RX_PKT_NUM(port)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "to_nxt_mdodule:", RNP_ETH_RX_PKT_NUM(port), val); + + for (port = 0; port < 4; port++) { + u8 pname[16] = { 0 }; + val = rd32(hw, RNP_ETH_RX_PKT_NUM(port)); + sprintf(pname, "p%d-rx:", port); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", pname, + RNP_ETH_RX_PKT_NUM(port), val); + } + + for (port = 0; port < 4; port++) { + u8 pname[16] = { 0 }; + val = rd32(hw, RNP_ETH_RX_DROP_PKT_NUM(port)); + sprintf(pname, "p%d-drop:", port); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", pname, + RNP_ETH_RX_DROP_PKT_NUM(port), val); + } + + ret += sprintf(buf + ret, "debug:\n"); + val = rd32(hw, RNP_ETH_RX_DEBUG(10)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "data_eop:", RNP_ETH_RX_DEBUG(10), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(11)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "data_descs:", RNP_ETH_RX_DEBUG(11), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(12)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "data_desc_sop:", RNP_ETH_RX_DEBUG(12), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(13)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "data_desc_eop:", RNP_ETH_RX_DEBUG(13), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(14)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "4to1_gather_sop:", RNP_ETH_RX_DEBUG(14), val); + val = rd32(hw, RNP_ETH_RX_DEBUG(15)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "4to1_gather_eop:", RNP_ETH_RX_DEBUG(15), val); + + ret += sprintf(buf + ret, "ip-parse:\n"); + + val = rd32(hw, RNP_ETH_PKT_EGRESS_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "pkg_egree:", RNP_ETH_PKT_EGRESS_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "L3_len_err:", RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "ip_hdr_err:", RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "l3-csum-err:", RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM, val); + + val = rd32(hw, 
RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "l4-csum-err:", RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_SCTP_CHK_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "sctp-err:", RNP_ETH_PKT_SCTP_CHK_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_VLAN_ERR_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "vlan-err:", RNP_ETH_PKT_VLAN_ERR_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_EXCEPT_SHORT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "except_short_num:", RNP_ETH_PKT_EXCEPT_SHORT_NUM, val); + + val = rd32(hw, RNP_ETH_PKT_PTP_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "ptp:", RNP_ETH_PKT_PTP_NUM, val); + + ret += sprintf(buf + ret, "to-indecap:\n"); + + val = rd32(hw, RNP_ETH_DECAP_PKT_IN_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "*in engin*:", RNP_ETH_DECAP_PKT_IN_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "*out engin*:", RNP_ETH_DECAP_PKT_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_DMAC_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "to-dma/host:", RNP_ETH_DECAP_DMAC_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_BMC_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "to-bmc:", RNP_ETH_DECAP_BMC_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_SW_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "to-switch:", RNP_ETH_DECAP_SW_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_MIRROR_OUT_NUM); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "bmc+host:", RNP_ETH_DECAP_MIRROR_OUT_NUM, val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(0x0)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "err_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(0x0), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(1)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "plicy_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(1), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(2)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "dmac_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(2), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(3)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "bmc_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(3), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(4)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "sw_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(4), val); + + val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(5)); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "rm_vlane_num:", RNP_ETH_DECAP_PKT_DROP_NUM(5), val); + + ret += sprintf(buf + ret, "\npolicy-drop-reason:\n"); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(4)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", "host_l2_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(4), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(5)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "redir_input_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(5), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(6)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "redir_etypt_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(6), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(7)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "redir_tcp_sync_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(7), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(8)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "redir_tuple5_match_drop:", + RNP_ETH_BASE + 
RNP10_ETH_RX_DEBUG(8), val); + val = rd32(hw, RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(9)); + ret += sprintf(buf + ret, "\t %30s 0x%08x: %d\n", + "recdir_tcam_match_drop:", + RNP_ETH_BASE + RNP10_ETH_RX_DEBUG(9), val); + + ret += sprintf(buf + ret, "dma-2-host:\n"); + + val = rd32(hw, 0x264); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "fifo equ:", 0x264, + val); + + val = rd32(hw, 0x268); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "fifo deq:", 0x268, + val); + + val = rd32(hw, 0x114); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", + "unexpt_abtring:", 0x114, val); + + val = rd32(hw, 0x288); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "pci2host:", 0x288, + val); + + for (port = 0; port < 4; port++) { + ret += sprintf(buf + ret, "rx-ring%d:\n", port); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_HEAD); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %u\n", "head:", + RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_HEAD, + val); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_TAIL); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %u\n", "tail:", + RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_TAIL, + val); + + val = rd32(hw, RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_LEN); + ret += sprintf(buf + ret, "\t %16s 0x%08x: %u\n", "len:", + RNP10_RING_BASE + RING_OFFSET(port) + + RNP_DMA_REG_RX_DESC_BUF_LEN, + val); + } + + /* maybe too large */ + if (ret >= PAGE_SIZE) + ret = PAGE_SIZE; + + return ret; +} + +static DEVICE_ATTR(rx_counter, S_IRUGO | S_IWUSR, show_rx_counter, NULL); + +static ssize_t show_active_vid(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u16 vid; + u16 current_vid = 0; + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u8 vfnum = hw->max_vfs - 1; + /* use last-vf's table entry. the last one */ + + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { + current_vid = rd32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, + vfnum)); + } + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) { + ret += sprintf(buf + ret, "%u%s ", vid, + (current_vid == vid ? "*" : "")); + } + ret += sprintf(buf + ret, "\n"); + return ret; +} + +static ssize_t store_active_vid(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + u16 vid; + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + u8 vfnum = hw->max_vfs - 1; + int port = 0; + + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) + return -EIO; + + if (0 != kstrtou16(buf, 0, &vid)) + return -EINVAL; + + if ((vid < 4096) && test_bit(vid, adapter->active_vlans)) { + if (rd32(hw, RNP_DMA_VERSION) >= 0x20201231) { + for (port = 0; port < 4; port++) + wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(port, vfnum), + vid); + } else { + wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, vfnum), + vid); + } + err = 0; + } + + return err ? 
err : count; +} + +static inline int pn_sn_dlen(char *v, int v_len) +{ + int i, len = 0; + for (i = 0; i < v_len; i++) { + if (isascii(v[i])) { + len++; + } else { + break; + } + } + return len; +} + +static int rnp_mbx_get_pn_sn(struct rnp_hw *hw, char pn[33], char sn[33]) +{ + struct maintain_req *req; + void *dma_buf = NULL; + dma_addr_t dma_phy; + struct ucfg_mac_sn *cfg; + + int err = 0, bytes = sizeof(*req) + sizeof(struct ucfg_mac_sn); + + memset(pn, 0, 33); + memset(sn, 0, 33); + + dma_buf = + dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_KERNEL); + if (!dma_buf) { + printk("%s: no memory:%d!", __func__, bytes); + return -ENOMEM; + } + + req = (struct maintain_req *)dma_buf; + memset(dma_buf, 0, bytes); + cfg = (struct ucfg_mac_sn *)(req + 1); + req->magic = MAINTAIN_MAGIC; + req->cmd = 0; + req->arg0 = 3; + req->req_data_bytes = 0; + req->reply_bytes = bytes - sizeof(*req); + + err = rnp_maintain_req(hw, req->cmd, req->arg0, req->req_data_bytes, + req->reply_bytes, dma_phy); + if (err != 0) { + goto err_quit; + } + if (cfg->magic == MAC_SN_MAGIC) { + int sz = pn_sn_dlen(cfg->pn, 32); + if (sz) { + memcpy(pn, cfg->pn, sz); + pn[sz] = 0; + } + sz = pn_sn_dlen(cfg->sn, 32); + if (sz) { + memcpy(sn, cfg->sn, sz); + sn[sz] = 0; + } + } + +err_quit: + if (dma_buf) + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + + return 0; +} + +static ssize_t show_own_vpd(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + char pn[33] = { 0 }, sn[33] = { 0 }; + + rnp_mbx_get_pn_sn(hw, pn, sn); + + ret += sprintf( + buf + ret, "Product Name: %s\n", + "Ethernet Controller N10 Series for 10GbE or 40GbE (Dual-port)"); + ret += sprintf(buf + ret, "[PN] Part number: %s\n", pn); + ret += sprintf(buf + ret, "[SN] Serial number: %s\n", sn); + + return ret; +} +static DEVICE_ATTR(own_vpd, S_IRUGO, show_own_vpd, NULL); + +static ssize_t show_port_idx(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + + ret += sprintf(buf, "%d\n", adapter->portid_of_card); + return ret; +} +static DEVICE_ATTR(port_idx, S_IRUGO | S_IRUSR, show_port_idx, NULL); + +static ssize_t show_debug_linkstat(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + ret += sprintf(buf, "%d %d dumy:0x%x up-flag:%d carry:%d\n", + adapter->link_up, adapter->hw.link, rd32(hw, 0xc), + adapter->flags & RNP_FLAG_NEED_LINK_UPDATE, + netif_carrier_ok(netdev)); + return ret; +} + +static DEVICE_ATTR(debug_linkstat, S_IRUGO | S_IRUSR, show_debug_linkstat, + NULL); + +static ssize_t show_sfp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf( + buf, "mod-abs:%d\ntx-fault:%d\ntx-dis:%d\nrx-los:%d\n", + adapter->sfp.mod_abs, adapter->sfp.fault, + adapter->sfp.tx_dis, adapter->sfp.los); + } + + return ret; +} +static DEVICE_ATTR(sfp, S_IRUGO | S_IRUSR, show_sfp, NULL); + +static ssize_t 
store_pci(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int gen = 3, lanes = 8; + + if (count > 30) + return -EINVAL; + + if (sscanf(buf, "gen%dx%d", &gen, &lanes) != 2) { + printk("Error: invalid input. example: gen3x8\n"); + return -EINVAL; + } + if (gen > 3 || lanes > 8) + return -EINVAL; + + err = rnp_set_lane_fun(hw, LANE_FUN_PCI_LANE, gen, lanes, 0, 0); + + return err ? err : count; +} + +static ssize_t show_pci(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "gen%dx%d\n", hw->pci_gen, hw->pci_lanes); + } + + return ret; +} + +static DEVICE_ATTR(pci, S_IRUGO | S_IWUSR | S_IRUSR, show_pci, store_pci); + +static ssize_t store_sfp_tx_disable(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_SFP_TX_DISABLE, !!enable, 0, 0, 0); + + return err ? err : count; +} + +static ssize_t show_sfp_tx_disable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->sfp.tx_dis); + } + + return ret; +} + +static DEVICE_ATTR(sfp_tx_disable, S_IRUGO | S_IWUSR | S_IRUSR, + show_sfp_tx_disable, store_sfp_tx_disable); + +static ssize_t store_link_traing(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_LINK_TRAING, !!enable, 0, 0, 0); + + return err ? err : count; +} + +static ssize_t show_link_traing(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->link_traing); + } + + return ret; +} + +static DEVICE_ATTR(link_traing, S_IRUGO | S_IWUSR | S_IRUSR, show_link_traing, + store_link_traing); + +static ssize_t store_fec(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_FEC, !!enable, 0, 0, 0); + + return err ? 
err : count; +} + +static ssize_t show_fec(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->fec); + } + + return ret; +} + +static DEVICE_ATTR(fec, S_IRUGO | S_IWUSR | S_IRUSR, show_fec, store_fec); + +static ssize_t store_pcs(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 reg_hi = 0, reg_lo = 0, pcs_base_regs = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int input_arg_cnt; + u32 pcs_phy_regs[] = { + 0x00040000, 0x00041000, 0x00042000, 0x00043000, + 0x00040000, 0x00041000, 0x00042000, 0x00043000, + }; + + if (count > 64) { + printk("Error: Input size >100: too large\n"); + return -EINVAL; + } + + input_arg_cnt = sscanf(buf, "%u %x %x", &adapter->sysfs_pcs_lane_num, + &adapter->sysfs_bar4_reg_addr, + &adapter->sysfs_bar4_reg_val); + + if (input_arg_cnt != 2 && input_arg_cnt != 3) { + printk("Error: Invalid Input: read lane x reg 0xXXX or write phy x reg " + "0xXXX val 0xXXX\n"); + return -EINVAL; + } + + if (adapter->sysfs_pcs_lane_num > 8) { + printk("Error: Invalid value. should in 0~7\n"); + return -EINVAL; + } + + switch (input_arg_cnt) { + case 2: + reg_hi = adapter->sysfs_bar4_reg_addr >> 8; + reg_lo = (adapter->sysfs_bar4_reg_addr & 0xff) << 2; + pcs_base_regs = pcs_phy_regs[adapter->sysfs_pcs_lane_num]; + wr32(hw, pcs_base_regs + (0xff << 2), reg_hi); + adapter->sysfs_bar4_reg_val = rd32(hw, pcs_base_regs + reg_lo); + break; + case 3: + reg_hi = adapter->sysfs_bar4_reg_addr >> 8; + reg_lo = (adapter->sysfs_bar4_reg_addr & 0xff) << 2; + pcs_base_regs = pcs_phy_regs[adapter->sysfs_pcs_lane_num]; + wr32(hw, pcs_base_regs + (0xff << 2), reg_hi); + wr32(hw, pcs_base_regs + reg_lo, adapter->sysfs_bar4_reg_val); + break; + default: + printk("Error: Invalid value. 
input_arg_cnt=%d\n", + input_arg_cnt); + break; + } + adapter->sysfs_input_arg_cnt = input_arg_cnt; + + return count; +} + +static ssize_t show_pcs(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + + switch (adapter->sysfs_input_arg_cnt) { + case 2: + ret += sprintf(buf, "lane%u pcs: 0x%x => 0x%x\n", + adapter->sysfs_pcs_lane_num, + adapter->sysfs_bar4_reg_addr, + adapter->sysfs_bar4_reg_val); + break; + case 3: + ret += sprintf(buf, "lane%u pcs: 0x%x <= 0x%x\n", + adapter->sysfs_pcs_lane_num, + adapter->sysfs_bar4_reg_addr, + adapter->sysfs_bar4_reg_val); + break; + default: + break; + } + + return ret; +} + +static DEVICE_ATTR(pcs_reg, S_IRUGO | S_IWUSR | S_IRUSR, show_pcs, store_pcs); + +static ssize_t phy_reg_read(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int val = 0; + int err = -EINVAL; + int phy_reg = adapter->sysfs_phy_reg; + + if (hw) { + if (adapter->sysfs_is_phy_ext_reg) { + err = rnp_mbx_phy_read(hw, phy_reg | PHY_EXT_REG_FLAG, + &val); + } else { + err = rnp_mbx_phy_read(hw, phy_reg, &val); + } + } + + if (err) { + return 0; + } else { + return sprintf(buf, "phy %s 0x%04x : 0x%04x\n", + adapter->sysfs_is_phy_ext_reg ? "ext reg" : + "reg", + phy_reg, val & 0xffff); + } +} + +static ssize_t phy_reg_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int i = 0, argc = 0, err = -EINVAL; + char argv[3][16]; + unsigned long val[3] = { 0 }; + int phy_reg = 0; + + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + memset(argv, 0, sizeof(argv)); + argc = sscanf(buf, "%15s %15s %15s", argv[0], argv[1], argv[2]); + + if (argc < 1) { + return -EINVAL; + } + + adapter->sysfs_is_phy_ext_reg = 0; + + if (strcmp(argv[0], "ext") == 0) { + adapter->sysfs_is_phy_ext_reg = 1; + } else { + if (kstrtoul(argv[0], 0, &val[0])) { + return -EINVAL; + } + } + + for (i = 1; i < argc; i++) { + if (kstrtoul(argv[i], 0, &val[i])) { + return -EINVAL; + } + } + + if (argc == 1) { + if (adapter->sysfs_is_phy_ext_reg) { + return -EINVAL; + } else { + /* set phy reg index */ + phy_reg = val[0]; + err = 0; + } + } + + if (argc == 2) { + if (adapter->sysfs_is_phy_ext_reg) { + /* set ext phy reg index */ + phy_reg = val[1]; + err = 0; + } else { + /* write phy reg */ + phy_reg = val[0]; + err = rnp_mbx_phy_write(hw, phy_reg, val[1]); + } + } + + if (argc == 3) { + if (adapter->sysfs_is_phy_ext_reg) { + /* write ext phy reg */ + phy_reg = val[1]; + err = rnp_mbx_phy_write(hw, phy_reg | PHY_EXT_REG_FLAG, + val[2]); + } else { + return -EINVAL; + } + } + + adapter->sysfs_phy_reg = phy_reg; + + return err ? err : count; +} + +static DEVICE_ATTR(phy_reg, 0664, phy_reg_read, phy_reg_write); + +static ssize_t store_prbs(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long prbs = 0; + + if (kstrtol(buf, 10, &prbs)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_PRBS, prbs, 0, 0, 0); + + return err ? 
err : count; +} + +static DEVICE_ATTR(prbs, S_IRUGO | S_IWUSR | S_IRUSR, NULL, store_prbs); + +static ssize_t store_autoneg(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + long enable = 0; + + if (kstrtol(buf, 10, &enable)) { + return -EINVAL; + } + + err = rnp_set_lane_fun(hw, LANE_FUN_AN, !!enable, 0, 0, 0); + + return err ? err : count; +} + +static ssize_t show_autoneg(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret = 0; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + ret += sprintf(buf, " IO Error\n"); + } else { + ret += sprintf(buf, "%d\n", adapter->an); + } + + return ret; +} + +static DEVICE_ATTR(autoneg, S_IRUGO | S_IWUSR | S_IRUSR, show_autoneg, + store_autoneg); + +static ssize_t store_lane_si(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err = -EINVAL; + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int si_main = -1, si_pre = -1, si_post = -1, si_txboost = -1; + int cnt; + + if (rnp_mbx_get_lane_stat(hw) != 0) { + printk("Error: rnp_mbx_get_lane_stat failed\n"); + return -EIO; + } + if (count > 100) { + printk("Error: Input size >100: too large\n"); + return -EINVAL; + } + + if (hw->supported_link & + (RNP_LINK_SPEED_40GB_FULL | RNP_LINK_SPEED_25GB_FULL)) { + u32 lane0_main, lane0_pre, lane0_post, lane0_boost; + u32 lane1_main, lane1_pre, lane1_post, lane1_boost; + u32 lane2_main, lane2_pre, lane2_post, lane2_boost; + u32 lane3_main, lane3_pre, lane3_post, lane3_boost; + + cnt = sscanf(buf, + "%u %u %u %u,%u %u %u %u,%u %u %u %u,%u %u %u %u", + &lane0_main, &lane0_pre, &lane0_post, &lane0_boost, + &lane1_main, &lane1_pre, &lane1_post, &lane1_boost, + &lane2_main, &lane2_pre, &lane2_post, &lane2_boost, + &lane3_main, &lane3_pre, &lane3_post, + &lane3_boost); + if (cnt != 16) { + printk("Error: Invalid Input.\n" + " ,,,\n" + " laneX_si:
  \n\n"
+			       "   ie: 21 0 11 11,22 0 12 12,23 0 13 13,24 0 14 14 \n");
+
+			return -EINVAL;
+		}
+
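+		/* Pack the per-lane SI values: main/pre/post use 8 bits per
+		 * lane, txboost uses 4 bits per lane.
+		 */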
+		si_main = ((lane0_main & 0xff) << 0) |
+			  ((lane1_main & 0xff) << 8) |
+			  ((lane2_main & 0xff) << 16) |
+			  ((lane3_main & 0xff) << 24);
+		si_pre = ((lane0_pre & 0xff) << 0) | ((lane1_pre & 0xff) << 8) |
+			 ((lane2_pre & 0xff) << 16) |
+			 ((lane3_pre & 0xff) << 24);
+		si_post = ((lane0_post & 0xff) << 0) |
+			  ((lane1_post & 0xff) << 8) |
+			  ((lane2_post & 0xff) << 16) |
+			  ((lane3_post & 0xff) << 24);
+		si_txboost = ((lane0_boost & 0xf) << 0) |
+			     ((lane1_boost & 0xf) << 4) |
+			     ((lane2_boost & 0xf) << 8) |
+			     ((lane3_boost & 0xf) << 12);
+		printk("%s: main:0x%x pre:0x%x post:0x%x boost:0x%x\n",
+		       adapter->name, si_main, si_pre, si_post, si_txboost);
+	} else {
+		cnt = sscanf(buf, "%u %u %u %u", &si_main, &si_pre, &si_post,
+			     &si_txboost);
+		if (cnt != 4) {
+			printk("Error: Invalid Input: <main> <pre> <post> <txboost>\n");
+			return -EINVAL;
+		}
+		if (si_main > 63 || si_pre > 63 || si_post > 63) {
+			printk("Error: Invalid value. should be in 0~63\n");
+			return -EINVAL;
+		}
+		if (si_txboost > 15) {
+			printk("Error: Invalid txboost. should be in 0~15\n");
+			return -EINVAL;
+		}
+	}
+	err = rnp_set_lane_fun(hw, LANE_FUN_SI, si_main, si_pre, si_post,
+			       si_txboost);
+
+	return err ? err : count;
+}
+
+static ssize_t show_lane_si(struct device *dev, struct device_attribute *attr,
+			    char *buf)
+{
+	int ret = 0, i;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	struct rnp_hw *hw = &adapter->hw;
+
+	if (rnp_mbx_get_lane_stat(hw) != 0) {
+		ret += sprintf(buf, " IO Error\n");
+	} else {
+		if (hw->supported_link &
+		    (RNP_LINK_SPEED_40GB_FULL | RNP_LINK_SPEED_25GB_FULL)) {
+			ret += sprintf(
+				buf + ret,
+				"main:0x%08x pre:0x%08x post:0x%08x tx_boost:0x%04x\n\n",
+				adapter->si.main, adapter->si.pre,
+				adapter->si.post, adapter->si.tx_boost);
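+			/* Unpack the per-lane fields from the packed SI words. */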
+			for (i = 0; i < 4; i++) {
+				ret += sprintf(
+					buf + ret,
+					" lane%d main:%u pre:%u post:%u tx_boost:%u\n",
+					i, (adapter->si.main >> (i * 8)) & 0xff,
+					(adapter->si.pre >> (i * 8)) & 0xff,
+					(adapter->si.post >> (i * 8)) & 0xff,
+					(adapter->si.tx_boost >> (i * 4)) &
+						0xf);
+			}
+		} else {
+			ret += sprintf(
+				buf + ret,
+				"lane:%d main:%u pre:%u post:%u tx_boost:%u\n",
+				hw->nr_lane, adapter->si.main, adapter->si.pre,
+				adapter->si.post, adapter->si.tx_boost & 0xf);
+		}
+	}
+
+	return ret;
+}
+
+static DEVICE_ATTR(si, S_IRUGO | S_IWUSR | S_IRUSR, show_lane_si,
+		   store_lane_si);
+
+static ssize_t show_temperature(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	struct rnp_hw *hw = &adapter->hw;
+	int ret = 0, temp = 0, voltage = 0;
+
+	temp = rnp_mbx_get_temp(hw, &voltage);
+
+	ret += sprintf(buf, "temp:%d oC  voltage:%d mV\n", temp, voltage);
+	return ret;
+}
+
+static struct pci_dev *pcie_find_root_port_old(struct pci_dev *dev)
+{
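+	/* Walk up the bus hierarchy until a PCIe root port (or a
+	 * non-PCIe device) is reached.
+	 */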
+	while (1) {
+		if (!pci_is_pcie(dev))
+			break;
+		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+			return dev;
+		if (!dev->bus->self)
+			break;
+		dev = dev->bus->self;
+	}
+	return NULL;
+}
+
+static ssize_t show_root_slot_info(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnp_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+	struct pci_dev *root_pdev = pcie_find_root_port_old(adapter->pdev);
+
+	if (root_pdev) {
+		ret += sprintf(buf + ret, "%02x:%02x.%x\n",
+			       root_pdev->bus->number,
+			       PCI_SLOT(root_pdev->devfn),
+			       PCI_FUNC(root_pdev->devfn));
+	}
+	return ret;
+}
+
+static int do_switch_loopback_set(struct rnp_adapter *adapter, int en,
+				  int sport_lane, int dport_lane)
+{
+	int v;
+	struct rnp_hw *hw = &adapter->hw;
+
+	printk("%s: %s %d -> %d en:%d\n", __func__,
+	       netdev_name(adapter->netdev), sport_lane, dport_lane, en);
+
+	if (en) {
+		adapter->flags |= RNP_FLAG_SWITCH_LOOPBACK_EN;
+	} else {
+		adapter->flags &= ~RNP_FLAG_SWITCH_LOOPBACK_EN;
+	}
+
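+	/* Steer sport_lane's ingress traffic to dport_lane via the
+	 * input policy register (destination lane is encoded at bit 16).
+	 */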
+	wr32(hw, RNP_ETH_INPORT_POLICY_REG(sport_lane),
+	     BIT(29) | (dport_lane << 16));
+
+	v = rd32(hw, RNP_ETH_INPORT_POLICY_VAL);
+	if (en) {
+		v |= BIT(sport_lane);
+	} else {
+		v &= ~BIT(sport_lane);
+	}
+	wr32(hw, RNP_ETH_INPORT_POLICY_VAL, v);
+
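+	/* Match the MAC packet filter to the loopback state: accept all
+	 * unicast and multicast frames while loopback is enabled.
+	 */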
+	v = mac_rd32(&hw->mac, RNP10_MAC_PKT_FLT);
+	if (en) {
+		v |= (RNP_RX_ALL | RNP_RX_ALL_MUL);
+	} else {
+		v &= ~(RNP_RX_ALL | RNP_RX_ALL_MUL);
+	}
+	mac_wr32(&hw->mac, RNP10_MAC_PKT_FLT, v);
+
+	eth_wr32(&hw->eth, RNP10_ETH_DMAC_MCSTCTRL, 0x0);
+
+	return 0;
+}
+
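+/* Pair this port with peer_eth (an interface on the same card) and
+ * enable or disable switch loopback between the two lanes.
+ */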
+static ssize_t _switch_loopback(struct rnp_adapter *adapter,
+				const char *peer_eth, int en)
+{
+	struct net_device *peer_netdev = NULL;
+	struct rnp_adapter *peer_adapter = NULL;
+	char name[100];
+
+	strscpy(name, peer_eth, sizeof(name));
+	strim(name);
+
+	printk("%s: nr_lane:%d peer_lane:%s en:%d\n", __func__, 0, peer_eth,
+	       en);
+
+	peer_netdev = dev_get_by_name(&init_net, name);
+	if (!peer_netdev) {
+		printk("cannot find %s\n", name);
+		return -EINVAL;
+	}
+	peer_adapter = netdev_priv(peer_netdev);
+
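+	/* Loopback peers must be ports of the same card, i.e. share a
+	 * PCI slot.
+	 */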
+	if (PCI_SLOT(peer_adapter->pdev->devfn) !=
+	    PCI_SLOT(adapter->pdev->devfn)) {
+		printk("%s %s not in same slot\n", netdev_name(adapter->netdev),
+		       netdev_name(peer_adapter->netdev));
+		dev_put(peer_netdev);
+		return -EINVAL;
+	}
+
+	printk("%s: %s(%d)<->%s(%d)\n", __func__, netdev_name(adapter->netdev),
+	       0, netdev_name(peer_adapter->netdev), 0);
+
+	do_switch_loopback_set(adapter, en, 0,
+			       rnp_is_pf1(&peer_adapter->hw) ? 4 : 0);
+	do_switch_loopback_set(peer_adapter, en, 0,
+			       rnp_is_pf1(&adapter->hw) ? 4 : 0);
+
+	dev_put(peer_netdev);
+
+	return 0;
+}
+
+static ssize_t store_switch_loopback_on(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct rnp_adapter *adapter = netdev_priv(to_net_device(dev));
+
+	return _switch_loopback(adapter, buf, 1) == 0 ? count : -EINVAL;
+}
+
+static DEVICE_ATTR(switch_loopback_on, 0664, NULL, store_switch_loopback_on);
+
+static ssize_t store_switch_loopback_off(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	struct rnp_adapter *adapter = netdev_priv(to_net_device(dev));
+
+	return _switch_loopback(adapter, buf, 0) == 0 ? count : -EINVAL;
+}
+static DEVICE_ATTR(switch_loopback_off, 0664, NULL, store_switch_loopback_off);
+static DEVICE_ATTR(root_slot_info, 0644, show_root_slot_info, NULL);
+static DEVICE_ATTR(temperature, S_IRUGO | S_IRUSR, show_temperature, NULL);
+static DEVICE_ATTR(active_vid, 0644, show_active_vid, store_active_vid);
+static DEVICE_ATTR(tx_ring_info, 0644, show_tx_ring_info, store_tx_ring_info);
+static DEVICE_ATTR(rx_ring_info, 0644, show_rx_ring_info, store_rx_ring_info);
+static DEVICE_ATTR(para_info, 0644, show_para_info, NULL);
+static DEVICE_ATTR(tx_desc_info, 0644, show_tx_desc_info, store_tx_desc_info);
+static DEVICE_ATTR(rx_desc_info, 0644, show_rx_desc_info, store_rx_desc_info);
+static DEVICE_ATTR(ring_sriov_info, 0644, show_ring_sriov_info,
+		   store_ring_sriov_info);
+static DEVICE_ATTR(rx_drop_info, 0644, show_rx_drop_info, store_rx_drop_info);
+static DEVICE_ATTR(outer_vlan_info, 0644, show_outer_vlan_info,
+		   store_outer_vlan_info);
+static DEVICE_ATTR(tcp_sync_info, 0644, show_tcp_sync_info,
+		   store_tcp_sync_info);
+static DEVICE_ATTR(rx_skip_info, 0644, show_rx_skip_info, store_rx_skip_info);
+static DEVICE_ATTR(tx_stags_info, 0644, show_tx_stags_info,
+		   store_tx_stags_info);
+static DEVICE_ATTR(version_info, 0644, show_version_info, NULL);
+static struct attribute *dev_attrs[] = {
+	&dev_attr_tx_stags_info.attr,
+	&dev_attr_version_info.attr,
+	&dev_attr_root_slot_info.attr,
+	&dev_attr_active_vid.attr,
+	&dev_attr_rx_drop_info.attr,
+	&dev_attr_outer_vlan_info.attr,
+	&dev_attr_tcp_sync_info.attr,
+	&dev_attr_rx_skip_info.attr,
+	&dev_attr_tx_ring_info.attr,
+	&dev_attr_rx_ring_info.attr,
+	&dev_attr_para_info.attr,
+	&dev_attr_tx_desc_info.attr,
+	&dev_attr_rx_desc_info.attr,
+	&dev_attr_ring_sriov_info.attr,
+	&dev_attr_tx_counter.attr,
+	&dev_attr_rx_counter.attr,
+	&dev_attr_own_vpd.attr,
+	&dev_attr_port_idx.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_si.attr,
+	&dev_attr_sfp.attr,
+	&dev_attr_autoneg.attr,
+	&dev_attr_sfp_tx_disable.attr,
+	&dev_attr_fec.attr,
+	&dev_attr_link_traing.attr,
+	&dev_attr_pci.attr,
+	&dev_attr_prbs.attr,
+	&dev_attr_pcs_reg.attr,
+	&dev_attr_phy_reg.attr,
+	&dev_attr_debug_linkstat.attr,
+	&dev_attr_switch_loopback_off.attr,
+	&dev_attr_switch_loopback_on.attr,
+	NULL,
+};
+static struct bin_attribute *dev_bin_attrs[] = {
+	&bin_attr_maintain,
+	NULL,
+};
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+	.bin_attrs = dev_bin_attrs,
+};
+
+static void rnp_sysfs_del_adapter(struct rnp_adapter __maybe_unused *adapter)
+{
+}
+
+/* called from rnp_main.c */
+void rnp_sysfs_exit(struct rnp_adapter *adapter)
+{
+	rnp_sysfs_del_adapter(adapter);
+	sysfs_remove_group(&adapter->netdev->dev.kobj, &dev_attr_grp);
+}
+
+/* called from rnp_main.c */
+int rnp_sysfs_init(struct rnp_adapter *adapter)
+{
+	int rc = 0;
+	int flag;
+#ifdef RNP_HWMON
+	struct hwmon_buff *rnp_hwmon;
+	struct device *hwmon_dev;
+	unsigned int i;
+#endif /* RNP_HWMON */
+
+	flag = sysfs_create_group(&adapter->netdev->dev.kobj, &dev_attr_grp);
+	if (flag != 0) {
+		dev_err(&adapter->netdev->dev,
+			"sysfs_create_group failed:flag:%d\n", flag);
+		return flag;
+	}
+#ifdef RNP_HWMON
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.ops.init_thermal_sensor_thresh == NULL) {
+		goto no_thermal;
+	}
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	if (adapter->hw.ops.init_thermal_sensor_thresh(&adapter->hw))
+		goto no_thermal;
+
+	rnp_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*rnp_hwmon),
+				 GFP_KERNEL);
+
+	if (!rnp_hwmon) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	adapter->rnp_hwmon_buff = rnp_hwmon;
+
+	for (i = 0; i < RNP_MAX_SENSORS; i++) {
+		/*
+		 * Only create hwmon sysfs entries for sensors that
+		 * report meaningful data.
+		 */
+		if (adapter->hw.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* Bail if any hwmon attr struct fails to initialize */
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_CAUTION);
+		if (rc)
+			goto err;
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_LOC);
+		if (rc)
+			goto err;
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_TEMP);
+		if (rc)
+			goto err;
+		rc = rnp_add_hwmon_attr(adapter, i, RNP_HWMON_TYPE_MAX);
+		if (rc)
+			goto err;
+	}
+
+	rnp_hwmon->groups[0] = &rnp_hwmon->group;
+	rnp_hwmon->group.attrs = rnp_hwmon->attrs;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(
+		&adapter->pdev->dev, "rnp", rnp_hwmon, rnp_hwmon->groups);
+
+	if (IS_ERR(hwmon_dev)) {
+		rc = PTR_ERR(hwmon_dev);
+		goto exit;
+	}
+no_thermal:
+#endif /* RNP_HWMON */
+	goto exit;
+
+err:
+	rnp_sysfs_exit(adapter);
+exit:
+	return rc;
+}
diff --git a/drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h b/drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h
new file mode 100644
index 0000000000000..c40c4c0542115
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/rnp_tc_u32_parse.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef __RNP_TC_U32_PARSE_H__
+#define __RNP_TC_U32_PARSE_H__
+#include "rnp.h"
+
+struct rnp_match_parser {
+	int off; /* skb offset from the 12-byte MAC header (ethertype) */
+	/* parse the value/mask into the real filter value */
+	int (*val)(struct rnp_fdir_filter *f, __be32 val, __be32 mask);
+};
+
+static inline void ip_print(u32 ip, bool src_true)
+{
+	printk(KERN_DEBUG "%s_ip is %d.%d.%d.%d\n", src_true ? "src" : "dst",
+	       ip & 0xff, (ip >> 8) & 0xff, (ip >> 16) & 0xff,
+	       (ip >> 24) & 0xff);
+}
+/* IPv4 rule parsers */
+static inline int rnp_fill_ipv4_src_ip(struct rnp_fdir_filter *f, __be32 val,
+				       __be32 mask)
+{
+	memcpy(&f->filter.formatted.src_ip[0], &val, sizeof(u32));
+	memcpy(&f->filter.formatted.src_ip_mask[0], &mask, sizeof(u32));
+
+	f->filter.formatted.flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+	f->filter.layer2_formate.proto = htons(ETH_P_IP);
+
+	ip_print(f->filter.formatted.src_ip[0], true);
+	printk(KERN_DEBUG "ip mask is 0x%.2x\n",
+	       f->filter.formatted.src_ip_mask[0]);
+	return 0;
+}
+
+static inline int rnp_fill_ipv4_dst_ip(struct rnp_fdir_filter *f, __be32 val,
+				       __be32 mask)
+{
+	memcpy(&f->filter.formatted.dst_ip[0], &val, sizeof(u32));
+	memcpy(&f->filter.formatted.dst_ip_mask[0], &mask, sizeof(u32));
+
+	f->filter.formatted.flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+	f->filter.layer2_formate.proto = htons(ETH_P_IP);
+
+	ip_print(f->filter.formatted.dst_ip[0], false);
+	printk(KERN_DEBUG "ip mask is 0x%.2x\n",
+	       f->filter.formatted.dst_ip_mask[0]);
+
+	return 0;
+}
+
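+/*
+ * Offset/handler table for IPv4 u32 matches: each entry maps a match
+ * offset to the callback that fills the fdir filter, and the table is
+ * terminated by an entry with a NULL .val callback.
+ */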
+static const struct rnp_match_parser rnp_ipv4_parser[] = {
+	{ .off = 12, .val = rnp_fill_ipv4_src_ip },
+	{ .off = 16, .val = rnp_fill_ipv4_dst_ip },
+	{ .val = NULL }
+};
+
+#endif
diff --git a/drivers/net/ethernet/mucse/rnp/rnp_type.h b/drivers/net/ethernet/mucse/rnp/rnp_type.h
new file mode 100644
index 0000000000000..db9396be0cb34
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/rnp_type.h
@@ -0,0 +1,1298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNP_TYPE_H_
+#define _RNP_TYPE_H_
+
+#include 
+#include 
+#include 
+
+#if defined(CONFIG_MXGBE_FIX_VF_QUEUE) && !defined(FIX_VF_BUG)
+#define FIX_VF_BUG
+#endif
+#if defined(CONFIG_MXGBE) && !defined(N10)
+#define N10
+#endif
+
+#if defined(CONFIG_MXGBE_FIX_MAC_PADDING) && !defined(FIX_MAC_PADDIN)
+#define FIX_MAC_PADDIN
+#endif
+
+#if defined(CONFIG_MXGBE_OPTM_WITH_LARGE) && !defined(OPTM_WITH_LPAGE)
+#define OPTM_WITH_LPAGE
+#endif
+
+#if defined(CONFIG_MXGBE_MSIX_COUNT)
+#define RNP_N10_MSIX_VECTORS CONFIG_MXGBE_MSIX_COUNT
+#endif
+
+/* On systems such as Kylin OS, OPTM_WITH_LPAGE may be set to reduce memory
+ * cost, but it cannot be used when PAGE_SIZE is smaller than 8192.
+ */
+#if (PAGE_SIZE < 8192)
+#ifdef OPTM_WITH_LPAGE
+#undef OPTM_WITH_LPAGE
+#endif
+#endif
+
+/* not enabled by default */
+/* #define VF_PROMISC_SUPPORT */
+
+#include "rnp_regs.h"
+
+#if IS_ENABLED(CONFIG_SYSFS)
+#ifndef RNP_SYSFS
+#define RNP_SYSFS
+#endif /* RNP_SYSFS */
+#endif /* CONFIG_SYSFS */
+
+#if IS_ENABLED(CONFIG_HWMON)
+#ifndef RNP_HWMON
+#define RNP_HWMON
+#endif /* RNP_HWMON */
+#endif /* CONFIG_HWMON */
+
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_RNP_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
+
+/* Device IDs */
+#define PCI_VENDOR_ID_MUCSE 0x8848
+#define PCI_DEVICE_ID_N10_PF0 0x1000
+#define PCI_DEVICE_ID_N10_PF1 0x1001
+
+#define RNP_DEV_ID_N10_PF0 0x7001
+#define RNP_DEV_ID_N10_PF1 0x7002
+
+#define PCI_DEVICE_ID_N10 0x1000
+#define PCI_DEVICE_ID_N10_TP 0x1004
+#define PCI_DEVICE_ID_N10_X1 0x1002
+#define PCI_DEVICE_ID_N10C 0x1C00
+#define PCI_DEVICE_ID_N400 0x1001 /* N400  2-port */
+#define PCI_DEVICE_ID_N400C 0x1C01 /* N400C 2-port */
+#define PCI_DEVICE_ID_N400_X1 0x1003 /* N400  1-port */
+#define PCI_DEVICE_ID_N400C_X1 0x1C03 /* N400C 1-port */
+/* Wake Up Control */
+#define RNP_WUC_PME_EN 0x00000002 /* PME Enable */
+#define RNP_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define RNP_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion  */
+
+/* Wake Up Filter Control */
+#define RNP_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define RNP_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define RNP_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define RNP_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define RNP_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define RNP_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define RNP_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define RNP_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define RNP_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define RNP_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define RNP_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define RNP_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define RNP_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define RNP_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define RNP_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define RNP_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define RNP_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define RNP_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */
+#define RNP_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */
+#define RNP_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */
+/* Mask for Ext. flex filters */
+#define RNP_WUFC_EXT_FLX_FILTERS 0x00300000
+#define RNP_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
+#define RNP_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
+#define RNP_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */
+#define RNP_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+#define RNP_MAX_SENSORS 1
+struct rnp_thermal_diode_data {
+	u8 location;
+	u8 temp;
+	u8 caution_thresh;
+	u8 max_op_thresh;
+};
+
+struct rnp_thermal_sensor_data {
+	struct rnp_thermal_diode_data sensor[RNP_MAX_SENSORS];
+};
+
+/* Proxy Status */
+#define RNP_PROXYS_EX 0x00000004 /* Exact packet received */
+#define RNP_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */
+#define RNP_PROXYS_NS 0x00000200 /* IPV6 NS received */
+#define RNP_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */
+#define RNP_PROXYS_ARP 0x00000800 /* ARP request packet received */
+#define RNP_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */
+
+/* Proxying Filter Control */
+#define RNP_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */
+#define RNP_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */
+#define RNP_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */
+#define RNP_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define RNP_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */
+#define RNP_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */
+#define RNP_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */
+
+#define RNP_WUPL_LENGTH_MASK 0xFFFF
+
+/* max 4 in n10 */
+#define RNP_MAX_TRAFFIC_CLASS 4
+#define TSRN10_TX_DEFAULT_BURST 16
+
+#ifndef TSRN10_RX_DEFAULT_BURST
+#define TSRN10_RX_DEFAULT_BURST 16
+#endif
+
+#ifndef TSRN10_RX_DEFAULT_LINE
+#define TSRN10_RX_DEFAULT_LINE 64
+#endif
+
+#ifndef RNP_PKT_TIMEOUT
+#define RNP_PKT_TIMEOUT 30
+#endif
+
+#ifndef RNP_RX_PKT_POLL_BUDGET
+#define RNP_RX_PKT_POLL_BUDGET 64
+#endif
+
+#ifndef RNP_TX_PKT_POLL_BUDGET
+#define RNP_TX_PKT_POLL_BUDGET 0x30
+#endif
+
+#ifndef RNP_PKT_TIMEOUT_TX
+#define RNP_PKT_TIMEOUT_TX 100
+#endif
+/* VF Device IDs */
+#define RNP_DEV_ID_N10_PF0_VF 0x8001
+#define RNP_DEV_ID_N10_PF1_VF 0x8002
+
+#define RNP_DEV_ID_N10_PF0_VF_N 0x1010
+#define RNP_DEV_ID_N10_PF1_VF_N 0x1011
+
+/* Transmit Descriptor - Advanced */
+struct rnp_tx_desc {
+	union {
+		__le64 pkt_addr; /* Packet buffer address */
+		struct {
+			__le32 adr_lo;
+			__le32 adr_hi;
+		};
+	};
+	union {
+		__le64 vlan_cmd_bsz;
+		struct {
+			__le32 blen_mac_ip_len;
+			__le32 vlan_cmd;
+		};
+	};
+#define RNP_TXD_FLAGS_VLAN_PRIO_MASK 0xe000
+#define RNP_TX_FLAGS_VLAN_PRIO_SHIFT 13
+#define RNP_TX_FLAGS_VLAN_CFI_SHIFT 12
+#define RNP_TXD_VLAN_VALID (0x80000000)
+#define RNP_TXD_SVLAN_TYPE (0x02000000)
+#define RNP_TXD_VLAN_CTRL_NOP (0x00 << 13)
+#define RNP_TXD_VLAN_CTRL_RM_VLAN (0x20000000)
+#define RNP_TXD_VLAN_CTRL_INSERT_VLAN (0x40000000)
+#define RNP_TXD_L4_CSUM (0x10000000) /* udp tcp sctp csum */
+#define RNP_TXD_IP_CSUM (0x8000000)
+#define RNP_TXD_TUNNEL_MASK (0x3000000)
+#define RNP_TXD_TUNNEL_VXLAN (0x1000000)
+#define RNP_TXD_TUNNEL_NVGRE (0x2000000)
+#define RNP_TXD_L4_TYPE_UDP (0xc00000)
+#define RNP_TXD_L4_TYPE_TCP (0x400000)
+#define RNP_TXD_L4_TYPE_SCTP (0x800000)
+#define RNP_TXD_FLAG_IPv4 (0)
+#define RNP_TXD_FLAG_IPv6 (0x200000)
+#define RNP_TXD_FLAG_TSO (0x100000)
+#define RNP_TXD_FLAG_PTP (0x4000000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_CMD_INNER_VLAN (0x08000000)
+#define RNP_TXD_STAT_DD (0x020000)
+#define RNP_TXD_CMD_EOP (0x010000)
+#define RNP_TXD_PAD_CTRL (0x01000000)
+};
+
+struct rnp_tx_ctx_desc {
+	__le32 mss_len_vf_num;
+	__le32 inner_vlan_tunnel_len;
+#define VF_VEB_MARK (1 << 24) /* bit 56 */
+#define VF_VEB_IGNORE_VLAN (1 << 25) /* bit 57 */
+	__le32 resv;
+	__le32 resv_cmd;
+#define RNP_TXD_FLAG_TO_RPU (1 << 15)
+#define RNP_TXD_SMAC_CTRL_NOP (0x00 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR0 (0x02 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR1 (0x06 << 12)
+#define RNP_TXD_CTX_VLAN_CTRL_NOP (0x00 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_RM_VLAN (0x01 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_INSERT_VLAN (0x02 << 10)
+#define RNP_TXD_MTI_CRC_PAD_CTRL (0x01000000)
+#define RNP_TXD_CTX_CTRL_DESC (0x080000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_STAT_DD (0x020000)
+};
+
+/* Receive Descriptor - Advanced */
+union rnp_rx_desc {
+	struct {
+		union {
+			__le64 pkt_addr; /* Packet buffer address */
+			struct {
+				__le32 addr_lo;
+				__le32 addr_hi;
+			};
+		};
+		__le64 resv_cmd;
+#define RNP_RXD_FLAG_RS (0)
+	};
+
+	struct {
+		__le32 rss_hash;
+		__le16 mark;
+		__le16 rev1;
+#define RNP_RX_L3_TYPE_MASK (1 << 15) /* 1 is ipv4 */
+#define VEB_VF_PKG (1 << 0) /* bit 48 */
+#define VEB_VF_IGNORE_VLAN (1 << 1) /* bit 49 */
+#define REV_OUTER_VLAN (1 << 5)
+		__le16 len;
+		__le16 padding_len;
+		__le16 vlan;
+		__le16 cmd;
+#define RNP_RXD_STAT_VLAN_VALID (1 << 15)
+#define RNP_RXD_STAT_STAG (0x01 << 14)
+#define RNP_RXD_STAT_TUNNEL_NVGRE (0x02 << 13)
+#define RNP_RXD_STAT_TUNNEL_VXLAN (0x01 << 13)
+#define RNP_RXD_STAT_TUNNEL_MASK (0x03 << 13)
+#define RNP_RXD_STAT_ERR_MASK (0x1f << 8)
+#define RNP_RXD_STAT_SCTP_MASK (0x04 << 8)
+#define RNP_RXD_STAT_L4_MASK (0x02 << 8)
+#define RNP_RXD_STAT_L4_SCTP (0x02 << 6)
+#define RNP_RXD_STAT_L4_TCP (0x01 << 6)
+#define RNP_RXD_STAT_L4_UDP (0x03 << 6)
+#define RNP_RXD_STAT_IPV6 (1 << 5)
+#define RNP_RXD_STAT_IPV4 (0 << 5)
+#define RNP_RXD_STAT_PTP (1 << 4)
+#define RNP_RXD_STAT_DD (1 << 1)
+#define RNP_RXD_STAT_EOP (1 << 0)
+	} wb;
+} __packed;
+
+/* Host Interface Command Structures */
+struct rnp_hic_hdr {
+	u8 cmd;
+	u8 buf_len;
+	union {
+		u8 cmd_resv;
+		u8 ret_status;
+	} cmd_or_resp;
+	u8 checksum;
+};
+
+struct rnp_hic_drv_info {
+	struct rnp_hic_hdr hdr;
+	u8 port_num;
+	u8 ver_sub;
+	u8 ver_build;
+	u8 ver_min;
+	u8 ver_maj;
+	u8 pad; /* end spacing to ensure length is mult. of dword */
+	u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* Context descriptors */
+struct rnp_adv_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+/* RAH */
+#define RNP_RAH_VIND_MASK 0x003C0000
+#define RNP_RAH_VIND_SHIFT 18
+#define RNP_RAH_AV 0x80000000
+#define RNP_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+/* Autonegotiation advertised speeds */
+typedef u32 rnp_autoneg_advertised;
+/* Link speed */
+typedef u32 rnp_link_speed;
+#define RNP_LINK_SPEED_UNKNOWN 0
+#define RNP_LINK_SPEED_10_FULL BIT(2)
+#define RNP_LINK_SPEED_100_FULL BIT(3)
+#define RNP_LINK_SPEED_1GB_FULL BIT(4)
+#define RNP_LINK_SPEED_10GB_FULL BIT(5)
+#define RNP_LINK_SPEED_40GB_FULL BIT(6)
+#define RNP_LINK_SPEED_25GB_FULL BIT(7)
+#define RNP_LINK_SPEED_50GB_FULL BIT(8)
+#define RNP_LINK_SPEED_100GB_FULL BIT(9)
+#define RNP_LINK_SPEED_10_HALF BIT(10)
+#define RNP_LINK_SPEED_100_HALF BIT(11)
+#define RNP_LINK_SPEED_1GB_HALF BIT(12)
+#define RNP_SFP_MODE_10G_LR BIT(13)
+#define RNP_SFP_MODE_10G_SR BIT(14)
+#define RNP_SFP_MODE_10G_LRM BIT(15)
+#define RNP_SFP_MODE_1G_T BIT(16)
+#define RNP_SFP_MODE_1G_KX BIT(17)
+#define RNP_SFP_MODE_1G_SX BIT(18)
+#define RNP_SFP_MODE_1G_LX BIT(19)
+#define RNP_SFP_MODE_40G_SR4 BIT(20)
+#define RNP_SFP_MODE_40G_CR4 BIT(21)
+#define RNP_SFP_MODE_40G_LR4 BIT(22)
+#define RNP_SFP_MODE_1G_CX BIT(23)
+#define RNP_SFP_MODE_10G_BASE_T BIT(24)
+#define RNP_SFP_MODE_FIBER_CHANNEL_SPEED BIT(25)
+#define RNP_SFP_CONNECTOR_DAC BIT(26)
+#define RNP_SFP_TO_SGMII BIT(27)
+#define RNP_SFP_25G_SR BIT(28)
+#define RNP_SFP_25G_KR BIT(29)
+#define RNP_SFP_25G_CR BIT(30)
+#define RNP_LINK_SPEED_10GB_HALF BIT(31)
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+enum rnp_atr_flow_type {
+	RNP_ATR_FLOW_TYPE_IPV4 = 0x0,
+	RNP_ATR_FLOW_TYPE_UDPV4 = 0x1,
+	RNP_ATR_FLOW_TYPE_TCPV4 = 0x2,
+	RNP_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+	RNP_ATR_FLOW_TYPE_IPV6 = 0x4,
+	RNP_ATR_FLOW_TYPE_UDPV6 = 0x5,
+	RNP_ATR_FLOW_TYPE_TCPV6 = 0x6,
+	RNP_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+	RNP_ATR_FLOW_TYPE_ETHER = 0x18,
+	RNP_ATR_FLOW_TYPE_USERDEF = 0x19,
+};
+
+#define RNP_FDIR_DROP_QUEUE (200)
+
+enum {
+	fdir_mode_tcam = 0,
+	fdir_mode_tuple5,
+};
+/* Flow Director ATR input struct. */
+union rnp_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool      - 1 byte
+	 * flow_type    - 1 byte
+	 * vlan_id      - 2 bytes
+	 * src_ip       - 16 bytes
+	 * inner_mac    - 6 bytes
+	 * cloud_mode   - 2 bytes
+	 * tni_vni      - 4 bytes
+	 * dst_ip       - 16 bytes
+	 * src_port     - 2 bytes
+	 * dst_port     - 2 bytes
+	 * flex_bytes   - 2 bytes
+	 * bkt_hash     - 2 bytes
+	 */
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 dst_ip_mask[4];
+		__be32 src_ip[4];
+		__be32 src_ip_mask[4];
+		u8 inner_mac[6];
+		u8 inner_mac_mask[6];
+		__be16 tunnel_type;
+		__be32 tni_vni;
+		__be16 src_port;
+		__be16 src_port_mask;
+		__be16 dst_port;
+		__be16 dst_port_mask;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	struct {
+		u8 vm_poll;
+		u8 flow_type;
+		u16 vlan_id;
+		__be16 proto;
+		__be16 resv;
+		__be32 nouse[12];
+	} layer2_formate;
+	__be32 dword_stream[14];
+};
+
+/* BitTimes (BT) conversion */
+#define RNP_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
+#define RNP_B2BT(BT) ((BT) * 8)
+
+/* Calculate Delay to respond to PFC */
+#define RNP_PFC_D 672
+
+/* Calculate Cable Delay */
+#define RNP_CABLE_DC 5556 /* Delay Copper */
+#define RNP_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define RNP_PHY_DC 25600 /* Delay 10G BASET */
+#define RNP_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define RNP_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define RNP_ID_X540 (RNP_MAC_DC + RNP_XAUI_DC + RNP_PHY_DC)
+
+/* Calculate Interface Delay 82598, n10 */
+#define RNP_PHY_D 12800
+#define RNP_MAC_D 4096
+#define RNP_XAUI_D (2 * 1024)
+
+/* PHY MDI STANDARD CONFIG */
+#define RNP_MDI_PHY_ID1_OFFSET 2
+#define RNP_MDI_PHY_ID2_OFFSET 3
+#define RNP_MDI_PHY_ID_MASK 0xFFFFFC00U
+#define RNP_MDI_PHY_SPEED_SELECT1 0x0040
+#define RNP_MDI_PHY_DUPLEX 0x0100
+#define RNP_MDI_PHY_RESTART_AN 0x0200
+#define RNP_MDI_PHY_ANE 0x1000
+#define RNP_MDI_PHY_SPEED_SELECT0 0x2000
+#define RNP_MDI_PHY_RESET
+
+#define NGBE_PHY_RST_WAIT_PERIOD 50
+
+#define RNP_ID (RNP_MAC_D + RNP_XAUI_D + RNP_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define RNP_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define RNP_PCI_DELAY 10000
+
+/* Flow Director compressed ATR hash input struct */
+union rnp_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+enum rnp_eeprom_type {
+	rnp_eeprom_uninitialized = 0,
+	rnp_eeprom_spi,
+	rnp_flash,
+	rnp_eeprom_none /* No NVM support */
+};
+
+enum mac_type {
+	mac_dwc_xlg,
+	mac_dwc_g,
+};
+
+enum rnp_mac_type {
+	rnp_mac_unknown = 0,
+	rnp_mac_n10g_x8_40G,
+	rnp_mac_n10g_x2_10G,
+	rnp_mac_n10g_x4_10G,
+	rnp_mac_n10g_x8_10G,
+	rnp_mac_n10l_x8_1G,
+	rnp_num_macs
+};
+
+enum rnp_rss_type {
+	rnp_rss_uv440 = 0,
+	rnp_rss_uv3p,
+	rnp_rss_n10,
+	rnp_rss_n20,
+};
+
+enum rnp_hw_type {
+	rnp_hw_uv440 = 0,
+	rnp_hw_uv3p,
+	rnp_hw_n10,
+	rnp_hw_n20,
+	rnp_hw_n400
+};
+
+enum rnp_eth_type { rnp_eth_n10 = 0 };
+
+enum rnp_phy_type {
+	rnp_phy_unknown = 0,
+	rnp_phy_none,
+	rnp_phy_sfp,
+	rnp_phy_sfp_unsupported,
+	rnp_phy_generic,
+	rnp_phy_sfp_unknown,
+	rnp_phy_sgmii,
+};
+
+enum rnp_sfp_type {
+	rnp_sfp_type_da_cu = 0,
+	rnp_sfp_type_sr = 1,
+	rnp_sfp_type_lr = 2,
+	rnp_sfp_type_da_cu_core0 = 3,
+	rnp_sfp_type_da_cu_core1 = 4,
+	rnp_sfp_type_srlr_core0 = 5,
+	rnp_sfp_type_srlr_core1 = 6,
+	rnp_sfp_type_da_act_lmt_core0 = 7,
+	rnp_sfp_type_da_act_lmt_core1 = 8,
+	rnp_sfp_type_1g_cu_core0 = 9,
+	rnp_sfp_type_1g_cu_core1 = 10,
+	rnp_sfp_type_1g_sx_core0 = 11,
+	rnp_sfp_type_1g_sx_core1 = 12,
+	rnp_sfp_type_1g_lx_core0 = 13,
+	rnp_sfp_type_1g_lx_core1 = 14,
+	rnp_sfp_type_not_present = 0xFFFE,
+	rnp_sfp_type_unknown = 0xFFFF
+};
+
+enum rnp_media_type {
+	rnp_media_type_unknown = 0,
+	rnp_media_type_fiber,
+	rnp_media_type_copper,
+	rnp_media_type_backplane,
+	rnp_media_type_cx4,
+	rnp_media_type_da,
+	rnp_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum rnp_fc_mode {
+	rnp_fc_none = 0,
+	rnp_fc_rx_pause,
+	rnp_fc_tx_pause,
+	rnp_fc_full,
+	rnp_fc_default
+};
+
+#define PAUSE_TX (0x1)
+#define PAUSE_RX (0x2)
+#define PAUSE_AUTO (0x10)
+
+#define ASYM_PAUSE BIT(11)
+#define SYM_PAUSE BIT(10)
+
+struct rnp_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool uc_set_promisc;
+	bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct rnp_bus_info {
+	u16 func;
+	u16 lan_id;
+};
+
+/* Flow control parameters */
+struct rnp_fc_info {
+	u32 high_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
+	u32 low_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+	enum rnp_fc_mode current_mode; /* FC mode in effect */
+	u32 requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct rnp_hw_stats {
+	u64 dma_to_dma;
+	u64 dma_to_switch;
+	u64 mac_to_mac;
+	u64 switch_to_switch;
+	u64 mac_to_dma;
+	u64 switch_to_dma;
+	u64 vlan_add_cnt;
+	u64 vlan_strip_cnt;
+	/* === error */
+	u64 invalid_dropped_packets;
+	u64 filter_dropped_packets;
+	/* == drop == */
+	u64 rx_capabity_lost;
+	u64 host_l2_match_drop;
+	u64 redir_input_match_drop;
+	u64 redir_etype_match_drop;
+	u64 redir_tcp_syn_match_drop;
+	u64 redir_tuple5_match_drop;
+	u64 redir_tcam_match_drop;
+
+	u64 bmc_dropped_packets;
+	u64 switch_dropped_packets;
+	/* === rx */
+	u64 dma_to_host;
+	/* === dma-tx == */
+	u64 port0_tx_packets;
+	u64 port1_tx_packets;
+	u64 port2_tx_packets;
+	u64 port3_tx_packets;
+	/* === emac 1to4 tx == */
+	u64 in0_tx_pkts;
+	u64 in1_tx_pkts;
+	u64 in2_tx_pkts;
+	u64 in3_tx_pkts;
+	/* === phy tx == */
+	u64 port0_to_phy_pkts;
+	u64 port1_to_phy_pkts;
+	u64 port2_to_phy_pkts;
+	u64 port3_to_phy_pkts;
+	/* === mac rx === */
+	u64 mac_rx_broadcast;
+	u64 mac_rx_multicast;
+	u64 mac_rx_pause_count;
+	u64 mac_tx_pause_count;
+	u64 tx_broadcast;
+	u64 tx_multicast;
+
+	u64 dma_rx_drop_cnt_0;
+	u64 dma_rx_drop_cnt_1;
+	u64 dma_rx_drop_cnt_2;
+	u64 dma_rx_drop_cnt_3;
+	u64 dma_rx_drop_cnt_4;
+	u64 dma_rx_drop_cnt_5;
+	u64 dma_rx_drop_cnt_6;
+	u64 dma_rx_drop_cnt_7;
+};
+
+/* forward declaration */
+struct rnp_hw;
+struct rnp_eth_info;
+struct rnp_dma_info;
+struct rnp_mac_info;
+
+/* iterator type for walking multicast address lists */
+typedef u8 *(*rnp_mc_addr_itr)(struct rnp_hw *hw, u8 **mc_addr_ptr, u32 *vmdq);
+
+/* Function pointer table */
+struct rnp_eeprom_operations {
+	s32 (*init_params)(struct rnp_hw *hw);
+	s32 (*read)(struct rnp_hw *hw, u16, u16 *);
+	s32 (*read_buffer)(struct rnp_hw *, u16, u16, u16 *);
+	s32 (*write)(struct rnp_hw *, u16, u16);
+	s32 (*write_buffer)(struct rnp_hw *, u16, u16, u16 *);
+	s32 (*validate_checksum)(struct rnp_hw *, u16 *);
+	s32 (*update_checksum)(struct rnp_hw *);
+	u16 (*calc_checksum)(struct rnp_hw *);
+};
+
+/* add nic operations */
+struct rnp_eth_operations {
+	/* RAR, Multicast, VLAN */
+	s32 (*get_mac_addr)(struct rnp_eth_info *, u8 *);
+	s32 (*set_rar)(struct rnp_eth_info *, u32, u8 *, bool);
+	s32 (*clear_rar)(struct rnp_eth_info *, u32);
+	s32 (*set_vmdq)(struct rnp_eth_info *, u32, u32);
+	s32 (*clear_vmdq)(struct rnp_eth_info *, u32, u32);
+	s32 (*update_mc_addr_list)(struct rnp_eth_info *, struct net_device *,
+				   bool);
+	void (*clr_mc_addr)(struct rnp_eth_info *);
+	int (*set_rss_hfunc)(struct rnp_eth_info *, int hfunc);
+	void (*set_rss_key)(struct rnp_eth_info *, bool);
+	void (*set_rss_table)(struct rnp_eth_info *);
+	void (*set_rx_hash)(struct rnp_eth_info *, bool, bool);
+	/* ncsi */
+	void (*ncsi_set_vfta)(struct rnp_eth_info *);
+	void (*ncsi_set_uc_addr)(struct rnp_eth_info *);
+	void (*ncsi_set_mc_mta)(struct rnp_eth_info *);
+	void (*set_layer2_remapping)(struct rnp_eth_info *,
+				     union rnp_atr_input *, u16, u8, bool);
+	void (*clr_layer2_remapping)(struct rnp_eth_info *, u16);
+	void (*clr_all_layer2_remapping)(struct rnp_eth_info *);
+	void (*set_tuple5_remapping)(struct rnp_eth_info *,
+				     union rnp_atr_input *, u16, u8, bool);
+	void (*clr_tuple5_remapping)(struct rnp_eth_info *, u16);
+	void (*clr_all_tuple5_remapping)(struct rnp_eth_info *);
+	void (*set_tcp_sync_remapping)(struct rnp_eth_info *, int, bool, bool);
+	void (*set_rx_skip)(struct rnp_eth_info *, int, bool);
+	void (*set_min_max_packet)(struct rnp_eth_info *, int, int);
+	void (*set_vlan_strip)(struct rnp_eth_info *, u16, bool);
+	s32 (*set_vfta)(struct rnp_eth_info *, u32, bool);
+	void (*clr_vfta)(struct rnp_eth_info *);
+	void (*set_vlan_filter)(struct rnp_eth_info *, bool);
+	void (*set_outer_vlan_type)(struct rnp_eth_info *, int type);
+	void (*set_double_vlan)(struct rnp_eth_info *, bool);
+	void (*set_vxlan_port)(struct rnp_eth_info *, u32);
+	void (*set_vxlan_mode)(struct rnp_eth_info *, bool);
+	s32 (*set_fc_mode)(struct rnp_eth_info *);
+	void (*set_rx)(struct rnp_eth_info *, bool);
+	void (*set_fcs)(struct rnp_eth_info *, bool);
+	void (*set_vf_vlan_mode)(struct rnp_eth_info *, u16, int, bool);
+};
+
+enum {
+	rnp_driver_insmod,
+	rnp_driver_suspuse,
+	rnp_driver_force_control_mac,
+};
+
+struct rnp_hw_operations {
+	s32 (*init_hw)(struct rnp_hw *);
+	s32 (*reset_hw)(struct rnp_hw *);
+	s32 (*start_hw)(struct rnp_hw *);
+	void (*set_mtu)(struct rnp_hw *, int);
+	void (*set_vlan_filter_en)(struct rnp_hw *, bool);
+	void (*set_vlan_filter)(struct rnp_hw *, u16, bool, bool);
+	int (*set_veb_vlan_mask)(struct rnp_hw *, u16, int, bool);
+	void (*set_vf_vlan_filter)(struct rnp_hw *, u16, int, bool, bool);
+	void (*clr_vfta)(struct rnp_hw *);
+	void (*set_vlan_strip)(struct rnp_hw *, u16, bool);
+	void (*set_mac)(struct rnp_hw *, u8 *mac, bool);
+	void (*set_rx_mode)(struct rnp_hw *, struct net_device *netdev, bool);
+	void (*set_rar_with_vf)(struct rnp_hw *hw, u8 *mac, int, u32, bool);
+	void (*clr_rar)(struct rnp_hw *hw, int idx);
+	void (*clr_rar_all)(struct rnp_hw *hw);
+	void (*clr_vlan_veb)(struct rnp_hw *);
+	void (*set_txvlan_mode)(struct rnp_hw *, bool);
+	void (*set_tx_maxrate)(struct rnp_hw *, bool);
+	void (*set_fcs_mode)(struct rnp_hw *, bool);
+	void (*set_vxlan_port)(struct rnp_hw *, u32);
+	void (*set_vxlan_mode)(struct rnp_hw *, bool);
+	void (*set_mac_speed)(struct rnp_hw *, bool, u32, bool);
+	void (*set_mac_rx)(struct rnp_hw *, bool);
+	void (*update_sriov_info)(struct rnp_hw *);
+	void (*set_sriov_status)(struct rnp_hw *, bool);
+	void (*set_sriov_vf_mc)(struct rnp_hw *, u16);
+	void (*set_pause_mode)(struct rnp_hw *);
+	void (*get_pause_mode)(struct rnp_hw *);
+	void (*update_hw_info)(struct rnp_hw *);
+	void (*set_rx_hash)(struct rnp_hw *, bool, bool);
+	int (*set_rss_hfunc)(struct rnp_hw *, u8 hfunc);
+	void (*set_rss_key)(struct rnp_hw *, bool);
+	void (*set_rss_table)(struct rnp_hw *);
+	void (*set_mbx_link_event)(struct rnp_hw *, int);
+	void (*set_mbx_ifup)(struct rnp_hw *, int);
+	s32 (*get_thermal_sensor_data)(struct rnp_hw *);
+	s32 (*init_thermal_sensor_thresh)(struct rnp_hw *hw);
+	void (*disable_tx_laser)(struct rnp_hw *);
+	void (*enable_tx_laser)(struct rnp_hw *);
+	void (*flap_tx_laser)(struct rnp_hw *);
+	s32 (*check_link)(struct rnp_hw *, rnp_link_speed *, bool *, bool *,
+			  bool);
+	s32 (*setup_link)(struct rnp_hw *, rnp_link_speed, u32, u32, u32);
+	void (*clean_link)(struct rnp_hw *);
+	s32 (*get_link_capabilities)(struct rnp_hw *, rnp_link_speed *, bool *);
+	s32 (*init_rx_addrs)(struct rnp_hw *);
+	void (*set_layer2_remapping)(struct rnp_hw *, union rnp_atr_input *,
+				     u16, u8, bool);
+	void (*clr_layer2_remapping)(struct rnp_hw *, u16);
+	void (*clr_all_layer2_remapping)(struct rnp_hw *);
+	void (*set_tuple5_remapping)(struct rnp_hw *, union rnp_atr_input *,
+				     u16, u8, bool);
+	void (*clr_tuple5_remapping)(struct rnp_hw *, u16);
+	void (*clr_all_tuple5_remapping)(struct rnp_hw *);
+	void (*set_tcp_sync_remapping)(struct rnp_hw *, int queue, bool, bool);
+	void (*set_rx_skip)(struct rnp_hw *, int count, bool);
+	void (*set_outer_vlan_type)(struct rnp_hw *, int);
+	void (*update_hw_status)(struct rnp_hw *, struct rnp_hw_stats *,
+				 struct net_device_stats *);
+	void (*update_msix_count)(struct rnp_hw *, int msix_count);
+	void (*update_rx_drop)(struct rnp_hw *);
+	void (*setup_ethtool)(struct net_device *);
+	s32 (*phy_read_reg)(struct rnp_hw *, u32, u32, u16 *);
+	s32 (*phy_write_reg)(struct rnp_hw *, u32, u32, u16);
+	void (*setup_wol)(struct rnp_hw *, u32);
+	void (*set_vf_vlan_mode)(struct rnp_hw *, u16, int, bool);
+	void (*driver_status)(struct rnp_hw *, bool, int);
+};
+
+struct rnp_mac_operations {
+	void (*set_mac_rx)(struct rnp_mac_info *mac, bool);
+	void (*set_mac_speed)(struct rnp_mac_info *, bool, u32, bool);
+	void (*set_mac_fcs)(struct rnp_mac_info *mac, bool);
+	s32 (*set_fc_mode)(struct rnp_mac_info *mac);
+	void (*check_link)(struct rnp_mac_info *, rnp_link_speed *, bool *,
+			   bool);
+	void (*set_mac)(struct rnp_mac_info *, u8 *, int);
+	int (*mdio_write)(struct rnp_mac_info *, int phyreg, int phydata);
+	int (*mdio_read)(struct rnp_mac_info *, int phyreg, int *regvalue);
+	void (*pmt)(struct rnp_mac_info *, u32);
+};
+
+struct rnp_eeprom_info {
+	struct rnp_eeprom_operations ops;
+	enum rnp_eeprom_type type;
+	u32 semaphore_delay;
+	u16 word_size;
+	u16 address_bits;
+	u16 word_page_size;
+};
+
+struct rnp_dma_operations {
+	void (*set_tx_maxrate)(struct rnp_dma_info *dma, u16, u32);
+	void (*set_veb_mac)(struct rnp_dma_info *dma, u8 *, u32, u32);
+	/* only set own vlan */
+	void (*set_veb_vlan)(struct rnp_dma_info *dma, u16, u32);
+	void (*set_veb_vlan_mask)(struct rnp_dma_info *dma, u16, u16, int);
+	void (*clr_veb_all)(struct rnp_dma_info *dma);
+};
+
+struct rnp_dma_info {
+	struct rnp_dma_operations ops;
+	u8 __iomem *dma_base_addr;
+	u8 __iomem *dma_ring_addr;
+	void *back;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 dma_version;
+};
+
+#define RNP_MAX_MTA 128
+struct rnp_eth_info {
+	struct rnp_eth_operations ops;
+	u8 __iomem *eth_base_addr;
+	enum rnp_eth_type eth_type;
+	void *back;
+
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+};
+
+struct rnp_nic_info {
+	u8 __iomem *nic_base_addr;
+};
+
+struct mii_regs {
+	unsigned int addr; /* MII Address */
+	unsigned int data; /* MII Data */
+	unsigned int addr_shift; /* MII address shift */
+	unsigned int reg_shift; /* MII reg shift */
+	unsigned int addr_mask; /* MII address mask */
+	unsigned int reg_mask; /* MII reg mask */
+	unsigned int clk_csr_shift;
+	unsigned int clk_csr_mask;
+};
+
+#define RNP_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+#define RNP_FLAGS_INIT_MAC_ADDRESS 0x02
+struct rnp_mac_info {
+	struct rnp_mac_operations ops;
+	u8 __iomem *mac_addr;
+	void *back;
+	struct mii_regs mii;
+	int phy_addr;
+	int clk_csr;
+	enum rnp_mac_type type;
+	enum mac_type mac_type;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16 wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16 wwpn_prefix;
+	u16 max_msix_vectors;
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+	bool orig_link_settings_stored;
+	bool autotry_restart;
+	u8 mac_flags;
+};
+
+struct rnp_phy_info {
+	struct mdio_if_info mdio;
+	enum rnp_phy_type type;
+	u32 id;
+	u32 phy_addr;
+	bool is_mdix;
+	u8 mdix;
+	enum rnp_sfp_type sfp_type;
+	bool sfp_setup_needed;
+	u32 revision;
+	enum rnp_media_type media_type;
+	bool reset_disable;
+	rnp_autoneg_advertised autoneg_advertised;
+	bool smart_speed_active;
+	bool multispeed_fiber;
+	bool reset_if_overtemp;
+};
+
+#include "rnp_mbx.h"
+
+struct rnp_pcs_operations {
+	u32 (*read)(struct rnp_hw *hw, int num, u32 addr);
+	void (*write)(struct rnp_hw *hw, int num, u32 addr, u32 value);
+};
+
+struct rnp_mbx_operations {
+	s32 (*init_params)(struct rnp_hw *hw);
+	s32 (*read)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*write)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*read_posted)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*write_posted)(struct rnp_hw *, u32 *, u16, enum MBX_ID);
+	s32 (*check_for_msg)(struct rnp_hw *, enum MBX_ID);
+	s32 (*check_for_ack)(struct rnp_hw *, enum MBX_ID);
+	s32 (*configure)(struct rnp_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnp_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnp_pcs_info {
+	struct rnp_pcs_operations ops;
+	int pcs_count;
+};
+
+struct mbx_fw_cmd_reply;
+
+typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv);
+
+enum cookie_stat {
+	COOKIE_FREE = 0,
+	COOKIE_FREE_WAIT_TIMEOUT,
+	COOKIE_ALLOCED,
+};
+
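+/*
+ * Context for one outstanding firmware mailbox request: the requester
+ * sleeps on @wait until @done is set (or the timeout expires), or is
+ * completed asynchronously through @cb; up to MAX_PRIV_LEN bytes of
+ * reply data are stored in @priv.
+ */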
+struct mbx_req_cookie {
+	u64 alloced_jiffies;
+	enum cookie_stat stat;
+	cookie_cb cb;
+	int timeout_jiffes;
+	int errcode;
+	wait_queue_head_t wait;
+	int done;
+	int priv_len;
+#define MAX_PRIV_LEN 64
+	char priv[MAX_PRIV_LEN];
+};
+
+struct mbx_req_cookie_pool {
+#define MAX_COOKIES_ITEMS (20 * 400)
+	struct mbx_req_cookie cookies[MAX_COOKIES_ITEMS];
+	int next_idx;
+};
+
+struct rnp_mbx_info {
+	struct rnp_mbx_operations ops;
+	struct rnp_mbx_stats stats;
+	u32 timeout;
+	u32 usec_delay;
+	u32 v2p_mailbox;
+	u16 size;
+	u16 vf_req[64];
+	u16 vf_ack[64];
+	u16 cpu_req;
+	u16 cpu_ack;
+	struct mutex lock;
+	bool other_irq_enabled;
+	int mbx_size;
+	int mbx_mem_size;
+#define MBX_FEATURE_NO_ZERO BIT(0)
+#define MBX_FEATURE_WRITE_DELAY BIT(1)
+	u32 mbx_feature;
+	/* cm3 <-> pf mbx */
+	u32 cpu_pf_shm_base;
+	u32 pf2cpu_mbox_ctrl;
+	u32 pf2cpu_mbox_mask;
+	u32 cpu_pf_mbox_mask;
+	u32 cpu2pf_mbox_vec;
+	/* pf <--> vf mbx */
+	u32 pf_vf_shm_base;
+	u32 pf2vf_mbox_ctrl_base;
+	u32 pf_vf_mbox_mask_lo;
+	u32 pf_vf_mbox_mask_hi;
+	u32 pf2vf_mbox_vec_base;
+	u32 vf2pf_mbox_vec_base;
+	u32 cpu_vf_share_ram;
+	int share_size;
+	struct mbx_req_cookie_pool cookie_pool;
+};
+
+struct vf_vebvlans {
+	struct list_head l;
+	bool free;
+	int veb_entry;
+	u16 vid;
+	u16 mask;
+};
+
+#define RNP_MBX_VF_CPU_SHM_PF_BASE (0xA8000)
+#define RNP_NCSI_MC_COUNT (11)
+#define RNP_NCSI_VLAN_COUNT (1)
+
+#define RNP_VF_CPU_SHM_BASE_NR62 (RNP_MBX_VF_CPU_SHM_PF_BASE + 62 * 64)
+struct ncsi_shm_info {
+	u32 valid;
+#define RNP_NCSI_SHM_VALID 0xa5000000
+#define RNP_NCSI_SHM_VALID_MASK 0xff000000
+#define RNP_MC_VALID BIT(0)
+#define RNP_UC_VALID BIT(1)
+#define RNP_VLAN_VALID BIT(2)
+
+	struct {
+		u32 uc_addr_lo;
+		u32 uc_addr_hi;
+	} uc;
+
+	struct {
+		u32 mc_addr_lo;
+		u32 mc_addr_hi;
+	} mc[RNP_NCSI_MC_COUNT];
+	u32 ncsi_vlan;
+};
+
+struct rnp_hw {
+	void *back;
+	u8 __iomem *hw_addr;
+	u8 __iomem *ring_msix_base;
+	u8 __iomem *rpu_addr;
+	u8 pfvfnum;
+	struct pci_dev *pdev;
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	char lane_mask;
+	u16 mac_type;
+	u16 phy_type;
+	int nr_lane;
+	u8 is_backplane : 1;
+	u8 is_sgmii : 1;
+	u8 force_10g_1g_speed_ablity : 1;
+	u8 force_speed_stat : 2;
+#define FORCE_SPEED_STAT_DISABLED 0
+#define FORCE_SPEED_STAT_1G 1
+#define FORCE_SPEED_STAT_10G 2
+	u8 rpu_en : 1;
+	u8 rpu_availble : 1;
+	u8 ncsi_en;
+	u8 ncsi_rar_entries;
+	u16 ncsi_mc_count;
+	u16 ncsi_vlan_count;
+	u32 ncsi_vf_cpu_shm_pf_base;
+	u32 saved_force_link_speed;
+	u32 pcode;
+	u32 supported_link;
+	u32 advertised_link;
+	u32 autoneg;
+	u32 tp_mdx;
+	u32 tp_mdix_ctrl;
+	u32 phy_id;
+	u8 fw_lldp_ablity;
+	u8 link;
+	u8 pci_gen;
+	u8 pci_lanes;
+	u16 max_msix_vectors;
+	int speed;
+	int duplex;
+	u32 dma_version;
+	u32 wol;
+	u32 eco;
+	u32 force_status;
+	u32 force_link_supported;
+	u16 min_length;
+	u16 max_length;
+	u16 min_length_current;
+	u16 max_length_current;
+	/* rss info */
+#define HW_MAX_RETA_ENTRIES 512
+	u8 rss_indir_tbl[HW_MAX_RETA_ENTRIES];
+#define HW_MAX_TC_ENTRIES 8
+	u8 rss_tc_tbl[HW_MAX_TC_ENTRIES];
+	int rss_indir_tbl_num;
+	int rss_tc_tbl_num;
+	u32 rss_tbl_setup_flag;
+#define HW_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+	u8 rss_key[HW_RSS_KEY_SIZE];
+	u32 rss_key_setup_flag;
+	u32 vfnum;
+	int dma_split_size;
+	int num_rar_entries;
+	int max_vfs;
+	int max_vfs_noari;
+	int sriov_ring_limit;
+	int max_pf_macvlans;
+	int num_vebvlan_entries;
+	int fdir_mode;
+	int layer2_count;
+	int tuple5_count;
+	int veb_ring;
+	int default_vf_num;
+	int vf_promisc_mode;
+	int vf_promisc_num;
+	u32 fdir_pballoc;
+	enum rnp_rss_type rss_type;
+	enum rnp_hw_type hw_type;
+	struct rnp_hw_operations ops;
+	struct rnp_nic_info nic;
+	struct rnp_dma_info dma;
+	struct rnp_eth_info eth;
+	struct rnp_mac_info mac;
+	struct rnp_addr_filter_info addr_ctrl;
+	struct rnp_fc_info fc;
+	struct rnp_phy_info phy;
+	struct rnp_eeprom_info eeprom;
+	struct rnp_bus_info bus;
+	struct rnp_mbx_info mbx;
+	struct rnp_pcs_info pcs;
+	bool adapter_stopped;
+	bool force_full_reset;
+	bool mng_fw_enabled;
+	bool wol_enabled;
+	unsigned long wol_supported;
+	int fw_version;
+	u8 sfp_connector;
+	struct vf_vebvlans vf_vas;
+	struct vf_vebvlans *vv_list;
+	u32 axi_mhz;
+	u32 bd_uid;
+	union {
+		u8 port_id[4];
+		u32 port_ids;
+	};
+	int mode;
+	int default_rx_queue;
+	u32 usecstocount;
+#define RNP_NET_FEATURE_SG ((u32)(1 << 0))
+#define RNP_NET_FEATURE_TX_CHECKSUM ((u32)(1 << 1))
+#define RNP_NET_FEATURE_RX_CHECKSUM ((u32)(1 << 2))
+#define RNP_NET_FEATURE_TSO ((u32)(1 << 3))
+#define RNP_NET_FEATURE_TX_UDP_TUNNEL ((1 << 4))
+#define RNP_NET_FEATURE_VLAN_FILTER ((1 << 5))
+#define RNP_NET_FEATURE_VLAN_OFFLOAD ((1 << 6))
+#define RNP_NET_FEATURE_RX_NTUPLE_FILTER ((1 << 7))
+#define RNP_NET_FEATURE_TCAM ((1 << 8))
+#define RNP_NET_FEATURE_RX_HASH ((1 << 9))
+#define RNP_NET_FEATURE_RX_FCS ((1 << 10))
+#define RNP_NET_FEATURE_HW_TC ((1 << 11))
+#define RNP_NET_FEATURE_USO ((1 << 12))
+#define RNP_NET_FEATURE_STAG_FILTER ((1 << 13))
+#define RNP_NET_FEATURE_STAG_OFFLOAD ((1 << 14))
+#define RNP_NET_FEATURE_VF_FIXED ((1 << 15))
+#define RNP_VEB_VLAN_MASK_EN ((1 << 16))
+
+	u32 feature_flags;
+	struct rnp_thermal_sensor_data thermal_sensor_data;
+
+	struct {
+		int version;
+		int len;
+		int flag;
+	} dump;
+};
+
+struct rnp_info {
+	enum rnp_mac_type mac;
+	enum rnp_rss_type rss_type;
+	enum rnp_hw_type hw_type;
+	s32 (*get_invariants)(struct rnp_hw *);
+	struct rnp_mac_operations *mac_ops;
+	struct rnp_eeprom_operations *eeprom_ops;
+	struct rnp_mbx_operations *mbx_ops;
+	struct rnp_pcs_operations *pcs_ops;
+	bool one_pf_with_two_dma;
+	int reg_off;
+	int adapter_cnt;
+	char lane_mask;
+	int hi_dma;
+	int total_queue_pair_cnts;
+	int dma2_in_1pf;
+	char *hw_addr;
+};
+
+/* Error Codes */
+#define RNP_ERR_EEPROM -1
+#define RNP_ERR_EEPROM_CHECKSUM -2
+#define RNP_ERR_PHY -3
+#define RNP_ERR_CONFIG -4
+#define RNP_ERR_PARAM -5
+#define RNP_ERR_MAC_TYPE -6
+#define RNP_ERR_UNKNOWN_PHY -7
+#define RNP_ERR_LINK_SETUP -8
+#define RNP_ERR_ADAPTER_STOPPED -9
+#define RNP_ERR_INVALID_MAC_ADDR -10
+#define RNP_ERR_DEVICE_NOT_SUPPORTED -11
+#define RNP_ERR_MASTER_REQUESTS_PENDING -12
+#define RNP_ERR_INVALID_LINK_SETTINGS -13
+#define RNP_ERR_AUTONEG_NOT_COMPLETE -14
+#define RNP_ERR_RESET_FAILED -15
+#define RNP_ERR_SWFW_SYNC -16
+#define RNP_ERR_PHY_ADDR_INVALID -17
+#define RNP_ERR_I2C -18
+#define RNP_ERR_SFP_NOT_SUPPORTED -19
+#define RNP_ERR_SFP_NOT_PRESENT -20
+#define RNP_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define RNP_ERR_FDIR_REINIT_FAILED -23
+#define RNP_ERR_EEPROM_VERSION -24
+#define RNP_ERR_NO_SPACE -25
+#define RNP_ERR_OVERTEMP -26
+#define RNP_ERR_FC_NOT_NEGOTIATED -27
+#define RNP_ERR_FC_NOT_SUPPORTED -28
+#define RNP_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define RNP_ERR_PBA_SECTION -31
+#define RNP_ERR_INVALID_ARGUMENT -32
+#define RNP_ERR_HOST_INTERFACE_COMMAND -33
+#define RNP_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define RNP_RAH_AV 0x80000000
+/* eth fix code */
+#define RNP_FCTRL_BPE BIT(10)
+#define RNP_FCTRL_UPE BIT(9)
+#define RNP_FCTRL_MPE BIT(8)
+
+#define RNP_MCSTCTRL_MTA BIT(2)
+#define RNP_MCSTCTRL_UTA BIT(3)
+
+#define RNP_MAX_LAYER2_FILTERS (16)
+#define RNP_MAX_TUPLE5_FILTERS (128)
+#define RNP_MAX_TCAM_FILTERS (4096)
+
+#define RNP_SRC_IP_MASK BIT(0)
+#define RNP_DST_IP_MASK BIT(1)
+#define RNP_SRC_PORT_MASK BIT(2)
+#define RNP_DST_PORT_MASK BIT(3)
+#define RNP_L4_PROTO_MASK BIT(4)
+#endif /* _RNP_TYPE_H_ */
diff --git a/drivers/net/ethernet/mucse/rnp/version.h b/drivers/net/ethernet/mucse/rnp/version.h
new file mode 100644
index 0000000000000..ca720dae0c478
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnp/version.h
@@ -0,0 +1,4 @@
+#ifndef VERSION_H
+#define VERSION_H
+#define GIT_COMMIT " 83aa5f1"
+#endif
diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile
new file mode 100644
index 0000000000000..f3c1d159fe346
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2022 - 2024 Mucse Corporation
+#
+# Makefile for the Mucse(R) 1GbE PCI Express ethernet driver
+#
+#
+
+obj-$(CONFIG_MGBE) += rnpgbe.o
+rnpgbe-objs :=   \
+		rnpgbe_main.o \
+		rnpgbe_common.o \
+		rnpgbe_ethtool.o \
+		rnpgbe_lib.o \
+		rnpgbe_mbx.o \
+		rnpgbe_chip.o \
+		rnpgbe_mbx_fw.o\
+		rnpgbe_sriov.o \
+		rnpgbe_param.o \
+		rnpgbe_sysfs.o \
+		rnpgbe_sfc.o \
+		rnpgbe_ptp.o
+
+rnpgbe-$(CONFIG_DEBUG_FS) += rnpgbe_debugfs.o
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
new file mode 100644
index 0000000000000..f0c801e482875
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -0,0 +1,1184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_H_
+#define _RNPGBE_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe_type.h"
+#include "rnpgbe_common.h"
+
+extern struct rnpgbe_info rnpgbe_n500_info;
+extern struct rnpgbe_info rnpgbe_n210_info;
+/* common prefix used by pr_<> macros */
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define RNP_ALLOC_PAGE_ORDER 0
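+/* Number of rx buffers that fit in one allocated page: the per-buffer
+ * footprint is the rx offset plus buffer size, skb_shared_info and the
+ * hardware timestamp area, rounded up to 1024 bytes.
+ */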
+#define RNP_PAGE_BUFFER_NUMS(ring)                                             \
+	((1 << RNP_ALLOC_PAGE_ORDER) * PAGE_SIZE /                             \
+	 ALIGN((rnpgbe_rx_offset(ring) + rnpgbe_rx_bufsz(ring) +               \
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +               \
+		RNP_RX_HWTS_OFFSET),                                           \
+	       1024))
+#define RNP_DEFAULT_TX_WORK (128)
+#define RNP_MIN_TX_WORK (32)
+#define RNP_MAX_TX_WORK (512)
+#define RNP_MIN_RX_WORK (32)
+#define RNP_MAX_RX_WORK (512)
+#define RNP_WORK_ALIGN (2)
+#define RNP_MIN_TX_FRAME (1)
+#define RNP_MAX_TX_FRAME (256)
+#define RNP_MIN_TX_USEC (10)
+#define RNP_MAX_TX_USEC (10000)
+#define RNP_MIN_RX_FRAME (1)
+#define RNP_MAX_RX_FRAME (256)
+#define RNP_MIN_RX_USEC (2)
+#define RNP_MAX_RX_USEC (10000)
+#define RNP_MAX_TXD (4096)
+#define RNP_MIN_TXD (64)
+/* Default LPI timers */
+#define RNP_DEFAULT_LIT_LS 0x3E8
+#define RNP_DEFAULT_TWT_LS 0x1E
+#define RNP_START_ITR 648 /* ~6000 ints/sec */
+#define RNP_4K_ITR 980
+#define RNP_20K_ITR 196
+#define RNP_70K_ITR
+#define RNP_LOWEREST_ITR 5
+#define ACTION_TO_MPE (130)
+#define MPE_PORT (10)
+#define AUTO_ALL_MODES 0
+/* TX/RX descriptor defines */
+#define RNP_DEFAULT_TXD 512
+#define RNP_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define RNP_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define RNP_DEFAULT_RXD 512
+#define RNP_MAX_RXD 4096
+#define RNP_MIN_RXD 64
+/* flow control */
+#define RNP_MIN_FCRTL 0x40
+#define RNP_MAX_FCRTL 0x7FF80
+#define RNP_MIN_FCRTH 0x600
+#define RNP_MAX_FCRTH 0x7FFF0
+#define RNP_DEFAULT_FCPAUSE 0xFFFF
+#define RNP10_DEFAULT_HIGH_WATER 0x320
+#define RNP10_DEFAULT_LOW_WATER 0x270
+#define RNP500_DEFAULT_HIGH_WATER 400
+#define RNP500_DEFAULT_LOW_WATER 256
+#define RNP_MIN_FCPAUSE 0
+#define RNP_MAX_FCPAUSE 0xFFFF
+/* Supported Rx Buffer Sizes */
+#define RNP_RXBUFFER_256 256 /* Used for skb receive header */
+#define RNP_RXBUFFER_1536 1536
+#define RNP_RXBUFFER_2K 2048
+#define RNP_RXBUFFER_3K 3072
+#define RNP_RXBUFFER_4K 4096
+#define RNP_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define RNP_RXBUFFER_MAX (RNP_RXBUFFER_2K)
+#define MAX_Q_VECTORS 128
+#define RNP_RING_COUNTS_PEER_PF 8
+#define RNP_GSO_PARTIAL_FEATURES                                               \
+	(NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |     \
+	 NETIF_F_GSO_UDP_TUNNEL_CSUM)
+/*
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
+ * this adds up to 448 bytes of extra data.
+ *
+ * Since netdev_alloc_skb now allocates a page fragment we can use a value
+ * of 256 and the resultant skb will have a truesize of 960 or less.
+ */
+#define RNP_RX_HDR_SIZE RNP_RXBUFFER_256
+#define RNP_ITR_ADAPTIVE_MIN_INC 2
+#define RNP_ITR_ADAPTIVE_MIN_USECS 5
+#define RNP_ITR_ADAPTIVE_MAX_USECS 800
+#define RNP_ITR_ADAPTIVE_LATENCY 0x400
+#define RNP_ITR_ADAPTIVE_BULK 0x00
+#define RNP_ITR_ADAPTIVE_MASK_USECS                                            \
+	(RNP_ITR_ADAPTIVE_LATENCY - RNP_ITR_ADAPTIVE_MIN_INC)
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#ifdef OPTM_WITH_LPAGE
+#define RNP_RX_BUFFER_WRITE (PAGE_SIZE / 2048) /* Must be power of 2 */
+#else /* OPTM_WITH_LPAGE */
+#define RNP_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#endif /* OPTM_WITH_LPAGE */
+
+enum rnpgbe_tx_flags {
+	/* cmd_type flags */
+	RNP_TX_FLAGS_HW_VLAN = 0x01,
+	RNP_TX_FLAGS_TSO = 0x02,
+	RNP_TX_FLAGS_TSTAMP = 0x04,
+	/* olinfo flags */
+	RNP_TX_FLAGS_CC = 0x08,
+	RNP_TX_FLAGS_IPV4 = 0x10,
+	RNP_TX_FLAGS_CSUM = 0x20,
+	/* software defined flags */
+	RNP_TX_FLAGS_SW_VLAN = 0x40,
+	RNP_TX_FLAGS_FCOE = 0x80,
+};
+#ifndef RNP_MAX_VF_CNT
+#define RNP_MAX_VF_CNT 64
+#endif /* RNP_MAX_VF_CNT */
+#define RNP_RX_RATE_HIGH 450000
+#define RNP_RX_COAL_TIME_HIGH 128
+#define RNP_RX_SIZE_THRESH 1024
+#define RNP_RX_RATE_THRESH (1000000 / RNP_RX_COAL_TIME_HIGH)
+#define RNP_SAMPLE_INTERVAL 0
+#define RNP_AVG_PKT_SMALL 256
+#define RNP_MAX_VF_MC_ENTRIES 30
+#define RNP_MAX_VF_FUNCTIONS RNP_MAX_VF_CNT
+#define RNP_MAX_VFTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define RNP_MAX_PF_MACVLANS_N10 15
+/* we reserve 2 for NCSI */
+#define RNP_MAX_PF_MACVLANS_N500 (15 - 2)
+#define PF_RING_CNT_WHEN_IOV_ENABLED 2
+#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
+
+enum vf_link_state {
+	rnpgbe_link_state_auto,
+	rnpgbe_link_state_on,
+	rnpgbe_link_state_off,
+};
+
+struct vf_data_storage {
+	unsigned char vf_mac_addresses[ETH_ALEN];
+	u16 vf_mc_hashes[RNP_MAX_VF_MC_ENTRIES];
+	u16 num_vf_mc_hashes;
+	u16 default_vf_vlan_id;
+	u16 vlans_enabled;
+	bool clear_to_send;
+	bool pf_set_mac;
+	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+	u16 vf_vlan; /* vf just can set 1 vlan */
+	u16 pf_qos;
+	u16 tx_rate;
+	int link_state;
+	u16 vlan_count;
+	u8 spoofchk_enabled;
+	u8 trusted;
+	unsigned long status;
+	unsigned int vf_api;
+};
+
+enum vf_state_t {
+	__VF_MBX_USED,
+};
+
+struct vf_macvlans {
+	struct list_head l;
+	int vf;
+	int rar_entry;
+	bool free;
+	bool is_macvlan;
+	u8 vf_macvlan[ETH_ALEN];
+};
+
+/* currently tx uses at most 4k for one descriptor */
+/* using 12k on feiteng platforms gives better netperf performance */
+#define RNP_MAX_TXD_PWR 12
+#define RNP_MAX_DATA_PER_TXD (1 << RNP_MAX_TXD_PWR)
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), RNP_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffers
+ */
+struct rnpgbe_tx_buffer {
+	struct rnpgbe_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct sk_buff *skb;
+	unsigned int bytecount;
+	unsigned short gso_segs;
+	bool gso_need_padding;
+	__be16 protocol;
+	__be16 priv_tags;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	union {
+		u32 mss_len_vf_num;
+		struct {
+			__le16 mss_len;
+			u8 vf_num;
+			u8 l4_hdr_len;
+		};
+	};
+	union {
+		u32 inner_vlan_tunnel_len;
+		struct {
+			u8 tunnel_hdr_len;
+			u8 inner_vlan_l;
+			u8 inner_vlan_h;
+			u8 resv;
+		};
+	};
+	bool ctx_flag;
+};
+
+struct rnpgbe_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else /* (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) */
+	__u16 page_offset;
+#endif /* (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) */
+	__u16 pagecnt_bias;
+};
+
+struct rnpgbe_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+struct rnpgbe_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 tx_done_old;
+	u64 clean_desc;
+	u64 poll_count;
+	u64 irq_more_count;
+	u64 send_bytes;
+	u64 send_bytes_to_hw;
+	u64 todo_update;
+	u64 send_done_bytes;
+	u64 vlan_add;
+	u64 tx_next_to_clean;
+	u64 tx_irq_miss;
+	u64 tx_equal_count;
+	u64 tx_clean_times;
+	u64 tx_clean_count;
+};
+
+struct rnpgbe_rx_queue_stats {
+	u64 driver_drop_packets;
+	u64 rsc_count;
+	u64 rsc_flush;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
+	u64 csum_err;
+	u64 csum_good;
+	u64 poll_again_count;
+	u64 vlan_remove;
+	u64 rx_next_to_clean;
+	u64 rx_irq_miss;
+	u64 rx_equal_count;
+	u64 rx_clean_times;
+	u64 rx_clean_count;
+};
+
+enum rnpgbe_ring_state_t {
+	__RNP_RX_3K_BUFFER,
+	__RNP_RX_BUILD_SKB_ENABLED,
+	__RNP_TX_FDIR_INIT_DONE,
+	__RNP_TX_XPS_INIT_DONE,
+	__RNP_TX_DETECT_HANG,
+	__RNP_HANG_CHECK_ARMED,
+	__RNP_RX_CSUM_UDP_ZERO_ERR,
+	__RNP_RX_FCOE,
+};
+
+enum {
+	PART_FW,
+	PART_CFG,
+	PART_MACSN,
+	PART_PCSPHY,
+	PART_PXE,
+};
+
+#define ring_uses_build_skb(ring)                                              \
+	test_bit(__RNP_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) test_bit(__RNP_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring)                                            \
+	set_bit(__RNP_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring)                                          \
+	clear_bit(__RNP_TX_DETECT_HANG, &(ring)->state)
+
+struct rnpgbe_ring {
+	struct rnpgbe_ring *next; /* pointer to next ring in q_vector */
+	struct rnpgbe_q_vector *q_vector; /* backpointer to host q_vector */
+	struct net_device *netdev; /* netdev ring belongs to */
+	struct device *dev; /* device for DMA mapping */
+	void *desc; /* descriptor ring memory */
+	union {
+		struct rnpgbe_tx_buffer *tx_buffer_info;
+		struct rnpgbe_rx_buffer *rx_buffer_info;
+	};
+	unsigned long last_rx_timestamp;
+	unsigned long state;
+	u8 __iomem *ring_addr;
+	u8 __iomem *tail;
+	u8 __iomem *dma_int_stat;
+	u8 __iomem *dma_int_mask;
+	u8 __iomem *dma_int_clr;
+	dma_addr_t dma; /* phys. address of descriptor ring */
+	unsigned int size; /* length in bytes */
+	u32 ring_flags;
+#define RNP_RING_FLAG_DELAY_SETUP_RX_LEN ((u32)(1 << 0))
+#define RNP_RING_FLAG_CHANGE_RX_LEN ((u32)(1 << 1))
+#define RNP_RING_FLAG_DO_RESET_RX_LEN ((u32)(1 << 2))
+#define RNP_RING_SKIP_TX_START ((u32)(1 << 3))
+#define RNP_RING_NO_TUNNEL_SUPPORT ((u32)(1 << 4))
+#define RNP_RING_SIZE_CHANGE_FIX ((u32)(1 << 5))
+#define RNP_RING_SCATER_SETUP ((u32)(1 << 6))
+#define RNP_RING_STAGS_SUPPORT ((u32)(1 << 7))
+#define RNP_RING_DOUBLE_VLAN_SUPPORT ((u32)(1 << 8))
+#define RNP_RING_VEB_MULTI_FIX ((u32)(1 << 9))
+#define RNP_RING_IRQ_MISS_FIX ((u32)(1 << 10))
+#define RNP_RING_OUTER_VLAN_FIX ((u32)(1 << 11))
+#define RNP_RING_CHKSM_FIX ((u32)(1 << 12))
+#define RNP_RING_LOWER_ITR ((u32)(1 << 13))
+	u8 pfvfnum;
+	u16 count; /* amount of descriptors */
+	u16 temp_count;
+	u16 reset_count;
+	u8 queue_index; /* queue_index needed for multiqueue queue management */
+	u8 rnpgbe_queue_idx; /* the real ring index used by the dma */
+	u16 next_to_use; /* tail (not-dma-mapped) */
+	u16 next_to_clean; /* soft-saved-head */
+	u16 device_id;
+#ifdef OPTM_WITH_LPAGE
+	u16 rx_page_buf_nums;
+	u32 rx_per_buf_mem;
+	struct sk_buff *skb;
+#endif /* OPTM_WITH_LPAGE */
+	union {
+		u16 next_to_alloc;
+		struct {
+			u8 atr_sample_rate;
+			u8 atr_count;
+		};
+	};
+
+	u8 dcb_tc;
+	struct rnpgbe_queue_stats stats;
+	struct u64_stats_sync syncp;
+	union {
+		struct rnpgbe_tx_queue_stats tx_stats;
+		struct rnpgbe_rx_queue_stats rx_stats;
+	};
+} ____cacheline_internodealigned_in_smp;
+
+#define RING2ADAPT(ring) netdev_priv((ring)->netdev)
+
+enum rnpgbe_ring_f_enum {
+	RING_F_NONE = 0,
+	RING_F_VMDQ, /* SR-IOV uses the same ring feature */
+	RING_F_RSS,
+	RING_F_FDIR,
+	RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+#define RNP_MAX_RSS_INDICES 128
+#define RNP_MAX_RSS_INDICES_UV3P 8
+#define RNP_MAX_VMDQ_INDICES 64
+#define RNP_MAX_FDIR_INDICES 63 /* based on q_vector limit */
+#define RNP_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES (128)
+#define MAX_TX_QUEUES (128)
+struct rnpgbe_ring_feature {
+	u16 limit; /* upper limit on feature indices */
+	u16 indices; /* current value of indices */
+	u16 mask; /* Mask used for feature to ring mapping */
+	u16 offset; /* offset to start of feature */
+} ____cacheline_internodealigned_in_smp;
+
+#define RNP_n10_VMDQ_8Q_MASK 0x78
+#define RNP_n10_VMDQ_4Q_MASK 0x7C
+#define RNP_n10_VMDQ_2Q_MASK 0x7E
+
+/*
+ * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
+ * this is twice the size of a half page we need to double the page order
+ * for FCoE enabled Rx queues.
+ */
+static inline unsigned int rnpgbe_rx_bufsz(struct rnpgbe_ring *ring)
+{
+	/* one rx descriptor carries at most half a page (2048 bytes);
+	 * scatter-gather is needed for jumbo frames
+	 */
+	return (RNP_RXBUFFER_1536 - NET_IP_ALIGN);
+}
+
+/* SG: each rx descriptor uses one page */
+static inline unsigned int rnpgbe_rx_pg_order(struct rnpgbe_ring *ring)
+{
+	/* fixed to one page; 3k buffers are not supported */
+	return 0;
+}
+#define rnpgbe_rx_pg_size(_ring) (PAGE_SIZE << rnpgbe_rx_pg_order(_ring))
+#define DEFAULT_ADV (RNP_LINK_SPEED_1GB_FULL | RNP_LINK_SPEED_100_FULL | \
+	RNP_LINK_SPEED_10_FULL | RNP_LINK_SPEED_10_HALF | \
+	RNP_LINK_SPEED_100_HALF)
+
+struct rnpgbe_ring_container {
+	struct rnpgbe_ring *ring; /* pointer to linked list of rings */
+	unsigned long next_update; /* jiffies value of last update */
+	unsigned int total_bytes; /* total bytes processed this int */
+	unsigned int total_packets; /* total packets processed this int */
+	unsigned int total_packets_old;
+	u16 work_limit; /* total work allowed per interrupt */
+	u16 count; /* total number of rings in vector */
+	u16 itr; /* current ITR/MSIX vector setting for ring */
+	u16 add_itr;
+	int update_count;
+};
+
+/* iterator for handling rings in ring container */
+#define rnpgbe_for_each_ring(pos, head)                                        \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+#define MAX_RX_PACKET_BUFFERS ((adapter->flags & RNP_FLAG_DCB_ENABLED) ? 8 : 1)
+#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
+
+/* MAX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+
+struct rnpgbe_q_vector {
+	int old_rx_count;
+	int new_rx_count;
+	int large_times;
+	int small_times;
+	int too_small_times;
+	int middle_time;
+	struct rnpgbe_adapter *adapter;
+	int v_idx;
+	/* index of q_vector within array, also used for
+	 * finding the bit in EICR and friends that
+	 * represents the vector for this ring
+	 */
+	u16 itr_rx;
+	u16 itr_tx;
+	struct rnpgbe_ring_container rx, tx;
+	struct napi_struct napi;
+	cpumask_t affinity_mask;
+	struct irq_affinity_notify affinity_notify;
+	int numa_node;
+	struct rcu_head rcu; /* to avoid race with update stats on free */
+	u32 vector_flags;
+#define RNP_QVECTOR_FLAG_IRQ_MISS_CHECK ((u32)(1 << 0))
+#define RNP_QVECTOR_FLAG_ITR_FEATURE ((u32)(1 << 1))
+#define RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS ((u32)(1 << 2))
+	int irq_check_usecs;
+	struct hrtimer irq_miss_check_timer; /* to check for missed irqs */
+	char name[IFNAMSIZ + 9];
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct rnpgbe_ring ring[] ____cacheline_internodealigned_in_smp;
+};
+
+static inline __le16 rnpgbe_test_ext_cmd(union rnpgbe_rx_desc *rx_desc,
+					 const u16 stat_err_bits)
+{
+	return rx_desc->wb.rev1 & cpu_to_le16(stat_err_bits);
+}
+
+#ifdef RNPGBE_HWMON
+#define RNPGBE_HWMON_TYPE_LOC 0
+#define RNPGBE_HWMON_TYPE_TEMP 1
+#define RNPGBE_HWMON_TYPE_CAUTION 2
+#define RNPGBE_HWMON_TYPE_MAX 3
+#define RNPGBE_HWMON_TYPE_NAME 4
+
+struct hwmon_attr {
+	struct device_attribute dev_attr;
+	struct rnpgbe_hw *hw;
+	struct rnpgbe_thermal_diode_data *sensor;
+	char name[12];
+};
+
+struct hwmon_buff {
+	struct attribute_group group;
+	const struct attribute_group *groups[2];
+	struct attribute *attrs[RNPGBE_MAX_SENSORS * 4 + 1];
+	struct hwmon_attr hwmon_list[RNPGBE_MAX_SENSORS * 4];
+	unsigned int n_hwmon;
+};
+#endif /* RNPGBE_HWMON */
+
+/* rnpgbe_test_staterr - tests bits in Rx descriptor status and error fields */
+static inline __le16 rnpgbe_test_staterr(union rnpgbe_rx_desc *rx_desc,
+					 const u16 stat_err_bits)
+{
+	return rx_desc->wb.cmd & cpu_to_le16(stat_err_bits);
+}
+
+static inline __le16 rnpgbe_get_stat(union rnpgbe_rx_desc *rx_desc,
+				     const u16 stat_mask)
+{
+	return rx_desc->wb.cmd & cpu_to_le16(stat_mask);
+}
+
+static inline u16 rnpgbe_desc_unused(struct rnpgbe_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
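+	/* Worked example (for illustration): with count = 512,
+	 * next_to_clean = 5 and next_to_use = 10 this returns
+	 * 512 + 5 - 10 - 1 = 506; with next_to_clean = 10 and
+	 * next_to_use = 5 it returns 0 + 10 - 5 - 1 = 4.
+	 */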
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+static inline u16 rnpgbe_desc_unused_rx(struct rnpgbe_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+#define RNP_RX_DESC(R, i) (&(((union rnpgbe_rx_desc *)((R)->desc))[i]))
+#define RNP_TX_DESC(R, i) (&(((struct rnpgbe_tx_desc *)((R)->desc))[i]))
+#define RNP_TX_CTXTDESC(R, i) (&(((struct rnpgbe_tx_ctx_desc *)((R)->desc))[i]))
+
+#define RNP_MAX_JUMBO_FRAME_SIZE 9590 /* Maximum Supported Size 9.5KB */
+#define RNP_MIN_MTU 68
+#define RNP500_MAX_JUMBO_FRAME_SIZE 9722 /* Maximum Supported Size 9728 */
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+/* default to trying for four seconds */
+#define RNP_TRY_LINK_TIMEOUT (4 * HZ)
+
+#define RNP_MAX_USER_PRIO (8)
+#define RNP_MAX_TCS_NUM (4)
+struct rnpgbe_pfc_cfg {
+	u8 pfc_max; /* max number of pfc channels the hardware can enable */
+	u8 hw_pfc_map; /* bitmap of priorities with pfc enabled */
+	u8 pfc_num; /* number of pfc channels currently enabled */
+	u8 pfc_en; /* whether the pfc feature is enabled */
+};
+
+struct rnpgbe_dcb_num_tcs {
+	u8 pg_tcs;
+	u8 pfc_tcs;
+};
+
+struct rnpgbe_dcb_cfg {
+	u8 tc_num;
+	u16 delay; /* pause time */
+	u8 dcb_en; /* whether the dcb feature is enabled */
+	u8 dcbx_mode;
+	struct rnpgbe_pfc_cfg pfc_cfg;
+	struct rnpgbe_dcb_num_tcs num_tcs;
+
+	/* statistic info */
+	u64 requests[RNP_MAX_TCS_NUM];
+	u64 indications[RNP_MAX_TCS_NUM];
+
+	enum rnpgbe_fc_mode last_lfc_mode;
+};
+
+struct rnpgbe_pps_cfg {
+	bool available;
+	struct timespec64 start;
+	struct timespec64 period;
+};
+
+enum rss_func_mode_enum {
+	rss_func_top,
+	rss_func_xor,
+	rss_func_order,
+};
+
+enum outer_vlan_type_enum {
+	outer_vlan_type_88a8,
+	outer_vlan_type_9100,
+	outer_vlan_type_9200,
+	outer_vlan_type_max,
+};
+
+enum irq_mode_enum {
+	irq_mode_legency,
+	irq_mode_msi,
+	irq_mode_msix,
+};
+
+/* board specific private data structure */
+struct rnpgbe_adapter {
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	unsigned long active_vlans_stags[BITS_TO_LONGS(VLAN_N_VID)];
+	/* OS defined structs */
+	u16 vf_vlan;
+	u16 vlan_count;
+	int miss_time;
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	bool quit_poll_thread;
+	struct task_struct *rx_poll_thread;
+	unsigned long state;
+	spinlock_t link_stat_lock;
+	/* these fields are used for automatic itr adjustment;
+	 * not well supported by the hardware
+	 */
+	unsigned long last_moder_packets[MAX_RX_QUEUES];
+	unsigned long last_moder_tx_packets;
+	unsigned long last_moder_bytes[MAX_RX_QUEUES];
+	unsigned long last_moder_jiffies;
+	int last_moder_time[MAX_RX_QUEUES];
+	/* only rx itr is supported */
+	int shut_down_temp;
+	int usecendcount;
+	u16 rx_usecs;
+	u16 rx_frames;
+	u16 usecstocount;
+	u16 tx_frames;
+	u16 tx_usecs;
+	u32 pkt_rate_low;
+	u16 rx_usecs_low;
+	u32 pkt_rate_high;
+	u16 rx_usecs_high;
+	u32 sample_interval;
+	u32 adaptive_rx_coal;
+	u32 adaptive_tx_coal;
+	u32 auto_rx_coal;
+	int napi_budge;
+	int eee_enabled; /* eee controller */
+	int eee_active; /* actual eee status */
+	int tx_lpi_timer;
+	bool tx_path_in_lpi_mode;
+	bool en_tx_lpi_clockgating;
+	int eee_timer;
+	int local_eee;
+	int partner_eee;
+	struct mutex eee_lock;
+
+	union {
+		int phy_addr;
+		struct {
+			u8 mod_abs : 1;
+			u8 fault : 1;
+			u8 tx_dis : 1;
+			u8 los : 1;
+		} sfp;
+	};
+
+	struct {
+		u32 main;
+		u32 pre;
+		u32 post;
+		u32 tx_boost;
+	} si;
+
+	int speed;
+
+	u8 an : 1;
+	u8 fec : 1;
+	u8 link_traing : 1;
+	u8 duplex : 1;
+	u8 rpu_inited : 1;
+
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
+	 */
+	u32 vf_num_for_pf;
+	u32 flags;
+	u32 gephy_test_mode;
+#define RNP_FLAG_MSI_CAPABLE ((u32)(1 << 0))
+#define RNP_FLAG_MSI_ENABLED ((u32)(1 << 1))
+#define RNP_FLAG_MSIX_CAPABLE ((u32)(1 << 2))
+#define RNP_FLAG_MSIX_ENABLED ((u32)(1 << 3))
+#define RNP_FLAG_RX_1BUF_CAPABLE ((u32)(1 << 4))
+#define RNP_FLAG_RX_PS_CAPABLE ((u32)(1 << 5))
+#define RNP_FLAG_RX_PS_ENABLED ((u32)(1 << 6))
+#define RNP_FLAG_IN_NETPOLL ((u32)(1 << 7))
+#define RNP_FLAG_DCA_ENABLED ((u32)(1 << 8))
+#define RNP_FLAG_DCA_CAPABLE ((u32)(1 << 9))
+#define RNP_FLAG_IMIR_ENABLED ((u32)(1 << 10))
+#define RNP_FLAG_MQ_CAPABLE ((u32)(1 << 11))
+#define RNP_FLAG_DCB_ENABLED ((u32)(1 << 12))
+#define RNP_FLAG_VMDQ_CAPABLE ((u32)(1 << 13))
+#define RNP_FLAG_VMDQ_ENABLED ((u32)(1 << 14))
+#define RNP_FLAG_FAN_FAIL_CAPABLE ((u32)(1 << 15))
+#define RNP_FLAG_NEED_LINK_UPDATE ((u32)(1 << 16))
+#define RNP_FLAG_NEED_LINK_CONFIG ((u32)(1 << 17))
+#define RNP_FLAG_FDIR_HASH_CAPABLE ((u32)(1 << 18))
+#define RNP_FLAG_FDIR_PERFECT_CAPABLE ((u32)(1 << 19))
+#define RNP_FLAG_FCOE_CAPABLE ((u32)(1 << 20))
+#define RNP_FLAG_FCOE_ENABLED ((u32)(1 << 21))
+#define RNP_FLAG_SRIOV_CAPABLE ((u32)(1 << 22))
+#define RNP_FLAG_SRIOV_ENABLED ((u32)(1 << 23))
+#define RNP_FLAG_VXLAN_OFFLOAD_CAPABLE ((u32)(1 << 24))
+#define RNP_FLAG_VXLAN_OFFLOAD_ENABLE ((u32)(1 << 25))
+#define RNP_FLAG_SWITCH_LOOPBACK_EN ((u32)(1 << 26))
+#define RNP_FLAG_SRIOV_INIT_DONE ((u32)(1 << 27))
+#define RNP_FLAG_IN_IRQ ((u32)(1 << 28))
+#define RNP_FLAG_VF_INIT_DONE ((u32)(1 << 29))
+#define RNP_FLAG_LEGACY_CAPABLE ((u32)(1 << 30))
+#define RNP_FLAG_LEGACY_ENABLED ((u32)(1 << 31))
+	u32 flags2;
+#define RNP_FLAG2_RSC_CAPABLE ((u32)(1 << 0))
+#define RNP_FLAG2_RSC_ENABLED ((u32)(1 << 1))
+#define RNP_FLAG2_TEMP_SENSOR_CAPABLE ((u32)(1 << 2))
+#define RNP_FLAG2_TEMP_SENSOR_EVENT ((u32)(1 << 3))
+#define RNP_FLAG2_SEARCH_FOR_SFP ((u32)(1 << 4))
+#define RNP_FLAG2_SFP_NEEDS_RESET ((u32)(1 << 5))
+#define RNP_FLAG2_RESET_REQUESTED ((u32)(1 << 6))
+#define RNP_FLAG2_FDIR_REQUIRES_REINIT ((u32)(1 << 7))
+#define RNP_FLAG2_RSS_FIELD_IPV4_UDP ((u32)(1 << 8))
+#define RNP_FLAG2_RSS_FIELD_IPV6_UDP ((u32)(1 << 9))
+#define RNP_FLAG2_PTP_ENABLED ((u32)(1 << 10))
+#define RNP_FLAG2_PTP_PPS_ENABLED ((u32)(1 << 11))
+#define RNP_FLAG2_BRIDGE_MODE_VEB ((u32)(1 << 12))
+#define RNP_FLAG2_VLAN_STAGS_ENABLED ((u32)(1 << 13))
+#define RNP_FLAG2_UDP_TUN_REREG_NEEDED ((u32)(1 << 14))
+#define RNP_FLAG2_RESET_PF ((u32)(1 << 15))
+#define RNP_FLAG2_CHKSM_FIX ((u32)(1 << 16))
+#define RNP_FLAG2_INSMOD ((u32)(1 << 17))
+	u32 priv_flags;
+#define RNP_PRIV_FLAG_MAC_LOOPBACK BIT(0)
+#define RNP_PRIV_FLAG_SWITCH_LOOPBACK BIT(1)
+#define RNP_PRIV_FLAG_VEB_ENABLE BIT(2)
+#define RNP_PRIV_FLAG_FT_PADDING BIT(3)
+#define RNP_PRIV_FLAG_PADDING_DEBUG BIT(4)
+#define RNP_PRIV_FLAG_PTP_DEBUG BIT(5)
+#define RNP_PRIV_FLAG_SIMUATE_DOWN BIT(6)
+#define RNP_PRIV_FLAG_VXLAN_INNER_MATCH BIT(7)
+#define RNP_PRIV_FLAG_ULTRA_SHORT BIT(8)
+#define RNP_PRIV_FLAG_DOUBLE_VLAN BIT(9)
+#define RNP_PRIV_FLAG_TCP_SYNC BIT(10)
+#define RNP_PRIV_FLAG_PAUSE_OWN BIT(11)
+#define RNP_PRIV_FLAG_JUMBO BIT(12)
+#define RNP_PRIV_FLAG_TX_PADDING BIT(13)
+#define RNP_PRIV_FLAG_RX_ALL BIT(14)
+#define RNP_PRIV_FLAG_REC_HDR_LEN_ERR BIT(15)
+#define RNP_PRIV_FLAG_RX_FCS BIT(16)
+#define RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE BIT(17)
+#define RNP_PRIV_FLGA_TEST_TX_HANG BIT(18)
+#define RNP_PRIV_FLAG_RX_SKIP_EN BIT(19)
+#define RNP_PRIV_FLAG_TCP_SYNC_PRIO BIT(20)
+#define RNP_PRIV_FLAG_REMAP_PRIO BIT(21)
+#define RNP_PRIV_FLAG_8023_PRIO BIT(22)
+#define RNP_PRIV_FLAG_SRIOV_VLAN_MODE BIT(23)
+#define RNP_PRIV_FLAG_SOFT_TX_PADDING BIT(24)
+#define RNP_PRIV_FLAG_TX_COALESCE BIT(25)
+#define RNP_PRIV_FLAG_RX_COALESCE BIT(26)
+#define RNP_PRIV_FLAG_LLDP BIT(27)
+#define RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE BIT(28)
+
+#define PRIV_DATA_EN BIT(7)
+	int rss_func_mode;
+	int outer_vlan_type;
+	int tcp_sync_queue;
+	int priv_skip_count;
+
+	u64 rx_drop_status;
+	int drop_time;
+	/* Tx fast path data */
+	unsigned int num_tx_queues;
+	unsigned int max_ring_pair_counts;
+	u16 tx_work_limit;
+	/* Rx fast path data */
+	int num_rx_queues;
+	u16 rx_itr_setting;
+	u32 eth_queue_idx;
+	u32 max_rate[MAX_TX_QUEUES];
+	/* TX */
+	struct rnpgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
+	int tx_ring_item_count;
+
+	u64 restart_queue;
+	u64 lsc_int;
+	u32 tx_timeout_count;
+
+	/* RX */
+	struct rnpgbe_ring *rx_ring[MAX_RX_QUEUES];
+	int rx_ring_item_count;
+
+	u64 hw_csum_rx_error;
+	u64 hw_csum_rx_good;
+	u64 hw_rx_no_dma_resources;
+	u64 rsc_total_count;
+	u64 rsc_total_flush;
+	u64 non_eop_descs;
+	u32 alloc_rx_page_failed;
+	u32 alloc_rx_buff_failed;
+
+	int num_other_vectors;
+	int irq_mode;
+	struct rnpgbe_q_vector *q_vector[MAX_Q_VECTORS];
+
+	/*used for IEEE 1588 ptp clock start*/
+	u8 __iomem *ptp_addr;
+	int gmac4;
+	const struct rnpgbe_hwtimestamp *hwts_ops;
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_clock_ops;
+	struct sk_buff *ptp_tx_skb;
+	struct hwtstamp_config tstamp_config;
+	u32 ptp_config_value;
+	spinlock_t ptp_lock; /* Used to protect the SYSTIME registers. */
+
+	u64 clk_ptp_rate; /* unit is Hz, 1 MHz = 1,000,000 Hz */
+	u32 sub_second_inc;
+	u32 systime_flags;
+	struct timespec64 ptp_prev_hw_time;
+	unsigned int default_addend;
+	bool ptp_tx_en;
+	bool ptp_rx_en;
+
+	struct work_struct tx_hwtstamp_work;
+	unsigned long tx_hwtstamp_start;
+	unsigned long tx_hwtstamp_skipped;
+	unsigned long tx_timeout_factor;
+	u64 tx_hwtstamp_timeouts;
+	/*used for IEEE 1588 ptp clock end */
+
+	/* DCB parameters */
+	struct rnpgbe_dcb_cfg dcb_cfg;
+	u8 prio_tc_map[RNP_MAX_USER_PRIO * 2];
+	u8 num_tc;
+
+	int num_q_vectors; /* current number of q_vectors for device */
+	int max_q_vectors; /* true count of q_vectors for device */
+	struct rnpgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+	struct msix_entry *msix_entries;
+
+	u32 test_icr;
+	struct rnpgbe_ring test_tx_ring;
+	struct rnpgbe_ring test_rx_ring;
+
+	/* structs defined in rnpgbe_hw.h */
+	struct rnpgbe_hw hw;
+	u16 msg_enable;
+	struct rnpgbe_hw_stats hw_stats;
+
+	u64 tx_busy;
+
+	u32 link_speed;
+	bool link_up;
+	bool duplex_status;
+	u32 link_speed_old;
+	bool link_up_old;
+	bool duplex_old;
+	unsigned long link_check_timeout;
+
+	struct timer_list service_timer;
+	struct work_struct service_task;
+	struct timer_list eee_ctrl_timer;
+
+	/* fdir relative */
+	struct hlist_head fdir_filter_list;
+	unsigned long fdir_overflow; /* number of times ATR was backed off */
+	union rnpgbe_atr_input fdir_mask;
+	int fdir_mode;
+	int fdir_filter_count;
+	/* fixme: convert to a bitmap,
+	 * unsigned long layer2_bit[BITS_TO_LONGS(RNP_MAX_LAYER2_FILTERS)];
+	 */
+	int layer2_count;
+	/* fixme: convert to a bitmap,
+	 * unsigned long tuple5_bit[BITS_TO_LONGS(RNP_MAX_TCAM_FILTERS)];
+	 */
+	int tuple_5_count;
+	u32 fdir_pballoc; /* total count */
+	u32 atr_sample_rate;
+	spinlock_t fdir_perfect_lock;
+
+	u8 __iomem *io_addr_bar0;
+	u8 __iomem *io_addr;
+	u32 wol;
+
+	u16 bd_number;
+	u16 q_vector_off;
+
+	u16 eeprom_verh;
+	u16 eeprom_verl;
+	u16 eeprom_cap;
+
+	u16 stags_vid;
+	/* sysfs debug info */
+	u32 sysfs_tx_ring_num;
+	u32 sysfs_rx_ring_num;
+	u32 sysfs_tx_desc_num;
+	u32 sysfs_rx_desc_num;
+
+	u32 sysfs_mii_reg;
+	u32 sysfs_mii_value;
+	u32 sysfs_mii_control;
+
+	u32 interrupt_event;
+	u32 led_reg;
+
+	/* maintain */
+	char *maintain_buf;
+	int maintain_buf_len;
+	void *maintain_dma_buf;
+	dma_addr_t maintain_dma_phy;
+	int maintain_dma_size;
+	int maintain_in_bytes;
+
+	/* SR-IOV */
+	DECLARE_BITMAP(active_vfs, RNP_MAX_VF_FUNCTIONS);
+	unsigned int num_vfs;
+	struct vf_data_storage *vfinfo;
+	int vf_rate_link_speed;
+	struct vf_macvlans vf_mvs;
+	struct vf_macvlans *mv_list;
+
+	u32 timer_event_accumulator;
+	u32 vferr_refcount;
+	struct kobject *info_kobj;
+#ifdef RNPGBE_SYSFS
+#ifdef RNPGBE_HWMON
+	struct hwmon_buff *rnpgbe_hwmon_buff;
+#endif /* RNPGBE_HWMON */
+#endif /* RNPGBE_SYSFS */
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *rnpgbe_dbg_adapter;
+#endif /*CONFIG_DEBUG_FS*/
+	u8 default_up;
+	u8 port; /* nr_pf_port: 0 or 1 */
+	u8 portid_of_card; /* port num in card*/
+#define RNP_MAX_RETA_ENTRIES 512
+	u8 rss_indir_tbl[RNP_MAX_RETA_ENTRIES];
+#define RNP_MAX_TC_ENTRIES 8
+	u8 rss_tc_tbl[RNP_MAX_TC_ENTRIES];
+	int rss_indir_tbl_num;
+	int rss_tc_tbl_num;
+	u32 rss_tbl_setup_flag;
+#define RNP_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+	u8 rss_key[RNP_RSS_KEY_SIZE];
+	u32 rss_key_setup_flag;
+	bool dma2_in_1pf;
+	char name[60];
+};
+
+struct rnpgbe_fdir_filter {
+	struct hlist_node fdir_node;
+	union rnpgbe_atr_input filter;
+	u16 sw_idx;
+	u16 hw_idx;
+	u32 vf_num;
+	u64 action;
+};
+
+enum rnpgbe_state_t {
+	__RNP_TESTING,
+	__RNP_RESETTING,
+	__RNP_DOWN,
+	__RNP_SERVICE_SCHED,
+	__RNP_IN_SFP_INIT,
+	__RNP_READ_I2C,
+	__RNP_PTP_TX_IN_PROGRESS,
+	__RNP_USE_VFINFI,
+	__RNP_IN_IRQ,
+	__RNP_REMOVE,
+	__RNP_SERVICE_CHECK,
+	__RNP_EEE_REMOVE,
+};
+
+struct rnpgbe_cb {
+	union { /* Union defining head/tail partner */
+		struct sk_buff *head;
+		struct sk_buff *tail;
+	};
+	dma_addr_t dma;
+	u16 append_cnt;
+	bool page_released;
+};
+#define RNP_CB(skb) ((struct rnpgbe_cb *)(skb)->cb)
+
+enum rnpgbe_boards {
+	board_n10_709_1pf_2x10G,
+	board_vu440s,
+	board_n10,
+	board_n400,
+	board_n20,
+	board_n500,
+	board_n210,
+};
+
+extern char rnpgbe_driver_name[];
+extern const char rnpgbe_driver_version[];
+extern void rnpgbe_up(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_down(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_reinit_locked(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_reset(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_setup_rx_resources(struct rnpgbe_ring *ring,
+				     struct rnpgbe_adapter *adapter);
+extern int rnpgbe_setup_tx_resources(struct rnpgbe_ring *ring,
+				     struct rnpgbe_adapter *adapter);
+extern void rnpgbe_free_rx_resources(struct rnpgbe_ring *ring);
+extern void rnpgbe_free_tx_resources(struct rnpgbe_ring *ring);
+extern void rnpgbe_configure_rx_ring(struct rnpgbe_adapter *adapter,
+				     struct rnpgbe_ring *ring);
+extern void rnpgbe_configure_tx_ring(struct rnpgbe_adapter *adapter,
+				     struct rnpgbe_ring *ring);
+extern void rnpgbe_disable_rx_queue(struct rnpgbe_adapter *adapter,
+				    struct rnpgbe_ring *ring);
+extern void rnpgbe_update_stats(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_init_interrupt_scheme(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_wol_supported(struct rnpgbe_adapter *adapter, u16 device_id);
+extern void rnpgbe_clear_interrupt_scheme(struct rnpgbe_adapter *adapter);
+extern void
+rnpgbe_unmap_and_free_tx_resource(struct rnpgbe_ring *ring,
+				  struct rnpgbe_tx_buffer *tx_buffer_info);
+extern void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *ring, u16 count);
+extern int rnpgbe_poll(struct napi_struct *napi, int budget);
+extern int ethtool_ioctl(struct ifreq *ifr);
+extern s32 rnpgbe_reinit_fdir_tables_n10(struct rnpgbe_hw *hw);
+extern s32 rnpgbe_init_fdir_signature_n10(struct rnpgbe_hw *hw, u32 fdirctrl);
+extern s32 rnpgbe_init_fdir_perfect_n10(struct rnpgbe_hw *hw, u32 fdirctrl);
+extern s32 rnpgbe_fdir_add_signature_filter_n10(
+	struct rnpgbe_hw *hw, union rnpgbe_atr_hash_dword input,
+	union rnpgbe_atr_hash_dword common, u8 queue);
+
+extern void rnpgbe_release_hw_control(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_get_hw_control(struct rnpgbe_adapter *adapter);
+extern s32 rnpgbe_fdir_set_input_mask_n10(struct rnpgbe_hw *hw,
+					  union rnpgbe_atr_input *input_mask);
+extern s32 rnpgbe_fdir_write_perfect_filter_n10(struct rnpgbe_hw *hw,
+						union rnpgbe_atr_input *input,
+						u16 soft_id, u8 queue);
+extern s32 rnpgbe_fdir_write_perfect_filter(int fdir_mode, struct rnpgbe_hw *hw,
+					    union rnpgbe_atr_input *filter,
+					    u16 hw_id, u8 queue,
+					    bool prio_flag);
+extern s32 rnpgbe_fdir_erase_perfect_filter_n10(struct rnpgbe_hw *hw,
+						union rnpgbe_atr_input *input,
+						u16 soft_id);
+extern void rnpgbe_atr_compute_perfect_hash_n10(union rnpgbe_atr_input *input,
+						union rnpgbe_atr_input *mask);
+extern bool rnpgbe_verify_lesm_fw_enabled_n10(struct rnpgbe_hw *hw);
+extern void rnpgbe_set_rx_mode(struct net_device *netdev);
+extern int rnpgbe_setup_tx_maxrate(struct rnpgbe_ring *tx_ring, u64 max_rate,
+				   int sample_interval);
+extern int rnpgbe_setup_tc(struct net_device *dev, u8 tc);
+
+void rnpgbe_check_options(struct rnpgbe_adapter *adapter);
+
+void rnpgbe_maybe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring,
+			      struct rnpgbe_tx_buffer *first, u32 type_tucmd);
+
+extern void rnpgbe_store_reta(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_store_key(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_init_rss_key(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_init_rss_table(struct rnpgbe_adapter *adapter);
+extern s32 rnpgbe_fdir_erase_perfect_filter(int fdir_mode, struct rnpgbe_hw *hw,
+					    union rnpgbe_atr_input *input,
+					    u16 hw_id);
+extern u32 rnpgbe_rss_indir_tbl_entries(struct rnpgbe_adapter *adapter);
+#ifdef CONFIG_RNPGBE_HWMON
+extern void rnpgbe_sysfs_exit(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_sysfs_init(struct rnpgbe_adapter *adapter);
+#endif /* CONFIG_RNPGBE_HWMON */
+#ifdef CONFIG_DEBUG_FS
+extern void rnpgbe_dbg_adapter_init(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_dbg_adapter_exit(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_dbg_init(void);
+extern void rnpgbe_dbg_exit(void);
+#else /* CONFIG_DEBUG_FS */
+static inline void rnpgbe_dbg_adapter_init(struct rnpgbe_adapter *adapter)
+{
+}
+static inline void rnpgbe_dbg_adapter_exit(struct rnpgbe_adapter *adapter)
+{
+}
+static inline void rnpgbe_dbg_init(void)
+{
+}
+static inline void rnpgbe_dbg_exit(void)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+static inline struct netdev_queue *txring_txq(const struct rnpgbe_ring *ring)
+{
+	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+extern void rnpgbe_ptp_init(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_ptp_stop(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_ptp_overflow_check(struct rnpgbe_adapter *adapter);
+extern void rnpgbe_ptp_rx_hang(struct rnpgbe_adapter *adapter);
+extern void __rnpgbe_ptp_rx_hwtstamp(struct rnpgbe_q_vector *q_vector,
+				     struct sk_buff *skb);
+
+static inline void rnpgbe_ptp_rx_hwtstamp(struct rnpgbe_ring *rx_ring,
+					  union rnpgbe_rx_desc *rx_desc,
+					  struct sk_buff *skb)
+{
+	if (unlikely(!rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_PTP)))
+		return;
+	/*
+	 * Update the last_rx_timestamp timer in order to enable watchdog check
+	 * for error case of latched timestamp on a dropped packet.
+	 */
+	rx_ring->last_rx_timestamp = jiffies;
+}
+
+static inline int ignore_veb_vlan(struct rnpgbe_adapter *adapter,
+				  union rnpgbe_rx_desc *rx_desc)
+{
+	if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+		     (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_IGNORE_VLAN))) {
+		return 1;
+	}
+	return 0;
+}
+
+static inline int ignore_veb_pkg_err(struct rnpgbe_adapter *adapter,
+				     union rnpgbe_rx_desc *rx_desc)
+{
+	if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+		     (cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_PKG))) {
+		return 1;
+	}
+	return 0;
+}
+
+int rnpgbe_update_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+				     struct rnpgbe_fdir_filter *input,
+				     u16 sw_idx);
+
+static inline int rnpgbe_is_pf1(struct pci_dev *pdev)
+{
+	return ((pdev->devfn & 0x1) ? 1 : 0);
+}
+
+static inline int rnpgbe_get_fuc(struct pci_dev *pdev)
+{
+	return pdev->devfn;
+}
+
+extern void rnpgbe_sysfs_exit(struct rnpgbe_adapter *adapter);
+extern int rnpgbe_sysfs_init(struct rnpgbe_adapter *adapter);
+
+#ifdef CONFIG_PCI_IOV
+void rnpgbe_sriov_reinit(struct rnpgbe_adapter *adapter);
+#endif /* CONFIG_PCI_IOV */
+
+#define SET_BIT(n, var) (var = (var | (1 << n)))
+#define CLR_BIT(n, var) (var = (var & (~(1 << n))))
+#define CHK_BIT(n, var) (var & (1 << n))
+#define RNP_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+static inline bool rnpgbe_removed(void __iomem *addr)
+{
+	return unlikely(!addr);
+}
+#define RNPGBE_REMOVED(a) rnpgbe_removed(a)
+int rnpgbe_fw_msg_handler(struct rnpgbe_adapter *adapter);
+int rnp500_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin,
+		     int bytes);
+int rnpgbe_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin,
+		     int bytes);
+#define RNPM_FW_VERSION_NEW_ETHTOOL 0x00050010
+void rnpgbe_service_event_schedule(struct rnpgbe_adapter *adapter);
+static inline bool rnpgbe_fw_is_old_ethtool(struct rnpgbe_hw *hw)
+{
+	return hw->fw_version < RNPM_FW_VERSION_NEW_ETHTOOL;
+}
+
+int rsp_hal_sfc_flash_erase(struct rnpgbe_hw *hw, u32 size);
+
+#endif /* _RNPGBE_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
new file mode 100644
index 0000000000000..e0795ba6166b6
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -0,0 +1,4165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+
+#include "rnpgbe.h"
+#include "rnpgbe_phy.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_ethtool.h"
+#include "rnpgbe_sriov.h"
+
+#define RNP_N500_PKT_LEN_ERR (2)
+#define RNP_N500_HDR_LEN_ERR (1)
+#define RNP_N500_MAX_VF 8
+#define RNP_N500_RSS_TBL_NUM 128
+#define RNP_N500_RSS_TC_TBL_NUM 8
+#define RNP_N500_MAX_TX_QUEUES 8
+#define RNP_N500_MAX_RX_QUEUES 8
+#define NCSI_RAR_NUM (2)
+#define NCSI_MC_NUM (5)
+/* we reserve 2 rar entries for ncsi */
+#define RNP_N500_RAR_ENTRIES (32 - NCSI_RAR_NUM)
+#define NCSI_RAR_IDX_START (32 - NCSI_RAR_NUM)
+#define RNP_N500_MC_TBL_SIZE 128
+#define RNP_N500_VFT_TBL_SIZE 128
+#define RNP_N500_MSIX_VECTORS 32
+
+#define RNP500_MAX_LAYER2_FILTERS 16
+#define RNP500_MAX_TUPLE5_FILTERS 128
+
+enum n500_priv_bits {
+	n500_mac_loopback = 0,
+	n500_padding_enable = 8,
+};
+
+static const char rnp500_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define RNP500_MAC_LOOPBACK BIT(0)
+#define RNP500_TX_SOLF_PADDING BIT(1)
+#define RNP500_PADDING_DEBUG BIT(2)
+#define RNP500_SIMULATE_DOWN BIT(3)
+#define RNP500_ULTRA_SHORT BIT(4)
+#define RNP500_DOUBLE_VLAN BIT(5)
+#define RNP500_PAUSE_OWN BIT(6)
+#define RNP500_STAGS_ENABLE BIT(7)
+#define RNP500_JUMBO_ENABLE BIT(8)
+#define RNP500_TX_PADDING BIT(9)
+#define RNP500_REC_HDR_LEN_ERR BIT(10)
+#define RNP500_DOUBLE_VLAN_RECEIVE BIT(11)
+#define RNP500_RX_SKIP_EN BIT(12)
+#define RNP500_TCP_SYNC_PRIO BIT(13)
+#define RNP500_REMAP_PRIO BIT(14)
+#define RNP500_8023_PRIO BIT(15)
+#define RNP500_SRIOV_VLAN_MODE BIT(16)
+#define RNP500_LLDP_EN BIT(17)
+#define RNP500_FORCE_CLOSE BIT(18)
+	"mac_loopback",
+	"soft_tx_padding_off",
+	"padding_debug",
+	"simulate_link_down",
+	"ultra_short_packet",
+	"double_vlan",
+	"pause_use_own_address",
+	"stags_enable",
+	"jumbo_enable",
+	"mac_tx_padding_off",
+	"mask_len_err",
+	"double_vlan_receive",
+	"rx_skip_en",
+	"tcp_sync_prio",
+	"remap_prio",
+	"8023_prio",
+	"sriov_vlan_mode",
+	"lldp_en",
+	"link_down_on_close",
+};
+
+#define RNP500_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnp500_priv_flags_strings)
+/* setup queue speed limit to max_rate */
+static void rnpgbe_dma_set_tx_maxrate_n500(struct rnpgbe_dma_info *dma,
+					   u16 queue, u32 max_rate)
+{
+	/* todo */
+}
+
+/* setup mac with vf_num to veb table */
+static void rnpgbe_dma_set_veb_mac_n500(struct rnpgbe_dma_info *dma, u8 *mac,
+					u32 vfnum, u32 ring)
+{
+	/* n500 only has 1 port veb table */
+	u32 maclow, machi, ring_vfnum;
+	int port;
+
+	maclow = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
+	machi = (mac[0] << 8) | mac[1];
+	ring_vfnum = ring | ((0x80 | vfnum) << 8);
+	for (port = 0; port < 1; port++) {
+		dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, vfnum),
+			 maclow);
+		dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, vfnum),
+			 machi);
+		dma_wr32(dma, RNP500_DMA_PORT_VEB_VF_RING_TBL(port, vfnum),
+			 ring_vfnum);
+	}
+}
+
+/* setup vlan with vf_num to veb table */
+static void rnpgbe_dma_set_veb_vlan_n500(struct rnpgbe_dma_info *dma, u16 vlan,
+					 u32 vfnum)
+{
+	int port;
+
+	/* each vf can support only one vlan */
+	for (port = 0; port < 1; port++)
+		dma_wr32(dma, RNP500_DMA_PORT_VEB_VID_TBL(port, vfnum), vlan);
+}
+
+static void rnpgbe_dma_set_veb_vlan_mask_n500(struct rnpgbe_dma_info *dma,
+					      u16 vlan, u16 mask, int entry)
+{
+	/* bit 19:12 is mask bit 11:0 is vid */
+	dma_wr32(dma, RNP500_DMA_PORT_VEB_VID_TBL(0, entry),
+		 (mask << 12) | vlan);
+}
+
+static void rnpgbe_dma_clr_veb_all_n500(struct rnpgbe_dma_info *dma)
+{
+	int port, i;
+
+	for (port = 0; port < 1; port++) {
+		for (i = 0; i < RNP500_VEB_TBL_CNTS; i++) {
+			dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, i),
+				 0);
+			dma_wr32(dma, RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, i),
+				 0);
+			dma_wr32(dma, RNP500_DMA_PORT_VEB_VID_TBL(port, i), 0);
+			dma_wr32(dma, RNP500_DMA_PORT_VEB_VF_RING_TBL(port, i),
+				 0);
+		}
+	}
+}
+
+static struct rnpgbe_dma_operations dma_ops_n500 = {
+	.set_tx_maxrate = &rnpgbe_dma_set_tx_maxrate_n500,
+	.set_veb_mac = &rnpgbe_dma_set_veb_mac_n500,
+	.set_veb_vlan = &rnpgbe_dma_set_veb_vlan_n500,
+	.set_veb_vlan_mask = &rnpgbe_dma_set_veb_vlan_mask_n500,
+	.clr_veb_all = &rnpgbe_dma_clr_veb_all_n500,
+
+};
+
+/**
+ *  rnpgbe_eth_set_rar_n500 - Set Rx address register
+ *  @eth: pointer to eth structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @enable_addr: set flag that address is active
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+static s32 rnpgbe_eth_set_rar_n500(struct rnpgbe_eth_info *eth,
+				   u32 index, u8 *addr,
+				   bool enable_addr)
+{
+	u32 mcstctrl;
+	u32 rar_low, rar_high = 0;
+	u32 rar_entries = eth->num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries) {
+		rnpgbe_err("RAR index %d is out of range.\n", index);
+		return RNP_ERR_INVALID_ARGUMENT;
+	}
+
+	eth_dbg(eth, "    RAR[%d] <= %pM\n", index, addr);
+
+	/*
+	 * The HW register layout stores the last octet in the least
+	 * significant byte, so reverse the byte order from network order
+	 * (big endian).
+	 */
+	rar_low = ((u32)addr[5] | ((u32)addr[4] << 8) | ((u32)addr[3] << 16) |
+		   ((u32)addr[2] << 24));
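+	/* For illustration: addr = 00:11:22:33:44:55 gives
+	 * rar_low = 0x22334455 and the low 16 bits of rar_high = 0x0011.
+	 */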
+	/*
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
+	rar_high = eth_rd32(eth, RNP500_ETH_RAR_RH(index));
+	rar_high &= ~(0x0000FFFF | RNP500_RAH_AV);
+	rar_high |= ((u32)addr[1] | ((u32)addr[0] << 8));
+
+	if (enable_addr)
+		rar_high |= RNP500_RAH_AV;
+
+	eth_wr32(eth, RNP500_ETH_RAR_RL(index), rar_low);
+	eth_wr32(eth, RNP500_ETH_RAR_RH(index), rar_high);
+
+	/* Enable the unicast filter table. Unicast filtering is not used
+	 * yet, but it must stay enabled: the dest-mac filter is OR'ed with
+	 * the unicast table, so all packets would be passed up if the
+	 * table were disabled.
+	 */
+	mcstctrl = eth_rd32(eth, RNP500_ETH_DMAC_MCSTCTRL);
+	mcstctrl |= RNP500_MCSTCTRL_UNICASE_TBL_EN;
+	eth_wr32(eth, RNP500_ETH_DMAC_MCSTCTRL, mcstctrl);
+
+	return 0;
+}
+
+/**
+ *  rnpgbe_eth_clear_rar_n500 - Remove Rx address register
+ *  @eth: pointer to eth structure
+ *  @index: Receive address register to write
+ *
+ *  Clears an ethernet address from a receive address register.
+ **/
+static s32 rnpgbe_eth_clear_rar_n500(struct rnpgbe_eth_info *eth,
+				     u32 index)
+{
+	u32 rar_high;
+	u32 rar_entries = eth->num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries) {
+		eth_dbg(eth, "RAR index %d is out of range.\n", index);
+		return RNP_ERR_INVALID_ARGUMENT;
+	}
+
+	/*
+	 * Some parts put the VMDq setting in the extra RAH bits,
+	 * so save everything except the lower 16 bits that hold part
+	 * of the address and the address valid bit.
+	 */
+	rar_high = eth_rd32(eth, RNP500_ETH_RAR_RH(index));
+	rar_high &= ~(0x0000FFFF | RNP500_RAH_AV);
+	eth_wr32(eth, RNP500_ETH_RAR_RL(index), 0);
+	eth_wr32(eth, RNP500_ETH_RAR_RH(index), rar_high);
+
+	/* clear VMDq pool/queue selection for this RAR */
+	eth->ops.clear_vmdq(eth, index, RNP_CLEAR_VMDQ_ALL);
+
+	return 0;
+}
+
+/**
+ *  rnpgbe_eth_set_vmdq_n500 - Associate a VMDq pool index with a rx address
+ *  @eth: pointer to eth struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq pool index
+ *  only mac->vf
+ **/
+static s32 rnpgbe_eth_set_vmdq_n500(struct rnpgbe_eth_info *eth,
+				    u32 rar, u32 vmdq)
+{
+	u32 rar_entries = eth->num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		eth_dbg(eth, "RAR index %d is out of range.\n", rar);
+		return RNP_ERR_INVALID_ARGUMENT;
+	}
+
+	eth_wr32(eth, RNP500_VM_DMAC_MPSAR_RING(rar), vmdq);
+
+	return 0;
+}
+
+/**
+ *  rnpgbe_eth_clear_vmdq_n500 - Disassociate a VMDq pool index from a rx address
+ *  @eth: pointer to eth struct
+ *  @rar: receive address register index to disassociate
+ *  @vmdq: VMDq pool index to remove from the rar
+ **/
+static s32 rnpgbe_eth_clear_vmdq_n500(struct rnpgbe_eth_info *eth,
+				      u32 rar, u32 vmdq)
+{
+	u32 rar_entries = eth->num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (rar >= rar_entries) {
+		eth_dbg(eth, "RAR index %d is out of range.\n", rar);
+		return RNP_ERR_INVALID_ARGUMENT;
+	}
+
+	eth_wr32(eth, RNP500_VM_DMAC_MPSAR_RING(rar), 0);
+
+	return 0;
+}
+
+static s32 rnp500_mta_vector(struct rnpgbe_eth_info *eth, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (eth->mc_filter_type) {
+	case 0: /* use bits [36:47] of the address */
+		vector = ((mc_addr[4] << 8) | (((u16)mc_addr[5])));
+		break;
+	case 1: /* use bits [35:46] of the address */
+		vector = ((mc_addr[4] << 7) | (((u16)mc_addr[5]) >> 1));
+		break;
+	case 2: /* use bits [34:45] of the address */
+		vector = ((mc_addr[4] << 6) | (((u16)mc_addr[5]) >> 2));
+		break;
+	case 3: /* use bits [32:43] of the address */
+		vector = ((mc_addr[4] << 5) | (((u16)mc_addr[5]) >> 3));
+		break;
+	case 4: /* use bits of address bytes [0:1] >> 4 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 4);
+		break;
+	case 5: /* use bits of address bytes [0:1] >> 3 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 3);
+		break;
+	case 6: /* use bits of address bytes [0:1] >> 2 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 2);
+		break;
+	case 7: /* use address bytes [0:1] */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		break;
+	default: /* Invalid mc_filter_type */
+		eth_dbg(eth, "MC filter type param set incorrectly\n");
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+static void rnp500_set_mta(struct rnpgbe_hw *hw, u8 *mc_addr)
+{
+	u32 vector;
+	u32 vector_bit;
+	u32 vector_reg;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector = rnp500_mta_vector(eth, mc_addr);
+
+	/*
+	 * The MTA is a register array of 128 32-bit registers. It is treated
+	 * like an array of 4096 bits.  We want to set bit
+	 * BitArray[vector_value]. So we figure out what register the bit is
+	 * in, read it, OR in the new bit, then write back the new value.  The
+	 * register is determined by the upper 7 bits of the vector value and
+	 * the bit within that register are determined by the lower 5 bits of
+	 * the value.
+	 */
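+	/* Worked example (for illustration): vector = 0x523 gives
+	 * vector_reg = (0x523 >> 5) & 0x7F = 0x29 and
+	 * vector_bit = 0x523 & 0x1F = 0x3, i.e. bit 3 of mta_shadow[0x29].
+	 */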
+	vector_reg = (vector >> 5) & 0x7F;
+	vector_bit = vector & 0x1F;
+	hw_dbg(hw, "\t\t%pM: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", mc_addr,
+	       vector, vector_reg, vector_bit);
+	eth->mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+static void rnp500_set_vf_mta(struct rnpgbe_hw *hw, u16 vector)
+{
+	/* vf/pf use the same multicast table */
+	u32 vector_bit;
+	u32 vector_reg;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	hw->addr_ctrl.mta_in_use++;
+
+	vector_reg = (vector >> 5) & 0x7F;
+	vector_bit = vector & 0x1F;
+	hw_dbg(hw, "\t\t vf M: MTA-BIT:%4d, MTA_REG[%d][%d] <= 1\n", vector,
+	       vector_reg, vector_bit);
+	eth->mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
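+/* Helper that walks the netdev_hw_addr list: returns the current address
+ * and advances *mc_addr_ptr to the next entry, or to NULL at the end of
+ * the list.
+ */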
+static u8 *rnpgbe_addr_list_itr(struct rnpgbe_hw __maybe_unused *hw,
+				u8 **mc_addr_ptr)
+{
+	struct netdev_hw_addr *mc_ptr;
+	u8 *addr = *mc_addr_ptr;
+
+	mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
+	if (mc_ptr->list.next) {
+		struct netdev_hw_addr *ha;
+
+		ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
+		*mc_addr_ptr = ha->addr;
+	} else {
+		*mc_addr_ptr = NULL;
+	}
+
+	return addr;
+}
+
+/**
+ *  rnpgbe_eth_update_mc_addr_list_n500 - Updates MAC list of multicast addresses
+ *  @eth: pointer to eth structure
+ *  @netdev: pointer to net device structure
+ *  @sriov_on: true if sriov is enabled
+ *
+ *  The given list replaces any existing list. Clears the MC addrs from receive
+ *  address registers and the multicast table. Uses unused receive address
+ *  registers for the first multicast addresses, and hashes the rest into the
+ *  multicast table.
+ **/
+static s32 rnpgbe_eth_update_mc_addr_list_n500(struct rnpgbe_eth_info *eth,
+					       struct net_device *netdev,
+					       bool sriov_on)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back;
+	struct netdev_hw_addr *ha;
+	u32 i;
+	u32 v;
+	int addr_count = 0;
+	u8 *addr_list = NULL;
+	int ret;
+	u8 ncsi_mc_addr[6];
+
+	/*
+	 * Set the new number of MC addresses that we are being requested to
+	 * use.
+	 */
+	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
+	hw->addr_ctrl.mta_in_use = 0;
+
+	/* Clear mta_shadow */
+	eth_dbg(eth, " Clearing MTA(multicast table)\n");
+	memset(&eth->mta_shadow, 0, sizeof(eth->mta_shadow));
+
+	/* Update mta shadow */
+	eth_dbg(eth, " Updating MTA..\n");
+	addr_count = netdev_mc_count(netdev);
+
+	if (addr_count) {
+		ha = list_first_entry(&netdev->mc.list, struct netdev_hw_addr,
+				      list);
+		addr_list = ha->addr;
+	}
+	for (i = 0; i < addr_count; i++) {
+		eth_dbg(eth, " Adding the multicast addresses:\n");
+		rnp500_set_mta(hw, rnpgbe_addr_list_itr(hw, &addr_list));
+	}
+
+	if (sriov_on) {
+		struct rnpgbe_adapter *adapter =
+			(struct rnpgbe_adapter *)hw->back;
+
+		for (i = 0; i < adapter->num_vfs; i++) {
+			if (adapter->vfinfo) {
+				struct vf_data_storage *vfinfo =
+					&adapter->vfinfo[i];
+				int j;
+
+				for (j = 0; j < vfinfo->num_vf_mc_hashes; j++)
+					rnp500_set_vf_mta(
+						hw, vfinfo->vf_mc_hashes[j]);
+			}
+		}
+	}
+	/* update ncsi multicast address */
+	for (i = NCSI_RAR_NUM; i < NCSI_MC_NUM; i++) {
+		ret = hw->ops.get_ncsi_mac(hw, ncsi_mc_addr, i);
+		if (!ret)
+			rnp500_set_mta(hw, ncsi_mc_addr);
+	}
+
+	/* Enable mta */
+	for (i = 0; i < hw->eth.mcft_size; i++) {
+		if (hw->addr_ctrl.mta_in_use) {
+			eth_wr32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(i),
+				 eth->mta_shadow[i]);
+		}
+	}
+
+	if (hw->addr_ctrl.mta_in_use > 0) {
+		v = eth_rd32(eth, RNP500_ETH_DMAC_MCSTCTRL);
+		eth_wr32(eth, RNP500_ETH_DMAC_MCSTCTRL,
+			 v | RNP500_MCSTCTRL_MULTICASE_TBL_EN |
+				 eth->mc_filter_type);
+	}
+
+	eth_dbg(eth, " update MTA Done. mta_in_use:%d\n",
+		hw->addr_ctrl.mta_in_use);
+	return hw->addr_ctrl.mta_in_use;
+}
+
+/* clean all mc addr */
+static void rnpgbe_eth_clr_mc_addr_n500(struct rnpgbe_eth_info *eth)
+{
+	int i;
+
+	for (i = 0; i < eth->mcft_size; i++)
+		eth_wr32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(i), 0);
+}
+
+/**
+ *  rnpgbe_eth_set_rss_hfunc_n500 - Set the RSS hash function
+ *  @eth: pointer to eth structure
+ *  @hfunc: hash function type
+ *
+ *  Selects the RSS hash function in the eth regs.
+ **/
+static int rnpgbe_eth_set_rss_hfunc_n500(struct rnpgbe_eth_info *eth, int hfunc)
+{
+	u32 data;
+
+	data = eth_rd32(eth, RNP500_ETH_RSS_CONTROL);
+	/* clear only the mode bits [14:15] */
+	data &= ~(BIT(14) | BIT(15));
+
+	if (hfunc == rss_func_top) {
+		/* do nothing */
+	} else if (hfunc == rss_func_xor) {
+		data |= BIT(14);
+	} else if (hfunc == rss_func_order) {
+		data |= BIT(15);
+	} else {
+		return -EINVAL;
+	}
+
+	/* update to hardware */
+	eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data);
+
+	return 0;
+}
+
+/**
+ *  rnpgbe_eth_update_rss_key_n500 - Update the RSS hash key
+ *  @eth: pointer to eth structure
+ *  @sriov_flag: sriov status
+ *
+ *  Writes the RSS key to the eth regs.
+ **/
+static void rnpgbe_eth_update_rss_key_n500(struct rnpgbe_eth_info *eth,
+					   bool sriov_flag)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back;
+	int i;
+	u8 *key_temp;
+	int key_len = RNP_RSS_KEY_SIZE;
+	u8 *key = hw->rss_key;
+	u32 data;
+	u32 iov_en = (sriov_flag) ? RNP500_IOV_ENABLED : 0;
+	u32 *value;
+
+	data = eth_rd32(eth, RNP500_ETH_RSS_CONTROL);
+
+	key_temp = kmalloc(key_len, GFP_KERNEL);
+	if (!key_temp)
+		return;
+	/* reorder the key */
+	for (i = 0; i < key_len; i++)
+		*(key_temp + key_len - i - 1) = *(key + i);
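+	/* For illustration: key_temp[0] = key[39] ... key_temp[39] = key[0],
+	 * i.e. the 40-byte key is written to hardware in reversed byte order.
+	 */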
+
+	value = (u32 *)key_temp;
+
+	for (i = 0; i < key_len; i = i + 4)
+		eth_wr32(eth, RNP500_ETH_RSS_KEY + i, *(value + i / 4));
+	kfree(key_temp);
+
+	data |= (RNP500_ETH_ENABLE_RSS_ONLY | iov_en);
+	eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data);
+}
+
+/**
+ *  rnpgbe_eth_update_rss_table_n500 - Update the RSS redirection table
+ *  @eth: pointer to eth structure
+ *
+ *  Writes the RSS indirection and tc tables to the eth regs.
+ **/
+static void rnpgbe_eth_update_rss_table_n500(struct rnpgbe_eth_info *eth)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back;
+	u32 reta_entries = hw->rss_indir_tbl_num;
+	u32 tc_entries = hw->rss_tc_tbl_num;
+	int i;
+
+	for (i = 0; i < tc_entries; i++)
+		eth_wr32(eth, RNP500_ETH_TC_IPH_OFFSET_TABLE(i),
+			 hw->rss_tc_tbl[i]);
+
+	for (i = 0; i < reta_entries; i++)
+		eth_wr32(eth, RNP500_ETH_RSS_INDIR_TBL(i),
+			 hw->rss_indir_tbl[i]);
+
+	/* when the rss table is updated, the default ring must be kept
+	 * consistent with rss_indir_tbl[0]
+	 */
+	eth_wr32(eth, RNP500_ETH_DEFAULT_RX_RING, hw->rss_indir_tbl[0]);
+}
+
+/**
+ *  rnpgbe_eth_set_vfta_n500 - Set VLAN filter table
+ *  @eth: pointer to eth structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ **/
+static s32 rnpgbe_eth_set_vfta_n500(struct rnpgbe_eth_info *eth, u32 vlan,
+				    bool vlan_on)
+{
+	s32 regindex;
+	u32 bitindex;
+	u32 vfta;
+	u32 targetbit;
+	bool vfta_changed = false;
+
+	/* todo: in vf mode the vlvf register can be set according to vind */
+	if (vlan > 4095)
+		return RNP_ERR_PARAM;
+
+	regindex = (vlan >> 5) & 0x7F;
+	bitindex = vlan & 0x1F;
+	targetbit = (1 << bitindex);
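+	/* For illustration: vlan 100 maps to regindex = 3 and bitindex = 4,
+	 * i.e. bit 4 of VFTA register 3.
+	 */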
+	vfta = eth_rd32(eth, RNP500_VFTA(regindex));
+
+	if (vlan_on) {
+		if (!(vfta & targetbit)) {
+			vfta |= targetbit;
+			vfta_changed = true;
+		}
+	} else {
+		if ((vfta & targetbit)) {
+			vfta &= ~targetbit;
+			vfta_changed = true;
+		}
+	}
+
+	if (vfta_changed)
+		eth_wr32(eth, RNP500_VFTA(regindex), vfta);
+
+	return 0;
+}
+
+static void rnpgbe_eth_clr_vfta_n500(struct rnpgbe_eth_info *eth)
+{
+	u32 offset;
+
+	for (offset = 0; offset < eth->vft_size; offset++)
+		eth_wr32(eth, RNP500_VFTA(offset), 0);
+}
+
+static void rnpgbe_eth_set_doulbe_vlan_n500(struct rnpgbe_eth_info *eth,
+					    bool on)
+{
+	if (on)
+		eth_wr32(eth, RNP500_ETH_VLAN_RM_TYPE, 1);
+	else
+		eth_wr32(eth, RNP500_ETH_VLAN_RM_TYPE, 0);
+}
+
+static void rnpgbe_eth_set_outer_vlan_type_n500(struct rnpgbe_eth_info *eth,
+						int type)
+{
+	u32 data = 0x88a8;
+
+	switch (type) {
+	case outer_vlan_type_88a8:
+		data = 0x88a8;
+		break;
+	case outer_vlan_type_9100:
+		data = 0x9100;
+		break;
+	case outer_vlan_type_9200:
+		data = 0x9200;
+		break;
+	}
+	eth_wr32(eth, RNP500_ETH_WRAP_FIELD_TYPE, data);
+	eth_wr32(eth, RNP500_ETH_TX_VLAN_TYPE, data);
+}
+
+/**
+ *  rnpgbe_eth_set_vlan_filter_n500 - Set VLAN filter table
+ *  @eth: pointer to eth structure
+ *  @status: on |off
+ *  Turn on/off VLAN filter table.
+ **/
+static void rnpgbe_eth_set_vlan_filter_n500(struct rnpgbe_eth_info *eth,
+					    bool status)
+{
+#define ETH_VLAN_FILTER_BIT (30)
+	u32 value = eth_rd32(eth, RNP500_ETH_VLAN_FILTER_ENABLE);
+
+	/* clear bit first */
+	value &= (~(0x01 << ETH_VLAN_FILTER_BIT));
+	if (status)
+		value |= (0x01 << ETH_VLAN_FILTER_BIT);
+	eth_wr32(eth, RNP500_ETH_VLAN_FILTER_ENABLE, value);
+}
+
+static u16 rnpgbe_layer2_pritologic_n500(u16 hw_id)
+{
+	return hw_id;
+}
+
+static void rnpgbe_eth_set_layer2_n500(struct rnpgbe_eth_info *eth,
+				       union rnpgbe_atr_input *input,
+				       u16 pri_id,
+				       u8 queue, bool prio_flag)
+{
+	u16 hw_id;
+
+	hw_id = rnpgbe_layer2_pritologic_n500(pri_id);
+	/* enable layer2 */
+	eth_wr32(eth, RNP500_ETH_LAYER2_ETQF(hw_id),
+		 (0x1 << 31) | (ntohs(input->layer2_formate.proto)));
+
+	/* setup action */
+	if (queue == RNP_FDIR_DROP_QUEUE) {
+		eth_wr32(eth, RNP500_ETH_LAYER2_ETQS(hw_id), (0x1 << 31));
+	} else {
+		/* setup ring_number */
+		if (prio_flag)
+			eth_wr32(eth, RNP500_ETH_LAYER2_ETQS(hw_id),
+				 (0x1 << 30) | (queue << 20) | (0x1 << 28));
+		else
+			eth_wr32(eth, RNP500_ETH_LAYER2_ETQS(hw_id),
+				 (0x1 << 30) | (queue << 20));
+	}
+}
+
+static void rnpgbe_eth_clr_layer2_n500(struct rnpgbe_eth_info *eth, u16 pri_id)
+{
+	u16 hw_id;
+
+	hw_id = rnpgbe_layer2_pritologic_n500(pri_id);
+	eth_wr32(eth, RNP500_ETH_LAYER2_ETQF(hw_id), 0);
+}
+
+static void rnpgbe_eth_clr_all_layer2_n500(struct rnpgbe_eth_info *eth)
+{
+	int i;
+
+	for (i = 0; i < RNP500_MAX_LAYER2_FILTERS; i++)
+		eth_wr32(eth, RNP500_ETH_LAYER2_ETQF(i), 0);
+}
+
+static u16 rnpgbe_tuple5_pritologic_n500(u16 hw_id)
+{
+	return hw_id;
+}
+
+static void rnpgbe_eth_set_tuple5_n500(struct rnpgbe_eth_info *eth,
+				       union rnpgbe_atr_input *input,
+				       u16 pri_id,
+				       u8 queue, bool prio_flag)
+{
+#define RNP500_SRC_IP_MASK BIT(0)
+#define RNP500_DST_IP_MASK BIT(1)
+#define RNP500_SRC_PORT_MASK BIT(2)
+#define RNP500_DST_PORT_MASK BIT(3)
+#define RNP500_L4_PROTO_MASK BIT(4)
+	u32 port = 0;
+	u8 mask_temp = 0;
+	u8 l4_proto_type = 0;
+	u16 hw_id;
+
+	hw_id = rnpgbe_tuple5_pritologic_n500(pri_id);
+	dbg("try to enable tuple5 %x\n", hw_id);
+	if (input->formatted.src_ip[0] != 0) {
+		eth_wr32(eth, RNP500_ETH_TUPLE5_SAQF(hw_id),
+			 htonl(input->formatted.src_ip[0]));
+	} else {
+		mask_temp |= RNP500_SRC_IP_MASK;
+	}
+	if (input->formatted.dst_ip[0] != 0) {
+		eth_wr32(eth, RNP500_ETH_TUPLE5_DAQF(hw_id),
+			 htonl(input->formatted.dst_ip[0]));
+	} else {
+		mask_temp |= RNP500_DST_IP_MASK;
+	}
+	if (input->formatted.src_port != 0)
+		port |= (htons(input->formatted.src_port));
+	else
+		mask_temp |= RNP500_SRC_PORT_MASK;
+	if (input->formatted.dst_port != 0)
+		port |= (htons(input->formatted.dst_port) << 16);
+	else
+		mask_temp |= RNP500_DST_PORT_MASK;
+
+	if (port != 0)
+		eth_wr32(eth, RNP500_ETH_TUPLE5_SDPQF(hw_id), port);
+
+	switch (input->formatted.flow_type) {
+	case RNP_ATR_FLOW_TYPE_TCPV4:
+		l4_proto_type = IPPROTO_TCP;
+		break;
+	case RNP_ATR_FLOW_TYPE_UDPV4:
+		l4_proto_type = IPPROTO_UDP;
+		break;
+	case RNP_ATR_FLOW_TYPE_SCTPV4:
+		l4_proto_type = IPPROTO_SCTP;
+		break;
+	case RNP_ATR_FLOW_TYPE_IPV4:
+		l4_proto_type = input->formatted.inner_mac[0];
+		break;
+	default:
+		l4_proto_type = 0;
+	}
+
+	if (l4_proto_type == 0)
+		mask_temp |= RNP500_L4_PROTO_MASK;
+
+	/* setup ftqf*/
+	/* always set 0x3 */
+	eth_wr32(eth, RNP500_ETH_TUPLE5_FTQF(hw_id),
+		 (1 << 31) | (mask_temp << 25) | (l4_proto_type << 16) | 0x3);
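+	/* For illustration: a tcpv4 rule with only src_ip specified ends up
+	 * with mask_temp = 0xe (dst ip and both ports masked) and
+	 * l4_proto_type = IPPROTO_TCP.
+	 */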
+
+	/* setup action */
+	if (queue == RNP_FDIR_DROP_QUEUE) {
+		eth_wr32(eth, RNP500_ETH_TUPLE5_POLICY(hw_id), (0x1 << 31));
+	} else {
+		/* setup ring_number */
+		if (prio_flag)
+			eth_wr32(eth, RNP500_ETH_TUPLE5_POLICY(hw_id),
+				 ((0x1 << 30) | (queue << 20) | (0x1 << 28)));
+		else
+			eth_wr32(eth, RNP500_ETH_TUPLE5_POLICY(hw_id),
+				 ((0x1 << 30) | (queue << 20)));
+	}
+}
+
+static void rnpgbe_eth_clr_tuple5_n500(struct rnpgbe_eth_info *eth, u16 pri_id)
+{
+	u16 hw_id;
+
+	hw_id = rnpgbe_tuple5_pritologic_n500(pri_id);
+	eth_wr32(eth, RNP500_ETH_TUPLE5_FTQF(hw_id), 0);
+}
+
+static void rnpgbe_eth_clr_all_tuple5_n500(struct rnpgbe_eth_info *eth)
+{
+	int i;
+
+	for (i = 0; i < RNP500_MAX_TUPLE5_FILTERS; i++)
+		eth_wr32(eth, RNP500_ETH_TUPLE5_FTQF(i), 0);
+}
+
+static void rnpgbe_eth_set_tcp_sync_n500(struct rnpgbe_eth_info *eth,
+					 int queue,
+					 bool flag, bool prio)
+{
+	if (flag) {
+		eth_wr32(eth, RNP500_ETH_SYNQF, (0x1 << 30) | (queue << 20));
+		if (prio)
+			eth_wr32(eth, RNP500_ETH_SYNQF_PRIORITY,
+				 (0x1 << 31) | 0x1);
+		else
+			eth_wr32(eth, RNP500_ETH_SYNQF_PRIORITY, (0x1 << 31));
+	} else {
+		eth_wr32(eth, RNP500_ETH_SYNQF, 0);
+		eth_wr32(eth, RNP500_ETH_SYNQF_PRIORITY, 0);
+	}
+}
+
+static void rnpgbe_eth_set_rx_skip_n500(struct rnpgbe_eth_info *eth,
+					int count,
+					bool flag)
+{
+	if (flag) {
+		eth_wr32(eth, RNP500_ETH_PRIV_DATA_CONTROL_REG,
+			 PRIV_DATA_EN | count);
+	} else {
+		eth_wr32(eth, RNP500_ETH_PRIV_DATA_CONTROL_REG, 0);
+	}
+}
+
+static void rnpgbe_eth_set_min_max_packets_n500(struct rnpgbe_eth_info *eth,
+						int min, int max)
+{
+	eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MIN_LEN, min);
+	eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MAX_LEN, max);
+}
+
+static void rnpgbe_eth_set_vlan_strip_n500(struct rnpgbe_eth_info *eth,
+					   u16 queue, bool enable)
+{
+	u32 reg = RNP500_ETH_VLAN_VME_REG(queue / 32);
+	u32 offset = queue % 32;
+	u32 data = eth_rd32(eth, reg);
+
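+	/* For illustration: queue 40 maps to VME register index 1, bit 8. */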
+	if (enable)
+		data |= (1 << offset);
+	else
+		data &= ~(1 << offset);
+
+	eth_wr32(eth, reg, data);
+}
+
+static void rnpgbe_eth_set_vxlan_port_n500(struct rnpgbe_eth_info *eth,
+					   u32 port)
+{
+}
+
+static void rnpgbe_eth_set_vxlan_mode_n500(struct rnpgbe_eth_info *eth,
+					   bool inner)
+{
+}
+
+static void rnpgbe_eth_set_rx_hash_n500(struct rnpgbe_eth_info *eth,
+					bool status, bool sriov_flag)
+{
+	u32 iov_en = (sriov_flag) ? RNP500_IOV_ENABLED : 0;
+	u32 data;
+
+	data = eth_rd32(eth, RNP500_ETH_RSS_CONTROL);
+	data &= ~RNP500_ETH_RSS_MASK;
+
+	if (status) {
+		data |= RNP500_ETH_ENABLE_RSS_ONLY;
+		eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data | iov_en);
+	} else {
+		eth_wr32(eth, RNP500_ETH_RSS_CONTROL, data | iov_en);
+	}
+}
+
+static void rnpgbe_eth_set_rx_n500(struct rnpgbe_eth_info *eth, bool status)
+{
+	if (status) {
+		eth_wr32(eth, RNP500_ETH_EXCEPT_DROP_PROC, 0);
+		eth_wr32(eth, RNP500_ETH_TX_MUX_DROP, 0);
+	} else {
+		eth_wr32(eth, RNP500_ETH_EXCEPT_DROP_PROC, 1);
+		eth_wr32(eth, RNP500_ETH_TX_MUX_DROP, 1);
+	}
+}
+
+static void rnpgbe_eth_fcs_n500(struct rnpgbe_eth_info *eth, bool status)
+{
+	if (status)
+		eth_wr32(eth, RNP500_ETH_FCS_EN, 1);
+	else
+		eth_wr32(eth, RNP500_ETH_FCS_EN, 0);
+}
+
+static void rnpgbe_eth_set_vf_vlan_mode_n500(struct rnpgbe_eth_info *eth,
+					     u16 vlan, int vf, bool enable)
+{
+	u32 value = vlan;
+
+	if (enable)
+		value |= BIT(31);
+
+	eth_wr32(eth, RNP500_VLVF(vf), value);
+	/* on n500, each vf can only be assigned one vlan */
+	eth_wr32(eth, RNP500_VLVF_TABLE(vf), vf);
+}
+
+static s32 rnpgbe_eth_set_fc_mode_n500(struct rnpgbe_eth_info *eth)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)eth->back;
+	s32 ret_val = 0;
+	int i;
+	/* n500 has only 1 traffic class */
+	for (i = 0; i < 1; i++) {
+		if ((hw->fc.current_mode & rnpgbe_fc_tx_pause) &&
+		    hw->fc.high_water[i]) {
+			if (!hw->fc.low_water[i] ||
+			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+				hw_dbg(hw,
+				       "Invalid water mark configuration\n");
+				ret_val = RNP_ERR_INVALID_LINK_SETTINGS;
+				goto out;
+			}
+		}
+	}
+
+	for (i = 0; i < 1; i++) {
+		if ((hw->fc.current_mode & rnpgbe_fc_tx_pause)) {
+			if (hw->fc.high_water[i]) {
+				eth_wr32(eth, RNP500_ETH_HIGH_WATER(i),
+					 hw->fc.high_water[i]);
+			}
+			if (hw->fc.low_water[i]) {
+				eth_wr32(eth, RNP500_ETH_LOW_WATER(i),
+					 hw->fc.low_water[i]);
+			}
+		}
+	}
+out:
+	return ret_val;
+}
+
+static struct rnpgbe_eth_operations eth_ops_n500 = {
+	.set_rar = &rnpgbe_eth_set_rar_n500,
+	.clear_rar = &rnpgbe_eth_clear_rar_n500,
+	.set_vmdq = &rnpgbe_eth_set_vmdq_n500,
+	.clear_vmdq = &rnpgbe_eth_clear_vmdq_n500,
+	.update_mc_addr_list = &rnpgbe_eth_update_mc_addr_list_n500,
+	.clr_mc_addr = &rnpgbe_eth_clr_mc_addr_n500,
+	/* store rss info to eth */
+	.set_rss_hfunc = &rnpgbe_eth_set_rss_hfunc_n500,
+	.set_rss_key = &rnpgbe_eth_update_rss_key_n500,
+	.set_rss_table = &rnpgbe_eth_update_rss_table_n500,
+	.set_vfta = &rnpgbe_eth_set_vfta_n500,
+	.clr_vfta = &rnpgbe_eth_clr_vfta_n500,
+	.set_vlan_filter = &rnpgbe_eth_set_vlan_filter_n500,
+	.set_outer_vlan_type = &rnpgbe_eth_set_outer_vlan_type_n500,
+	.set_double_vlan = &rnpgbe_eth_set_doulbe_vlan_n500,
+	.set_layer2_remapping = &rnpgbe_eth_set_layer2_n500,
+	.clr_layer2_remapping = &rnpgbe_eth_clr_layer2_n500,
+	.clr_all_layer2_remapping = &rnpgbe_eth_clr_all_layer2_n500,
+	.set_tuple5_remapping = &rnpgbe_eth_set_tuple5_n500,
+	.clr_tuple5_remapping = &rnpgbe_eth_clr_tuple5_n500,
+	.clr_all_tuple5_remapping = &rnpgbe_eth_clr_all_tuple5_n500,
+	.set_tcp_sync_remapping = &rnpgbe_eth_set_tcp_sync_n500,
+	.set_rx_skip = &rnpgbe_eth_set_rx_skip_n500,
+	.set_min_max_packet = &rnpgbe_eth_set_min_max_packets_n500,
+	.set_vlan_strip = &rnpgbe_eth_set_vlan_strip_n500,
+	.set_vxlan_port = &rnpgbe_eth_set_vxlan_port_n500,
+	.set_vxlan_mode = &rnpgbe_eth_set_vxlan_mode_n500,
+	.set_rx_hash = &rnpgbe_eth_set_rx_hash_n500,
+	.set_fc_mode = &rnpgbe_eth_set_fc_mode_n500,
+	.set_rx = &rnpgbe_eth_set_rx_n500,
+	.set_fcs = &rnpgbe_eth_fcs_n500,
+	.set_vf_vlan_mode = &rnpgbe_eth_set_vf_vlan_mode_n500,
+};
+
+/**
+ *  rnpgbe_init_hw_n500 - Generic hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by resetting the hardware, filling the bus info
+ *  structure and media type, clears all on chip counters, initializes receive
+ *  address registers, multicast table, VLAN filter table, calls routine to set
+ *  up link and flow control settings, and leaves transmit and receive units
+ *  disabled and uninitialized
+ **/
+static s32 rnpgbe_init_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	s32 status = 0;
+
+	/* Reset the hardware */
+	status = hw->ops.reset_hw(hw);
+
+	if (status == 0) {
+		/* Start the HW */
+		status = hw->ops.start_hw(hw);
+	}
+
+	return status;
+}
+
+static s32 rnpgbe_get_permtion_mac_addr_n500(struct rnpgbe_hw *hw,
+					     u8 *mac_addr)
+{
+	if (rnpgbe_fw_get_macaddr(hw, hw->pfvfnum, mac_addr, hw->nr_lane)) {
+		printk(KERN_DEBUG "generating random mac address...\n");
+		eth_random_addr(mac_addr);
+	} else {
+		printk(KERN_DEBUG "get mac addr %x:%x:%x:%x:%x:%x\n",
+		       mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+		       mac_addr[4], mac_addr[5]);
+		if (!is_valid_ether_addr(mac_addr))
+			eth_random_addr(mac_addr);
+	}
+	hw->mac.mac_flags |= RNP_FLAGS_INIT_MAC_ADDRESS;
+	dbg("%s mac:%pM\n", __func__, mac_addr);
+
+	return 0;
+}
+
+static s32 rnpgbe_reset_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	int i;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	dma_wr32(dma, RNP_DMA_AXI_EN, 0);
+
+	rnpgbe_mbx_fw_reset_phy(hw);
+	/* tcam not reset */
+	eth->ops.clr_all_tuple5_remapping(eth);
+	/* Store the permanent mac address */
+	if (!(hw->mac.mac_flags & RNP_FLAGS_INIT_MAC_ADDRESS)) {
+		rnpgbe_get_permtion_mac_addr_n500(hw, hw->mac.perm_addr);
+		memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN);
+	}
+
+	hw->ops.init_rx_addrs(hw);
+	/* n500 should do this ? */
+	eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR,
+		 RNP_N500_PKT_LEN_ERR | RNP_N500_HDR_LEN_ERR);
+	wr32(hw, RNP_DMA_RX_DATA_PROG_FULL_THRESH, 0xa);
+
+	for (i = 0; i < 12; i++)
+		rnpgbe_wr_reg(hw->ring_msix_base + RING_VECTOR(i), 0);
+	{
+		u32 value = 0;
+
+		value |= RNP_MODE_NO_SA_INSER << RNP_SARC_OFFSET;
+		value &= (~RNP_TWOKPE_MASK);
+		value &= (~RNP_SFTERR_MASK);
+		value |= (RNP_CST_MASK);
+		value |= RNP_TC_MASK;
+		value &= (~RNP_WD_MASK);
+		value &= (~RNP_JD_MASK);
+		value &= (~RNP_BE_MASK);
+		value |= (RNP_JE_MASK);
+		value |= (RNP_IFG_96 << RNP_IFG_OFFSET);
+		value &= (~RNP_DCRS_MASK);
+		value &= (~RNP_PS_MASK);
+		value &= (~RNP_FES_MASK);
+		value &= (~RNP_DO_MASK);
+		value &= (~RNP_LM_MASK);
+		value |= RNP_DM_MASK;
+		value |= RNP_IPC_MASK; /* open rx checksum */
+		value &= (~RNP_DR_MASK);
+		value &= (~RNP_LUD_MASK);
+		value |= (RNP_BL_MODE << RNP_BL_OFFSET);
+		value &= (~RNP_DC_MASK);
+		value |= RNP_TE_MASK;
+		value |= (RNP_PRELEN_MODE);
+	}
+
+	if (hw->ncsi_en)
+		rnpgbe_mbx_phy_pause_get(hw, &hw->fc.requested_mode);
+	else
+		rnpgbe_mbx_phy_pause_set(hw, hw->fc.requested_mode);
+
+	rnpgbe_mbx_get_lane_stat(hw);
+	hw->link = 0;
+
+	return 0;
+}
+
+static s32 rnpgbe_start_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	s32 ret_val = 0;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR,
+		 RNP_N500_PKT_LEN_ERR | RNP_N500_HDR_LEN_ERR);
+	eth_wr32(eth, RNP500_ETH_BYPASS, 0);
+	eth_wr32(eth, RNP500_ETH_DEFAULT_RX_RING, 0);
+	dma_wr32(dma, RNP_DMA_CONFIG, DMA_VEB_BYPASS);
+	dma_wr32(dma, RNP_DMA_AXI_EN, (RX_AXI_RW_EN | TX_AXI_RW_EN));
+
+	{
+		int value = dma_rd32(dma, RNP_DMA_DUMY);
+
+		value |= RC_CONTROL_HW;
+		dma_wr32(dma, RNP_DMA_DUMY, value);
+	}
+	return ret_val;
+}
+
+/* set the n500 min/max packet length according to new_mtu;
+ * the max length is derived from the mtu plus ethernet header and
+ * fcs overhead
+ */
+static void rnpgbe_set_mtu_hw_ops_n500(struct rnpgbe_hw *hw, int new_mtu)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	int min;
+	int max = new_mtu + ETH_HLEN + ETH_FCS_LEN * 2;
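+	/* max here is the worst-case frame: the MTU plus the Ethernet
+	 * header (ETH_HLEN) and twice the FCS length of trailer overhead
+	 */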
+#define ULTRA_SHORT 33
+#define DEFAULT_SHORT 60
+	if ((adapter->priv_flags & RNP_PRIV_FLAG_ULTRA_SHORT) ||
+	    (adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL))
+		min = ULTRA_SHORT;
+	else
+		min = DEFAULT_SHORT;
+
+	/* jumbo frames are only received in jumbo-enable or rx-all mode */
+	if ((adapter->priv_flags & RNP_PRIV_FLAG_JUMBO) ||
+	    (adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL))
+		max = hw->max_length;
+
+	hw->min_length_current = min;
+	hw->max_length_current = max;
+	eth->ops.set_min_max_packet(eth, min, max);
+}
+
+/* setup n500 vlan filter status */
+static void rnpgbe_set_vlan_filter_en_hw_ops_n500(struct rnpgbe_hw *hw,
+						  bool status)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_vlan_filter(eth, status);
+}
+
+/* set vlan to n500 vlan filter table & veb */
+/* pf setup call */
+static void rnpgbe_set_vlan_filter_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid,
+					       bool enable, bool sriov_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	u16 ncsi_vid;
+	int i;
+	int ret;
+
+	/* TODO: set up our own veb; use the last vfnum */
+	u32 vfnum = hw->max_vfs - 1;
+	/* setup n500 eth vlan table */
+	eth->ops.set_vfta(eth, vid, enable);
+
+	/* setup veb */
+	if (sriov_flag) {
+		if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) {
+			/* the veb is updated elsewhere in this mode */
+		} else {
+			if (enable)
+				dma->ops.set_veb_vlan(dma, vid, vfnum);
+			else
+				dma->ops.set_veb_vlan(dma, 0, vfnum);
+		}
+	}
+	/* always set up the ncsi vid */
+	for (i = 0; i < 2; i++) {
+		ret = hw->ops.get_ncsi_vlan(hw, &ncsi_vid, i);
+		if (!ret) {
+			eth->ops.set_vfta(eth, ncsi_vid, 1);
+			printk(KERN_DEBUG "update ncsi vid %d\n", ncsi_vid);
+		}
+	}
+}
+
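+/* maintain the shared veb vlan mask table: look up vid in hw->vf_vas,
+ * set or clear the per-vf bit in its mask (allocating a free entry when
+ * needed) and program the result via dma->ops.set_veb_vlan_mask
+ */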
+static int rnpgbe_set_veb_vlan_mask_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid,
+						int vf, bool enable)
+{
+	struct list_head *pos;
+	struct vf_vebvlans *entry;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	bool find = false;
+	int err = 0;
+	/* 1. check whether this vid is already in the vlan mask table */
+	list_for_each(pos, &hw->vf_vas.l) {
+		entry = list_entry(pos, struct vf_vebvlans, l);
+		if (entry->vid == vid) {
+			find = true;
+			break;
+		}
+	}
+	if (find) {
+		/* this vid is already in use */
+		if (enable) {
+			entry->mask |= (1 << vf);
+		} else {
+			entry->mask &= (~(1 << vf));
+			/* if the mask is now empty, free this entry */
+			if (!entry->mask) {
+				entry->vid = -1;
+				entry->free = true;
+			}
+		}
+	} else {
+		/* 2. try to grab a free entry */
+		list_for_each(pos, &hw->vf_vas.l) {
+			entry = list_entry(pos, struct vf_vebvlans, l);
+			if (entry->free) {
+				find = true;
+				break;
+			}
+		}
+		if (find) {
+			entry->free = false;
+			entry->vid = vid;
+			entry->mask |= (1 << vf);
+		} else {
+			err = -1;
+			goto err_out;
+		}
+	}
+	/* 3 update new vlan mask to hw */
+	dma->ops.set_veb_vlan_mask(dma, entry->vid, entry->mask,
+				   entry->veb_entry);
+err_out:
+	return err;
+}
+
+static void rnpgbe_set_vf_vlan_filter_hw_ops_n500(struct rnpgbe_hw *hw, u16 vid,
+						  int vf, bool enable,
+						  bool veb_only)
+{
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	if (!veb_only) {
+		/* call set vfta without veb setup */
+		hw->ops.set_vlan_filter(hw, vid, enable, false);
+
+	} else {
+		if (enable)
+			dma->ops.set_veb_vlan(dma, vid, vf);
+		else
+			dma->ops.set_veb_vlan(dma, 0, vf);
+	}
+}
+
+static void rnpgbe_clr_vlan_veb_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	u32 vfnum = hw->vfnum;
+
+	dma->ops.set_veb_vlan(dma, 0, vfnum);
+}
+
+/* setup n500 vlan strip status */
+static void rnpgbe_set_vlan_strip_hw_ops_n500(struct rnpgbe_hw *hw, u16 queue,
+					      bool strip)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_vlan_strip(eth, queue, strip);
+}
+
+/* update new n500 mac */
+static void rnpgbe_set_mac_hw_ops_n500(struct rnpgbe_hw *hw, u8 *mac,
+				       bool sriov_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_mac_info *mac_info = &hw->mac;
+	/* use this queue index to set up the veb;
+	 * the pf currently uses queue 0/1 and vfnum is the last vfnum
+	 */
+	int queue = hw->veb_ring;
+	int vfnum = hw->vfnum;
+
+	eth->ops.set_rar(eth, 0, mac, true);
+	if (sriov_flag) {
+		eth->ops.set_vmdq(eth, 0, queue / hw->sriov_ring_limit);
+		dma->ops.set_veb_mac(dma, mac, vfnum, queue);
+	}
+
+	mac_info->ops.set_mac(mac_info, mac, 0);
+}
+
+/**
+ * rnpgbe_write_uc_addr_list_n500 - write unicast addresses to the RAR table
+ * @hw: pointer to hardware structure
+ * @netdev: network interface device structure
+ * @sriov_flag: true when SR-IOV is enabled
+ *
+ * Writes the unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ *          0 on no addresses written
+ *          X on writing X addresses to the RAR table
+ **/
+static int rnpgbe_write_uc_addr_list_n500(struct rnpgbe_hw *hw,
+					  struct net_device *netdev,
+					  bool sriov_flag)
+{
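+	/* rar 0 holds the primary mac address (see init_rx_addrs), so one
+	 * entry less is available for the unicast list
+	 */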
+	unsigned int rar_entries = hw->num_rar_entries - 1;
+	u32 vfnum = hw->vfnum;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	int count = 0;
+	int i = 0;
+	u8 ncsi_addr[6];
+	int ret;
+
+	/* In SR-IOV mode significantly fewer RAR entries are available */
+	if (sriov_flag)
+		rar_entries = hw->max_pf_macvlans - 1;
+
+	/* return -ENOMEM if there is not enough room for all addresses */
+	if (netdev_uc_count(netdev) > rar_entries)
+		return -ENOMEM;
+
+	if (!netdev_uc_empty(netdev)) {
+		struct netdev_hw_addr *ha;
+
+		hw_dbg(hw, "%s: rar_entries:%d, uc_count:%d\n", __func__,
+		       hw->num_rar_entries, netdev_uc_count(netdev));
+
+		/* return error if we do not support writing to RAR table */
+		if (!eth->ops.set_rar)
+			return -ENOMEM;
+
+		netdev_for_each_uc_addr(ha, netdev) {
+			if (!rar_entries)
+				break;
+			eth->ops.set_rar(eth, rar_entries, ha->addr,
+					 RNP500_RAH_AV);
+			if (sriov_flag)
+				eth->ops.set_vmdq(eth, rar_entries, vfnum);
+
+			rar_entries--;
+
+			count++;
+		}
+	}
+	for (i = 0; i < NCSI_RAR_NUM; i++) {
+		ret = hw->ops.get_ncsi_mac(hw, ncsi_addr, i);
+		if (!ret) {
+			eth->ops.set_rar(eth, NCSI_RAR_IDX_START + i, ncsi_addr,
+					 RNP500_RAH_AV);
+		}
+	}
+
+	/* clear any remaining (unused) RAR entries */
+	hw_dbg(hw, "%s: Clearing RAR[1 - %d]\n", __func__, rar_entries);
+	for (; rar_entries > 0; rar_entries--)
+		eth->ops.clear_rar(eth, rar_entries);
+
+	return count;
+}
+
+static void rnpgbe_set_rx_mode_hw_ops_n500(struct rnpgbe_hw *hw,
+					   struct net_device *netdev,
+					   bool sriov_flag)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 fctrl;
+	netdev_features_t features = netdev->features;
+	int count;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	hw_dbg(hw, "%s\n", __func__);
+
+	/* broadcast always bypass */
+	fctrl = eth_rd32(eth, RNP500_ETH_DMAC_FCTRL) | RNP500_FCTRL_BPE;
+
+	/* clear the bits we are changing the status of */
+	fctrl &= ~(RNP500_FCTRL_UPE | RNP500_FCTRL_MPE);
+	/* promisc mode */
+	if (netdev->flags & IFF_PROMISC) {
+		hw->addr_ctrl.user_set_promisc = true;
+		fctrl |= (RNP500_FCTRL_UPE | RNP500_FCTRL_MPE);
+		/* disable hardware filter vlans in promisc mode */
+		features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+		features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			fctrl |= RNP500_FCTRL_MPE;
+		} else {
+			/* Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscuous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			count = eth->ops.update_mc_addr_list(eth, netdev, true);
+			if (count < 0)
+				fctrl |= RNP500_FCTRL_MPE;
+		}
+		hw->addr_ctrl.user_set_promisc = false;
+	}
+
+	/*
+	 * Write addresses to available RAR registers, if there is not
+	 * sufficient space to store all the addresses then enable
+	 * unicast promiscuous mode
+	 */
+	if (rnpgbe_write_uc_addr_list_n500(hw, netdev, sriov_flag) < 0)
+		fctrl |= RNP500_FCTRL_UPE;
+
+	eth_wr32(eth, RNP500_ETH_DMAC_FCTRL, fctrl);
+	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		eth->ops.set_vlan_filter(eth, true);
+	else
+		eth->ops.set_vlan_filter(eth, false);
+
+	if (hw->addr_ctrl.user_set_promisc ||
+	    (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR)) {
+		eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, 0);
+	} else {
+		/* set pkt_len_err and hdr_len_err default to 1 */
+		eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR,
+			 PKT_LEN_ERR | HDR_LEN_ERR);
+	}
+
+	hw->ops.set_mtu(hw, netdev->mtu);
+}
+
+/* setup an rar with vfnum */
+static void rnpgbe_set_rar_with_vf_hw_ops_n500(struct rnpgbe_hw *hw, u8 *mac,
+					       int idx, u32 vfnum, bool enable)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_rar(eth, idx, mac, enable);
+	/* should we check the return value here? */
+	eth->ops.set_vmdq(eth, idx, vfnum);
+}
+
+static void rnpgbe_clr_rar_hw_ops_n500(struct rnpgbe_hw *hw, int idx)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clear_rar(eth, idx);
+}
+
+static void rnpgbe_clr_rar_all_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	unsigned int rar_entries = hw->num_rar_entries - 1;
+	int i;
+
+	for (i = 0; i < rar_entries; i++)
+		eth->ops.clear_rar(eth, i);
+}
+
+static void rnpgbe_set_fcs_mode_hw_ops_n500(struct rnpgbe_hw *hw, bool status)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	mac->ops.set_mac_fcs(mac, status);
+	eth->ops.set_fcs(eth, status);
+}
+
+static void rnpgbe_set_vxlan_port_hw_ops_n500(struct rnpgbe_hw *hw, u32 port)
+{
+	/* n500 not support */
+}
+
+static void rnpgbe_set_vxlan_mode_hw_ops_n500(struct rnpgbe_hw *hw, bool inner)
+{
+	/* n500 not support */
+}
+
+static void rnpgbe_set_mac_speed_hw_ops_n500(struct rnpgbe_hw *hw, bool link,
+					     u32 speed, bool duplex)
+{
+	/* n500 hw control this */
+}
+
+static void rnpgbe_set_mac_rx_hw_ops_n500(struct rnpgbe_hw *hw, bool status)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	if (status) {
+		mac->ops.set_mac_rx(mac, status);
+		eth->ops.set_rx(eth, status);
+	} else {
+		eth->ops.set_rx(eth, status);
+		mac->ops.set_mac_rx(mac, status);
+	}
+}
+
+static void rnpgbe_set_sriov_status_hw_ops_n500(struct rnpgbe_hw *hw,
+						bool status)
+{
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	u32 v, fctrl;
+
+	fctrl = eth_rd32(eth, RNP500_ETH_DMAC_FCTRL);
+#define RNP500_DMAC_MASK (0x7f)
+	fctrl &= ~RNP500_DMAC_MASK;
+
+	if (status) {
+		fctrl |= hw->veb_ring;
+		eth_wr32(eth, RNP500_ETH_DMAC_FCTRL, fctrl);
+		/* setup default ring */
+		dma_wr32(dma, RNP_DMA_CONFIG,
+			 dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS));
+		v = eth_rd32(eth, RNP500_MRQC_IOV_EN);
+		v |= RNP500_IOV_ENABLED;
+		eth_wr32(eth, RNP500_MRQC_IOV_EN, v);
+		/* 1 setup veb vlan type */
+
+	} else {
+		eth_wr32(eth, RNP500_ETH_DMAC_FCTRL, fctrl);
+		v = eth_rd32(eth, RNP500_MRQC_IOV_EN);
+		v &= ~(RNP500_IOV_ENABLED);
+		eth_wr32(eth, RNP500_MRQC_IOV_EN, v);
+		dma->ops.clr_veb_all(dma);
+	}
+}
+
+static void rnpgbe_set_sriov_vf_mc_hw_ops_n500(struct rnpgbe_hw *hw,
+					       u16 mc_addr)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	u32 vector_bit;
+	u32 vector_reg;
+	u32 mta_reg;
+	/* pf/vf share one mc hash table */
+
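+	/* mc_addr is a 12-bit hash: bits [11:5] select one of the 128
+	 * multicast hash table registers, bits [4:0] the bit inside it
+	 */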
+	vector_reg = (mc_addr >> 5) & 0x7F;
+	vector_bit = mc_addr & 0x1F;
+	mta_reg = eth_rd32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(vector_reg));
+	mta_reg |= (1 << vector_bit);
+	eth_wr32(eth, RNP500_ETH_MUTICAST_HASH_TABLE(vector_reg), mta_reg);
+}
+
+static void rnpgbe_update_sriov_info_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	/* update sriov info to hw */
+}
+
+static void rnpgbe_set_pause_mode_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	mac->ops.set_fc_mode(mac);
+	eth->ops.set_fc_mode(eth);
+}
+
+static void rnpgbe_get_pause_mode_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	/* n500 can get pause mode in link event */
+}
+
+static void rnpgbe_update_hw_info_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	u32 data;
+	/* 1 enable eth filter */
+	eth_wr32(eth, RNP500_HOST_FILTER_EN, 1);
+	/* 2 enable the redirection engine */
+	eth_wr32(eth, RNP500_REDIR_EN, 1);
+	/* 3 enable sctp and other checksum offloads */
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM)
+		eth_wr32(eth, RNP500_ETH_SCTP_CHECKSUM_EN, 1);
+
+	/* 4 mark multicast as broadcast */
+	dma_wr32(dma, RNP_VEB_MAC_MASK_LO, 0xffffffff);
+	dma_wr32(dma, RNP_VEB_MAC_MASK_HI, 0xfeff);
+	/* test only */
+	dma_wr32(dma, 0x00b0, 615);
+	/* 5 setup ft padding and veb vlan mode */
+	data = dma_rd32(dma, RNP_DMA_CONFIG);
+	/* force-disable padding on n500 */
+	CLR_BIT(8, data);
+
+#define N500_VLAN_POLL_EN BIT(3)
+	if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN)
+		data |= N500_VLAN_POLL_EN;
+
+	dma_wr32(dma, RNP_DMA_CONFIG, data);
+	/* 6 setup vlan mode */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN)
+		eth->ops.set_double_vlan(eth, true);
+	else
+		eth->ops.set_double_vlan(eth, false);
+
+	/* 7 setup rss-hash mode */
+	eth->ops.set_rss_hfunc(eth, adapter->rss_func_mode);
+	/* 8 setup outer-vlan type */
+	eth->ops.set_outer_vlan_type(eth, adapter->outer_vlan_type);
+	/* 9 setup tcp sync remapping */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) {
+		if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO)
+			hw->ops.set_tcp_sync_remapping(
+				hw, adapter->tcp_sync_queue, true, true);
+		else
+			hw->ops.set_tcp_sync_remapping(
+				hw, adapter->tcp_sync_queue, true, false);
+	} else {
+		hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue,
+					       false, false);
+	}
+	/* 10 setup pause status */
+	data = mac_rd32(mac, GMAC_FLOW_CTRL);
+	if (adapter->priv_flags & RNP_PRIV_FLAG_PAUSE_OWN)
+		data |= GMAC_FLOW_CTRL_UP;
+	else
+		data &= (~GMAC_FLOW_CTRL_UP);
+
+	mac_wr32(mac, GMAC_FLOW_CTRL, data);
+
+	/* 11 open tx double vlan according to stags */
+	eth_wr32(eth, RNP500_ETH_TX_VLAN_CONTROL_EANBLE, 1);
+
+	/* 12 drop packets with whole-packet length errors */
+	/* eth_wr32(eth, RNP500_ETH_RX_MAC_LEN_REG, 1); */
+	eth_wr32(eth, RNP500_ETH_WHOLE_PKT_LEN_ERR_DROP, 1);
+
+	/* 13 setup double vlan drop */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE)
+		eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 0);
+	else
+		eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 1);
+
+	/* 14 open error mask if in rx all mode */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL) {
+		eth_wr32(eth, RNP500_MAC_ERR_MASK,
+			 RUN_FRAME_ERROR | GAINT_FRAME_ERROR | CRC_ERROR |
+				 LENGTH_ERROR);
+		/* we open this in rx all mode */
+		eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 0);
+#define FORWARD_ALL_CONTROL (0x2)
+		eth_wr32(eth, RNP500_BAD_PACKETS_RECEIVE_EN, 1);
+		mac_wr32(mac, GMAC_FRAME_FILTER,
+			 0x00000001 | (FORWARD_ALL_CONTROL << 6));
+	} else {
+		eth_wr32(eth, RNP500_MAC_ERR_MASK,
+			 RUN_FRAME_ERROR | GAINT_FRAME_ERROR);
+		eth_wr32(eth, RNP500_BAD_PACKETS_RECEIVE_EN, 0);
+		mac_wr32(mac, GMAC_FRAME_FILTER, 0x00000001);
+	}
+	/* 15 update the water marks according to the max length */
+	{
+#define FIFO_ALL (1024)
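+		/* the thresholds below are in 16-byte units: keep the high
+		 * water mark one max-length frame below the fifo size
+		 */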
+		int water_high =
+			FIFO_ALL - ((hw->max_length_current + 15) >> 4);
+		/* n500 only use one */
+		hw->fc.high_water[0] = water_high;
+		hw->fc.low_water[0] = water_high;
+
+		dma_wr32(dma, RNP500_DMA_RBUF_FIFO,
+			 ((hw->max_length_current + 15) >> 4) + 5);
+
+		eth_wr32(eth, RNP500_ETH_EMAC_PARSE_PROGFULL_THRESH,
+			 ((hw->max_length_current + 15) >> 4) + 2);
+	}
+	/* 16 setup fcs mode */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_FCS)
+		hw->ops.set_fcs_mode(hw, true);
+	else
+		hw->ops.set_fcs_mode(hw, false);
+
+	/* 17 setup tso fifo */
+	dma_wr32(dma, RNP_DMA_PKT_FIFO_DATA_PROG_FULL_THRESH, 36);
+	/* 18 setup priv skip */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN)
+		data = PRIV_DATA_EN | adapter->priv_skip_count;
+	else
+		data = 0;
+	eth_wr32(eth, RNP500_ETH_PRIV_DATA_CONTROL_REG, data);
+	/* 19 setup mac count read self clear */
+	data = mac_rd32(mac, RNP500_MAC_COUNT_CONTROL);
+#define READ_CLEAR BIT(2)
+	data |= READ_CLEAR;
+	mac_wr32(mac, RNP500_MAC_COUNT_CONTROL, data);
+	/* 20 setup prio */
+	if (adapter->priv_flags &
+	    (RNP_PRIV_FLAG_8023_PRIO | RNP_PRIV_FLAG_REMAP_PRIO)) {
+		eth_wr32(eth, RNP500_PRIORITY_1_MARK, RNP500_PRIORITY_1);
+		eth_wr32(eth, RNP500_PRIORITY_0_MARK, RNP500_PRIORITY_0);
+		eth_wr32(eth, RNP500_PRIORITY_EN, 1);
+		if (adapter->priv_flags & RNP_PRIV_FLAG_8023_PRIO)
+			eth_wr32(eth, RNP500_PRIORITY_EN_8023, 1);
+		else
+			eth_wr32(eth, RNP500_PRIORITY_EN_8023, 0);
+	} else {
+		eth_wr32(eth, RNP500_PRIORITY_EN, 0);
+	}
+}
+
+static void rnpgbe_update_hw_rx_drop_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	int i;
+	struct rnpgbe_ring *ring;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		ring = adapter->rx_ring[i];
+		if (adapter->rx_drop_status & BIT(i)) {
+			ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH,
+				  adapter->drop_time);
+		} else {
+			ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0);
+		}
+	}
+}
+
+static void rnpgbe_set_rx_hash_hw_ops_n500(struct rnpgbe_hw *hw, bool status,
+					   bool sriov_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_rx_hash(eth, status, sriov_flag);
+}
+
+/*
+ * setup mac to rar 0
+ * clean vmdq
+ * clean mc addr
+ */
+static s32 rnpgbe_init_rx_addrs_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	u32 i;
+	u32 rar_entries = eth->num_rar_entries;
+	u32 v;
+
+	hw_dbg(hw, "init_rx_addrs:rar_entries:%d, mac.addr:%pM\n", rar_entries,
+	       hw->mac.addr);
+	/*
+	 * If the current mac address is valid, assume it is a software override
+	 * to the permanent address.
+	 * Otherwise, use the permanent address from the eeprom.
+	 */
+	if (!is_valid_ether_addr(hw->mac.addr)) {
+		/* Get the MAC address from the RAR0 for later reference */
+		memcpy(hw->mac.addr, hw->mac.perm_addr, ETH_ALEN);
+		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
+	} else {
+		/* Setup the receive address. */
+		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
+		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
+
+		eth->ops.set_rar(eth, 0, hw->mac.addr, true);
+		/*  clear VMDq pool/queue selection for RAR 0 */
+		eth->ops.clear_vmdq(eth, 0, RNP_CLEAR_VMDQ_ALL);
+	}
+	hw->addr_ctrl.overflow_promisc = 0;
+	hw->addr_ctrl.rar_used_count = 1;
+
+	/* Zero out the other receive addresses. */
+	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+	if (!hw->ncsi_en) {
+		for (i = 1; i < rar_entries; i++)
+			eth->ops.clear_rar(eth, i);
+	}
+
+	/* Clear the MTA */
+	hw->addr_ctrl.mta_in_use = 0;
+	v = eth_rd32(eth, RNP500_ETH_DMAC_MCSTCTRL);
+	v &= (~0x3);
+	v |= eth->mc_filter_type;
+	eth_wr32(eth, RNP500_ETH_DMAC_MCSTCTRL, v);
+
+	hw_dbg(hw, " Clearing MTA\n");
+	if (!hw->ncsi_en)
+		eth->ops.clr_mc_addr(eth);
+
+	return 0;
+}
+
+static void rnpgbe_clr_vfta_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_vfta(eth);
+}
+
+static void rnpgbe_set_txvlan_mode_hw_ops_n500(struct rnpgbe_hw *hw, bool cvlan)
+{
+	/* n500 not support this */
+}
+
+static int rnpgbe_set_rss_hfunc_hw_ops_n500(struct rnpgbe_hw *hw, u8 hfunc)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	switch (hfunc) {
+	case ETH_RSS_HASH_TOP:
+		adapter->rss_func_mode = rss_func_top;
+		break;
+
+	case ETH_RSS_HASH_XOR:
+		adapter->rss_func_mode = rss_func_xor;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	eth->ops.set_rss_hfunc(eth, adapter->rss_func_mode);
+	return 0;
+}
+
+static void rnpgbe_set_rss_key_hw_ops_n500(struct rnpgbe_hw *hw,
+					   bool sriov_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	int key_len = RNP_RSS_KEY_SIZE;
+
+	memcpy(hw->rss_key, adapter->rss_key, key_len);
+
+	eth->ops.set_rss_key(eth, sriov_flag);
+}
+
+static void rnpgbe_set_rss_table_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_rss_table(eth);
+}
+
+static void rnpgbe_set_mbx_link_event_hw_ops_n500(struct rnpgbe_hw *hw,
+						  int enable)
+{
+	rnpgbe_mbx_link_event_enable(hw, enable);
+}
+
+static void rnpgbe_set_mbx_ifup_hw_ops_n500(struct rnpgbe_hw *hw, int enable)
+{
+	rnpgbe_mbx_ifup_down(hw, enable);
+}
+
+/**
+ *  rnpgbe_check_mac_link_hw_ops_n500 - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true when link is up
+ *  @duplex: true when the link is full duplex
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Reports the link state, speed and duplex cached in the hw structure
+ **/
+static s32 rnpgbe_check_mac_link_hw_ops_n500(struct rnpgbe_hw *hw,
+					     rnpgbe_link_speed *speed,
+					     bool *link_up,
+					     bool *duplex,
+					     bool link_up_wait_to_complete)
+{
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	if (hw->speed == 10)
+		*speed = RNP_LINK_SPEED_10_FULL;
+	else if (hw->speed == 100)
+		*speed = RNP_LINK_SPEED_100_FULL;
+	else if (hw->speed == 1000)
+		*speed = RNP_LINK_SPEED_1GB_FULL;
+	else if (hw->speed == 10000)
+		*speed = RNP_LINK_SPEED_10GB_FULL;
+	else if (hw->speed == 25000)
+		*speed = RNP_LINK_SPEED_25GB_FULL;
+	else if (hw->speed == 40000)
+		*speed = RNP_LINK_SPEED_40GB_FULL;
+	else
+		*speed = RNP_LINK_SPEED_UNKNOWN;
+
+	*link_up = hw->link;
+
+	if (adapter->priv_flags & RNP_PRIV_FLGA_TEST_TX_HANG)
+		*link_up = 0;
+
+	*duplex = hw->duplex;
+
+	return 0;
+}
+
+static s32 rnpgbe_setup_mac_link_hw_ops_n500(struct rnpgbe_hw *hw,
+					     u32 adv,
+					     u32 autoneg,
+					     u32 speed, u32 duplex)
+{
+	rnpgbe_mbx_phy_link_set(hw, adv, autoneg, speed, duplex,
+				hw->tp_mdix_ctrl);
+
+	return 0;
+}
+
+static void rnpgbe_clean_link_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	hw->link = 0;
+}
+
+static s32 rnpgbe_get_link_capabilities_hw_ops_n500(struct rnpgbe_hw *hw,
+						    rnpgbe_link_speed *speed,
+						    bool *autoneg)
+{
+	/* fixed setup; the actual capabilities depend on the firmware */
+	*speed = RNP_LINK_SPEED_10GB_FULL;
+	*autoneg = false;
+
+	return 0;
+}
+
+static void rnpgbe_set_layer2_hw_ops_n500(struct rnpgbe_hw *hw,
+					  union rnpgbe_atr_input *input,
+					  u16 pri_id, u8 queue, bool prio_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_layer2_remapping(eth, input, pri_id, queue, prio_flag);
+}
+
+static void rnpgbe_clr_layer2_hw_ops_n500(struct rnpgbe_hw *hw, u16 pri_id)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_layer2_remapping(eth, pri_id);
+}
+
+static void rnpgbe_clr_all_layer2_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_all_layer2_remapping(eth);
+}
+
+static void rnpgbe_clr_all_tuple5_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_all_tuple5_remapping(eth);
+}
+
+static void rnpgbe_set_tcp_sync_hw_ops_n500(struct rnpgbe_hw *hw, int queue,
+					    bool flag, bool prio)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_tcp_sync_remapping(eth, queue, flag, prio);
+}
+
+static void rnpgbe_set_rx_skip_hw_ops_n500(struct rnpgbe_hw *hw, int count,
+					   bool flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_rx_skip(eth, count, flag);
+}
+
+static void rnpgbe_set_outer_vlan_type_hw_ops_n500(struct rnpgbe_hw *hw,
+						   int type)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_outer_vlan_type(eth, type);
+}
+
+static s32 rnpgbe_phy_read_reg_hw_ops_n500(struct rnpgbe_hw *hw,
+					   u32 reg_addr,
+					   u32 device_type,
+					   u16 *phy_data)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	s32 status = 0;
+	u32 data = 0;
+
+	status = mac->ops.mdio_read(mac, reg_addr, &data);
+	*phy_data = data & 0xffff;
+
+	return status;
+}
+
+static s32 rnpgbe_phy_write_reg_hw_ops_n500(struct rnpgbe_hw *hw,
+					    u32 reg_addr,
+					    u32 device_type,
+					    u16 phy_data)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	s32 status = 0;
+
+	status = mac->ops.mdio_write(mac, reg_addr, (u32)phy_data);
+
+	return status;
+}
+
+static void rnpgbe_setup_wol_hw_ops_n500(struct rnpgbe_hw *hw,
+					 u32 mode)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.pmt(mac, mode, !!hw->ncsi_en);
+}
+
+static void rnpgbe_setup_eee_hw_ops_n500(struct rnpgbe_hw *hw,
+					 int ls, int tw,
+					 u32 local_eee)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.set_eee_timer(mac, ls, tw);
+	rnpgbe_mbx_phy_eee_set(hw, tw, local_eee);
+}
+
+static void rnpgbe_set_eee_mode_hw_ops_n500(struct rnpgbe_hw *hw,
+					    bool en_tx_lpi_clockgating)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.set_eee_mode(mac, en_tx_lpi_clockgating);
+}
+
+static void rnpgbe_reset_eee_mode_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.reset_eee_mode(mac);
+}
+
+static void rnpgbe_set_eee_pls_hw_ops_n500(struct rnpgbe_hw *hw,
+					   int link)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.set_eee_pls(mac, link);
+}
+
+static u32 rnpgbe_get_lpi_status_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	return mac->ops.get_lpi_status(mac);
+}
+
+static int rnpgbe_get_ncsi_mac_hw_ops_n500(struct rnpgbe_hw *hw,
+					   u8 *addr, int idx)
+{
+#define NCSI_MAC_H(i) (0x48 + (i) * 0x8)
+#define NCSI_MAC_L(i) (0x4C + (i) * 0x8)
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	u32 rar_h, rar_l;
+
+	rar_h = mac_rd32(mac, NCSI_MAC_H(idx));
+	rar_l = mac_rd32(mac, NCSI_MAC_L(idx));
+
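+	/* the ncsi rar is stored little-endian: the low register holds mac
+	 * bytes 0-3, the high register bytes 4-5; an all-ones value is
+	 * treated as an empty entry
+	 */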
+	if (((rar_h & 0x0000ffff) != 0x0000ffff) || (rar_l != 0xffffffff)) {
+		*(addr + 3) = (rar_l & 0xff000000) >> 24;
+		*(addr + 2) = (rar_l & 0xff0000) >> 16;
+		*(addr + 1) = (rar_l & 0xff00) >> 8;
+		*(addr + 0) = (rar_l & 0xff) >> 0;
+		*(addr + 5) = (rar_h & 0xff00) >> 8;
+		*(addr + 4) = (rar_h & 0xff) >> 0;
+		return 0;
+	} else {
+		return -1;
+	}
+}
+
+static int rnpgbe_get_ncsi_vlan_hw_ops_n500(struct rnpgbe_hw *hw,
+					    u16 *vlan, int idx)
+{
+#define NCSI_VLAN(i) (0x80 + (i) * 0x10)
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	u32 vid;
+
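+	/* bit 31 marks the ncsi vlan entry as valid; the vlan id itself is
+	 * in the low 16 bits
+	 */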
+	vid = mac_rd32(mac, NCSI_VLAN(idx));
+
+	if (vid & 0x80000000) {
+		*vlan = (u16)(vid & 0x0000ffff);
+		return 0;
+	} else {
+		return -1;
+	}
+}
+
+static void rnpgbe_set_lldp_hw_ops_n500(struct rnpgbe_hw *hw,
+					bool enable)
+{
+	rnpgbe_mbx_lldp_set(hw, enable);
+}
+
+static void rnpgbe_get_lldp_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+}
+
+static void rnpgbe_set_eee_timer_hw_ops_n500(struct rnpgbe_hw *hw,
+					     int ls, int tw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+
+	mac->ops.set_eee_timer(mac, ls, tw);
+}
+
+static void rnpgbe_set_vf_vlan_mode_hw_ops_n500(struct rnpgbe_hw *hw,
+						u16 vlan, int vf,
+						bool enable)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE)
+		eth->ops.set_vf_vlan_mode(eth, vlan, vf, enable);
+}
+
+static void rnpgbe_driver_status_hw_ops_n500(struct rnpgbe_hw *hw,
+					     bool enable,
+					     int mode)
+{
+	switch (mode) {
+	case rnpgbe_driver_insmod:
+		rnpgbe_mbx_ifinsmod(hw, enable);
+		break;
+	case rnpgbe_driver_suspuse:
+		rnpgbe_mbx_ifsuspuse(hw, enable);
+		break;
+	case rnpgbe_driver_force_control_phy:
+		rnpgbe_mbx_ifforce_control_mac(hw, enable);
+		break;
+	}
+}
+
+static void rnpgbe_set_tuple5_hw_ops_n500(struct rnpgbe_hw *hw,
+					  union rnpgbe_atr_input *input,
+					  u16 pri_id, u8 queue, bool prio_flag)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.set_tuple5_remapping(eth, input, pri_id, queue, prio_flag);
+}
+
+static void rnpgbe_clr_tuple5_hw_ops_n500(struct rnpgbe_hw *hw, u16 pri_id)
+{
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	eth->ops.clr_tuple5_remapping(eth, pri_id);
+}
+
+static void
+rnpgbe_update_hw_status_hw_ops_n500(struct rnpgbe_hw *hw,
+				    struct rnpgbe_hw_stats *hw_stats,
+				    struct net_device_stats *net_stats)
+{
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	int i;
+
+	net_stats->rx_errors += eth_rd32(eth, RNP500_RX_MAC_GFCS_ERR_NUM) +
+				eth_rd32(eth, RNP500_RX_MAC_LEN_ERR_NUM) +
+				eth_rd32(eth, RNP500_RX_MAC_SFCS_ERR_NUM) +
+				eth_rd32(eth, RNP500_RX_MAC_GLEN_ERR_NUM) +
+				eth_rd32(eth, RNP500_RX_MAC_SLEN_ERR_NUM);
+
+	net_stats->collisions = eth_rd32(eth, RNP500_RX_MAC_LCS_ERR_NUM);
+	net_stats->rx_over_errors = eth_rd32(eth, RNP500_RX_MAC_CUT_NUM);
+	net_stats->rx_crc_errors = eth_rd32(eth, RNP500_RX_MAC_GFCS_ERR_NUM);
+	hw_stats->invalid_droped_packets =
+		eth_rd32(eth, RNP500_RX_DROP_PKT_NUM);
+
+	hw_stats->rx_capabity_lost = eth_rd32(eth, RNP500_RXTRANS_DROP) +
+				     eth_rd32(eth, RNP500_RXTRANS_CUT_ERR_PKTS);
+	hw_stats->filter_dropped_packets =
+		eth_rd32(eth, RNP500_DECAP_PKT_DROP1_NUM);
+	hw_stats->host_l2_match_drop =
+		eth_rd32(eth, RNP500_ETH_HOST_L2_DROP_PKTS);
+	hw_stats->redir_input_match_drop =
+		eth_rd32(eth, RNP500_ETH_REDIR_INPUT_MATCH_DROP_PKTS);
+	hw_stats->redir_etype_match_drop =
+		eth_rd32(eth, RNP500_ETH_ETYPE_DROP_PKTS);
+	hw_stats->redir_tcp_syn_match_drop =
+		eth_rd32(eth, RNP500_ETH_TCP_SYN_DROP_PKTS);
+	hw_stats->redir_tuple5_match_drop =
+		eth_rd32(eth, RNP500_ETH_REDIR_TUPLE5_DROP_PKTS);
+	hw_stats->tx_multicast = eth_rd32(eth, RNP500_TX_MULTI_NUM);
+	hw_stats->tx_broadcast = eth_rd32(eth, RNP500_TX_BROADCAST_NUM);
+	hw_stats->mac_rx_broadcast = 0;
+	hw_stats->mac_rx_multicast = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbe_ring *tx_ring = adapter->tx_ring[i];
+		int idx = tx_ring->rnpgbe_queue_idx;
+
+		hw_stats->mac_rx_multicast +=
+			dma_rd32(dma, RNP500_VEB_VFMPRC(idx));
+		hw_stats->mac_rx_broadcast +=
+			dma_rd32(dma, RNP500_VEB_VFBPRC(idx));
+	}
+	hw_stats->dma_rx_drop_cnt_0 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(0));
+	hw_stats->dma_rx_drop_cnt_1 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(1));
+	hw_stats->dma_rx_drop_cnt_2 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(2));
+	hw_stats->dma_rx_drop_cnt_3 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(3));
+	hw_stats->dma_rx_drop_cnt_4 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(4));
+	hw_stats->dma_rx_drop_cnt_5 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(5));
+	hw_stats->dma_rx_drop_cnt_6 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(6));
+	hw_stats->dma_rx_drop_cnt_7 = dma_rd32(dma, RNP500_RX_TIMEOUT_DROP(7));
+
+	net_stats->multicast = hw_stats->mac_rx_multicast;
+
+	hw_stats->ultra_short_cnt +=
+		mac_rd32(mac, GMAC_MANAGEMENT_RX_UNDERSIZE);
+	hw_stats->jumbo_cnt += mac_rd32(mac, RNP500_MAC_GLEN_ERR_NUM);
+	hw_stats->tx_pause += mac_rd32(mac, GMAC_MANAGEMENT_TX_PAUSE);
+	hw_stats->rx_pause += mac_rd32(mac, GMAC_MANAGEMENT_RX_PAUSE);
+}
+
+const struct rnpgbe_stats rnp500_gstrings_net_stats[] = {
+	RNP_NETDEV_STAT(rx_packets),
+	RNP_NETDEV_STAT(tx_packets),
+	RNP_NETDEV_STAT(rx_bytes),
+	RNP_NETDEV_STAT(tx_bytes),
+	RNP_NETDEV_STAT(rx_errors),
+	RNP_NETDEV_STAT(tx_errors),
+	RNP_NETDEV_STAT(rx_dropped),
+	RNP_NETDEV_STAT(tx_dropped),
+	RNP_NETDEV_STAT(multicast),
+	RNP_NETDEV_STAT(collisions),
+	RNP_NETDEV_STAT(rx_over_errors),
+	RNP_NETDEV_STAT(rx_crc_errors),
+	RNP_NETDEV_STAT(rx_frame_errors),
+	RNP_NETDEV_STAT(rx_fifo_errors),
+	RNP_NETDEV_STAT(rx_missed_errors),
+	RNP_NETDEV_STAT(tx_aborted_errors),
+	RNP_NETDEV_STAT(tx_carrier_errors),
+	RNP_NETDEV_STAT(tx_fifo_errors),
+	RNP_NETDEV_STAT(tx_heartbeat_errors),
+};
+
+#define RNP500_GLOBAL_STATS_LEN ARRAY_SIZE(rnp500_gstrings_net_stats)
+
+static struct rnpgbe_stats rnp500_hwstrings_stats[] = {
+	RNP_HW_STAT("vlan_add_cnt", hw_stats.vlan_add_cnt),
+	RNP_HW_STAT("vlan_strip_cnt", hw_stats.vlan_strip_cnt),
+	RNP_HW_STAT("invalid_droped_packets", hw_stats.invalid_droped_packets),
+	RNP_HW_STAT("rx_capabity_drop", hw_stats.rx_capabity_lost),
+	RNP_HW_STAT("filter_dropped_packets", hw_stats.filter_dropped_packets),
+	RNP_HW_STAT("host_l2_match_drop", hw_stats.host_l2_match_drop),
+	RNP_HW_STAT("redir_input_match_drop", hw_stats.redir_input_match_drop),
+	RNP_HW_STAT("redir_etype_match_drop", hw_stats.redir_etype_match_drop),
+	RNP_HW_STAT("redir_tcp_syn_match_drop",
+		    hw_stats.redir_tcp_syn_match_drop),
+	RNP_HW_STAT("redir_tuple5_match_drop",
+		    hw_stats.redir_tuple5_match_drop),
+	RNP_HW_STAT("tx_multicast", hw_stats.tx_multicast),
+	RNP_HW_STAT("tx_broadcast", hw_stats.tx_broadcast),
+	RNP_HW_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+	RNP_HW_STAT("rx_csum_offload_good", hw_csum_rx_good),
+	RNP_HW_STAT("rx_broadcast_count", hw_stats.mac_rx_broadcast),
+	RNP_HW_STAT("rx_multicast_count", hw_stats.mac_rx_multicast),
+	RNP_HW_STAT("ultra_short_packets", hw_stats.ultra_short_cnt),
+	RNP_HW_STAT("jumbo_packets", hw_stats.jumbo_cnt),
+	RNP_HW_STAT("mac_rx_pause_count", hw_stats.rx_pause),
+	RNP_HW_STAT("mac_tx_pause_count", hw_stats.tx_pause),
+};
+
+#define RNP500_HWSTRINGS_STATS_LEN ARRAY_SIZE(rnp500_hwstrings_stats)
+
+#define RNP500_STATS_LEN                                                       \
+	(RNP500_GLOBAL_STATS_LEN + RNP500_HWSTRINGS_STATS_LEN +                \
+	 RNP_QUEUE_STATS_LEN)
+
+static const char rnp500_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Eeprom test    (offline)",
+	"Interrupt test (offline)", "Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+
+#define RNP500_TEST_LEN (sizeof(rnp500_gstrings_test) / ETH_GSTRING_LEN)
+
+static int rnp500_get_link_ksettings(struct net_device *netdev,
+				     struct ethtool_link_ksettings *cmd)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	rnpgbe_link_speed supported_link;
+	rnpgbe_link_speed advertised_link;
+	bool autoneg = hw->autoneg;
+
+	ethtool_link_ksettings_zero_link_mode(cmd, supported);
+	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+	supported_link = hw->supported_link;
+	advertised_link = hw->advertised_link;
+
+	if (hw->is_sgmii) {
+		if (supported_link & RNP_LINK_SPEED_1GB_FULL)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     1000baseT_Full);
+		if (supported_link & RNP_LINK_SPEED_100_FULL)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     100baseT_Full);
+		if (supported_link & RNP_LINK_SPEED_10_FULL)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     10baseT_Full);
+		if (supported_link & RNP_LINK_SPEED_1GB_HALF)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     1000baseT_Half);
+		if (supported_link & RNP_LINK_SPEED_100_HALF)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     100baseT_Half);
+		if (supported_link & RNP_LINK_SPEED_10_HALF)
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     10baseT_Half);
+
+		if ((autoneg) && (!hw->fake_autoneg)) {
+			if (advertised_link & RNP_LINK_SPEED_1GB_FULL)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 1000baseT_Full);
+			if (advertised_link & RNP_LINK_SPEED_100_FULL)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 100baseT_Full);
+			if (advertised_link & RNP_LINK_SPEED_10_FULL)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 10baseT_Full);
+			if (advertised_link & RNP_LINK_SPEED_1GB_HALF)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 1000baseT_Half);
+			if (advertised_link & RNP_LINK_SPEED_100_HALF)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 100baseT_Half);
+			if (advertised_link & RNP_LINK_SPEED_10_HALF)
+				ethtool_link_ksettings_add_link_mode(
+					cmd, advertising, 10baseT_Half);
+		}
+
+		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+		if (!hw->fake_autoneg)
+			ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+		cmd->base.port = PORT_TP;
+		cmd->base.phy_address = adapter->phy_addr;
+		cmd->base.duplex = adapter->duplex;
+		if (adapter->hw.link)
+			cmd->base.eth_tp_mdix = hw->tp_mdx;
+		else
+			cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+
+		cmd->base.eth_tp_mdix_ctrl = hw->tp_mdix_ctrl;
+	} else {
+		if (supported_link & RNP_LINK_SPEED_1GB_FULL) {
+			ethtool_link_ksettings_add_link_mode(cmd, supported,
+							     1000baseKX_Full);
+		}
+
+		if (advertised_link & RNP_LINK_SPEED_1GB_FULL)
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     1000baseKX_Full);
+		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+		cmd->base.port = PORT_FIBRE;
+	}
+
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+
+	if (autoneg) {
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+		cmd->base.autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->base.autoneg = AUTONEG_DISABLE;
+	}
+
+	if (hw->fake_autoneg)
+		cmd->base.autoneg = AUTONEG_DISABLE;
+
+	/* set pause support */
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+
+	if (hw->fc.requested_mode & PAUSE_AUTO) {
+		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+	} else {
+		if ((hw->fc.requested_mode & PAUSE_TX) &&
+		    (hw->fc.requested_mode & PAUSE_RX))
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     Pause);
+		else if (hw->fc.requested_mode & PAUSE_TX)
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     Asym_Pause);
+		else if (hw->fc.requested_mode & PAUSE_RX)
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     Asym_Pause);
+		else
+			ethtool_link_ksettings_add_link_mode(cmd, advertising,
+							     Asym_Pause);
+	}
+
+	if (adapter->hw.link) {
+		cmd->base.speed = hw->speed;
+		if (adapter->hw.duplex)
+			cmd->base.duplex = DUPLEX_FULL;
+		else
+			cmd->base.duplex = DUPLEX_HALF;
+	} else {
+		cmd->base.speed = SPEED_UNKNOWN;
+		cmd->base.duplex = DUPLEX_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static int rnp500_set_link_ksettings(struct net_device *netdev,
+				     const struct ethtool_link_ksettings *cmd)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 advertised, old;
+	s32 err = 0;
+	u32 speed = 0, autoneg = 0, duplex = 0;
+
+	if ((hw->phy_type == rnpgbe_media_type_copper) ||
+	    (hw->phy.multispeed_fiber)) {
+		/*
+		 * this function does not support duplex forcing, but can
+		 * limit the advertising of the adapter to the specified speed
+		 */
+		/* only allow one speed at a time when autoneg is off */
+		if (!cmd->base.autoneg) {
+			if ((cmd->base.speed != SPEED_100) &&
+			    (cmd->base.speed != SPEED_10) &&
+			    (cmd->base.speed != SPEED_1000))
+				return -EINVAL;
+			autoneg = 0;
+			speed = cmd->base.speed;
+			duplex = cmd->base.duplex;
+			/* forcing 1000 Mb/s still requires autoneg */
+			if (cmd->base.speed == SPEED_1000) {
+				autoneg = 1;
+				hw->fake_autoneg = 1;
+			}
+		} else {
+			hw->fake_autoneg = 0;
+			autoneg = 1;
+		}
+
+		if (cmd->base.eth_tp_mdix_ctrl) {
+			if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
+			    (cmd->base.autoneg != AUTONEG_ENABLE)) {
+				dev_err(&adapter->pdev->dev,
+					"forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
+				return -EINVAL;
+			}
+		}
+
+		hw->autoneg = autoneg;
+		hw->tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+		old = hw->advertised_link;
+		advertised = 0;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  1000baseT_Full))
+			advertised |= RNP_LINK_SPEED_1GB_FULL;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  100baseT_Full))
+			advertised |= RNP_LINK_SPEED_100_FULL;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  10baseT_Full))
+			advertised |= RNP_LINK_SPEED_10_FULL;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  100baseT_Half))
+			advertised |= RNP_LINK_SPEED_100_HALF;
+
+		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+							  10baseT_Half))
+			advertised |= RNP_LINK_SPEED_10_HALF;
+
+		/* with autoneg on, the advertised mask cannot be empty */
+		if ((!advertised) && (autoneg))
+			return -EINVAL;
+
+		/* this sets the link speed and restarts auto-neg */
+		while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state))
+			usleep_range(1000, 2000);
+
+		hw->mac.autotry_restart = true;
+		err = hw->ops.setup_link(hw, advertised, autoneg, speed,
+					 duplex);
+		if (err) {
+			printk(KERN_DEBUG "setup link failed with code %d\n",
+			       err);
+			hw->ops.setup_link(hw, old, autoneg, speed, duplex);
+		} else {
+			hw->advertised_link = advertised;
+		}
+		clear_bit(__RNP_IN_SFP_INIT, &adapter->state);
+	} else {
+		/* if not sgmii, disabling autoneg is not supported */
+		if ((!cmd->base.autoneg) && (!hw->is_sgmii))
+			return -EINVAL;
+		if (cmd->base.duplex == DUPLEX_HALF)
+			return -EINVAL;
+		if (cmd->base.speed != SPEED_1000)
+			return -EINVAL;
+	}
+
+	return err;
+}
+
+static int rnp500_get_regs_len(struct net_device *netdev)
+{
+#define RNP500_REGS_LEN 1
+	return RNP500_REGS_LEN * sizeof(u32);
+}
+
+static void rnp500_get_drvinfo(struct net_device *netdev,
+			       struct ethtool_drvinfo *drvinfo)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	strscpy(drvinfo->driver, rnpgbe_driver_name, sizeof(drvinfo->driver));
+	strscpy(drvinfo->version, rnpgbe_driver_version,
+		sizeof(drvinfo->version));
+
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%d.%d.%d.%d", ((unsigned char *)&(hw->fw_version))[3],
+		 ((unsigned char *)&(hw->fw_version))[2],
+		 ((unsigned char *)&(hw->fw_version))[1],
+		 ((unsigned char *)&(hw->fw_version))[0]);
+	strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
+	drvinfo->n_stats = RNP500_STATS_LEN;
+	drvinfo->testinfo_len = RNP500_TEST_LEN;
+	drvinfo->regdump_len = rnp500_get_regs_len(netdev);
+	drvinfo->n_priv_flags = RNP500_PRIV_FLAGS_STR_LEN;
+}
+
+static int rnp500_get_eeprom_len(struct net_device *netdev)
+{
+	/* not supported */
+	return 0;
+}
+
+static int rnp500_get_eeprom(struct net_device *netdev,
+			     struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 *eeprom_buff;
+	int first_u32, last_u32, eeprom_len;
+	int ret_val = 0;
+
+	if (hw->hw_type == rnpgbe_hw_n210)
+		return -EPERM;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	/* convert the byte range to 32-bit word indices */
+	first_u32 = eeprom->offset >> 2;
+	last_u32 = (eeprom->offset + eeprom->len - 1) >> 2;
+	eeprom_len = last_u32 - first_u32 + 1;
+
+	eeprom_buff = kcalloc(eeprom_len, sizeof(u32), GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
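+	/* no eeprom read is issued here; the buffer contents are returned
+	 * to the caller as-is
+	 */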
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 0x03), eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int rnp500_set_eeprom(struct net_device *netdev,
+			     struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	return 0;
+}
+
+static void rnp500_get_pauseparam(struct net_device *netdev,
+				  struct ethtool_pauseparam *pause)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (hw->fc.requested_mode & PAUSE_AUTO)
+		pause->autoneg = 1;
+	else
+		pause->autoneg = 0;
+
+	if (hw->fc.current_mode == rnpgbe_fc_rx_pause) {
+		pause->rx_pause = 1;
+	} else if (hw->fc.current_mode == rnpgbe_fc_tx_pause) {
+		pause->tx_pause = 1;
+	} else if (hw->fc.current_mode == rnpgbe_fc_full) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	} else {
+		pause->rx_pause = 0;
+		pause->tx_pause = 0;
+	}
+}
+
+static int rnp500_set_pauseparam(struct net_device *netdev,
+				 struct ethtool_pauseparam *pause)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_fc_info fc = hw->fc;
+
+	/* changing pause parameters is not supported in dcb mode */
+	if (adapter->flags & RNP_FLAG_DCB_ENABLED)
+		return -EINVAL;
+
+	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
+
+	fc.requested_mode = 0;
+
+	if (pause->autoneg) {
+		fc.requested_mode |= PAUSE_AUTO;
+	} else {
+		if (pause->tx_pause)
+			fc.requested_mode |= PAUSE_TX;
+		if (pause->rx_pause)
+			fc.requested_mode |= PAUSE_RX;
+	}
+	rnpgbe_mbx_phy_pause_set(hw, fc.requested_mode);
+
+	hw->fc = fc;
+	return 0;
+}
+
+static void rnp500_get_regs(struct net_device *netdev,
+			    struct ethtool_regs *regs, void *p)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	int i;
+
+	memset(p, 0, RNP500_REGS_LEN * sizeof(u32));
+
+	for (i = 0; i < RNP500_REGS_LEN; i++)
+		regs_buff[i] = rd32(hw, i * 4);
+}
+
+static int rnp500_nway_reset(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		rnpgbe_reinit_locked(adapter);
+
+	return 0;
+}
+
+static void rnp500_get_strings(struct net_device *netdev, u32 stringset,
+			       u8 *data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	char *p = (char *)data;
+	int i;
+	struct rnpgbe_ring *ring;
+	u32 dma_ch;
+
+	switch (stringset) {
+	/* maybe we don't support test? */
+	case ETH_SS_TEST:
+		for (i = 0; i < RNP500_TEST_LEN; i++) {
+			memcpy(data, rnp500_gstrings_test[i], ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < RNP500_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, rnp500_gstrings_net_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < RNP500_HWSTRINGS_STATS_LEN; i++) {
+			memcpy(p, rnp500_hwstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (i = 0; i < RNP_NUM_TX_QUEUES; i++) {
+			/* ====  tx ======== */
+			ring = adapter->tx_ring[i];
+			dma_ch = ring->rnpgbe_queue_idx;
+			sprintf(p, "---\n     queue%u_tx_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_bytes", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_restart", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_busy", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_done_old", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_clean_desc", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_poll_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_irq_more", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_hw_head", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_hw_tail", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_sw_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_sw_next_to_use", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_send_bytes", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_send_bytes_to_hw", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_todo_update", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_send_done_bytes", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_added_vlan_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_irq_miss", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_equal_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_clean_times", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_clean_count", i);
+			p += ETH_GSTRING_LEN;
+
+			/* ====  rx ======== */
+			ring = adapter->rx_ring[i];
+			dma_ch = ring->rnpgbe_queue_idx;
+			sprintf(p, "queue%u_rx_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_bytes", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_driver_drop_packets", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_rsc", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rsc_flush", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_non_eop_descs", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_page_failed", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_buff_failed", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_page", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_csum_offload_errs", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_csum_offload_good", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_poll_again_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rm_vlan_packets", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_hw_head", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_hw_tail", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_sw_next_to_use", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_sw_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_irq_miss", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_equal_count", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_clean_times", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_clean_count", i);
+			p += ETH_GSTRING_LEN;
+		}
+
+		break;
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(data, rnp500_priv_flags_strings,
+		       RNP500_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+		break;
+	}
+}
+
+static int rnp500_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	/* now we don't support test */
+	case ETH_SS_TEST:
+		return RNP500_TEST_LEN;
+	case ETH_SS_STATS:
+		return RNP500_STATS_LEN;
+	case ETH_SS_PRIV_FLAGS:
+		return RNP500_PRIV_FLAGS_STR_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static u32 rnp500_get_priv_flags(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 priv_flags = 0;
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK)
+		priv_flags |= RNP500_MAC_LOOPBACK;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG)
+		priv_flags |= RNP500_PADDING_DEBUG;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN)
+		priv_flags |= RNP500_SIMULATE_DOWN;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_ULTRA_SHORT)
+		priv_flags |= RNP500_ULTRA_SHORT;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN)
+		priv_flags |= RNP500_DOUBLE_VLAN;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_PAUSE_OWN)
+		priv_flags |= RNP500_PAUSE_OWN;
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED)
+		priv_flags |= RNP500_STAGS_ENABLE;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_JUMBO)
+		priv_flags |= RNP500_JUMBO_ENABLE;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING)
+		priv_flags |= RNP500_TX_PADDING;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SOFT_TX_PADDING)
+		priv_flags |= RNP500_TX_SOLF_PADDING;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR)
+		priv_flags |= RNP500_REC_HDR_LEN_ERR;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE)
+		priv_flags |= RNP500_DOUBLE_VLAN_RECEIVE;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN)
+		priv_flags |= RNP500_RX_SKIP_EN;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO)
+		priv_flags |= RNP500_TCP_SYNC_PRIO;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO)
+		priv_flags |= RNP500_REMAP_PRIO;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_8023_PRIO)
+		priv_flags |= RNP500_8023_PRIO;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE)
+		priv_flags |= RNP500_SRIOV_VLAN_MODE;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP)
+		priv_flags |= RNP500_LLDP_EN;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)
+		priv_flags |= RNP500_FORCE_CLOSE;
+
+	return priv_flags;
+}
+
+static int rnp500_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	u32 data_old;
+	u32 data_new;
+
+	data_old = dma_rd32(dma, RNP_DMA_CONFIG);
+	data_new = data_old;
+	dbg("data old is %x\n", data_old);
+
+	if (priv_flags & RNP500_MAC_LOOPBACK) {
+		SET_BIT(n500_mac_loopback, data_new);
+		adapter->priv_flags |= RNP_PRIV_FLAG_MAC_LOOPBACK;
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_MAC_LOOPBACK) {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_MAC_LOOPBACK);
+		CLR_BIT(n500_mac_loopback, data_new);
+	}
+
+	if (priv_flags & RNP500_PADDING_DEBUG)
+		adapter->priv_flags |= RNP_PRIV_FLAG_PADDING_DEBUG;
+	else if (adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG)
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_PADDING_DEBUG);
+
+	if (priv_flags & RNP500_SIMULATE_DOWN) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_SIMUATE_DOWN;
+		/* set check link again */
+		adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE;
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_SIMUATE_DOWN) {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_SIMUATE_DOWN);
+		/* set check link again */
+		adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE;
+	}
+
+	/* enable or disable reception of ultra-short frames */
+	if (priv_flags & RNP500_ULTRA_SHORT) {
+		int min = 33;
+
+		adapter->priv_flags |= RNP_PRIV_FLAG_ULTRA_SHORT;
+		eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MIN_LEN, min);
+
+	} else {
+		int min = 60;
+
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_ULTRA_SHORT);
+		eth_wr32(eth, RNP500_ETH_DEFAULT_RX_MIN_LEN, min);
+	}
+	if (priv_flags & RNP500_PAUSE_OWN) {
+		u32 data;
+
+		data = mac_rd32(mac, GMAC_FLOW_CTRL);
+		data |= GMAC_FLOW_CTRL_UP;
+		adapter->priv_flags |= RNP_PRIV_FLAG_PAUSE_OWN;
+		mac_wr32(mac, GMAC_FLOW_CTRL, data);
+
+	} else {
+		u32 data;
+
+		data = mac_rd32(mac, GMAC_FLOW_CTRL);
+		data &= (~GMAC_FLOW_CTRL_UP);
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_PAUSE_OWN);
+		mac_wr32(mac, GMAC_FLOW_CTRL, data);
+	}
+
+	if (priv_flags & RNP500_DOUBLE_VLAN) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_DOUBLE_VLAN;
+		eth->ops.set_double_vlan(eth, true);
+	} else {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_DOUBLE_VLAN);
+		eth->ops.set_double_vlan(eth, false);
+	}
+
+	if (priv_flags & RNP500_STAGS_ENABLE) {
+		eth_wr32(eth, RNP500_ETH_TX_VLAN_CONTROL_EANBLE, 1);
+		adapter->flags2 |= RNP_FLAG2_VLAN_STAGS_ENABLED;
+		eth->ops.set_vfta(eth, adapter->stags_vid, true);
+	} else {
+		int true_remove = 1;
+		int vid = adapter->stags_vid;
+
+		eth_wr32(eth, RNP500_ETH_TX_VLAN_CONTROL_EANBLE, 0);
+		adapter->flags2 &= (~RNP_FLAG2_VLAN_STAGS_ENABLED);
+		if (vid) {
+			if (test_bit(vid, adapter->active_vlans))
+				true_remove = 0;
+			if (test_bit(vid, adapter->active_vlans_stags))
+				true_remove = 0;
+			if (true_remove)
+				hw->ops.set_vlan_filter(hw, vid, false, false);
+		}
+	}
+
+	if (priv_flags & RNP500_JUMBO_ENABLE) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_JUMBO;
+		hw->ops.set_mtu(hw, netdev->mtu);
+	} else {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_JUMBO);
+		hw->ops.set_mtu(hw, netdev->mtu);
+	}
+
+	if (priv_flags & RNP500_TX_PADDING)
+		adapter->priv_flags |= RNP_PRIV_FLAG_TX_PADDING;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_TX_PADDING);
+
+	if (priv_flags & RNP500_TX_SOLF_PADDING)
+		adapter->priv_flags |= RNP_PRIV_FLAG_SOFT_TX_PADDING;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_SOFT_TX_PADDING);
+
+	if (priv_flags & RNP500_REC_HDR_LEN_ERR) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_REC_HDR_LEN_ERR;
+		eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR, 0);
+
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_REC_HDR_LEN_ERR) {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_REC_HDR_LEN_ERR);
+		eth_wr32(eth, RNP500_ETH_ERR_MASK_VECTOR,
+			 PKT_LEN_ERR | HDR_LEN_ERR);
+	}
+
+	if (priv_flags & RNP500_DOUBLE_VLAN_RECEIVE) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE;
+		if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL))
+			eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 0);
+	} else {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_DOUBLE_VLAN_RECEIVE);
+		if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_ALL))
+			eth_wr32(eth, RNP500_ETH_DOUBLE_VLAN_DROP, 1);
+	}
+
+	if (priv_flags & RNP500_TCP_SYNC_PRIO)
+		adapter->priv_flags |= RNP_PRIV_FLAG_TCP_SYNC_PRIO;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC_PRIO);
+
+	if (priv_flags & RNP500_SRIOV_VLAN_MODE) {
+		int i;
+
+		adapter->priv_flags |= RNP_PRIV_FLAG_SRIOV_VLAN_MODE;
+		if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE))
+			goto skip_setup_vf_vlan_n500;
+		/* should setup vlvf table */
+		for (i = 0; i < adapter->num_vfs; i++) {
+			if (hw->ops.set_vf_vlan_mode) {
+				if (adapter->vfinfo[i].vf_vlan)
+					hw->ops.set_vf_vlan_mode(
+						hw, adapter->vfinfo[i].vf_vlan,
+						i, true);
+
+				if (adapter->vfinfo[i].pf_vlan)
+					hw->ops.set_vf_vlan_mode(
+						hw, adapter->vfinfo[i].pf_vlan,
+						i, true);
+			}
+		}
+
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) {
+		int i;
+
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE);
+		/* clear the VLVF table */
+		for (i = 0; i < hw->max_vfs; i++) {
+			if (hw->ops.set_vf_vlan_mode)
+				hw->ops.set_vf_vlan_mode(hw, 0, i, false);
+		}
+	}
+
+	if (priv_flags & RNP500_LLDP_EN) {
+		/* enable LLDP */
+		hw->ops.set_lldp(hw, true);
+		adapter->priv_flags |= RNP_PRIV_FLAG_LLDP;
+
+	} else if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP) {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP);
+		/* disable LLDP */
+		hw->ops.set_lldp(hw, false);
+	}
+
+	/* force the link down when the interface is closed, if supported */
+	if (hw->force_cap) {
+		if (priv_flags & RNP500_FORCE_CLOSE) {
+			if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+				adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE;
+				if (hw->ops.driver_status)
+					hw->ops.driver_status(hw, true,
+						rnpgbe_driver_force_control_phy);
+			}
+		} else if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) {
+			adapter->priv_flags &= (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE);
+			if (hw->ops.driver_status)
+				hw->ops.driver_status(hw, false,
+						rnpgbe_driver_force_control_phy);
+		}
+	} else if (priv_flags & RNP500_FORCE_CLOSE) {
+		rnpgbe_err("firmware not support set this feature.\n");
+		return -1;
+	}
+
+skip_setup_vf_vlan_n500:
+	if (priv_flags & RNP500_8023_PRIO) {
+		adapter->priv_flags |= RNP_PRIV_FLAG_8023_PRIO;
+		eth_wr32(eth, RNP500_PRIORITY_EN_8023, 1);
+	} else {
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_8023_PRIO);
+		eth_wr32(eth, RNP500_PRIORITY_EN_8023, 0);
+	}
+
+	if (priv_flags & RNP500_REMAP_PRIO)
+		adapter->priv_flags |= RNP_PRIV_FLAG_REMAP_PRIO;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_REMAP_PRIO);
+
+	if (priv_flags & (RNP500_8023_PRIO | RNP500_REMAP_PRIO)) {
+		eth_wr32(eth, RNP500_PRIORITY_1_MARK, RNP500_PRIORITY_1);
+		eth_wr32(eth, RNP500_PRIORITY_0_MARK, RNP500_PRIORITY_0);
+		eth_wr32(eth, RNP500_PRIORITY_EN, 1);
+	} else {
+		eth_wr32(eth, RNP500_PRIORITY_EN, 0);
+	}
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC) {
+		if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO)
+			hw->ops.set_tcp_sync_remapping(
+				hw, adapter->tcp_sync_queue, true, true);
+		else
+			hw->ops.set_tcp_sync_remapping(
+				hw, adapter->tcp_sync_queue, true, false);
+	}
+
+	if (data_old != data_new)
+		dma_wr32(dma, RNP_DMA_CONFIG, data_new);
+	/* notify the firmware if the ft_padding bit changed */
+	if (CHK_BIT(n500_padding_enable, data_old) !=
+	    CHK_BIT(n500_padding_enable, data_new)) {
+		rnpgbe_msg_post_status(adapter, PF_FT_PADDING_STATUS);
+	}
+
+	return 0;
+}
+
+static void rnp500_get_ethtool_stats(struct net_device *netdev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &netdev->stats;
+	struct rnpgbe_ring *ring;
+	int i, j;
+	char *p = NULL;
+
+	rnpgbe_update_stats(adapter);
+
+	for (i = 0; i < RNP500_GLOBAL_STATS_LEN; i++) {
+		p = (char *)net_stats +
+		    rnp500_gstrings_net_stats[i].stat_offset;
+		data[i] = (rnp500_gstrings_net_stats[i].sizeof_stat ==
+			   sizeof(u64)) ?
+				  *(u64 *)p :
+				  *(u32 *)p;
+	}
+	for (j = 0; j < RNP500_HWSTRINGS_STATS_LEN; j++, i++) {
+		p = (char *)adapter + rnp500_hwstrings_stats[j].stat_offset;
+		data[i] =
+			(rnp500_hwstrings_stats[j].sizeof_stat == sizeof(u64)) ?
+				*(u64 *)p :
+				*(u32 *)p;
+	}
+
+	BUG_ON(RNP_NUM_TX_QUEUES != RNP_NUM_RX_QUEUES);
+
+	for (j = 0; j < RNP_NUM_TX_QUEUES; j++) {
+		int idx;
+		/* tx-ring */
+		ring = adapter->tx_ring[j];
+		if (!ring) {
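+			/*
+			 * emit zeros for every per-ring counter so data[]
+			 * stays aligned with the ethtool strings table
+			 */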
+			/* tx */
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			/* rx */
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			continue;
+		}
+		idx = ring->rnpgbe_queue_idx;
+
+		data[i++] = ring->stats.packets;
+		data[i++] = ring->stats.bytes;
+
+		data[i++] = ring->tx_stats.restart_queue;
+		data[i++] = ring->tx_stats.tx_busy;
+		data[i++] = ring->tx_stats.tx_done_old;
+		data[i++] = ring->tx_stats.clean_desc;
+		data[i++] = ring->tx_stats.poll_count;
+		data[i++] = ring->tx_stats.irq_more_count;
+
+		/* rnpgbe_tx_queue_ring_stat */
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL);
+		data[i++] = ring->next_to_clean;
+		data[i++] = ring->next_to_use;
+		data[i++] = ring->tx_stats.send_bytes;
+		data[i++] = ring->tx_stats.send_bytes_to_hw;
+		data[i++] = ring->tx_stats.todo_update;
+		data[i++] = ring->tx_stats.send_done_bytes;
+		data[i++] = ring->tx_stats.vlan_add;
+		if (ring->tx_stats.tx_next_to_clean == -1)
+			data[i++] = ring->count;
+		else
+			data[i++] = ring->tx_stats.tx_next_to_clean;
+		data[i++] = ring->tx_stats.tx_irq_miss;
+		data[i++] = ring->tx_stats.tx_equal_count;
+		data[i++] = ring->tx_stats.tx_clean_times;
+		data[i++] = ring->tx_stats.tx_clean_count;
+
+		/* rx-ring */
+		ring = adapter->rx_ring[j];
+		if (!ring) {
+			/* rx */
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			continue;
+		}
+		idx = ring->rnpgbe_queue_idx;
+		data[i++] = ring->stats.packets;
+		data[i++] = ring->stats.bytes;
+
+		data[i++] = ring->rx_stats.driver_drop_packets;
+		data[i++] = ring->rx_stats.rsc_count;
+		data[i++] = ring->rx_stats.rsc_flush;
+		data[i++] = ring->rx_stats.non_eop_descs;
+		data[i++] = ring->rx_stats.alloc_rx_page_failed;
+		data[i++] = ring->rx_stats.alloc_rx_buff_failed;
+		data[i++] = ring->rx_stats.alloc_rx_page;
+		data[i++] = ring->rx_stats.csum_err;
+		data[i++] = ring->rx_stats.csum_good;
+		data[i++] = ring->rx_stats.poll_again_count;
+		data[i++] = ring->rx_stats.vlan_remove;
+
+		/* rnpgbe_rx_queue_ring_stat */
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD);
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_TAIL);
+		data[i++] = ring->next_to_use;
+		data[i++] = ring->next_to_clean;
+		if (ring->rx_stats.rx_next_to_clean == -1)
+			data[i++] = ring->count;
+		else
+			data[i++] = ring->rx_stats.rx_next_to_clean;
+		data[i++] = ring->rx_stats.rx_irq_miss;
+		data[i++] = ring->rx_stats.rx_equal_count;
+		data[i++] = ring->rx_stats.rx_clean_times;
+		data[i++] = ring->rx_stats.rx_clean_count;
+	}
+}
+
+static const struct ethtool_ops rnp500_ethtool_ops = {
+	.get_link_ksettings = rnp500_get_link_ksettings,
+	.set_link_ksettings = rnp500_set_link_ksettings,
+	.get_drvinfo = rnp500_get_drvinfo,
+	.get_regs_len = rnp500_get_regs_len,
+	.get_regs = rnp500_get_regs,
+	.get_wol = rnpgbe_get_wol,
+	.set_wol = rnpgbe_set_wol,
+	.nway_reset = rnp500_nway_reset,
+	.get_link = ethtool_op_get_link,
+	.get_eeprom_len = rnp500_get_eeprom_len,
+	.get_eeprom = rnp500_get_eeprom,
+	.set_eeprom = rnp500_set_eeprom,
+	.get_ringparam = rnpgbe_get_ringparam,
+	.set_ringparam = rnpgbe_set_ringparam,
+	.get_pauseparam = rnp500_get_pauseparam,
+	.set_pauseparam = rnp500_set_pauseparam,
+	.get_msglevel = rnpgbe_get_msglevel,
+	.set_msglevel = rnpgbe_set_msglevel,
+	.self_test = rnpgbe_diag_test,
+	.get_strings = rnp500_get_strings,
+	.set_phys_id = rnpgbe_set_phys_id,
+	.get_sset_count = rnp500_get_sset_count,
+	.get_priv_flags = rnp500_get_priv_flags,
+	.set_priv_flags = rnp500_set_priv_flags,
+	.get_ethtool_stats = rnp500_get_ethtool_stats,
+	.get_coalesce = rnpgbe_get_coalesce,
+	.set_coalesce = rnpgbe_set_coalesce,
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+				     ETHTOOL_COALESCE_MAX_FRAMES,
+	.get_rxnfc = rnpgbe_get_rxnfc,
+	.set_rxnfc = rnpgbe_set_rxnfc,
+	.get_eee = rnpgbe_get_eee,
+	.set_eee = rnpgbe_set_eee,
+	.get_channels = rnpgbe_get_channels,
+	.set_channels = rnpgbe_set_channels,
+	.get_module_info = rnpgbe_get_module_info,
+	.get_module_eeprom = rnpgbe_get_module_eeprom,
+	.get_ts_info = rnpgbe_get_ts_info,
+	.get_rxfh_indir_size = rnpgbe_rss_indir_size,
+	.get_rxfh_key_size = rnpgbe_get_rxfh_key_size,
+	.get_rxfh = rnpgbe_get_rxfh,
+	.set_rxfh = rnpgbe_set_rxfh,
+	.get_dump_flag = rnpgbe_get_dump_flag,
+	.get_dump_data = rnpgbe_get_dump_data,
+	.set_dump = rnpgbe_set_dump,
+	.flash_device = rnpgbe_flash_device,
+};
+
+void rnpgbe_set_ethtool_hw_ops_n500(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &rnp500_ethtool_ops;
+}
+
+/**
+ * rnpgbe_get_thermal_sensor_data_hw_ops_n500 - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ * Reads the thermal sensor temperature into hw->thermal_sensor_data.
+ **/
+static s32 rnpgbe_get_thermal_sensor_data_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	int voltage = 0;
+	struct rnpgbe_thermal_sensor_data *data = &hw->thermal_sensor_data;
+
+	data->sensor[0].temp = rnpgbe_mbx_get_temp(hw, &voltage);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_init_thermal_sensor_thresh_hw_ops_n500 - Inits thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ * Inits the thermal sensor thresholds to fixed defaults and saves the
+ * threshold and location values into hw->thermal_sensor_data.
+ **/
+static s32 rnpgbe_init_thermal_sensor_thresh_hw_ops_n500(struct rnpgbe_hw *hw)
+{
+	u8 i;
+	struct rnpgbe_thermal_sensor_data *data = &hw->thermal_sensor_data;
+
+	for (i = 0; i < RNPGBE_MAX_SENSORS; i++) {
+		data->sensor[i].location = i + 1;
+		data->sensor[i].caution_thresh = 90;
+		data->sensor[i].max_op_thresh = 100;
+	}
+
+	return 0;
+}
+
+static struct rnpgbe_hw_operations hw_ops_n500 = {
+	.init_hw = &rnpgbe_init_hw_ops_n500,
+	.reset_hw = &rnpgbe_reset_hw_ops_n500,
+	.start_hw = &rnpgbe_start_hw_ops_n500,
+	.set_mtu = &rnpgbe_set_mtu_hw_ops_n500,
+	.set_vlan_filter_en = &rnpgbe_set_vlan_filter_en_hw_ops_n500,
+	.set_vlan_filter = &rnpgbe_set_vlan_filter_hw_ops_n500,
+	.set_veb_vlan_mask = &rnpgbe_set_veb_vlan_mask_hw_ops_n500,
+	.set_vf_vlan_filter = &rnpgbe_set_vf_vlan_filter_hw_ops_n500,
+	.set_vlan_strip = &rnpgbe_set_vlan_strip_hw_ops_n500,
+	.set_mac = &rnpgbe_set_mac_hw_ops_n500,
+	.set_rx_mode = &rnpgbe_set_rx_mode_hw_ops_n500,
+	.set_rar_with_vf = &rnpgbe_set_rar_with_vf_hw_ops_n500,
+	.clr_rar = &rnpgbe_clr_rar_hw_ops_n500,
+	.clr_rar_all = &rnpgbe_clr_rar_all_hw_ops_n500,
+	.clr_vlan_veb = &rnpgbe_clr_vlan_veb_hw_ops_n500,
+	.set_txvlan_mode = &rnpgbe_set_txvlan_mode_hw_ops_n500,
+	.set_fcs_mode = &rnpgbe_set_fcs_mode_hw_ops_n500,
+	.set_vxlan_port = &rnpgbe_set_vxlan_port_hw_ops_n500,
+	.set_vxlan_mode = &rnpgbe_set_vxlan_mode_hw_ops_n500,
+	.set_mac_rx = &rnpgbe_set_mac_rx_hw_ops_n500,
+	.set_mac_speed = &rnpgbe_set_mac_speed_hw_ops_n500,
+	.set_rx_hash = &rnpgbe_set_rx_hash_hw_ops_n500,
+	.set_pause_mode = &rnpgbe_set_pause_mode_hw_ops_n500,
+	.get_pause_mode = &rnpgbe_get_pause_mode_hw_ops_n500,
+	.update_hw_info = &rnpgbe_update_hw_info_hw_ops_n500,
+	.update_rx_drop = &rnpgbe_update_hw_rx_drop_hw_ops_n500,
+	.update_sriov_info = &rnpgbe_update_sriov_info_hw_ops_n500,
+	.set_sriov_status = &rnpgbe_set_sriov_status_hw_ops_n500,
+	.set_sriov_vf_mc = &rnpgbe_set_sriov_vf_mc_hw_ops_n500,
+	.init_rx_addrs = &rnpgbe_init_rx_addrs_hw_ops_n500,
+	.clr_vfta = &rnpgbe_clr_vfta_hw_ops_n500,
+	.set_rss_hfunc = &rnpgbe_set_rss_hfunc_hw_ops_n500,
+	.set_rss_key = &rnpgbe_set_rss_key_hw_ops_n500,
+	.set_rss_table = &rnpgbe_set_rss_table_hw_ops_n500,
+	.update_hw_status = &rnpgbe_update_hw_status_hw_ops_n500,
+	.set_mbx_link_event = &rnpgbe_set_mbx_link_event_hw_ops_n500,
+	.set_mbx_ifup = &rnpgbe_set_mbx_ifup_hw_ops_n500,
+	.check_link = &rnpgbe_check_mac_link_hw_ops_n500,
+	.setup_link = &rnpgbe_setup_mac_link_hw_ops_n500,
+	.clean_link = &rnpgbe_clean_link_hw_ops_n500,
+	.get_link_capabilities = &rnpgbe_get_link_capabilities_hw_ops_n500,
+	.set_layer2_remapping = &rnpgbe_set_layer2_hw_ops_n500,
+	.clr_layer2_remapping = &rnpgbe_clr_layer2_hw_ops_n500,
+	.clr_all_layer2_remapping = &rnpgbe_clr_all_layer2_hw_ops_n500,
+	.set_tuple5_remapping = &rnpgbe_set_tuple5_hw_ops_n500,
+	.clr_tuple5_remapping = &rnpgbe_clr_tuple5_hw_ops_n500,
+	.clr_all_tuple5_remapping = &rnpgbe_clr_all_tuple5_hw_ops_n500,
+	.set_tcp_sync_remapping = &rnpgbe_set_tcp_sync_hw_ops_n500,
+	.set_rx_skip = &rnpgbe_set_rx_skip_hw_ops_n500,
+	.set_outer_vlan_type = &rnpgbe_set_outer_vlan_type_hw_ops_n500,
+	.setup_ethtool = &rnpgbe_set_ethtool_hw_ops_n500,
+	.get_thermal_sensor_data = &rnpgbe_get_thermal_sensor_data_hw_ops_n500,
+	.init_thermal_sensor_thresh =
+		&rnpgbe_init_thermal_sensor_thresh_hw_ops_n500,
+	.phy_read_reg = &rnpgbe_phy_read_reg_hw_ops_n500,
+	.phy_write_reg = &rnpgbe_phy_write_reg_hw_ops_n500,
+	.setup_wol = &rnpgbe_setup_wol_hw_ops_n500,
+	.set_vf_vlan_mode = &rnpgbe_set_vf_vlan_mode_hw_ops_n500,
+	.driver_status = &rnpgbe_driver_status_hw_ops_n500,
+	.setup_eee = &rnpgbe_setup_eee_hw_ops_n500,
+	.set_eee_mode = &rnpgbe_set_eee_mode_hw_ops_n500,
+	.reset_eee_mode = &rnpgbe_reset_eee_mode_hw_ops_n500,
+	.set_eee_timer = &rnpgbe_set_eee_timer_hw_ops_n500,
+	.set_eee_pls = &rnpgbe_set_eee_pls_hw_ops_n500,
+	.get_lpi_status = &rnpgbe_get_lpi_status_hw_ops_n500,
+	.get_ncsi_mac = &rnpgbe_get_ncsi_mac_hw_ops_n500,
+	.get_ncsi_vlan = &rnpgbe_get_ncsi_vlan_hw_ops_n500,
+	.set_lldp = &rnpgbe_set_lldp_hw_ops_n500,
+	.get_lldp = &rnpgbe_get_lldp_hw_ops_n500,
+};
+
+static void rnpgbe_mac_set_rx_n500(struct rnpgbe_mac_info *mac, bool status)
+{
+	u32 value = mac_rd32(mac, GMAC_CONTROL);
+
+	if (status)
+		value |= GMAC_CONTROL_TE | GMAC_CONTROL_RE;
+	else
+		value &= ~(GMAC_CONTROL_RE);
+
+	mac_wr32(mac, GMAC_CONTROL, value);
+	value = mac_rd32(mac, GMAC_FRAME_FILTER);
+	mac_wr32(mac, GMAC_FRAME_FILTER, value | 1);
+}
+
+static void rnpgbe_mac_set_speed_n500(struct rnpgbe_mac_info *mac, bool link,
+				      u32 speed, bool duplex)
+{
+#define SPEED_MASK (RNP_DM_MASK | RNP_FES_MASK | RNP_PS_MASK | RNP_LUD_MASK)
+	u32 value = mac_rd32(mac, GMAC_CONTROL);
+
+	value &= (~SPEED_MASK);
+
+	if (link)
+		value |= RNP_LUD_MASK;
+
+	if (duplex)
+		value |= RNP_DM_MASK;
+
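+	/*
+	 * With neither speed bit set the MAC runs at 1000 Mb/s; PS selects
+	 * the 10/100 interface and FES then picks 100 Mb/s over 10 Mb/s
+	 * (assuming the usual DWC GMAC control-register layout).
+	 */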
+	switch (speed) {
+	case RNP_LINK_SPEED_100_FULL:
+		value |= RNP_PS_MASK;
+		value |= RNP_FES_MASK;
+		break;
+	case RNP_LINK_SPEED_10_FULL:
+		value |= RNP_PS_MASK;
+		break;
+	}
+
+	mac_wr32(mac, GMAC_CONTROL, value);
+}
+
+static void rnpgbe_mac_fcs_n500(struct rnpgbe_mac_info *mac, bool status)
+{
+#define RNP500_CST_MASK BIT(25)
+	u32 value = mac_rd32(mac, GMAC_CONTROL);
+
+	if (status)
+		value &= (~RNP500_CST_MASK);
+	else
+		value |= (RNP500_CST_MASK);
+	mac_wr32(mac, GMAC_CONTROL, value);
+}
+
+/**
+ *  rnpgbe_mac_fc_mode_n500 - Enable flow control
+ *  @mac: pointer to the MAC information structure
+ *
+ *  Enable flow control according to the current settings.
+ **/
+static s32 rnpgbe_mac_fc_mode_n500(struct rnpgbe_mac_info *mac)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)mac->back;
+	s32 ret_val = 0;
+	unsigned int flow = GMAC_FLOW_CTRL_UP;
+
+	flow = mac_rd32(mac, GMAC_FLOW_CTRL);
+	flow &= GMAC_FLOW_CTRL_UP;
+
+	/*
+	 * A pause time of zero means flow control was never configured,
+	 * so reject the request.
+	 */
+	if (!hw->fc.pause_time) {
+		ret_val = RNP_ERR_INVALID_LINK_SETTINGS;
+		goto out;
+	}
+
+	switch (hw->fc.current_mode) {
+	case rnpgbe_fc_none:
+		/*
+		 * Flow control is disabled by software override or autoneg.
+		 * The code below will actually disable it in the HW.
+		 */
+		break;
+	case rnpgbe_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is
+		 * disabled by software override. Since there really
+		 * isn't a way to advertise that we are capable of RX
+		 * Pause ONLY, we will advertise that we support both
+		 * symmetric and asymmetric Rx PAUSE.  Later, we will
+		 * disable the adapter's ability to send PAUSE frames.
+		 */
+		flow |= GMAC_FLOW_CTRL_RFE;
+		break;
+	case rnpgbe_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled by software override.
+		 */
+		flow |= GMAC_FLOW_CTRL_TFE;
+		break;
+	case rnpgbe_fc_full:
+		/* Flow control (both Rx and Tx) is enabled by SW override. */
+		flow |= GMAC_FLOW_CTRL_RFE;
+		flow |= GMAC_FLOW_CTRL_TFE;
+		break;
+	default:
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+		ret_val = RNP_ERR_CONFIG;
+		goto out;
+	}
+
+	flow |= (hw->fc.pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+	mac_wr32(mac, GMAC_FLOW_CTRL, flow);
+
+out:
+	return ret_val;
+}
+
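+/*
+ * poll_free_mdio - poll an MII register until the busy bits clear
+ * @addr: register address to poll
+ * @mask: busy mask to wait on
+ * @count: maximum number of polls before giving up
+ *
+ * Returns true if the bits never cleared (timeout), false on success.
+ */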
+static bool poll_free_mdio(u8 __iomem *addr, u32 mask, int count)
+{
+	unsigned int value;
+	int con = 0;
+
+	do {
+		value = rnpgbe_rd_reg(addr);
+		usleep_range(10, 100);
+		con++;
+	} while ((value & mask) && (con < count));
+
+	return !!(con >= count);
+}
+
+static int rnpgbe_mdio_read(struct rnpgbe_mac_info *mac, int phyreg)
+{
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+#define MII_DATA_MASK GENMASK(15, 0)
+
+	unsigned int mii_address = mac->mii.addr;
+	unsigned int mii_data = mac->mii.data;
+	u32 value = MII_BUSY;
+	int data = 0;
+	int phyaddr = mac->phy_addr;
+
+	value |= (phyaddr << mac->mii.addr_shift) & mac->mii.addr_mask;
+	value |= (phyreg << mac->mii.reg_shift) & mac->mii.reg_mask;
+	value |= (mac->clk_csr << mac->mii.clk_csr_shift) &
+		 mac->mii.clk_csr_mask;
+
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+
+	mac_wr32(mac, mii_data, data);
+	mac_wr32(mac, mii_address, value);
+
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+	/* Read the data from the MII data register */
+	data = (int)mac_rd32(mac, mii_data) & MII_DATA_MASK;
+
+	return data;
+}
+
+static void rnpgbe_mac_check_link_n500(struct rnpgbe_mac_info *mac,
+				       rnpgbe_link_speed *speed,
+				       bool *link_up,
+				       bool link_up_wait_to_complete)
+{
+	struct rnpgbe_hw *hw = (struct rnpgbe_hw *)mac->back;
+	/* resolve link, speed and duplex by reading the PHY over MDIO */
+	u32 data;
+#define AUTONEGOTIATION_COMPLETE (0x20)
+#define LINK_IS_UP (0x04)
+#define TEST_PHY (AUTONEGOTIATION_COMPLETE | LINK_IS_UP)
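+	/*
+	 * Register 1 is the standard MII BMSR: 0x20 is autonegotiation
+	 * complete and 0x04 is link up.
+	 */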
+
+	data = rnpgbe_mdio_read(mac, 1);
+	if ((data & TEST_PHY) == TEST_PHY) {
+		data = rnpgbe_mdio_read(mac, 0);
+#define DUPLEX_MODE (0x100)
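+		/*
+		 * Register 0 is the standard MII BMCR: 0x100 is the full
+		 * duplex bit, 0x40 the 1000 Mb/s speed bit and 0x2000 the
+		 * 100 Mb/s speed bit.
+		 */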
+		if (data & DUPLEX_MODE) {
+			if (data & 0x40) {
+				*speed = RNP_LINK_SPEED_1GB_FULL;
+				hw->speed = SPEED_1000;
+			} else if (data & 0x2000) {
+				*speed = RNP_LINK_SPEED_100_FULL;
+				hw->speed = SPEED_100;
+			} else {
+				*speed = RNP_LINK_SPEED_10_FULL;
+				hw->speed = SPEED_10;
+			}
+		} else {
+			if (data & 0x40) {
+				*speed = RNP_LINK_SPEED_1GB_HALF;
+				hw->speed = SPEED_1000;
+			} else if (data & 0x2000) {
+				*speed = RNP_LINK_SPEED_100_HALF;
+				hw->speed = SPEED_100;
+			} else {
+				*speed = RNP_LINK_SPEED_10_HALF;
+				hw->speed = SPEED_10;
+			}
+		}
+		*link_up = true;
+		hw->link = true;
+	} else {
+		*link_up = false;
+		hw->link = false;
+		*speed = RNP_LINK_SPEED_UNKNOWN;
+	}
+}
+
+static void rnpgbe_mac_set_mac_n500(struct rnpgbe_mac_info *mac,
+				    u8 *addr, int index)
+{
+	u32 rar_low, rar_high = 0;
+
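+	/*
+	 * Bytes 0-3 of the address fill the low register; bytes 4-5 plus
+	 * the address-valid bit fill the high register.
+	 */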
+	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) |
+		   ((u32)addr[3] << 24));
+	rar_high = RNP_RAH_AV | ((u32)addr[4] | (u32)addr[5] << 8);
+	mac_wr32(mac, RNP500_MAC_UNICAST_HIGH(index), rar_high);
+	mac_wr32(mac, RNP500_MAC_UNICAST_LOW(index), rar_low);
+}
+
+static int rnpgbe_mac_mdio_read_n500(struct rnpgbe_mac_info *mac,
+				     u32 phyreg,
+				     u32 *regvalue)
+{
+	unsigned int mii_address = mac->mii.addr;
+	unsigned int mii_data = mac->mii.data;
+	u32 value = MII_BUSY;
+	int data = 0;
+	int phyaddr = mac->phy_addr;
+
+	value |= (phyaddr << mac->mii.addr_shift) & mac->mii.addr_mask;
+	value |= (phyreg << mac->mii.reg_shift) & mac->mii.reg_mask;
+	value |= (mac->clk_csr << mac->mii.clk_csr_shift) &
+		 mac->mii.clk_csr_mask;
+
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+
+	mac_wr32(mac, mii_data, data);
+	mac_wr32(mac, mii_address, value);
+
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+	/* Read the data from the MII data register */
+	data = (int)mac_rd32(mac, mii_data) & MII_DATA_MASK;
+
+	*regvalue = data;
+
+	return data;
+}
+
+static int rnpgbe_mac_mdio_write_n500(struct rnpgbe_mac_info *mac,
+				      int phyreg,
+				      int phydata)
+{
+	unsigned int mii_address = mac->mii.addr;
+	unsigned int mii_data = mac->mii.data;
+	u32 value = MII_BUSY;
+	int data = phydata;
+	int phyaddr = mac->phy_addr;
+
+	value |= (phyaddr << mac->mii.addr_shift) & mac->mii.addr_mask;
+	value |= (phyreg << mac->mii.reg_shift) & mac->mii.reg_mask;
+
+	value |= (mac->clk_csr << mac->mii.clk_csr_shift) &
+		 mac->mii.clk_csr_mask;
+	value |= MII_WRITE;
+
+	/* Wait until any existing MII operation is complete */
+	if (poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100))
+		return -EBUSY;
+	/* Set the MII address register to write */
+	mac_wr32(mac, mii_data, data);
+	mac_wr32(mac, mii_address, value);
+
+	/* Wait until any existing MII operation is complete */
+	return poll_free_mdio(mac->mac_addr + mii_address, MII_BUSY, 100);
+}
+
+static void rnpgbe_mac_pmt_n500(struct rnpgbe_mac_info *mac,
+				u32 mode, bool ncsi_en)
+{
+	unsigned int pmt = 0;
+
+	if (mode & RNP_WUFC_MAG) {
+		rnpgbe_dbg("GMAC: WOL Magic frame\n");
+		pmt |= magic_pkt_en;
+	}
+	if (mode & RNP_WUFC_EX) {
+		rnpgbe_dbg("GMAC: WOL on global unicast\n");
+		pmt |= global_unicast | wake_up_frame_en;
+	}
+	/* only power down the MAC when NCSI is not in use */
+	if (!ncsi_en)
+		pmt |= power_down;
+
+	mac_wr32(mac, GMAC_PMT, pmt);
+}
+
+static void rnpgbe_mac_set_eee_mode_n500(struct rnpgbe_mac_info *mac,
+					 bool en_tx_lpi_clockgating)
+{
+	u32 value = 0;
+
+	/* TODO: handle en_tx_lpi_clockgating */
+
+	/*
+	 * Enable the link status receive on RGMII, SGMII or SMII
+	 * receive path and instruct the transmit to enter in LPI
+	 * state.
+	 */
+	value |= LPI_CTRL_STATUS_PLS;
+	value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+	mac_wr32(mac, GMAC_LPI_CTRL_STATUS, value);
+}
+
+static void rnpgbe_mac_reset_eee_mode_n500(struct rnpgbe_mac_info *mac)
+{
+	u32 value = 0;
+
+	value |= LPI_CTRL_STATUS_PLS;
+	value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
+	mac_wr32(mac, GMAC_LPI_CTRL_STATUS, value);
+}
+
+static void rnpgbe_mac_set_eee_timer_n500(struct rnpgbe_mac_info *mac,
+					  int ls, int tw)
+{
+	int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
+
+	/* Program the timers in the LPI timer control register:
+	 * LS: minimum time (ms) for which the link
+	 *  status from PHY should be ok before transmitting
+	 *  the LPI pattern.
+	 * TW: minimum time (us) for which the core waits
+	 *  after it has stopped transmitting the LPI pattern.
+	 */
+	mac_wr32(mac, GMAC_LPI_TIMER_CTRL, value);
+}
+
+static void rnpgbe_mac_set_eee_pls_n500(struct rnpgbe_mac_info *mac, int link)
+{
+	u32 value = 0;
+
+	value = mac_rd32(mac, GMAC_LPI_CTRL_STATUS);
+
+	if (link)
+		value |= LPI_CTRL_STATUS_PLS;
+	else
+		value &= ~LPI_CTRL_STATUS_PLS;
+
+	mac_wr32(mac, GMAC_LPI_CTRL_STATUS, value);
+}
+
+static u32 rnpgbe_mac_get_lpi_status_n500(struct rnpgbe_mac_info *mac)
+{
+	if (mac_rd32(mac, GMAC_INT_STATUS) & GMAC_INT_STATUS_LPIIS)
+		return mac_rd32(mac, GMAC_LPI_CTRL_STATUS);
+	else
+		return 0;
+}
+
+static struct rnpgbe_mac_operations mac_ops_n500 = {
+	.set_mac_rx = &rnpgbe_mac_set_rx_n500,
+	.set_mac_speed = &rnpgbe_mac_set_speed_n500,
+	.set_mac_fcs = &rnpgbe_mac_fcs_n500,
+	.set_fc_mode = &rnpgbe_mac_fc_mode_n500,
+	.check_link = &rnpgbe_mac_check_link_n500,
+	.set_mac = &rnpgbe_mac_set_mac_n500,
+	.mdio_write = &rnpgbe_mac_mdio_write_n500,
+	.mdio_read = &rnpgbe_mac_mdio_read_n500,
+	.pmt = &rnpgbe_mac_pmt_n500,
+	.set_eee_mode = rnpgbe_mac_set_eee_mode_n500,
+	.reset_eee_mode = rnpgbe_mac_reset_eee_mode_n500,
+	.set_eee_timer = rnpgbe_mac_set_eee_timer_n500,
+	.set_eee_pls = rnpgbe_mac_set_eee_pls_n500,
+	.get_lpi_status = rnpgbe_mac_get_lpi_status_n500,
+};
+
+static s32 rnpgbe_get_invariants_n500(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_nic_info *nic = &hw->nic;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	int i;
+
+	nic->nic_base_addr = hw->hw_addr + RNP500_NIC_BASE;
+	/* setup dma info */
+	dma->dma_base_addr = hw->hw_addr;
+	dma->dma_ring_addr = hw->hw_addr + RNP500_RING_BASE;
+	dma->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+	dma->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	dma->back = hw;
+	memcpy(&hw->dma.ops, &dma_ops_n500, sizeof(hw->dma.ops));
+
+	/* setup eth info */
+	memcpy(&hw->eth.ops, &eth_ops_n500, sizeof(hw->eth.ops));
+
+	eth->eth_base_addr = hw->hw_addr + RNP500_ETH_BASE;
+	eth->back = hw;
+	eth->mc_filter_type = 4;
+	eth->mcft_size = RNP_N500_MC_TBL_SIZE;
+	eth->vft_size = RNP_N500_VFT_TBL_SIZE;
+	eth->num_rar_entries = RNP_N500_RAR_ENTRIES + NCSI_RAR_NUM;
+	eth->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	eth->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+
+	/* setup mac info */
+	memcpy(&hw->mac.ops, &mac_ops_n500, sizeof(hw->mac.ops));
+	mac->mac_addr = hw->hw_addr + RNP500_MAC_BASE;
+	mac->back = hw;
+	mac->mac_type = mac_dwc_g;
+	/* TODO: move these into the eth structure */
+	mac->mc_filter_type = 4;
+	mac->mcft_size = 2;
+	mac->vft_size = 1;
+	mac->num_rar_entries = RNP_N500_RAR_ENTRIES;
+	mac->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	mac->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+	mac->max_msix_vectors = RNP_N500_MSIX_VECTORS;
+
+	mac->mii.addr = GMAC_MII_ADDR;
+	mac->mii.data = GMAC_MII_DATA;
+	mac->mii.addr_shift = 11;
+	mac->mii.addr_mask = 0x0000F800;
+	mac->mii.reg_shift = 6;
+	mac->mii.reg_mask = 0x000007C0;
+	mac->mii.clk_csr_shift = 2;
+	mac->mii.clk_csr_mask = GENMASK(5, 2);
+	mac->clk_csr = 0x02; /* csr 25M */
+
+	mac->phy_addr = 0x11;
+
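+	/*
+	 * usecstocount presumably scales microsecond values to AXI clock
+	 * ticks (MHz); fall back to 125 when the firmware does not report
+	 * axi_mhz.
+	 */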
+	if (!hw->axi_mhz)
+		hw->usecstocount = 125;
+	else
+		hw->usecstocount = hw->axi_mhz;
+
+	printk(KERN_DEBUG "now hw->usecstocount is %d\n", hw->usecstocount);
+
+	hw->feature_flags |=
+		RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM |
+		RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO |
+		RNP_NET_FEATURE_VLAN_FILTER | RNP_NET_FEATURE_VLAN_OFFLOAD |
+		RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_RX_HASH |
+		RNP_NET_FEATURE_USO | RNP_NET_FEATURE_RX_FCS |
+		RNP_NET_FEATURE_STAG_FILTER | RNP_NET_FEATURE_STAG_OFFLOAD;
+	/* may be supported in the future */
+	hw->feature_flags |= RNP_HW_FEATURE_EEE;
+
+	/* setup some fdir resource */
+	hw->min_length = RNP_MIN_MTU;
+	hw->max_length = RNP500_MAX_JUMBO_FRAME_SIZE;
+	hw->max_msix_vectors = RNP_N500_MSIX_VECTORS;
+	hw->num_rar_entries = RNP_N500_RAR_ENTRIES;
+	hw->fdir_mode = fdir_mode_tuple5;
+	hw->max_vfs = RNP_N500_MAX_VF;
+	hw->max_vfs_noari = 1;
+	hw->layer2_count = RNP500_MAX_LAYER2_FILTERS - 1;
+	hw->tuple5_count = RNP500_MAX_TUPLE5_FILTERS - 1;
+
+	/* the n500 supports magic-packet WoL */
+	hw->wol_supported = WAKE_MAGIC;
+	hw->num_vebvlan_entries = 8;
+	hw->default_rx_queue = 0;
+	hw->rss_indir_tbl_num = RNP_N500_RSS_TBL_NUM;
+	hw->rss_tc_tbl_num = RNP_N500_RSS_TC_TBL_NUM;
+	/* the VF uses the last vfnum */
+	hw->vfnum = RNP_N500_MAX_VF - 1;
+
+	hw->sriov_ring_limit = 1;
+	hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N500;
+
+	hw->veb_ring = RNP_N500_MAX_RX_QUEUES - 1;
+
+	memcpy(&hw->ops, &hw_ops_n500, sizeof(hw->ops));
+	hw->supported_link = RNP_LINK_SPEED_1GB_FULL;
+	mbx->mbx_feature |= MBX_FEATURE_NO_ZERO;
+
+	/* mbx setup */
+	mbx->vf2pf_mbox_vec_base = 0x28900;
+	mbx->cpu2pf_mbox_vec = 0x28b00;
+	mbx->pf_vf_shm_base = 0x29000;
+	mbx->mbx_mem_size = 64;
+	mbx->pf2vf_mbox_ctrl_base = 0x2a100;
+	mbx->pf_vf_mbox_mask_lo = 0x2a200;
+	mbx->pf_vf_mbox_mask_hi = 0;
+	mbx->cpu_pf_shm_base = 0x2d000;
+	mbx->pf2cpu_mbox_ctrl = 0x2e000;
+	mbx->cpu_pf_mbox_mask = 0x2e200;
+	mbx->cpu_vf_share_ram = 0x2b000;
+	mbx->share_size = 512;
+
+	adapter->priv_flags |= RNP_PRIV_FLAG_PAUSE_OWN;
+	adapter->drop_time = 100;
+
+	/* initialize the default pause flow settings, starting from auto */
+	hw->fc.requested_mode = PAUSE_AUTO;
+	hw->fc.pause_time = RNP_DEFAULT_FCPAUSE;
+	hw->autoneg = 1;
+
+	hw->tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+	for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) {
+		hw->fc.high_water[i] = RNP500_DEFAULT_HIGH_WATER;
+		hw->fc.low_water[i] = RNP500_DEFAULT_LOW_WATER;
+	}
+	hw->eeprom.word_size = 10;
+
+	return 0;
+}
+
+static s32 rnpgbe_get_invariants_n210(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mac_info *mac = &hw->mac;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	struct rnpgbe_nic_info *nic = &hw->nic;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+	int i;
+
+	nic->nic_base_addr = hw->hw_addr + RNP500_NIC_BASE;
+	/* setup dma info */
+	dma->dma_base_addr = hw->hw_addr;
+	dma->dma_ring_addr = hw->hw_addr + RNP500_RING_BASE;
+	dma->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+	dma->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	dma->back = hw;
+	memcpy(&hw->dma.ops, &dma_ops_n500, sizeof(hw->dma.ops));
+
+	/* setup eth info */
+	memcpy(&hw->eth.ops, &eth_ops_n500, sizeof(hw->eth.ops));
+
+	eth->eth_base_addr = hw->hw_addr + RNP500_ETH_BASE;
+	eth->back = hw;
+	eth->mc_filter_type = 4;
+	eth->mcft_size = RNP_N500_MC_TBL_SIZE;
+	eth->vft_size = RNP_N500_VFT_TBL_SIZE;
+	eth->num_rar_entries = RNP_N500_RAR_ENTRIES + NCSI_RAR_NUM;
+	eth->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	eth->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+
+	/* setup mac info */
+	memcpy(&hw->mac.ops, &mac_ops_n500, sizeof(hw->mac.ops));
+	mac->mac_addr = hw->hw_addr + RNP500_MAC_BASE;
+	mac->back = hw;
+	mac->mac_type = mac_dwc_g;
+	/* TODO: move these into the eth structure */
+	mac->mc_filter_type = 4;
+	mac->mcft_size = 2;
+	mac->vft_size = 1;
+	mac->num_rar_entries = RNP_N500_RAR_ENTRIES;
+	mac->max_rx_queues = RNP_N500_MAX_RX_QUEUES;
+	mac->max_tx_queues = RNP_N500_MAX_TX_QUEUES;
+	mac->max_msix_vectors = RNP_N500_MSIX_VECTORS;
+
+	mac->mii.addr = GMAC_MII_ADDR;
+	mac->mii.data = GMAC_MII_DATA;
+	mac->mii.addr_shift = 11;
+	mac->mii.addr_mask = 0x0000F800;
+	mac->mii.reg_shift = 6;
+	mac->mii.reg_mask = 0x000007C0;
+	mac->mii.clk_csr_shift = 2;
+	mac->mii.clk_csr_mask = GENMASK(5, 2);
+	mac->clk_csr = 0x02; /* csr 25M */
+
+	mac->phy_addr = 0x11;
+
+	if (!hw->axi_mhz)
+		hw->usecstocount = 62;
+	else
+		hw->usecstocount = hw->axi_mhz;
+
+	hw->feature_flags |=
+		RNP_NET_FEATURE_SG | RNP_NET_FEATURE_TX_CHECKSUM |
+		RNP_NET_FEATURE_RX_CHECKSUM | RNP_NET_FEATURE_TSO |
+		RNP_NET_FEATURE_VLAN_FILTER | RNP_NET_FEATURE_VLAN_OFFLOAD |
+		RNP_NET_FEATURE_RX_NTUPLE_FILTER | RNP_NET_FEATURE_RX_HASH |
+		RNP_NET_FEATURE_USO | RNP_NET_FEATURE_RX_FCS |
+		RNP_NET_FEATURE_STAG_FILTER | RNP_NET_FEATURE_STAG_OFFLOAD;
+
+	/* setup some fdir resource */
+	hw->min_length = RNP_MIN_MTU;
+	hw->max_length = RNP500_MAX_JUMBO_FRAME_SIZE;
+	hw->max_msix_vectors = RNP_N500_MSIX_VECTORS;
+	hw->num_rar_entries = RNP_N500_RAR_ENTRIES;
+	hw->fdir_mode = fdir_mode_tuple5;
+	hw->max_vfs = RNP_N500_MAX_VF;
+	hw->max_vfs_noari = 1;
+	hw->layer2_count = RNP500_MAX_LAYER2_FILTERS - 1;
+	hw->tuple5_count = RNP500_MAX_TUPLE5_FILTERS - 1;
+
+	/* the n210 supports magic-packet WoL */
+	hw->wol_supported = WAKE_MAGIC;
+
+	hw->num_vebvlan_entries = 8;
+	hw->default_rx_queue = 0;
+	hw->rss_indir_tbl_num = RNP_N500_RSS_TBL_NUM;
+	hw->rss_tc_tbl_num = RNP_N500_RSS_TC_TBL_NUM;
+	/* the VF uses the last vfnum */
+	hw->vfnum = RNP_N500_MAX_VF - 1;
+
+	hw->sriov_ring_limit = 1;
+	hw->max_pf_macvlans = RNP_MAX_PF_MACVLANS_N500;
+	hw->veb_ring = RNP_N500_MAX_RX_QUEUES - 1;
+	memcpy(&hw->ops, &hw_ops_n500, sizeof(hw->ops));
+	hw->supported_link = RNP_LINK_SPEED_1GB_FULL;
+	mbx->mbx_feature |= MBX_FEATURE_NO_ZERO;
+
+	/* mbx setup */
+	mbx->vf2pf_mbox_vec_base = 0x29200;
+	mbx->cpu2pf_mbox_vec = 0x29400;
+	mbx->pf_vf_shm_base = 0x29900;
+	mbx->mbx_mem_size = 64;
+	mbx->pf2vf_mbox_ctrl_base = 0x2aa00;
+	mbx->pf_vf_mbox_mask_lo = 0x2ab00;
+	mbx->pf_vf_mbox_mask_hi = 0;
+	mbx->cpu_pf_shm_base = 0x2d900;
+	mbx->pf2cpu_mbox_ctrl = 0x2e900;
+	mbx->cpu_pf_mbox_mask = 0x2eb00;
+	mbx->cpu_vf_share_ram = 0x2b900;
+	mbx->share_size = 512;
+
+	adapter->priv_flags |= RNP_PRIV_FLAG_PAUSE_OWN;
+	adapter->drop_time = 100;
+
+	/* initialize the default pause flow settings */
+	hw->fc.requested_mode = PAUSE_AUTO;
+	hw->fc.pause_time = RNP_DEFAULT_FCPAUSE;
+	hw->autoneg = 1;
+
+	/* we start from auto mode */
+	hw->tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+	for (i = 0; i < RNP_MAX_TRAFFIC_CLASS; i++) {
+		hw->fc.high_water[i] = RNP500_DEFAULT_HIGH_WATER;
+		hw->fc.low_water[i] = RNP500_DEFAULT_LOW_WATER;
+	}
+	hw->eeprom.word_size = 10;
+
+	return 0;
+}
+
+struct rnpgbe_info rnpgbe_n500_info = {
+	.one_pf_with_two_dma = false,
+	.total_queue_pair_cnts = RNP_N500_MAX_TX_QUEUES,
+	.adapter_cnt = 1,
+	.rss_type = rnpgbe_rss_n500,
+	.hw_type = rnpgbe_hw_n500,
+	.get_invariants = &rnpgbe_get_invariants_n500,
+	.mac_ops = &mac_ops_n500,
+	.eeprom_ops = NULL,
+	.mbx_ops = &rnpgbe_mbx_ops_generic,
+};
+
+struct rnpgbe_info rnpgbe_n210_info = {
+	.one_pf_with_two_dma = false,
+	.total_queue_pair_cnts = RNP_N500_MAX_TX_QUEUES,
+	.adapter_cnt = 1,
+	.rss_type = rnpgbe_rss_n500,
+	.hw_type = rnpgbe_hw_n210,
+	.get_invariants = &rnpgbe_get_invariants_n210,
+	.mac_ops = &mac_ops_n500,
+	.eeprom_ops = NULL,
+	.mbx_ops = &rnpgbe_mbx_ops_generic,
+};
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.c
new file mode 100644
index 0000000000000..fe4fa7518b732
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe.h"
+#include "rnpgbe_common.h"
+#include "rnpgbe_mbx.h"
+
+unsigned int rnpgbe_loglevel = 0x00;
+module_param(rnpgbe_loglevel, uint, 0600);
+
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h
new file mode 100644
index 0000000000000..e1ab95213fb09
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_common.h
@@ -0,0 +1,384 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_COMMON_H_
+#define _RNPGBE_COMMON_H_
+
+#include 
+#include 
+#include "rnpgbe_type.h"
+#include "rnpgbe.h"
+#include "rnpgbe_regs.h"
+
+struct rnpgbe_adapter;
+
+#define TRACE() printk(KERN_DEBUG "==[ %s %d ] ==\n", __func__, __LINE__)
+
+#ifdef CONFIG_RNP_RX_DEBUG
+#define rx_debug_printk printk
+#define rx_buf_dump buf_dump
+#define rx_dbg(fmt, args...)                                                   \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else /* CONFIG_RNP_RX_DEBUG */
+#define rx_debug_printk(fmt, args...)
+#define rx_buf_dump(a, b, c)
+#define rx_dbg(fmt, args...)
+#endif /* CONFIG_RNP_RX_DEBUG */
+
+#ifdef CONFIG_RNP_TX_DEBUG
+#define desc_hex_dump(msg, buf, len)                                           \
+	print_hex_dump(KERN_WARNING, msg, DUMP_PREFIX_OFFSET, 16, 1, (buf),    \
+		       (len), false)
+#define rnpgbe_skb_dump _rnpgbe_skb_dump
+#define tx_dbg(fmt, args...)                                                   \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else /* CONFIG_RNP_TX_DEBUG */
+#define desc_hex_dump(msg, buf, len)
+#define rnpgbe_skb_dump(skb, full_pkt)
+#define tx_dbg(fmt, args...)
+#endif /* CONFIG_RNP_TX_DEBUG */
+
+#ifdef DEBUG
+#define dbg(fmt, args...)                                                      \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else /* DEBUG */
+#define dbg(fmt, args...)
+#endif /* DEBUG */
+
+#ifdef CONFIG_RNP_VF_DEBUG
+#define vf_dbg(fmt, args...)                                                   \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else /* CONFIG_RNP_VF_DEBUG */
+#define vf_dbg(fmt, args...)
+#endif /* CONFIG_RNP_VF_DEBUG */
+
+/* ================= registers  read/write helper ===== */
+#define p_rnpgbe_wr_reg(reg, val)                                              \
+	do {                                                                   \
+		printk(KERN_DEBUG " wr-reg: %p <== 0x%08x \t#%-4d %s\n",       \
+		       (reg), (val), __LINE__, __FILE__);                      \
+		iowrite32((val), (void *)(reg));                               \
+	} while (0)
+
+static inline unsigned int prnpgbe_rd_reg(void *reg)
+{
+	unsigned int v = ioread32((void *)(reg));
+
+	printk(KERN_DEBUG "  %p => 0x%08x\n", reg, v);
+	return v;
+}
+
+#ifdef IO_PRINT
+static inline unsigned int rnpgbe_rd_reg(void *reg)
+{
+	unsigned int v = ioread32((void *)(reg));
+
+	dbg(" rd-reg: %p <== 0x%08x\n", reg, v);
+	return v;
+}
+#define rnpgbe_wr_reg(reg, val)                                                \
+	do {                                                                   \
+		dbg(" wr-reg: %p <== 0x%08x \t#%-4d %s\n", (reg), (val),       \
+		    __LINE__, __FILE__);                                       \
+		iowrite32((val), (void *)(reg));                               \
+	} while (0)
+#else /* IO_PRINT */
+#define rnpgbe_rd_reg(reg) readl((void *)(reg))
+#define rnpgbe_wr_reg(reg, val) writel((val), (void *)(reg))
+#endif /* IO_PRINT */
+
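+/*
+ * Register accessors: each helper below adds the register offset to the
+ * matching base address (hw, nic, dma, dma ring, eth, mac or ring).
+ */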
+#define rd32(hw, off) rnpgbe_rd_reg((hw)->hw_addr + (off))
+#define wr32(hw, off, val) rnpgbe_wr_reg((hw)->hw_addr + (off), (val))
+
+#define nic_rd32(nic, off) rnpgbe_rd_reg((nic)->nic_base_addr + (off))
+#define nic_wr32(nic, off, val)                                                \
+	rnpgbe_wr_reg((nic)->nic_base_addr + (off), (val))
+
+#define dma_rd32(dma, off) rnpgbe_rd_reg((dma)->dma_base_addr + (off))
+#define dma_wr32(dma, off, val)                                                \
+	rnpgbe_wr_reg((dma)->dma_base_addr + (off), (val))
+
+#define dma_ring_rd32(dma, off) rnpgbe_rd_reg((dma)->dma_ring_addr + (off))
+#define dma_ring_wr32(dma, off, val)                                           \
+	rnpgbe_wr_reg((dma)->dma_ring_addr + (off), (val))
+
+#define eth_rd32(eth, off) rnpgbe_rd_reg((eth)->eth_base_addr + (off))
+#define eth_wr32(eth, off, val)                                                \
+	rnpgbe_wr_reg((eth)->eth_base_addr + (off), (val))
+
+#define mac_rd32(mac, off) rnpgbe_rd_reg((mac)->mac_addr + (off))
+#define mac_wr32(mac, off, val) rnpgbe_wr_reg((mac)->mac_addr + (off), (val))
+#ifdef debug_ring
+static inline unsigned int rnpgbe_rd_reg_1(int ring, u32 off, void *reg)
+{
+	unsigned int v = ioread32((void *)(reg));
+
+	printk(KERN_DEBUG "%d rd-reg: %x <== 0x%08x\n", ring, off, v);
+	return v;
+}
+
+#define ring_rd32(ring, off)                                                   \
+	rnpgbe_rd_reg_1(ring->rnpgbe_queue_idx, off, (ring)->ring_addr + (off))
+#define ring_wr32(ring, off, val)                                              \
+	rnpgbe_wr_reg((ring)->ring_addr + (off), (val))
+#else /* debug_ring */
+#define ring_rd32(ring, off) rnpgbe_rd_reg((ring)->ring_addr + (off))
+#define ring_wr32(ring, off, val)                                              \
+	rnpgbe_wr_reg((ring)->ring_addr + (off), (val))
+#endif /* debug_ring */
+
+#define pwr32(hw, off, val) p_rnpgbe_wr_reg((hw)->hw_addr + (off), (val))
+
+#define rnpgbe_mbx_rd(hw, off) rnpgbe_rd_reg((hw)->ring_msix_base + (off))
+#define rnpgbe_mbx_wr(hw, off, val)                                            \
+	rnpgbe_wr_reg((hw)->ring_msix_base + (off), val)
+
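+/*
+ * Enable or disable RX VLAN stripping for a single ring: one bit per
+ * ring in the RNP_ETH_VLAN_VME_REG array.
+ */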
+static inline void hw_queue_strip_rx_vlan(struct rnpgbe_hw *hw, u8 ring_num,
+					  bool enable)
+{
+	u32 reg = RNP_ETH_VLAN_VME_REG(ring_num / 32);
+	u32 offset = ring_num % 32;
+	u32 data = rd32(hw, reg);
+
+	if (enable == true)
+		data |= (1 << offset);
+	else
+		data &= ~(1 << offset);
+	wr32(hw, reg, data);
+}
+
+#define rnpgbe_set_reg_bit(hw, reg_def, bit)                                   \
+	do {                                                                   \
+		u32 reg = reg_def;                                             \
+		u32 value = rd32(hw, reg);                                     \
+		dbg("before set  %x %x\n", reg, value);                        \
+		value |= (0x01 << bit);                                        \
+		dbg("after set %x %x\n", reg, value);                          \
+		wr32(hw, reg, value);                                          \
+	} while (0)
+
+#define rnpgbe_clr_reg_bit(hw, reg_def, bit)                                   \
+	do {                                                                   \
+		u32 reg = reg_def;                                             \
+		u32 value = rd32(hw, reg);                                     \
+		dbg("before clr %x %x\n", reg, value);                         \
+		value &= (~(0x01 << bit));                                     \
+		dbg("after clr %x %x\n", reg, value);                          \
+		wr32(hw, reg, value);                                          \
+	} while (0)
+
+#define rnpgbe_vlan_filter_on(hw)                                              \
+	rnpgbe_set_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30)
+#define rnpgbe_vlan_filter_off(hw)                                             \
+	rnpgbe_clr_reg_bit(hw, RNP_ETH_VLAN_FILTER_ENABLE, 30)
+
+#define DPRINTK(nlevel, klevel, fmt, args...)                                  \
+	((NETIF_MSG_##nlevel & adapter->msg_enable) ?                          \
+		 (void)(netdev_printk(KERN_##klevel, adapter->netdev, fmt,     \
+				      ##args)) :                               \
+		 NULL)
+
+/* ==== log helper === */
+#ifdef HW_DEBUG
+#define hw_dbg(hw, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args)
+#define eth_dbg(eth, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args)
+#else
+#define hw_dbg(hw, fmt, args...)
+#define eth_dbg(hw, fmt, args...)
+#endif
+
+#ifdef RNP_DEBUG_OPEN
+#define rnpgbe_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else /* RNP_DEBUG_OPEN */
+#define rnpgbe_dbg(fmt, args...)
+#endif /* RNP_DEBUG_OPEN */
+#define rnpgbe_info(fmt, args...) printk(KERN_DEBUG "rnp-info: " fmt, ##args)
+#define rnpgbe_warn(fmt, args...) printk(KERN_DEBUG "rnp-warn: " fmt, ##args)
+#define rnpgbe_err(fmt, args...) printk(KERN_ERR "rnp-err : " fmt, ##args)
+
+#define e_info(msglvl, format, arg...)                                         \
+	netif_info(adapter, msglvl, adapter->netdev, format, ##arg)
+#define e_err(msglvl, format, arg...)                                          \
+	netif_err(adapter, msglvl, adapter->netdev, format, ##arg)
+#define e_warn(msglvl, format, arg...)                                         \
+	netif_warn(adapter, msglvl, adapter->netdev, format, ##arg)
+#define e_crit(msglvl, format, arg...)                                         \
+	netif_crit(adapter, msglvl, adapter->netdev, format, ##arg)
+
+#define e_dev_info(format, arg...) dev_info(&adapter->pdev->dev, format, ##arg)
+#define e_dev_warn(format, arg...) dev_warn(&adapter->pdev->dev, format, ##arg)
+#define e_dev_err(format, arg...) dev_err(&adapter->pdev->dev, format, ##arg)
+
+#ifdef CONFIG_RNP_TX_DEBUG
+static inline void buf_dump_line(const char *msg, int line, void *buf, int len)
+{
+	int i, offset = 0;
+	int msg_len = 1024;
+	u8 msg_buf[1024];
+	u8 *ptr = (u8 *)buf;
+
+	offset += snprintf(msg_buf + offset, msg_len,
+			   "=== %s #%d line:%d buf:%p==\n000: ", msg, len, line,
+			   buf);
+
+	for (i = 0; i < len; ++i) {
+		if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) {
+			printk(KERN_DEBUG "%s\n", msg_buf);
+			offset = 0;
+		}
+
+		if ((i != 0) && (i % 16) == 0) {
+			offset += snprintf(msg_buf + offset, msg_len,
+					   "\n%03x: ", i);
+		}
+		offset += snprintf(msg_buf + offset, msg_len, "%02x ", ptr[i]);
+	}
+
+	offset += snprintf(msg_buf + offset, msg_len, "\n");
+	printk(KERN_DEBUG "%s\n", msg_buf);
+}
+#else /* CONFIG_RNP_TX_DEBUG */
+#define buf_dump_line(msg, line, buf, len)
+#endif /* CONFIG_RNP_TX_DEBUG */
+
+static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size)
+{
+	return cpu_to_le64(((u64)vlan_cmd << 32) | ((u64)mac_ip_len << 16) |
+			   ((u64)size));
+}
+
+static inline void buf_dump(const char *msg, void *buf, int len)
+{
+	int i, offset = 0;
+	int msg_len = 1024;
+	char msg_buf[1024];
+	u8 *ptr = (u8 *)buf;
+
+	offset += snprintf(msg_buf + offset, msg_len,
+			   "=== %s #%d ==\n000: ", msg, len);
+
+	for (i = 0; i < len; ++i) {
+		if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) {
+			printk(KERN_DEBUG "%s\n", msg_buf);
+			offset = 0;
+		}
+
+		if ((i != 0) && (i % 16) == 0) {
+			offset += snprintf(msg_buf + offset, msg_len,
+					   "\n%03x: ", i);
+		}
+		offset += snprintf(msg_buf + offset, msg_len, "%02x ", ptr[i]);
+	}
+
+	offset += snprintf(msg_buf + offset, msg_len, "\n=== done ==\n");
+	printk(KERN_DEBUG "%s\n", msg_buf);
+}
+
+#ifndef NO_SKB_DUMP
+static inline void _rnpgbe_skb_dump(const struct sk_buff *skb, bool full_pkt)
+{
+	static atomic_t can_dump_full = ATOMIC_INIT(5);
+#ifdef DEBUG
+	struct skb_shared_info *sh = skb_shinfo(skb);
+#endif /* DEBUG */
+	struct net_device *dev = skb->dev;
+	struct sk_buff *list_skb;
+	bool has_mac, has_trans;
+	int headroom, tailroom;
+	int i, len, seg_len;
+	const char *level = KERN_WARNING;
+
+	if (full_pkt)
+		full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;
+
+	if (full_pkt)
+		len = skb->len;
+	else
+		len = min_t(int, skb->len, MAX_HEADER + 128);
+
+	headroom = skb_headroom(skb);
+	tailroom = skb_tailroom(skb);
+
+	has_mac = skb_mac_header_was_set(skb);
+	has_trans = skb_transport_header_was_set(skb);
+
+	dbg("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
+	    "mac=(%d,%d) net=(%d,%d) trans=%d\n"
+	    "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
+	    "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
+	    "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
+	    level, skb->len, headroom, skb_headlen(skb), tailroom,
+	    has_mac ? skb->mac_header : -1,
+	    has_mac ? (skb->network_header - skb->mac_header) : -1,
+	    skb->network_header, has_trans ? skb_network_header_len(skb) : -1,
+	    has_trans ? skb->transport_header : -1, sh->tx_flags, sh->nr_frags,
+	    sh->gso_size, sh->gso_type, sh->gso_segs, skb->csum, skb->ip_summed,
+	    skb->csum_complete_sw, skb->csum_valid, skb->csum_level, skb->hash,
+	    skb->sw_hash, skb->l4_hash, ntohs(skb->protocol), skb->pkt_type,
+	    skb->skb_iif);
+
+	if (dev)
+		dbg("%sdev name=%s feat=0x%pNF\n", level, dev->name,
+		    &dev->features);
+
+	seg_len = min_t(int, skb_headlen(skb), len);
+	if (seg_len)
+		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET, 16,
+			       1, skb->data, seg_len, false);
+	len -= seg_len;
+
+	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		u32 p_len;
+		struct page *p;
+		u8 *vaddr;
+
+		p = skb_frag_page(frag);
+		p_len = skb_frag_size(frag);
+		seg_len = min_t(int, p_len, len);
+		vaddr = kmap_atomic(p);
+		print_hex_dump(level, "skb frag:     ", DUMP_PREFIX_OFFSET, 16,
+			       1, vaddr, seg_len, false);
+		kunmap_atomic(vaddr);
+		len -= seg_len;
+		if (!len)
+			break;
+	}
+
+	if (full_pkt && skb_has_frag_list(skb)) {
+		dbg("skb fraglist:\n");
+		skb_walk_frags(skb, list_skb) _rnpgbe_skb_dump(list_skb, true);
+	}
+}
+#endif /* NO_SKB_DUMP */
+
+enum RNP_LOG_EVT {
+	LOG_MBX_IN,
+	LOG_MBX_OUT,
+	LOG_MBX_MSG_IN,
+	LOG_MBX_MSG_OUT,
+	LOG_LINK_EVENT,
+	LOG_ADPT_STAT,
+	LOG_MBX_ABLI,
+	LOG_MBX_LINK_STAT,
+	LOG_MBX_IFUP_DOWN,
+	LOG_MBX_LOCK,
+	LOG_ETHTOOL,
+	LOG_PHY,
+
+
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+#define MII_DATA_MASK GENMASK(15, 0)
+
+extern unsigned int rnpgbe_loglevel;
+
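+/*
+ * rnpgbe_loglevel is a bitmap indexed by enum RNP_LOG_EVT; it is exposed
+ * as the writable module parameter defined in rnpgbe_common.c.
+ */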
+#define rnpgbe_logd(evt, fmt, args...)                                         \
+	do {                                                                   \
+		if (BIT(evt) & rnpgbe_loglevel) {                              \
+			printk(KERN_DEBUG fmt, ##args);                        \
+		}                                                              \
+	} while (0)
+
+#endif /* _RNPGBE_COMMON_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c
new file mode 100644
index 0000000000000..822950c0f77b1
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_debugfs.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+
+#include "rnpgbe.h"
+
+#ifdef HAVE_RNP_DEBUG_FS
+static struct dentry *rnpgbe_dbg_root;
+static char rnpgbe_dbg_reg_ops_buf[256] = "";
+
+/**
+ * rnpgbe_dbg_reg_ops_read - read for reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
+				       size_t count, loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	char *buf;
+	int len;
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name,
+			rnpgbe_dbg_reg_ops_buf);
+	if (!buf)
+		return -ENOMEM;
+
+	if (count < strlen(buf)) {
+		kfree(buf);
+		return -ENOSPC;
+	}
+
+	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+	kfree(buf);
+	return len;
+}
+
+/**
+ * rnpgbe_dbg_reg_ops_write - write into reg_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_reg_ops_write(struct file *filp,
+					const char __user *buffer, size_t count,
+					loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int len;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+	if (count >= sizeof(rnpgbe_dbg_reg_ops_buf))
+		return -ENOSPC;
+
+	len = simple_write_to_buffer(rnpgbe_dbg_reg_ops_buf,
+				     sizeof(rnpgbe_dbg_reg_ops_buf) - 1, ppos,
+				     buffer, count);
+	if (len < 0)
+		return len;
+
+	rnpgbe_dbg_reg_ops_buf[len] = '\0';
+
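+	/*
+	 * Accepted commands are "write <reg> <value>" and "read <reg>";
+	 * offsets at or above 0x30000000 are forwarded to the firmware
+	 * mailbox instead of being accessed through direct MMIO.
+	 */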
+	if (strncmp(rnpgbe_dbg_reg_ops_buf, "write", 5) == 0) {
+		u32 reg, value;
+		int cnt;
+
+		cnt = sscanf(&rnpgbe_dbg_reg_ops_buf[5], "%x %x", &reg, &value);
+		if (cnt == 2) {
+			if (reg >= 0x30000000) {
+				rnpgbe_mbx_reg_write(hw, reg, value);
+				e_dev_info("write: 0x%08x = 0x%08x\n", reg,
+					   value);
+			} else {
+				rnpgbe_wr_reg(hw->hw_addr + reg, value);
+				value = rnpgbe_rd_reg(hw->hw_addr + reg);
+				e_dev_info("write: 0x%08x = 0x%08x\n", reg,
+					   value);
+			}
+		} else {
+			e_dev_info("write  \n");
+		}
+	} else if (strncmp(rnpgbe_dbg_reg_ops_buf, "read", 4) == 0) {
+		u32 reg, value;
+		int cnt;
+
+		cnt = sscanf(&rnpgbe_dbg_reg_ops_buf[4], "%x", &reg);
+		if (cnt == 1) {
+			if (reg >= 0x30000000)
+				value = rnpgbe_mbx_fw_reg_read(hw, reg);
+			else
+				value = rnpgbe_rd_reg(hw->hw_addr + reg);
+
+			snprintf(rnpgbe_dbg_reg_ops_buf,
+				 sizeof(rnpgbe_dbg_reg_ops_buf),
+				 "0x%08x: 0x%08x", reg, value);
+			e_dev_info("read 0x%08x = 0x%08x\n", reg, value);
+		} else {
+			e_dev_info("read \n");
+		}
+	} else {
+		e_dev_info("Unknown command %s\n", rnpgbe_dbg_reg_ops_buf);
+		e_dev_info("Available commands:\n");
+		e_dev_info("   read \n");
+		e_dev_info("   write  \n");
+	}
+	return count;
+}
+
+static const struct file_operations rnpgbe_dbg_reg_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = rnpgbe_dbg_reg_ops_read,
+	.write = rnpgbe_dbg_reg_ops_write,
+};
+
+static char rnpgbe_dbg_netdev_ops_buf[256] = "";
+
+/**
+ * rnpgbe_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_netdev_ops_read(struct file *filp,
+					  char __user *buffer, size_t count,
+					  loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	char *buf;
+	int len;
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	buf = kasprintf(GFP_KERNEL, "%s: %s\n", adapter->name,
+			rnpgbe_dbg_netdev_ops_buf);
+	if (!buf)
+		return -ENOMEM;
+
+	if (count < strlen(buf)) {
+		kfree(buf);
+		return -ENOSPC;
+	}
+
+	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+	kfree(buf);
+	return len;
+}
+
+/**
+ * rnpgbe_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_netdev_ops_write(struct file *filp,
+					   const char __user *buffer,
+					   size_t count, loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	int len;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+	if (count >= sizeof(rnpgbe_dbg_netdev_ops_buf))
+		return -ENOSPC;
+
+	len = simple_write_to_buffer(rnpgbe_dbg_netdev_ops_buf,
+				     sizeof(rnpgbe_dbg_netdev_ops_buf) - 1,
+				     ppos, buffer, count);
+	if (len < 0)
+		return len;
+
+	rnpgbe_dbg_netdev_ops_buf[len] = '\0';
+
+	if (strncmp(rnpgbe_dbg_netdev_ops_buf, "stat", 4) == 0) {
+		rnpgbe_info("adapter->stat=0x%lx\n", adapter->state);
+		rnpgbe_info("adapter->tx_timeout_count=%d\n",
+			    adapter->tx_timeout_count);
+	} else if (strncmp(rnpgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+		adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev,
+							    UINT_MAX);
+		e_dev_info("tx_timeout called\n");
+	} else {
+		e_dev_info("Unknown command: %s\n", rnpgbe_dbg_netdev_ops_buf);
+		e_dev_info("Available commands:\n");
+		e_dev_info("    tx_timeout\n");
+	}
+	return count;
+}
+
+static const struct file_operations rnpgbe_dbg_netdev_ops_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = rnpgbe_dbg_netdev_ops_read,
+	.write = rnpgbe_dbg_netdev_ops_write,
+};
+
+/**
+ * rnpgbe_dbg_netdev_temp_read - read temperature from hw
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t rnpgbe_dbg_netdev_temp_read(struct file *filp,
+					   char __user *buffer, size_t count,
+					   loff_t *ppos)
+{
+	struct rnpgbe_adapter *adapter = filp->private_data;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	char *buf;
+	int len;
+	int temp = 0, voltage = 0;
+
+	/* don't allow partial reads */
+	if (*ppos != 0)
+		return 0;
+
+	temp = rnpgbe_mbx_get_temp(hw, &voltage);
+
+	buf = kasprintf(GFP_KERNEL, "%s: temp: %d oC voltage:%d mV\n",
+			adapter->name, temp, voltage);
+	if (!buf)
+		return -ENOMEM;
+
+	if (count < strlen(buf)) {
+		kfree(buf);
+		return -ENOSPC;
+	}
+
+	len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+
+	kfree(buf);
+	return len;
+}
+static const struct file_operations rnpgbe_dbg_netdev_temp = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = rnpgbe_dbg_netdev_temp_read,
+};
+
+/**
+ * rnpgbe_dbg_adapter_init - setup the debugfs directory for the adapter
+ * @adapter: the adapter that is starting up
+ **/
+void rnpgbe_dbg_adapter_init(struct rnpgbe_adapter *adapter)
+{
+	const char *name = adapter->name;
+	struct dentry *pfile;
+
+	adapter->rnpgbe_dbg_adapter = debugfs_create_dir(name, rnpgbe_dbg_root);
+	if (adapter->rnpgbe_dbg_adapter) {
+		pfile = debugfs_create_file("reg_ops", 0600,
+					    adapter->rnpgbe_dbg_adapter,
+					    adapter, &rnpgbe_dbg_reg_ops_fops);
+		if (!pfile)
+			e_dev_err("debugfs reg_ops for %s failed\n", name);
+		pfile = debugfs_create_file("netdev_ops", 0600,
+					    adapter->rnpgbe_dbg_adapter,
+					    adapter,
+					    &rnpgbe_dbg_netdev_ops_fops);
+		if (!pfile)
+			e_dev_err("debugfs netdev_ops for %s failed\n", name);
+
+		pfile = debugfs_create_file("temp", 0600,
+					    adapter->rnpgbe_dbg_adapter,
+					    adapter, &rnpgbe_dbg_netdev_temp);
+		if (!pfile)
+			e_dev_err("debugfs temp for %s failed\n", name);
+	} else {
+		e_dev_err("debugfs entry for %s failed\n", name);
+	}
+}
+
+/**
+ * rnpgbe_dbg_adapter_exit - clear out the adapter's debugfs entries
+ * @adapter: the adapter that is being removed
+ **/
+void rnpgbe_dbg_adapter_exit(struct rnpgbe_adapter *adapter)
+{
+	debugfs_remove_recursive(adapter->rnpgbe_dbg_adapter);
+	adapter->rnpgbe_dbg_adapter = NULL;
+}
+
+/**
+ * rnpgbe_dbg_init - start up debugfs for the driver
+ **/
+void rnpgbe_dbg_init(void)
+{
+	rnpgbe_dbg_root = debugfs_create_dir(rnpgbe_driver_name, NULL);
+	if (rnpgbe_dbg_root == NULL)
+		pr_err("init of debugfs failed\n");
+}
+
+/**
+ * rnpgbe_dbg_exit - clean out the driver's debugfs entries
+ **/
+void rnpgbe_dbg_exit(void)
+{
+	debugfs_remove_recursive(rnpgbe_dbg_root);
+}
+#endif
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c
new file mode 100644
index 0000000000000..481edfe34ca7d
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.c
@@ -0,0 +1,2212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe.h"
+#include "rnpgbe_phy.h"
+#include "rnpgbe_sriov.h"
+#include "rnpgbe_mbx_fw.h"
+#include "rnpgbe_ethtool.h"
+
+int rnpgbe_wol_exclusion(struct rnpgbe_adapter *adapter,
+			 struct ethtool_wolinfo *wol)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int retval = 0;
+
+	if (!hw->wol_en) {
+		retval = 1;
+		wol->supported = 0;
+	}
+
+	/* WoL is not supported on all devices */
+	if (!rnpgbe_wol_supported(adapter, hw->device_id)) {
+		retval = 1;
+		wol->supported = 0;
+	}
+
+	return retval;
+}
+
+void rnpgbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* report the wake options the hardware supports */
+	wol->supported = hw->wol_supported;
+	wol->wolopts = 0;
+
+	/* wake-on-lan is not available right now */
+	if (rnpgbe_wol_exclusion(adapter, wol) ||
+	    !device_can_wakeup(&adapter->pdev->dev))
+		return;
+
+	if (adapter->wol & RNP_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & RNP_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & RNP_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & RNP_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+}
+
+int rnpgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = 0;
+	u32 new_wol = 0;
+
+	if (wol->wolopts & (~hw->wol_supported))
+		return -EOPNOTSUPP;
+
+	if (wol->wolopts & WAKE_UCAST)
+		new_wol |= RNP_WUFC_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		new_wol |= RNP_WUFC_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		new_wol |= RNP_WUFC_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		new_wol |= RNP_WUFC_MAG;
+
+	ret = rnpgbe_mbx_wol_set(hw, new_wol);
+	if (ret != 0)
+		return -EOPNOTSUPP;
+
+	adapter->wol = new_wol;
+	/* reflect the new wol setting in the pm core */
+	device_set_wakeup_enable(&adapter->pdev->dev, !!adapter->wol);
+
+	return 0;
+}
+
+/* ethtool register test data */
+struct rnpgbe_reg_test {
+	u16 reg;
+	u8 array_len;
+	u8 test_type;
+	u32 mask;
+	u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables.  We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default n10 register test */
+static struct rnpgbe_reg_test reg_test_n10[] = {
+	//{RNP_DMA_CONFIG, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF},
+	/*
+	 * { RNP_FCRTL_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	 * { RNP_FCRTH_n10(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	 * { RNP_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
+	 * { RNP_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+	 * { RNP_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+	 * { RNP_RXDCTL(0), 4, WRITE_NO_TEST, 0, RNPGBE_RXDCTL_ENABLE },
+	 * { RNP_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	 * { RNP_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
+	 * { RNP_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
+	 * { RNP_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	 * { RNP_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+	 * { RNP_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
+	 * { RNP_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
+	 * { RNP_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
+	 * { RNP_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	 */
+	{ .reg = 0 },
+};
+
+/* write and read check */
+static bool reg_pattern_test(struct rnpgbe_adapter *adapter, u64 *data, int reg,
+			     u32 mask, u32 write)
+{
+	u32 pat, val, before;
+	static const u32 test_pattern[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000,
+					    0xFFFFFFFF };
+
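+	/* write each pattern, verify the masked read-back value, then
+	 * restore the original register contents
+	 */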
+	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+		before = readl(adapter->hw.hw_addr + reg);
+		printk(KERN_DEBUG "before reg %x is %x\n", reg, before);
+		writel((test_pattern[pat] & write),
+		       (adapter->hw.hw_addr + reg));
+		val = readl(adapter->hw.hw_addr + reg);
+		if (val != (test_pattern[pat] & write & mask)) {
+			e_err(drv,
+			      "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+			      reg, val, (test_pattern[pat] & write & mask));
+			*data = reg;
+			writel(before, adapter->hw.hw_addr + reg);
+			return 1;
+		}
+		writel(before, adapter->hw.hw_addr + reg);
+	}
+	return 0;
+}
+
+static bool reg_set_and_check(struct rnpgbe_adapter *adapter, u64 *data,
+			      int reg, u32 mask, u32 write)
+{
+	u32 val, before;
+
+	before = readl(adapter->hw.hw_addr + reg);
+	writel((write & mask), (adapter->hw.hw_addr + reg));
+	val = readl(adapter->hw.hw_addr + reg);
+	if ((write & mask) != (val & mask)) {
+		e_err(drv,
+		      "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+		      reg, (val & mask), (write & mask));
+		*data = reg;
+		writel(before, (adapter->hw.hw_addr + reg));
+		return 1;
+	}
+	writel(before, (adapter->hw.hw_addr + reg));
+	return 0;
+}
+
+static bool rnpgbe_reg_test(struct rnpgbe_adapter *adapter, u64 *data)
+{
+	struct rnpgbe_reg_test *test;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 i;
+
+	if (RNPGBE_REMOVED(hw->hw_addr)) {
+		e_err(drv, "Adapter removed - register test blocked\n");
+		*data = 1;
+		return true;
+	}
+
+	test = reg_test_n10;
+	/*
+	 * Perform the remainder of the register test, looping through
+	 * the test table until we either fail or reach the null entry.
+	 */
+	while (test->reg) {
+		for (i = 0; i < test->array_len; i++) {
+			bool b = false;
+
+			switch (test->test_type) {
+			case PATTERN_TEST:
+				b = reg_pattern_test(adapter, data,
+						     test->reg + (i * 0x40),
+						     test->mask, test->write);
+				break;
+			case SET_READ_TEST:
+				b = reg_set_and_check(adapter, data,
+						      test->reg + (i * 0x40),
+						      test->mask, test->write);
+				break;
+			case WRITE_NO_TEST:
+				wr32(hw, test->reg + (i * 0x40), test->write);
+				break;
+			case TABLE32_TEST:
+				b = reg_pattern_test(adapter, data,
+						     test->reg + (i * 4),
+						     test->mask, test->write);
+				break;
+			case TABLE64_TEST_LO:
+				b = reg_pattern_test(adapter, data,
+						     test->reg + (i * 8),
+						     test->mask, test->write);
+				break;
+			case TABLE64_TEST_HI:
+				b = reg_pattern_test(adapter, data,
+						     (test->reg + 4) + (i * 8),
+						     test->mask, test->write);
+				break;
+			}
+			if (b)
+				return true;
+		}
+		test++;
+	}
+
+	*data = 0;
+	return false;
+}
+
+static int rnpgbe_link_test(struct rnpgbe_adapter *adapter, u64 *data)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool link_up;
+	u32 link_speed = 0;
+	bool duplex;
+	*data = 0;
+
+	hw->ops.check_link(hw, &link_speed, &link_up, &duplex, true);
+	if (!link_up)
+		*data = 1;
+	return *data;
+}
+
+void rnpgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test,
+		      u64 *data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool if_running = netif_running(netdev);
+
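+	/* self-test results: data[0] registers, data[1] eeprom,
+	 * data[2] interrupt, data[3] loopback, data[4] link
+	 */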
+	set_bit(__RNP_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+			int i;
+
+			for (i = 0; i < adapter->num_vfs; i++) {
+				if (adapter->vfinfo[i].clear_to_send) {
+					netdev_warn(netdev, "%s",
+						    "offline diagnostics are not supported when VFs are present; reporting pass\n");
+					data[0] = 0;
+					data[1] = 0;
+					data[2] = 0;
+					data[3] = 0;
+					if (rnpgbe_link_test(adapter, &data[4]))
+						eth_test->flags |= ETH_TEST_FL_FAILED;
+					//eth_test->flags |= ETH_TEST_FL_FAILED;
+					clear_bit(__RNP_TESTING,
+						  &adapter->state);
+					goto skip_ol_tests;
+				}
+			}
+		}
+
+		/* Offline tests */
+		e_info(hw, "offline testing starting\n");
+
+		/* bringing adapter down disables SFP+ optics */
+		if (hw->ops.enable_tx_laser)
+			hw->ops.enable_tx_laser(hw);
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result
+		 */
+		if (rnpgbe_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e_info(hw, "register testing starting\n");
+		if (rnpgbe_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		data[1] = 0;
+		data[2] = 0;
+		/*
+		 * rnpgbe_reset(adapter);
+		 * e_info(hw, "eeprom testing starting\n");
+		 * if (rnpgbe_eeprom_test(adapter, &data[1]))
+		 * eth_test->flags |= ETH_TEST_FL_FAILED;
+		 * rnpgbe_reset(adapter);
+		 * e_info(hw, "interrupt testing starting\n");
+		 * if (rnpgbe_intr_test(adapter, &data[2]))
+		 * eth_test->flags |= ETH_TEST_FL_FAILED;
+		 */
+		/* If SRIOV or VMDq is enabled then skip MAC
+		 * loopback diagnostic.
+		 */
+		if (adapter->flags &
+		    (RNP_FLAG_SRIOV_ENABLED | RNP_FLAG_VMDQ_ENABLED)) {
+			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
+			data[3] = 0;
+			goto skip_loopback;
+		}
+
+		data[3] = 0;
+	skip_loopback:
+		/* clear testing bit and return adapter to previous state */
+		clear_bit(__RNP_TESTING, &adapter->state);
+	} else {
+		e_info(hw, "online testing starting\n");
+
+		/* if adapter is down, SFP+ optics will be disabled */
+		if (!if_running && hw->ops.enable_tx_laser)
+			hw->ops.enable_tx_laser(hw);
+
+		/* Online tests */
+		if (rnpgbe_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Offline tests aren't run; pass by default */
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0;
+		data[3] = 0;
+
+		clear_bit(__RNP_TESTING, &adapter->state);
+	}
+
+	/* if adapter was down, ensure SFP+ optics are disabled again */
+	if (!if_running && hw->ops.disable_tx_laser)
+		hw->ops.disable_tx_laser(hw);
+skip_ol_tests:
+	msleep_interruptible(4 * 1000);
+}
+
+u32 rnpgbe_get_msglevel(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->msg_enable;
+}
+
+void rnpgbe_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	adapter->msg_enable = data;
+}
+
+int rnpgbe_set_phys_id(struct net_device *netdev,
+		       enum ethtool_phys_id_state state)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
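+	/* map the ethtool identify states onto the firmware led_set
+	 * mailbox argument: ACTIVE -> 1, ON -> 2, OFF -> 3, INACTIVE -> 0
+	 */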
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		rnpgbe_mbx_led_set(hw, 1);
+		return 2;
+
+	case ETHTOOL_ID_ON:
+		rnpgbe_mbx_led_set(hw, 2);
+		break;
+
+	case ETHTOOL_ID_OFF:
+		rnpgbe_mbx_led_set(hw, 3);
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		rnpgbe_mbx_led_set(hw, 0);
+		break;
+	}
+	return 0;
+}
+
+int rnpgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+
+	/* for now we just treat it as pf0 */
+	if (!(adapter->flags2 & RNP_FLAG2_PTP_ENABLED))
+		return ethtool_op_get_ts_info(dev, info);
+
+	if (adapter->ptp_clock)
+		info->phc_index = ptp_clock_index(adapter->ptp_clock);
+	else
+		info->phc_index = -1;
+
+	dbg("phc_index is %d\n", info->phc_index);
+	info->so_timestamping =
+		SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE |
+		SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_TX_SOFTWARE |
+		SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+			   /* 802.AS1 */
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+			   BIT(HWTSTAMP_FILTER_ALL);
+
+	return 0;
+}
+
+static unsigned int rnpgbe_max_channels(struct rnpgbe_adapter *adapter)
+{
+	unsigned int max_combined;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		/* with SR-IOV enabled the PF is limited to sriov_ring_limit queues */
+		max_combined = hw->sriov_ring_limit;
+	} else if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
+		/* with DCB enabled, at most 32 queues are supported */
+		max_combined = 32;
+	} else {
+		/* with RSS, support up to max_ring_pair_counts queues */
+		max_combined = adapter->max_ring_pair_counts;
+		/* should not be larger than q_vectors ? */
+	}
+
+	return max_combined;
+}
+
+int rnpgbe_get_keee(struct net_device *netdev, struct ethtool_keee *edata)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (!(hw->feature_flags & RNP_HW_FEATURE_EEE))
+		return -EOPNOTSUPP;
+
+	if (!(hw->eee_capability))
+		return -EOPNOTSUPP;
+
+	edata->supported_u32 = 0;
+
+	if (hw->eee_capability & EEE_1000BT)
+		edata->supported_u32 |= SUPPORTED_1000baseT_Full;
+	if (hw->eee_capability & EEE_100BT)
+		edata->supported_u32 |= SUPPORTED_100baseT_Full;
+
+	if (adapter->eee_enabled)
+		edata->eee_enabled = true;
+
+	edata->lp_advertised_u32 =
+		mmd_eee_adv_to_ethtool_adv_t(adapter->partner_eee);
+	edata->advertised_u32 = mmd_eee_adv_to_ethtool_adv_t(adapter->local_eee);
+
+	/* @eee_active: Result of the eee auto negotiation. */
+	if ((adapter->eee_enabled) &&
+	    (adapter->local_eee & adapter->partner_eee))
+		edata->eee_active = true;
+	/* @tx_lpi_enabled: Whether the interface should assert its tx lpi */
+	edata->tx_lpi_enabled = adapter->tx_path_in_lpi_mode;
+	edata->tx_lpi_timer = adapter->tx_lpi_timer;
+
+	/* FIXME: eee is not usable in half duplex */
+	if (!hw->duplex) {
+		edata->eee_enabled = false;
+		edata->eee_active = false;
+		edata->tx_lpi_enabled = false;
+		edata->advertised_u32 = 0;
+	}
+	return 0;
+}
+
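+/* local helpers to convert between the legacy struct ethtool_eee and the
+ * newer struct ethtool_keee, so the legacy get/set_eee entry points can
+ * reuse the keee implementations below
+ */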
+void eee_to_keee(struct ethtool_keee *keee,
+		 const struct ethtool_eee *eee)
+{
+	memset(keee, 0, sizeof(*keee));
+
+	keee->supported_u32 = eee->supported;
+	keee->advertised_u32 = eee->advertised;
+	keee->lp_advertised_u32 = eee->lp_advertised;
+	keee->eee_active = eee->eee_active;
+	keee->eee_enabled = eee->eee_enabled;
+	keee->tx_lpi_enabled = eee->tx_lpi_enabled;
+	keee->tx_lpi_timer = eee->tx_lpi_timer;
+
+	ethtool_convert_legacy_u32_to_link_mode(keee->supported,
+						eee->supported);
+	ethtool_convert_legacy_u32_to_link_mode(keee->advertised,
+						eee->advertised);
+	ethtool_convert_legacy_u32_to_link_mode(keee->lp_advertised,
+						eee->lp_advertised);
+}
+
+bool ethtool_eee_use_linkmodes(const struct ethtool_keee *eee)
+{
+	return false;
+}
+
+void keee_to_eee(struct ethtool_eee *eee,
+		 const struct ethtool_keee *keee)
+{
+	memset(eee, 0, sizeof(*eee));
+
+	eee->eee_active = keee->eee_active;
+	eee->eee_enabled = keee->eee_enabled;
+	eee->tx_lpi_enabled = keee->tx_lpi_enabled;
+	eee->tx_lpi_timer = keee->tx_lpi_timer;
+
+	if (ethtool_eee_use_linkmodes(keee)) {
+		bool overflow;
+
+		overflow = !ethtool_convert_link_mode_to_legacy_u32(&eee->supported,
+								    keee->supported);
+		ethtool_convert_link_mode_to_legacy_u32(&eee->advertised,
+							keee->advertised);
+		ethtool_convert_link_mode_to_legacy_u32(&eee->lp_advertised,
+							keee->lp_advertised);
+		if (overflow)
+			pr_warn("Ethtool ioctl interface doesn't support passing EEE linkmodes beyond bit 32\n");
+	} else {
+		eee->supported = keee->supported_u32;
+		eee->advertised = keee->advertised_u32;
+		eee->lp_advertised = keee->lp_advertised_u32;
+	}
+}
+
+int rnpgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct ethtool_keee kedata;
+	int ret;
+
+	eee_to_keee(&kedata, edata);
+	ret = rnpgbe_get_keee(netdev, &kedata);
+	keee_to_eee(edata, &kedata);
+
+	return ret;
+}
+
+int rnpgbe_set_keee(struct net_device *netdev, struct ethtool_keee *edata)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct ethtool_keee eee_curr;
+	s32 ret_val;
+
+	if (!(hw->feature_flags & RNP_HW_FEATURE_EEE))
+		return -EOPNOTSUPP;
+
+	memset(&eee_curr, 0, sizeof(struct ethtool_keee));
+
+	ret_val = rnpgbe_get_keee(netdev, &eee_curr);
+
+	if (ret_val)
+		return ret_val;
+
+	if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && (edata->eee_enabled)) {
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"enabling eee with sriov on is not supported\n");
+
+		return -EINVAL;
+	}
+
+	if (edata->eee_enabled) {
+		if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+			dev_err(pci_dev_to_dev(adapter->pdev),
+				"Setting EEE tx-lpi is not supported\n");
+			return -EINVAL;
+		}
+
+		if (!edata->advertised_u32 ||
+		    (edata->advertised_u32 &
+		     ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) {
+			dev_err(pci_dev_to_dev(adapter->pdev),
+				"EEE Advertisement supports 100Base-Tx Full Duplex(0x08) 1000Base-T Full Duplex(0x20) or both(0x28)\n");
+			return -EINVAL;
+		}
+		adapter->local_eee = 0;
+		if (edata->advertised_u32 & ADVERTISE_100_FULL)
+			adapter->local_eee |= EEE_100BT;
+		if (edata->advertised_u32 & SUPPORTED_1000baseT_Full)
+			adapter->local_eee |= EEE_1000BT;
+
+	} else {
+		/* clearing local_eee turns eee off */
+		adapter->local_eee = 0;
+	}
+
+	if (edata->eee_enabled)
+		adapter->eee_enabled = 1;
+	else
+		adapter->eee_enabled = 0;
+
+	adapter->tx_lpi_timer = edata->tx_lpi_timer;
+
+	/* the link will go down and re-autonegotiate */
+	if (hw->ops.setup_eee)
+		hw->ops.setup_eee(hw, RNP_DEFAULT_LIT_LS, adapter->tx_lpi_timer,
+				  adapter->local_eee);
+
+	return 0;
+}
+
+int rnpgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+	struct ethtool_keee kedata;
+	int ret;
+
+	eee_to_keee(&kedata, edata);
+	ret = rnpgbe_set_keee(netdev, &kedata);
+	keee_to_eee(edata, &kedata);
+
+	return ret;
+}
+
+void rnpgbe_get_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+
+	/* report maximum channels */
+	ch->max_combined = rnpgbe_max_channels(adapter);
+
+	/* report info for other vector */
+	ch->max_other = NON_Q_VECTORS;
+	ch->other_count = NON_Q_VECTORS;
+
+	/* record RSS queues */
+	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+	/* nothing else to report if RSS is disabled */
+	if (ch->combined_count == 1)
+		return;
+
+	/* we do not support ATR queueing if SR-IOV is enabled */
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		return;
+
+	/* same thing goes for being DCB enabled */
+	if (netdev_get_num_tc(dev) > 1)
+		return;
+}
+
+int rnpgbe_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	unsigned int count = ch->combined_count;
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		return -EINVAL;
+
+	/* verify they are not requesting separate vectors */
+	if (!count || ch->rx_count || ch->tx_count)
+		return -EINVAL;
+
+	/* verify other_count has not changed */
+	if (ch->other_count != NON_Q_VECTORS)
+		return -EINVAL;
+
+	dbg("call set channels %d %d %d\n", count, ch->rx_count, ch->tx_count);
+	dbg("max channels %d\n", rnpgbe_max_channels(adapter));
+	/* verify the number of channels does not exceed hardware limits */
+	if (count > rnpgbe_max_channels(adapter))
+		return -EINVAL;
+
+	/* update feature limits from largest to smallest supported values */
+	adapter->ring_feature[RING_F_FDIR].limit = count;
+
+	/* cap RSS limit at max_ring_pair_counts */
+	if (count > adapter->max_ring_pair_counts)
+		count = adapter->max_ring_pair_counts;
+	adapter->ring_feature[RING_F_RSS].limit = count;
+
+	/* use setup TC to update any traffic class queue mapping */
+	return rnpgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
+int rnpgbe_get_module_info(struct net_device *dev,
+			   struct ethtool_modinfo *modinfo)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u8 module_id, diag_supported;
+	int rc;
+
+	rnpgbe_mbx_get_lane_stat(hw);
+
+	if (hw->is_sgmii)
+		return -EIO;
+
+	rc = rnpgbe_mbx_sfp_module_eeprom_info(hw, 0xA0, SFF_MODULE_ID_OFFSET,
+					       1, &module_id);
+	if (rc || module_id == 0xff)
+		return -EIO;
+	rc = rnpgbe_mbx_sfp_module_eeprom_info(
+		hw, 0xA0, SFF_DIAG_SUPPORT_OFFSET, 1, &diag_supported);
+	if (!rc) {
+		switch (module_id) {
+		case SFF_MODULE_ID_SFP:
+			modinfo->type = ETH_MODULE_SFF_8472;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+			if (!diag_supported)
+				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+			break;
+		case SFF_MODULE_ID_QSFP:
+		case SFF_MODULE_ID_QSFP_PLUS:
+			modinfo->type = ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+			break;
+		case SFF_MODULE_ID_QSFP28:
+			modinfo->type = ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+			break;
+		default:
+			printk(KERN_DEBUG
+			       "%s: module_id:0x%x diag_supported:0x%x\n",
+			       __func__, module_id, diag_supported);
+			rc = -EOPNOTSUPP;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int rnpgbe_get_module_eeprom(struct net_device *dev,
+			     struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u16 start = eeprom->offset, length = eeprom->len;
+	int rc = 0;
+
+	rnpgbe_mbx_get_lane_stat(hw);
+
+	if (hw->is_sgmii)
+		return -EIO;
+
+	memset(data, 0, eeprom->len);
+
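+	/* offsets below ETH_MODULE_SFF_8436_LEN come from the A0 page;
+	 * anything beyond that is read from the A2 diagnostics page
+	 */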
+	/* Read A0 portion of the EEPROM */
+	if (start < ETH_MODULE_SFF_8436_LEN) {
+		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
+			length = ETH_MODULE_SFF_8436_LEN - start;
+		rc = rnpgbe_mbx_sfp_module_eeprom_info(hw, 0xA0, start, length,
+						       data);
+		if (rc)
+			return rc;
+		start += length;
+		data += length;
+		length = eeprom->len - length;
+	}
+
+	/* Read A2 portion of the EEPROM */
+	if (length) {
+		start -= ETH_MODULE_SFF_8436_LEN;
+		rc = rnpgbe_mbx_sfp_module_eeprom_info(hw, 0xA2, start, length,
+						       data);
+	}
+
+	return rc;
+}
+
+void rnpgbe_get_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *ring,
+			  struct kernel_ethtool_ringparam __always_unused *ker,
+			  struct netlink_ext_ack __always_unused *extack)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	/* all rings share the same settings */
+
+	ring->rx_max_pending = RNP_MAX_RXD;
+	ring->tx_max_pending = RNP_MAX_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = adapter->rx_ring_item_count;
+	ring->tx_pending = adapter->tx_ring_item_count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+int rnpgbe_set_ringparam(struct net_device *netdev,
+			 struct ethtool_ringparam *ring,
+			 struct kernel_ethtool_ringparam __always_unused *ker,
+			 struct netlink_ext_ack __always_unused *extack)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_ring *temp_ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i, err = 0;
+	u32 new_rx_count, new_tx_count;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	if ((ring->tx_pending < RNP_MIN_TXD) ||
+	    (ring->tx_pending > RNP_MAX_TXD) ||
+	    (ring->rx_pending < RNP_MIN_RXD) ||
+	    (ring->rx_pending > RNP_MAX_RXD)) {
+		netdev_info(
+			netdev,
+			"Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
+			ring->tx_pending, ring->rx_pending, RNP_MIN_TXD,
+			RNP_MAX_TXD);
+		return -EINVAL;
+	}
+
+	new_tx_count = clamp_t(u32, ring->tx_pending, RNP_MIN_TXD, RNP_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, RNP_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	new_rx_count = clamp_t(u32, ring->rx_pending, RNP_MIN_RXD, RNP_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, RNP_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	if ((new_tx_count == adapter->tx_ring_item_count) &&
+	    (new_rx_count == adapter->rx_ring_item_count)) {
+		/* nothing to do */
+		return 0;
+	}
+
+	while (test_and_set_bit(__RNP_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+
+	if (!netif_running(adapter->netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i]->count = new_tx_count;
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i]->count = new_rx_count;
+		adapter->tx_ring_item_count = new_tx_count;
+		adapter->rx_ring_item_count = new_rx_count;
+		goto clear_reset;
+	}
+
+	/* allocate temporary buffer to store rings in */
+	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	temp_ring = vzalloc(i * sizeof(struct rnpgbe_ring));
+	if (!temp_ring) {
+		err = -ENOMEM;
+		goto clear_reset;
+	}
+
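+	/* record the new rx descriptor count and flag rings whose
+	 * length needs to be changed
+	 */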
+	if (new_rx_count != adapter->rx_ring_item_count) {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			adapter->rx_ring[i]->reset_count = new_rx_count;
+			if (!(adapter->rx_ring[i]->ring_flags &
+			      RNP_RING_SIZE_CHANGE_FIX))
+				adapter->rx_ring[i]->ring_flags |=
+					RNP_RING_FLAG_CHANGE_RX_LEN;
+		}
+	}
+
+	/* if we are already in force mode there is no need to force it; otherwise force it now */
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+		hw->ops.set_mac_rx(hw, false);
+		if (hw->ops.driver_status)
+			hw->ops.driver_status(hw, true,
+					      rnpgbe_driver_force_control_phy);
+	}
+
+	rnpgbe_down(adapter);
+	/*
+	 * Setup new Tx resources and free the old Tx resources in that order.
+	 * We can then assign the new resources to the rings via a memcpy.
+	 * The advantage to this approach is that we are guaranteed to still
+	 * have resources even in the case of an allocation failure.
+	 */
+	if (new_tx_count != adapter->tx_ring_item_count) {
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->tx_ring[i],
+			       sizeof(struct rnpgbe_ring));
+
+			temp_ring[i].count = new_tx_count;
+			err = rnpgbe_setup_tx_resources(&temp_ring[i], adapter);
+			if (err) {
+				while (i) {
+					i--;
+					rnpgbe_free_tx_resources(&temp_ring[i]);
+				}
+				goto err_setup;
+			}
+		}
+
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			rnpgbe_free_tx_resources(adapter->tx_ring[i]);
+			memcpy(adapter->tx_ring[i], &temp_ring[i],
+			       sizeof(struct rnpgbe_ring));
+		}
+
+		adapter->tx_ring_item_count = new_tx_count;
+	}
+
+	/* Repeat the process for the Rx rings if needed */
+	if (new_rx_count != adapter->rx_ring_item_count) {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->rx_ring[i],
+			       sizeof(struct rnpgbe_ring));
+			/* setup ring count */
+			if (!(adapter->rx_ring[i]->ring_flags &
+			      RNP_RING_FLAG_DELAY_SETUP_RX_LEN)) {
+				temp_ring[i].count = new_rx_count;
+			} else {
+				/* setup temp count */
+				temp_ring[i].count = temp_ring[i].temp_count;
+				adapter->rx_ring[i]->reset_count = new_rx_count;
+			}
+			err = rnpgbe_setup_rx_resources(&temp_ring[i], adapter);
+			if (err) {
+				while (i) {
+					i--;
+					rnpgbe_free_rx_resources(&temp_ring[i]);
+				}
+				goto err_setup;
+			}
+		}
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			rnpgbe_free_rx_resources(adapter->rx_ring[i]);
+			memcpy(adapter->rx_ring[i], &temp_ring[i],
+			       sizeof(struct rnpgbe_ring));
+		}
+		adapter->rx_ring_item_count = new_rx_count;
+	}
+
+err_setup:
+	rnpgbe_up(adapter);
+	vfree(temp_ring);
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+		if (hw->ops.driver_status)
+			hw->ops.driver_status(hw, false,
+					      rnpgbe_driver_force_control_phy);
+	}
+clear_reset:
+	clear_bit(__RNP_RESETTING, &adapter->state);
+	return err;
+}
+
+int rnpgbe_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	rnpgbe_mbx_get_dump_flags(&adapter->hw);
+
+	dump->flag = adapter->hw.dump.flag;
+	dump->len = adapter->hw.dump.len;
+	dump->version = adapter->hw.dump.version;
+
+	return 0;
+}
+
+int rnpgbe_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+			 void *buffer)
+{
+	int err;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	err = rnpgbe_mbx_get_dump(&adapter->hw, dump->flag, buffer, dump->len);
+	if (err)
+		return err;
+
+	dump->flag = adapter->hw.dump.flag;
+	dump->len = adapter->hw.dump.len;
+	dump->version = adapter->hw.dump.version;
+
+	return 0;
+}
+
+int rnpgbe_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	rnpgbe_mbx_set_dump(&adapter->hw, dump->flag);
+
+	return 0;
+}
+
+int rnpgbe_get_coalesce(struct net_device *netdev,
+			struct ethtool_coalesce *coal,
+			struct kernel_ethtool_coalesce *kernel_coal,
+			struct netlink_ext_ack *extack)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_COALESCE) {
+		coal->use_adaptive_tx_coalesce = 0;
+		coal->tx_coalesce_usecs = adapter->tx_usecs;
+		coal->tx_coalesce_usecs_irq = 0;
+		coal->tx_max_coalesced_frames = adapter->tx_frames;
+		coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+		coal->tx_coalesce_usecs_low = 0;
+		coal->tx_max_coalesced_frames_low = 0;
+		coal->tx_coalesce_usecs_high = 0;
+		coal->tx_max_coalesced_frames_high = 0;
+	} else {
+		coal->use_adaptive_tx_coalesce = 1;
+		coal->tx_coalesce_usecs = adapter->tx_usecs;
+		coal->tx_coalesce_usecs_irq = 0;
+		coal->tx_max_coalesced_frames = adapter->tx_frames;
+		coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+		coal->tx_coalesce_usecs_low = 0;
+		coal->tx_max_coalesced_frames_low = 0;
+		coal->tx_coalesce_usecs_high = 0;
+		coal->tx_max_coalesced_frames_high = 0;
+	}
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE) {
+		coal->use_adaptive_rx_coalesce = 0;
+		coal->rx_coalesce_usecs_irq = 0;
+		coal->rx_coalesce_usecs = adapter->rx_usecs;
+		coal->rx_max_coalesced_frames = adapter->rx_frames;
+		coal->rx_max_coalesced_frames_irq = adapter->napi_budge;
+		coal->rx_coalesce_usecs_low = 0;
+		coal->rx_max_coalesced_frames_low = 0;
+		coal->rx_coalesce_usecs_high = 0;
+		coal->rx_max_coalesced_frames_high = 0;
+	} else {
+		coal->use_adaptive_rx_coalesce = 1;
+		coal->rx_coalesce_usecs_irq = 0;
+		coal->rx_coalesce_usecs = adapter->rx_usecs;
+		coal->rx_max_coalesced_frames = adapter->rx_frames;
+		coal->rx_max_coalesced_frames_irq = adapter->napi_budge;
+		coal->rx_coalesce_usecs_low = 0;
+		coal->rx_max_coalesced_frames_low = 0;
+		coal->rx_coalesce_usecs_high = 0;
+		coal->rx_max_coalesced_frames_high = 0;
+	}
+
+	/* these are not supported */
+	coal->pkt_rate_low = 0;
+	coal->pkt_rate_high = 0;
+	coal->rate_sample_interval = 0;
+
+	return 0;
+}
+
+int rnpgbe_set_coalesce(struct net_device *netdev,
+			struct ethtool_coalesce *ec,
+			struct kernel_ethtool_coalesce *kernel_coal,
+			struct netlink_ext_ack *extack)
+{
+	int reset = 0;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 value;
+
+	if (!ec->use_adaptive_tx_coalesce)
+		adapter->priv_flags |= RNP_PRIV_FLAG_TX_COALESCE;
+	else
+		adapter->priv_flags &= ~RNP_PRIV_FLAG_TX_COALESCE;
+
+	if (!ec->use_adaptive_rx_coalesce)
+		adapter->priv_flags |= RNP_PRIV_FLAG_RX_COALESCE;
+	else
+		adapter->priv_flags &= ~RNP_PRIV_FLAG_RX_COALESCE;
+
+	if ((ec->tx_max_coalesced_frames_irq < RNP_MIN_TX_WORK) ||
+	    (ec->tx_max_coalesced_frames_irq > RNP_MAX_TX_WORK))
+		return -EINVAL;
+	/* check coalesce frame irq */
+	value = clamp_t(u32, ec->tx_max_coalesced_frames_irq, RNP_MIN_TX_WORK,
+			RNP_MAX_TX_WORK);
+	value = ALIGN(value, RNP_WORK_ALIGN);
+
+	if (adapter->tx_work_limit != value) {
+		reset = 1;
+		adapter->tx_work_limit = value;
+	}
+	/* check value */
+	if ((ec->tx_max_coalesced_frames < RNP_MIN_TX_FRAME) ||
+	    (ec->tx_max_coalesced_frames > RNP_MAX_TX_FRAME))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->tx_max_coalesced_frames, RNP_MIN_TX_FRAME,
+			RNP_MAX_TX_FRAME);
+	if (adapter->tx_frames != value) {
+		reset = 1;
+		adapter->tx_frames = value;
+	}
+
+	if ((ec->tx_coalesce_usecs < RNP_MIN_TX_USEC) ||
+	    (ec->tx_coalesce_usecs > RNP_MAX_TX_USEC))
+		return -EINVAL;
+	/* check value */
+	value = clamp_t(u32, ec->tx_coalesce_usecs, RNP_MIN_TX_USEC,
+			RNP_MAX_TX_USEC);
+	if (adapter->tx_usecs != value) {
+		reset = 1;
+		adapter->tx_usecs = value;
+	}
+
+	if ((ec->rx_max_coalesced_frames_irq < RNP_MIN_RX_WORK) ||
+	    (ec->rx_max_coalesced_frames_irq > RNP_MAX_RX_WORK))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->rx_max_coalesced_frames_irq, RNP_MIN_RX_WORK,
+			RNP_MAX_RX_WORK);
+	value = ALIGN(value, RNP_WORK_ALIGN);
+
+	if (adapter->napi_budge != value) {
+		reset = 1;
+		adapter->napi_budge = value;
+	}
+
+	if ((ec->rx_max_coalesced_frames < RNP_MIN_RX_FRAME) ||
+	    (ec->rx_max_coalesced_frames > RNP_MAX_RX_FRAME))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->rx_max_coalesced_frames, RNP_MIN_RX_FRAME,
+			RNP_MAX_RX_FRAME);
+	if (adapter->rx_frames != value) {
+		reset = 1;
+		adapter->rx_frames = value;
+	}
+
+	if ((ec->rx_coalesce_usecs < RNP_MIN_RX_USEC) ||
+	    (ec->rx_coalesce_usecs > RNP_MAX_RX_USEC))
+		return -EINVAL;
+	/* check value */
+	value = clamp_t(u32, ec->rx_coalesce_usecs, RNP_MIN_RX_USEC,
+			RNP_MAX_RX_USEC);
+
+	if (adapter->rx_usecs != value) {
+		reset = 1;
+		adapter->rx_usecs = value;
+	}
+	/* other setup is not supported */
+	if ((ec->pkt_rate_low) || (ec->pkt_rate_high) ||
+	    (ec->rx_coalesce_usecs_low) || (ec->rx_max_coalesced_frames_low) ||
+	    (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) ||
+	    (ec->rx_coalesce_usecs_high) ||
+	    (ec->rx_max_coalesced_frames_high) ||
+	    (ec->tx_coalesce_usecs_high) ||
+	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval) ||
+	    (ec->tx_coalesce_usecs_irq) || (ec->rx_coalesce_usecs_irq))
+		return -EINVAL;
+
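+	/* if anything changed, re-run setup_tc so the rings pick up the
+	 * new coalesce values
+	 */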
+	if (reset)
+		return rnpgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+
+	return 0;
+}
+
+static int rnpgbe_get_rss_hash_opts(struct rnpgbe_adapter *adapter,
+				    struct ethtool_rxnfc *cmd)
+{
+	cmd->data = 0;
+
+	/* Report default options for RSS on rnp */
+	switch (cmd->flow_type) {
+	case TCP_V4_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case UDP_V4_FLOW:
+	case SCTP_V4_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case IPV4_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	case TCP_V6_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case UDP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+		fallthrough;
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case IPV6_FLOW:
+		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void dump_fsp(struct ethtool_rx_flow_spec *fsp)
+{
+	int i;
+
+	dbg(" fsp cookie is %llx\n", fsp->ring_cookie);
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case ETHER_FLOW:
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("src 0x%02x\n", fsp->h_u.ether_spec.h_source[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("dst 0x%02x\n", fsp->h_u.ether_spec.h_dest[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("src mask 0x%02x\n",
+			    fsp->m_u.ether_spec.h_source[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("dst mask 0x%02x\n", fsp->m_u.ether_spec.h_dest[i]);
+
+		dbg("proto type is %x\n", fsp->h_u.ether_spec.h_proto);
+
+		break;
+
+	default:
+		dbg("flow type is %x\n", fsp->flow_type);
+		dbg("l2 prot is %x\n", fsp->h_u.ether_spec.h_proto);
+		dbg("ip4 src ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4src);
+		dbg("ip4 src ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4src);
+
+		dbg("ip4 dst ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4dst);
+		dbg("ip4 dst ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4dst);
+
+		dbg("ip4 src port is %x\n", fsp->h_u.tcp_ip4_spec.psrc);
+		dbg("ip4 src port mask is %x\n", fsp->m_u.tcp_ip4_spec.psrc);
+
+		dbg("ip4 dst port is %x\n", fsp->h_u.tcp_ip4_spec.pdst);
+		dbg("ip4 dst port mask is %x\n", fsp->m_u.tcp_ip4_spec.pdst);
+
+		dbg("proto is %x\n", fsp->h_u.usr_ip4_spec.proto);
+		break;
+	}
+}
+
+static int rnpgbe_get_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+					 struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *rule = NULL;
+
+	/* report total rule count */
+	cmd->data = adapter->fdir_pballoc;
+
+	hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list,
+				   fdir_node)
+		if (fsp->location <= rule->sw_idx)
+			break;
+
+	if (!rule || fsp->location != rule->sw_idx)
+		return -EINVAL;
+
+	/* fill out the flow spec entry */
+
+	/* set flow type field */
+	switch (rule->filter.formatted.flow_type) {
+	case RNP_ATR_FLOW_TYPE_TCPV4:
+		fsp->flow_type = TCP_V4_FLOW;
+		break;
+	case RNP_ATR_FLOW_TYPE_UDPV4:
+		fsp->flow_type = UDP_V4_FLOW;
+		break;
+	case RNP_ATR_FLOW_TYPE_SCTPV4:
+		fsp->flow_type = SCTP_V4_FLOW;
+		break;
+	case RNP_ATR_FLOW_TYPE_IPV4:
+		fsp->flow_type = IP_USER_FLOW;
+		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+		if (adapter->fdir_mode == fdir_mode_tuple5) {
+			fsp->h_u.usr_ip4_spec.proto =
+				rule->filter.formatted.inner_mac[0];
+			fsp->m_u.usr_ip4_spec.proto = 0xff;
+		} else {
+			fsp->h_u.usr_ip4_spec.proto =
+				rule->filter.formatted.inner_mac[0] &
+				rule->filter.formatted.inner_mac_mask[0];
+			fsp->m_u.usr_ip4_spec.proto =
+				rule->filter.formatted.inner_mac_mask[0];
+		}
+		break;
+	case RNP_ATR_FLOW_TYPE_ETHER:
+		fsp->flow_type = ETHER_FLOW;
+		/* support proto and mask only in this mode */
+		fsp->h_u.ether_spec.h_proto = rule->filter.layer2_formate.proto;
+		fsp->m_u.ether_spec.h_proto = 0xffff;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (rule->filter.formatted.flow_type != RNP_ATR_FLOW_TYPE_ETHER) {
+		/* masks are not supported in tuple 5 mode */
+		if (adapter->fdir_mode == fdir_mode_tuple5) {
+			fsp->h_u.tcp_ip4_spec.psrc =
+				rule->filter.formatted.src_port;
+			fsp->h_u.tcp_ip4_spec.pdst =
+				rule->filter.formatted.dst_port;
+			fsp->h_u.tcp_ip4_spec.ip4src =
+				rule->filter.formatted.src_ip[0];
+			fsp->h_u.tcp_ip4_spec.ip4dst =
+				rule->filter.formatted.dst_ip[0];
+			fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+			fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+			fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+			fsp->m_u.tcp_ip4_spec.ip4dst = 0xffffffff;
+		} else {
+			fsp->h_u.tcp_ip4_spec.psrc =
+				rule->filter.formatted.src_port &
+				rule->filter.formatted.src_port_mask;
+			fsp->m_u.tcp_ip4_spec.psrc =
+				rule->filter.formatted.src_port_mask;
+			fsp->h_u.tcp_ip4_spec.pdst =
+				rule->filter.formatted.dst_port &
+				rule->filter.formatted.dst_port_mask;
+			fsp->m_u.tcp_ip4_spec.pdst =
+				rule->filter.formatted.dst_port_mask;
+
+			fsp->h_u.tcp_ip4_spec.ip4src =
+				rule->filter.formatted.src_ip[0] &
+				rule->filter.formatted.src_ip_mask[0];
+			fsp->m_u.tcp_ip4_spec.ip4src =
+				rule->filter.formatted.src_ip_mask[0];
+
+			fsp->h_u.tcp_ip4_spec.ip4dst =
+				rule->filter.formatted.dst_ip[0] &
+				rule->filter.formatted.dst_ip_mask[0];
+			fsp->m_u.tcp_ip4_spec.ip4dst =
+				rule->filter.formatted.dst_ip_mask[0];
+		}
+	}
+
+	/* record action */
+	if (rule->action == RNP_FDIR_DROP_QUEUE)
+		fsp->ring_cookie = RX_CLS_FLOW_DISC;
+	else {
+		if (rule->vf_num != 0) {
+			fsp->ring_cookie =
+				((u64)rule->vf_num << 32) | (rule->action);
+		} else {
+			fsp->ring_cookie = rule->action;
+		}
+	}
+
+	return 0;
+}
+
+static int rnpgbe_get_ethtool_fdir_all(struct rnpgbe_adapter *adapter,
+				       struct ethtool_rxnfc *cmd,
+				       u32 *rule_locs)
+{
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *rule;
+	int cnt = 0;
+
+	/* report total rule count */
+	cmd->data = adapter->fdir_pballoc;
+
+	hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list,
+				   fdir_node) {
+		if (cnt == cmd->rule_cnt)
+			return -EMSGSIZE;
+		rule_locs[cnt] = rule->sw_idx;
+		cnt++;
+	}
+
+	cmd->rule_cnt = cnt;
+
+	return 0;
+}
+
+int rnpgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+		     u32 *rule_locs)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+			/* fixed to sriov_ring_limit when sriov is on */
+			cmd->data = hw->sriov_ring_limit;
+		} else {
+			cmd->data = adapter->num_rx_queues;
+		}
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = adapter->fdir_filter_count;
+		ret = 0;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		ret = rnpgbe_get_ethtool_fdir_entry(adapter, cmd);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		ret = rnpgbe_get_ethtool_fdir_all(adapter, cmd,
+						  (u32 *)rule_locs);
+		break;
+	case ETHTOOL_GRXFH:
+		ret = rnpgbe_get_rss_hash_opts(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+#define UDP_RSS_FLAGS                                                          \
+	(RNP_FLAG2_RSS_FIELD_IPV4_UDP | RNP_FLAG2_RSS_FIELD_IPV6_UDP)
+static int rnpgbe_set_rss_hash_opt(struct rnpgbe_adapter *adapter,
+				   struct ethtool_rxnfc *nfc)
+{
+	/*
+	 * RSS does not support anything other than hashing
+	 * to queues on src and dst IPs and ports
+	 */
+	if (nfc->data &
+	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	switch (nfc->flow_type) {
+	case TCP_V4_FLOW:
+	case TCP_V6_FLOW:
+	case UDP_V4_FLOW:
+	case UDP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) ||
+		    !(nfc->data & RXH_L4_B_0_1) || !(nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	case AH_ESP_V4_FLOW:
+	case AH_V4_FLOW:
+	case ESP_V4_FLOW:
+	case SCTP_V4_FLOW:
+	case AH_ESP_V6_FLOW:
+	case AH_V6_FLOW:
+	case ESP_V6_FLOW:
+	case SCTP_V6_FLOW:
+		if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) ||
+		    (nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
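+/* translate an ethtool flow spec into the driver flow type;
+ * returns 1 if the spec can be handled, 0 otherwise
+ */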
+static int rnpgbe_flowspec_to_flow_type(struct rnpgbe_adapter *adapter,
+					struct ethtool_rx_flow_spec *fsp,
+					uint8_t *flow_type,
+					struct rnpgbe_fdir_filter *input)
+{
+	int i;
+	int ret = 1;
+	/* flow_ext is not supported */
+	if (fsp->flow_type & FLOW_EXT)
+		return 0;
+
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	/* TODO: ipv6 is not handled */
+	case TCP_V4_FLOW:
+		*flow_type = RNP_ATR_FLOW_TYPE_TCPV4;
+		break;
+	case UDP_V4_FLOW:
+		*flow_type = RNP_ATR_FLOW_TYPE_UDPV4;
+		break;
+	case SCTP_V4_FLOW:
+		*flow_type = RNP_ATR_FLOW_TYPE_SCTPV4;
+		break;
+	case ETHER_FLOW:
+		/* layer 2 flow */
+		*flow_type = RNP_ATR_FLOW_TYPE_ETHER;
+		input->filter.layer2_formate.proto =
+			fsp->h_u.ether_spec.h_proto;
+		break;
+	case IP_USER_FLOW:
+		switch (fsp->h_u.usr_ip4_spec.proto) {
+		case IPPROTO_TCP:
+			*flow_type = RNP_ATR_FLOW_TYPE_TCPV4;
+			break;
+		case IPPROTO_UDP:
+			*flow_type = RNP_ATR_FLOW_TYPE_UDPV4;
+			break;
+		case IPPROTO_SCTP:
+			*flow_type = RNP_ATR_FLOW_TYPE_SCTPV4;
+			break;
+		case 0:
+			/* plain ipv4 with neither src nor dst set */
+			if (!(fsp->h_u.tcp_ip4_spec.ip4src) &&
+			    (!(fsp->h_u.tcp_ip4_spec.ip4dst))) {
+				/* no l4 proto given, use layer2 */
+				*flow_type = RNP_ATR_FLOW_TYPE_ETHER;
+				input->filter.layer2_formate.proto =
+					htons(0x0800);
+			} else {
+				/* only src or dst may be set */
+				*flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+			}
+			break;
+		default:
+			/* other unknown l4 proto ip */
+			*flow_type = RNP_ATR_FLOW_TYPE_IPV4;
+		}
+		break;
+	default:
+		return 0;
+	}
+	/* layer2 flow */
+	if (*flow_type == RNP_ATR_FLOW_TYPE_ETHER) {
+		if (adapter->layer2_count < 0) {
+			e_err(drv, "layer2 count full\n");
+			ret = 0;
+		}
+		/* src/dst mac addresses and their masks must be all zeros */
+		for (i = 0; i < ETH_ALEN; i++) {
+			if (fsp->h_u.ether_spec.h_source[i] != 0)
+				ret = 0;
+
+			if (fsp->h_u.ether_spec.h_dest[i] != 0)
+				ret = 0;
+
+			if (fsp->m_u.ether_spec.h_source[i] != 0)
+				ret = 0;
+
+			if (fsp->m_u.ether_spec.h_dest[i] != 0)
+				ret = 0;
+		}
+		/* setting vlan ether types is not supported */
+		if (input->filter.layer2_formate.proto == htons(ETH_P_8021Q))
+			ret = 0;
+		if (input->filter.layer2_formate.proto == htons(0x88a8))
+			ret = 0;
+		if (input->filter.layer2_formate.proto == htons(0x9100))
+			ret = 0;
+		if (input->filter.layer2_formate.proto == htons(0x9200))
+			ret = 0;
+
+	} else if (*flow_type == RNP_ATR_FLOW_TYPE_IPV4) {
+		if (adapter->fdir_mode == fdir_mode_tuple5) {
+			if (adapter->tuple_5_count < 0) {
+				e_err(drv, "tuple 5 count full\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.usr_ip4_spec.ip4src != 0) &&
+			    (fsp->m_u.usr_ip4_spec.ip4src != 0xffffffff)) {
+				e_err(drv, "ip src mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.usr_ip4_spec.ip4dst != 0) &&
+			    (fsp->m_u.usr_ip4_spec.ip4dst != 0xffffffff)) {
+				e_err(drv, "ip dst mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.usr_ip4_spec.proto != 0) &&
+			    (fsp->m_u.usr_ip4_spec.proto != 0xff)) {
+				e_err(drv, "ip l4 proto mask error\n");
+				ret = 0;
+			}
+		} else {
+			if (adapter->tuple_5_count < 0) {
+				e_err(drv, "tcam count full\n");
+				ret = 0;
+			}
+			/* tcam mode can support mask */
+		}
+		/* not support l4_4_bytes */
+		if ((fsp->h_u.usr_ip4_spec.l4_4_bytes != 0)) {
+			e_err(drv, "ip l4_4_bytes error\n");
+			ret = 0;
+		}
+	} else {
+		if (adapter->fdir_mode == fdir_mode_tuple5) {
+			/* should check mask all ff */
+			if (adapter->tuple_5_count < 0) {
+				e_err(drv, "tuple 5 count full\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.tcp_ip4_spec.ip4src != 0) &&
+			    (fsp->m_u.tcp_ip4_spec.ip4src != 0xffffffff)) {
+				e_err(drv, "src mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.tcp_ip4_spec.ip4dst != 0) &&
+			    (fsp->m_u.tcp_ip4_spec.ip4dst != 0xffffffff)) {
+				e_err(drv, "dst mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.tcp_ip4_spec.psrc != 0) &&
+			    (fsp->m_u.tcp_ip4_spec.psrc != 0xffff)) {
+				e_err(drv, "src port mask error\n");
+				ret = 0;
+			}
+			if ((fsp->h_u.tcp_ip4_spec.pdst != 0) &&
+			    (fsp->m_u.tcp_ip4_spec.pdst != 0xffff)) {
+				e_err(drv, "dst port mask error\n");
+				ret = 0;
+			}
+		} else {
+			if (adapter->tuple_5_count < 0) {
+				e_err(drv, "tcam count full\n");
+				ret = 0;
+			}
+		}
+		/* tos matching is not supported */
+		if (fsp->h_u.tcp_ip4_spec.tos != 0) {
+			e_err(drv, "tos error\n");
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+int rnpgbe_update_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+				     struct rnpgbe_fdir_filter *input,
+				     u16 sw_idx)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *rule, *parent;
+	bool deleted = false;
+	u16 hw_idx_layer2 = 0;
+	u16 hw_idx_tuple5 = 0;
+
+	s32 err;
+
+	parent = NULL;
+	rule = NULL;
+
+	hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list,
+				   fdir_node) {
+		/* hash found, or no matching entry */
+		if (rule->sw_idx >= sw_idx)
+			break;
+
+		parent = rule;
+	}
+
+	/* if there is an old rule occupying our place remove it */
+	if (rule && (rule->sw_idx == sw_idx)) {
+		/* only clear hw enable bits */
+		/* hardware filters are only configured when interface is up,
+		 * and we should not issue filter commands while the interface
+		 * is down
+		 */
+		if (netif_running(adapter->netdev) && (!input)) {
+			err = rnpgbe_fdir_erase_perfect_filter(
+				adapter->fdir_mode, hw, &rule->filter,
+				rule->hw_idx);
+			if (err)
+				return -EINVAL;
+		}
+
+		adapter->fdir_filter_count--;
+		if (rule->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER)
+			adapter->layer2_count++;
+		else
+			adapter->tuple_5_count++;
+
+		hlist_del(&rule->fdir_node);
+		kfree(rule);
+		deleted = true;
+	}
+
+	/* If we weren't given an input, then this was a request to delete a
+	 * filter. We should return -EINVAL if the filter wasn't found, but
+	 * return 0 if the rule was successfully deleted.
+	 */
+	if (!input)
+		return deleted ? 0 : -EINVAL;
+
+	/* initialize node and set software index */
+	INIT_HLIST_NODE(&input->fdir_node);
+
+	/* add filter to the list */
+	if (parent)
+		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
+	else
+		hlist_add_head(&input->fdir_node, &adapter->fdir_filter_list);
+
+	/* we must re-program everything: first erase all tcam and layer2
+	 * rules, then write the remaining rules back in order
+	 */
+
+	if (adapter->fdir_mode != fdir_mode_tcam)
+		hw->ops.clr_all_layer2_remapping(hw);
+	else
+		hw->ops.clr_all_tuple5_remapping(hw);
+
+	/* setup hw */
+	hlist_for_each_entry_safe(rule, node2, &adapter->fdir_filter_list,
+				  fdir_node) {
+		if (netif_running(adapter->netdev)) {
+			/* hw_idx */
+			if (rule->filter.formatted.flow_type ==
+			    RNP_ATR_FLOW_TYPE_ETHER)
+				rule->hw_idx = hw_idx_layer2++;
+			else
+				rule->hw_idx = hw_idx_tuple5++;
+
+			if ((!rule->vf_num) &&
+			    (rule->action != ACTION_TO_MPE)) {
+				int idx = rule->action;
+
+				err = rnpgbe_fdir_write_perfect_filter(
+					adapter->fdir_mode, hw, &rule->filter,
+					rule->hw_idx,
+					(rule->action == RNP_FDIR_DROP_QUEUE) ?
+						RNP_FDIR_DROP_QUEUE :
+						adapter->rx_ring[idx]
+							->rnpgbe_queue_idx,
+					(adapter->priv_flags &
+					 RNP_PRIV_FLAG_REMAP_PRIO) ?
+						true :
+						false);
+			} else {
+				err = rnpgbe_fdir_write_perfect_filter(
+					adapter->fdir_mode, hw, &rule->filter,
+					rule->hw_idx,
+					(rule->action == RNP_FDIR_DROP_QUEUE) ?
+						RNP_FDIR_DROP_QUEUE :
+						rule->action,
+					(adapter->priv_flags &
+					 RNP_PRIV_FLAG_REMAP_PRIO) ?
+						true :
+						false);
+			}
+			if (err)
+				return -EINVAL;
+		}
+	}
+
+	/* update counts */
+	adapter->fdir_filter_count++;
+	if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) {
+		/* used to determine hw reg offset */
+		adapter->layer2_count--;
+	} else {
+		adapter->tuple_5_count--;
+	}
+	return 0;
+}
+
+/* used to debug flow_spec info */
+static void print_fsp(struct ethtool_rx_flow_spec *fsp)
+{
+	int i;
+
+	switch (fsp->flow_type & ~FLOW_EXT) {
+	case ETHER_FLOW:
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("src 0x%02x\n", fsp->h_u.ether_spec.h_source[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("dst 0x%02x\n", fsp->h_u.ether_spec.h_dest[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("src mask 0x%02x\n",
+			    fsp->m_u.ether_spec.h_source[i]);
+		for (i = 0; i < ETH_ALEN; i++)
+			dbg("dst mask 0x%02x\n", fsp->m_u.ether_spec.h_dest[i]);
+
+		dbg("proto type is %x\n", fsp->h_u.ether_spec.h_proto);
+
+		break;
+
+	default:
+		dbg("flow type is %x\n", fsp->flow_type);
+
+		dbg("ip4 src ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4src);
+		dbg("ip4 src ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4src);
+
+		dbg("ip4 dst ip is %x\n", fsp->h_u.tcp_ip4_spec.ip4dst);
+		dbg("ip4 dst ip mask is %x\n", fsp->m_u.tcp_ip4_spec.ip4dst);
+
+		dbg("ip4 src port is %x\n", fsp->h_u.tcp_ip4_spec.psrc);
+		dbg("ip4 src port mask is %x\n", fsp->m_u.tcp_ip4_spec.psrc);
+
+		dbg("ip4 dst port is %x\n", fsp->h_u.tcp_ip4_spec.pdst);
+		dbg("ip4 dst port mask is %x\n", fsp->m_u.tcp_ip4_spec.pdst);
+
+		dbg("l4 proto type is %x\n", fsp->h_u.usr_ip4_spec.proto);
+		break;
+	}
+}
+
+static int rnpgbe_add_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+					 struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct rnpgbe_fdir_filter *input;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	/* we don't support mask */
+	int err;
+
+	u32 ring_cookie_high = fsp->ring_cookie >> 32;
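+	/* ethtool packs the target vf in the upper 32 bits of ring_cookie
+	 * and the queue index in the lower 32 bits
+	 */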
+
+	if (!(adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE))
+		return -EOPNOTSUPP;
+
+	/*
+	 * Don't allow programming if the action is a queue greater than
+	 * the number of online Rx queues.
+	 */
+	/* if sriov is on, a vf plus queue pair is allowed; the vf index
+	 * must not exceed num_vfs
+	 */
+	print_fsp(fsp);
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+		    (((ring_cookie_high & 0xff) > adapter->num_vfs) ||
+		     ((fsp->ring_cookie & (u64)0xffffffff) >=
+		      hw->sriov_ring_limit)))
+			return -EINVAL;
+
+	} else {
+		if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+		    (fsp->ring_cookie >= adapter->num_rx_queues)) {
+			/* ACTION_TO_MPE to mpe special */
+			if (fsp->ring_cookie != ACTION_TO_MPE)
+				return -EINVAL;
+		}
+	}
+
+	/* Don't allow indexes to exist outside of available space */
+	if (fsp->location >= (adapter->fdir_pballoc)) {
+		e_err(drv, "Location out of range\n");
+		return -EINVAL;
+	}
+
+	input = kzalloc(sizeof(*input), GFP_ATOMIC);
+	if (!input)
+		return -ENOMEM;
+
+	/* set SW index */
+	input->sw_idx = fsp->location;
+
+	/* record flow type */
+	if (!rnpgbe_flowspec_to_flow_type(
+		    adapter, fsp, &input->filter.formatted.flow_type, input)) {
+		e_err(drv, "Unrecognized flow type\n");
+		goto err_out;
+	}
+
+	if (input->filter.formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) {
+		/* used to determine hw reg offset */
+	} else if (input->filter.formatted.flow_type ==
+		   RNP_ATR_FLOW_TYPE_IPV4) {
+		/* Copy input into formatted structures */
+		input->filter.formatted.src_ip[0] =
+			fsp->h_u.usr_ip4_spec.ip4src;
+		input->filter.formatted.src_ip_mask[0] =
+			fsp->m_u.usr_ip4_spec.ip4src;
+		input->filter.formatted.dst_ip[0] =
+			fsp->h_u.usr_ip4_spec.ip4dst;
+		input->filter.formatted.dst_ip_mask[0] =
+			fsp->m_u.usr_ip4_spec.ip4dst;
+		input->filter.formatted.src_port = 0;
+		input->filter.formatted.src_port_mask = 0xffff;
+		input->filter.formatted.dst_port = 0;
+		input->filter.formatted.dst_port_mask = 0xffff;
+		input->filter.formatted.inner_mac[0] =
+			fsp->h_u.usr_ip4_spec.proto;
+		input->filter.formatted.inner_mac_mask[0] =
+			fsp->m_u.usr_ip4_spec.proto;
+	} else { /* tcp or udp or sctp*/
+		/* Copy input into formatted structures */
+		input->filter.formatted.src_ip[0] =
+			fsp->h_u.tcp_ip4_spec.ip4src;
+		input->filter.formatted.src_ip_mask[0] =
+			fsp->m_u.usr_ip4_spec.ip4src;
+		input->filter.formatted.dst_ip[0] =
+			fsp->h_u.tcp_ip4_spec.ip4dst;
+		input->filter.formatted.dst_ip_mask[0] =
+			fsp->m_u.usr_ip4_spec.ip4dst;
+		input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+		input->filter.formatted.src_port_mask =
+			fsp->m_u.tcp_ip4_spec.psrc;
+		input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+		input->filter.formatted.dst_port_mask =
+			fsp->m_u.tcp_ip4_spec.pdst;
+	}
+
+	/* determine if we need to drop or route the packet */
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+		input->action = RNP_FDIR_DROP_QUEUE;
+	} else {
+		input->vf_num = (fsp->ring_cookie >> 32) & 0xff;
+		if (input->vf_num) {
+			/* in vf mode input->action is the real queue number */
+			input->action =
+				hw->sriov_ring_limit *
+					(((fsp->ring_cookie >> 32) & 0xff) -
+					 1) +
+				(fsp->ring_cookie & 0xffffffff);
+		} else {
+			input->action = fsp->ring_cookie;
+		}
+	}
+
+	spin_lock(&adapter->fdir_perfect_lock);
+	err = rnpgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+	spin_unlock(&adapter->fdir_perfect_lock);
+
+	return err;
+err_out:
+	kfree(input);
+	return -EINVAL;
+}
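As a side note, the vf/queue split of the 64-bit ring_cookie used above can be checked in isolation; the sketch below mirrors the arithmetic only (the function and variable names are illustrative, sriov_ring_limit is a made-up value, nothing here is part of the driver):

#include <stdint.h>
#include <stdio.h>

/* mirrors the ring_cookie handling in rnpgbe_add_ethtool_fdir_entry above */
static uint32_t cookie_to_action(uint64_t ring_cookie, uint32_t sriov_ring_limit)
{
	uint32_t vf = (ring_cookie >> 32) & 0xff;	/* vf index, 0 means pf */
	uint32_t queue = ring_cookie & 0xffffffff;	/* queue inside that vf */

	if (vf)
		return sriov_ring_limit * (vf - 1) + queue;
	return queue;
}

int main(void)
{
	/* vf 2, queue 1, with 2 rings per vf -> hw queue 3 */
	uint64_t cookie = ((uint64_t)2 << 32) | 1;

	printf("hw queue = %u\n", cookie_to_action(cookie, 2));
	return 0;
}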
+
+static int rnpgbe_del_ethtool_fdir_entry(struct rnpgbe_adapter *adapter,
+					 struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fsp =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	int err;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+	err = rnpgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
+	spin_unlock(&adapter->fdir_perfect_lock);
+
+	return err;
+}
+
+int rnpgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	int ret = -EOPNOTSUPP;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		ret = rnpgbe_add_ethtool_fdir_entry(adapter, cmd);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		ret = rnpgbe_del_ethtool_fdir_entry(adapter, cmd);
+		break;
+	case ETHTOOL_SRXFH:
+		ret = rnpgbe_set_rss_hash_opt(adapter, cmd);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+u32 rnpgbe_rss_indir_size(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	return rnpgbe_rss_indir_tbl_entries(adapter);
+}
+
+u32 rnpgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+	return RNP_RSS_KEY_SIZE;
+}
+
+void rnpgbe_get_reta(struct rnpgbe_adapter *adapter, u32 *indir)
+{
+	int i, reta_size = rnpgbe_rss_indir_tbl_entries(adapter);
+	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
+
+	for (i = 0; i < reta_size; i++)
+		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
+}
+
+int rnpgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (hfunc) {
+		switch (adapter->rss_func_mode) {
+		case rss_func_top:
+			*hfunc = ETH_RSS_HASH_TOP;
+			break;
+		case rss_func_xor:
+			*hfunc = ETH_RSS_HASH_XOR;
+			break;
+		case rss_func_order:
+			*hfunc = ETH_RSS_HASH_TOP;
+			break;
+		}
+	}
+
+	if (indir)
+		rnpgbe_get_reta(adapter, indir);
+
+	if (key)
+		memcpy(key, adapter->rss_key, rnpgbe_get_rxfh_key_size(netdev));
+
+	return 0;
+}
+
+static int check_fw_type(struct rnpgbe_hw *hw, const u8 *data)
+{
+	u32 device_id;
+	int ret = 0;
+
+	device_id = *((u16 *)data + 30);
+
+	/* if there is no device_id, skip the check */
+	if ((device_id == 0) || (device_id == 0xffff))
+		return 0;
+
+	switch (hw->hw_type) {
+	case rnpgbe_hw_n500:
+		if (device_id != 0x8308)
+			ret = 1;
+		break;
+	case rnpgbe_hw_n210:
+		if (device_id != 0x8208)
+			ret = 1;
+		break;
+	default:
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static int rnpgbe_flash_firmware(struct rnpgbe_adapter *adapter, int region,
+				 const u8 *data, int bytes)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if ((hw->hw_type == rnpgbe_hw_n500) ||
+	    (hw->hw_type == rnpgbe_hw_n210)) {
+		switch (region) {
+		case PART_FW:
+			if ((*((u32 *)(data)) != 0xa55aa55a) ||
+			    check_fw_type(hw, data))
+				return -EINVAL;
+			break;
+		case PART_CFG:
+			if (*((u32 *)(data)) != 0x00010cf9)
+				return -EINVAL;
+			break;
+		case PART_MACSN:
+			break;
+		case PART_PCSPHY:
+			if (*((u16 *)(data)) != 0x081d)
+				return -EINVAL;
+			break;
+		case PART_PXE:
+			if (*((u16 *)(data)) != 0xaa55)
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+		return rnp500_fw_update(hw, region, data, bytes);
+
+	} else {
+		switch (region) {
+		case PART_FW:
+			if (*((u32 *)(data + 28)) != 0xA51BBEAF)
+				return -EINVAL;
+			break;
+		case PART_CFG:
+			if (*((u32 *)(data)) != 0x00010cf9)
+				return -EINVAL;
+			break;
+		case PART_MACSN:
+			break;
+		case PART_PCSPHY:
+			if (*((u16 *)(data)) != 0x081d)
+				return -EINVAL;
+			break;
+		case PART_PXE:
+			if (*((u16 *)(data)) != 0xaa55)
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+		return rnpgbe_fw_update(hw, region, data, bytes);
+	}
+}
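For reference, the per-region header magics checked above can be expressed as a tiny standalone helper; the magic constants are copied from the code, while the EX_PART_* ids and the memcpy-based read are illustrative only (the driver dereferences the buffer directly and defines its own PART_* values elsewhere):

#include <stdint.h>
#include <string.h>

enum { EX_PART_FW, EX_PART_CFG, EX_PART_PCSPHY, EX_PART_PXE };

/* returns 0 when the image starts with the expected magic for the region */
static int check_region_magic(int region, const uint8_t *data)
{
	uint32_t m32;
	uint16_t m16;

	memcpy(&m32, data, sizeof(m32));
	memcpy(&m16, data, sizeof(m16));

	switch (region) {
	case EX_PART_FW:
		return m32 == 0xa55aa55a ? 0 : -1;	/* n500/n210 fw image */
	case EX_PART_CFG:
		return m32 == 0x00010cf9 ? 0 : -1;
	case EX_PART_PCSPHY:
		return m16 == 0x081d ? 0 : -1;
	case EX_PART_PXE:
		return m16 == 0xaa55 ? 0 : -1;
	default:
		return -1;
	}
}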
+
+static int rnpgbe_flash_firmware_from_file(struct net_device *dev,
+					   struct rnpgbe_adapter *adapter,
+					   int region, const char *filename)
+{
+	const struct firmware *fw;
+	int rc;
+
+	rc = request_firmware(&fw, filename, &dev->dev);
+	if (rc != 0) {
+		netdev_err(dev, "Error %d requesting firmware file: %s\n", rc,
+			   filename);
+		return rc;
+	}
+
+	rc = rnpgbe_flash_firmware(adapter, region, fw->data, fw->size);
+	release_firmware(fw);
+	return rc;
+}
+
+int rnpgbe_flash_device(struct net_device *dev, struct ethtool_flash *flash)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+
+	if (IS_VF(adapter->hw.pfvfnum)) {
+		netdev_err(dev,
+			   "flashdev not supported from a virtual function\n");
+		return -EINVAL;
+	}
+
+	return rnpgbe_flash_firmware_from_file(dev, adapter, flash->region,
+					       flash->data);
+}
+
+static int rnpgbe_rss_indir_tbl_max(struct rnpgbe_adapter *adapter)
+{
+	if (adapter->hw.rss_type == rnpgbe_rss_uv3p)
+		return 8;
+	else if (adapter->hw.rss_type == rnpgbe_rss_uv440)
+		return 128;
+	else if (adapter->hw.rss_type == rnpgbe_rss_n10)
+		return 128;
+	else if (adapter->hw.rss_type == rnpgbe_rss_n500)
+		return 128;
+	else
+		return 128;
+}
+
+int rnpgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
+		    const u8 hfunc)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i;
+	u32 reta_entries = rnpgbe_rss_indir_tbl_entries(adapter);
+
+	if (hfunc) {
+		if (!hw->ops.set_rss_hfunc)
+			return -EINVAL;
+		if (hw->ops.set_rss_hfunc(hw, hfunc))
+			return -EINVAL;
+	} else {
+		if (hw->ops.set_rss_hfunc)
+			hw->ops.set_rss_hfunc(hw, hfunc);
+	}
+
+	if ((indir) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return -EINVAL;
+	/* Fill out the redirection table */
+	if (indir) {
+		int max_queues = min_t(int, adapter->num_rx_queues,
+				       rnpgbe_rss_indir_tbl_max(adapter));
+
+		/* Allow at most 1 queue w/ SR-IOV. */
+		if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+		    (max_queues > 1))
+			max_queues = 1;
+
+		/* Verify user input. */
+		for (i = 0; i < reta_entries; i++) {
+			if (indir[i] >= max_queues)
+				return -EINVAL;
+		}
+		/* store rss tbl */
+		for (i = 0; i < reta_entries; i++)
+			adapter->rss_indir_tbl[i] = indir[i];
+
+		rnpgbe_store_reta(adapter);
+	}
+
+	/* Fill out the rss hash key */
+	if (key) {
+		memcpy(adapter->rss_key, key, rnpgbe_get_rxfh_key_size(netdev));
+		rnpgbe_store_key(adapter);
+	}
+	return 0;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.h
new file mode 100644
index 0000000000000..d410e829a5258
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ethtool.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_ETHTOOL_H_
+#define _RNPGBE_ETHTOOL_H_
+
+enum { NETDEV_STATS, RNP_STATS };
+
+struct rnpgbe_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+/* rnp allocates num_tx_queues and num_rx_queues symmetrically, so
+ * RNP_NUM_RX_QUEUES simply evaluates to num_tx_queues.  This is done
+ * because we do not have a good way to get the maximum number of
+ * rx queues with CONFIG_RPS disabled.
+ */
+#define RNP_NUM_RX_QUEUES netdev->num_tx_queues
+#define RNP_NUM_TX_QUEUES netdev->num_tx_queues
+
+#define RNP_NETDEV_STAT(_net_stat)                                             \
+	{                                                                      \
+		.stat_string = #_net_stat,                                     \
+		.sizeof_stat =                                                 \
+			sizeof_field(struct net_device_stats, _net_stat),      \
+		.stat_offset = offsetof(struct net_device_stats, _net_stat)    \
+	}
+
+#define RNP_HW_STAT(_name, _stat)                                              \
+	{                                                                      \
+		.stat_string = _name,                                          \
+		.sizeof_stat = sizeof_field(struct rnpgbe_adapter, _stat),     \
+		.stat_offset = offsetof(struct rnpgbe_adapter, _stat)          \
+	}
+
+struct rnpgbe_tx_queue_ring_stat {
+	u64 hw_head;
+	u64 hw_tail;
+	u64 sw_to_clean;
+	u64 sw_to_next_to_use;
+};
+
+struct rnpgbe_rx_queue_ring_stat {
+	u64 hw_head;
+	u64 hw_tail;
+	u64 sw_to_use;
+	u64 sw_to_clean;
+};
+
+#define RNP_QUEUE_STATS_LEN                                                    \
+	(RNP_NUM_TX_QUEUES *                                                   \
+		 (sizeof(struct rnpgbe_tx_queue_stats) / sizeof(u64) +         \
+		  sizeof(struct rnpgbe_queue_stats) / sizeof(u64) +            \
+		  sizeof(struct rnpgbe_tx_queue_ring_stat) / sizeof(u64)) +    \
+	 RNP_NUM_RX_QUEUES *                                                   \
+		 (sizeof(struct rnpgbe_rx_queue_stats) / sizeof(u64) +         \
+		  sizeof(struct rnpgbe_queue_stats) / sizeof(u64) +            \
+		  sizeof(struct rnpgbe_rx_queue_ring_stat) / sizeof(u64)))
+
+#define RNP_STATS_LEN                                                          \
+	(RNP_GLOBAL_STATS_LEN + RNP_HWSTRINGS_STATS_LEN + RNP_QUEUE_STATS_LEN)
+
+struct ethtool_keee {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertised);
+	u32 supported_u32;
+	u32 advertised_u32;
+	u32 lp_advertised_u32;
+	u32 tx_lpi_timer;
+	bool tx_lpi_enabled;
+	bool eee_active;
+	bool eee_enabled;
+};
+
+int rnpgbe_wol_exclusion(struct rnpgbe_adapter *adapter,
+			 struct ethtool_wolinfo *wol);
+void rnpgbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);
+int rnpgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol);
+void rnpgbe_diag_test(struct net_device *netdev, struct ethtool_test *eth_test,
+		      u64 *data);
+u32 rnpgbe_get_msglevel(struct net_device *netdev);
+void rnpgbe_set_msglevel(struct net_device *netdev, u32 data);
+int rnpgbe_set_phys_id(struct net_device *netdev,
+		       enum ethtool_phys_id_state state);
+int rnpgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+void rnpgbe_get_channels(struct net_device *dev, struct ethtool_channels *ch);
+int rnpgbe_set_channels(struct net_device *dev, struct ethtool_channels *ch);
+int rnpgbe_get_module_info(struct net_device *dev,
+			   struct ethtool_modinfo *modinfo);
+int rnpgbe_get_module_eeprom(struct net_device *dev,
+			     struct ethtool_eeprom *eeprom, u8 *data);
+int rnpgbe_get_keee(struct net_device *netdev, struct ethtool_keee *edata);
+int rnpgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata);
+int rnpgbe_set_keee(struct net_device *netdev, struct ethtool_keee *edata);
+int rnpgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata);
+void rnpgbe_get_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *ring,
+			  struct kernel_ethtool_ringparam __always_unused *ker,
+			  struct netlink_ext_ack __always_unused *extack);
+int rnpgbe_set_ringparam(struct net_device *netdev,
+			 struct ethtool_ringparam *ring,
+			 struct kernel_ethtool_ringparam __always_unused *ker,
+			 struct netlink_ext_ack __always_unused *extack);
+int rnpgbe_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump);
+int rnpgbe_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+			 void *buffer);
+int rnpgbe_set_dump(struct net_device *netdev, struct ethtool_dump *dump);
+int rnpgbe_get_coalesce(struct net_device *netdev,
+			struct ethtool_coalesce *coal,
+			struct kernel_ethtool_coalesce *kernel_coal,
+			struct netlink_ext_ack *extack);
+int rnpgbe_set_coalesce(struct net_device *netdev,
+			struct ethtool_coalesce *ec,
+			struct kernel_ethtool_coalesce *kernel_coal,
+			struct netlink_ext_ack *extack);
+int rnpgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+		     u32 *rule_locs);
+int rnpgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+u32 rnpgbe_rss_indir_size(struct net_device *netdev);
+u32 rnpgbe_get_rxfh_key_size(struct net_device *netdev);
+void rnpgbe_get_reta(struct rnpgbe_adapter *adapter, u32 *indir);
+int rnpgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
+int rnpgbe_flash_device(struct net_device *dev, struct ethtool_flash *flash);
+int rnpgbe_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
+		    const u8 hfunc);
+
+#endif /* _RNPGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
new file mode 100644
index 0000000000000..e2fb6879cc0d7
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -0,0 +1,1118 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include "rnpgbe.h"
+#include "rnpgbe_sriov.h"
+#include "rnpgbe_common.h"
+
+/**
+ * rnpgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for DCB to the assigned rings.
+ *
+ **/
+static bool rnpgbe_cache_ring_dcb(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *dev = adapter->netdev;
+	unsigned int tx_idx, rx_idx;
+	int tc, offset, rss_i, i, step;
+	u8 num_tcs = netdev_get_num_tc(dev);
+	struct rnpgbe_ring *ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	/* verify we have DCB queueing enabled before proceeding */
+	if (num_tcs <= 1)
+		return false;
+
+	rss_i = adapter->ring_feature[RING_F_RSS].indices;
+
+	step = 4;
+	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
+		/*
+		 * hw rings are assigned per tc, interleaved by 'step':
+		 * tc0 0 4 8 c
+		 * tc1 1 5 9 d
+		 * tc2 2 6 a e
+		 * tc3 3 7 b f
+		 */
+		tx_idx = tc;
+		rx_idx = tc;
+		for (i = 0; i < rss_i; i++, tx_idx += step, rx_idx += step) {
+			ring = adapter->tx_ring[offset + i];
+
+			ring->ring_addr =
+				dma->dma_ring_addr + RING_OFFSET(tx_idx);
+			ring->rnpgbe_queue_idx = tx_idx;
+			ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+			ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+			ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+
+			ring = adapter->rx_ring[offset + i];
+			ring->ring_addr =
+				dma->dma_ring_addr + RING_OFFSET(rx_idx);
+			ring->rnpgbe_queue_idx = rx_idx;
+			ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+			ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+			ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+		}
+	}
+
+	return true;
+}
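The interleaving documented in the comment above is easy to reproduce; a throwaway userspace sketch of the same index math (tc/queue counts picked to match the comment, nothing driver-specific):

#include <stdio.h>

int main(void)
{
	int num_tcs = 4, rss_i = 4, step = 4;
	int tc, i;

	for (tc = 0; tc < num_tcs; tc++) {
		printf("tc%d:", tc);
		for (i = 0; i < rss_i; i++)
			printf(" %x", tc + i * step);	/* hw ring index */
		printf("\n");
	}
	return 0;	/* prints tc0: 0 4 8 c, tc1: 1 5 9 d, ... */
}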
+
+/**
+ * rnpgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static bool rnpgbe_cache_ring_sriov(struct rnpgbe_adapter *adapter)
+{
+	/* only proceed if VMDq is enabled */
+	if (!(adapter->flags & RNP_FLAG_VMDQ_ENABLED))
+		return false;
+	return true;
+}
+
+/**
+ * rnpgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS to the assigned rings.
+ *
+ **/
+static bool rnpgbe_cache_ring_rss(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	/* setup here */
+	int ring_step = 1;
+
+	struct rnpgbe_ring *ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	/* n400 uses rings 0 4 8 c */
+	if (hw->hw_type == rnpgbe_hw_n400)
+		ring_step = 4;
+
+	/* some ring alloc rules can be added here */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		ring = adapter->tx_ring[i];
+		ring->rnpgbe_queue_idx = i * ring_step;
+		ring->ring_addr = dma->dma_ring_addr +
+				  RING_OFFSET(ring->rnpgbe_queue_idx);
+
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+		ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		ring = adapter->rx_ring[i];
+		ring->rnpgbe_queue_idx = i * ring_step;
+		ring->ring_addr = dma->dma_ring_addr +
+				  RING_OFFSET(ring->rnpgbe_queue_idx);
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+		ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+	}
+
+	return true;
+}
+
+/**
+ * rnpgbe_cache_ring_register - Descriptor ring to register mapping
+ * @adapter: board private structure to initialize
+ *
+ * Once we know the feature-set enabled for the device, we'll cache
+ * the register offset the descriptor ring is assigned to.
+ *
+ * Note, the order the various feature calls is important.  It must start with
+ * the "most" features enabled at the same time, then trickle down to the
+ * least amount of features turned on at once.
+ **/
+static void rnpgbe_cache_ring_register(struct rnpgbe_adapter *adapter)
+{
+	/* start with default case */
+	if (rnpgbe_cache_ring_dcb(adapter))
+		return;
+
+	/* sriov ring allocation is handled earlier, so this may be a no-op */
+	if (rnpgbe_cache_ring_sriov(adapter))
+		return;
+
+	rnpgbe_cache_ring_rss(adapter);
+}
+
+#define RNP_RSS_128Q_MASK 0x7F
+#define RNP_RSS_64Q_MASK 0x3F
+#define RNP_RSS_16Q_MASK 0xF
+#define RNP_RSS_32Q_MASK 0x1F
+#define RNP_RSS_8Q_MASK 0x7
+#define RNP_RSS_4Q_MASK 0x3
+#define RNP_RSS_2Q_MASK 0x1
+#define RNP_RSS_DISABLED_MASK 0x0
+
+static bool rnpgbe_set_dcb_queues(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *dev = adapter->netdev;
+	struct rnpgbe_ring_feature *f;
+	int rss_i, rss_m, i;
+	int tcs;
+
+	/* Map queue offset and counts onto allocated tx queues */
+	tcs = netdev_get_num_tc(dev);
+
+	/* verify we have DCB queueing enabled before proceeding */
+	if (tcs <= 1)
+		return false;
+
+	/* determine the upper limit for our current DCB mode */
+	rss_i = dev->num_tx_queues / tcs;
+
+	/* we only support 4 TCs, with at most 32 queues per TC */
+	rss_i = min_t(u16, rss_i, 32);
+	rss_m = RNP_RSS_32Q_MASK;
+
+	/* set RSS mask and indices */
+	/* f->limit is tied to the number of cpu vectors */
+	f = &adapter->ring_feature[RING_F_RSS];
+	/* use f->limit to change rss */
+	rss_i = min_t(int, rss_i, f->limit);
+	f->indices = rss_i;
+	f->mask = rss_m;
+
+	/* disable ATR as it is not supported when multiple TCs are enabled */
+	adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE;
+
+	/* setup queue tc num */
+	for (i = 0; i < tcs; i++)
+		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
+
+	/* set the true queues */
+	adapter->num_tx_queues = rss_i * tcs;
+	adapter->num_rx_queues = rss_i * tcs;
+
+	return true;
+}
+
+/**
+ * rnpgbe_set_sriov_queues - Allocate queues for SR-IOV devices
+ * @adapter: board private structure to initialize
+ *
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
+ * and VM pools where appropriate.  If RSS is available, then also try and
+ * enable RSS and map accordingly.
+ *
+ **/
+static bool rnpgbe_set_sriov_queues(struct rnpgbe_adapter *adapter)
+{
+	u16 vmdq_m = 0;
+	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
+	u16 rss_m = RNP_RSS_DISABLED_MASK;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* only proceed if SR-IOV is enabled */
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return false;
+
+	/* save features for later use */
+	adapter->ring_feature[RING_F_VMDQ].indices =
+		adapter->max_ring_pair_counts - 1;
+	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+	/* limit RSS based on user input and save for later use */
+	adapter->ring_feature[RING_F_RSS].indices = rss_i;
+	adapter->ring_feature[RING_F_RSS].mask = rss_m;
+
+	adapter->num_rx_queues = hw->sriov_ring_limit;
+	adapter->num_tx_queues = hw->sriov_ring_limit;
+
+	/* disable ATR as it is not supported when VMDq is enabled */
+	adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE;
+
+	return true;
+}
+
+/**
+ * rnpgbe_rss_indir_tbl_entries - return the number of indirection table entries
+ * @adapter: board private structure to initialize
+ *
+ */
+u32 rnpgbe_rss_indir_tbl_entries(struct rnpgbe_adapter *adapter)
+{
+	if (adapter->hw.rss_type == rnpgbe_rss_uv3p)
+		return 8;
+	else if (adapter->hw.rss_type == rnpgbe_rss_uv440)
+		return 128;
+	else if (adapter->hw.rss_type == rnpgbe_rss_n10)
+		return 128;
+	else
+		return 128;
+}
+
+/**
+ * rnpgbe_set_rss_queues - Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static bool rnpgbe_set_rss_queues(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_ring_feature *f;
+	u16 rss_i;
+
+	f = &adapter->ring_feature[RING_F_RSS];
+	/* use this to change the ring num */
+	rss_i = f->limit;
+	/* set limit -> indices */
+	f->indices = rss_i;
+
+	/* should init rss mask */
+	switch (adapter->hw.rss_type) {
+	case rnpgbe_rss_uv3p:
+		f->mask = RNP_RSS_8Q_MASK;
+		break;
+	case rnpgbe_rss_uv440:
+		f->mask = RNP_RSS_64Q_MASK;
+		break;
+	case rnpgbe_rss_n10:
+		/* maybe not good */
+		f->mask = RNP_RSS_128Q_MASK;
+		break;
+	case rnpgbe_rss_n500:
+		f->mask = RNP_RSS_8Q_MASK;
+		break;
+	default:
+		f->mask = 0;
+		break;
+	}
+
+	/* set rss_i -> adapter->num_tx_queues */
+	/* should not more than irq */
+	adapter->num_tx_queues =
+		min_t(int, rss_i, adapter->max_ring_pair_counts);
+	adapter->num_rx_queues = adapter->num_tx_queues;
+
+	rnpgbe_dbg("[%s] limit:%d indices:%d queues:%d\n", adapter->name,
+		   f->limit, f->indices, adapter->num_tx_queues);
+
+	return true;
+}
+
+/**
+ * rnpgbe_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine.  The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features.  This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void rnpgbe_set_num_queues(struct rnpgbe_adapter *adapter)
+{
+	/* Start with base case */
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
+
+	if (rnpgbe_set_dcb_queues(adapter))
+		return;
+
+	if (rnpgbe_set_sriov_queues(adapter))
+		return;
+	/* at last we support rss */
+	rnpgbe_set_rss_queues(adapter);
+}
+
+int rnpgbe_acquire_msix_vectors(struct rnpgbe_adapter *adapter, int vectors)
+{
+	int err;
+
+#ifdef DISABLE_RX_IRQ
+	vectors -= adapter->num_other_vectors;
+	adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
+	return 0;
+#endif /* DISABLE_RX_IRQ */
+
+	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+				    vectors, vectors);
+	if (err < 0) {
+		rnpgbe_err("pci_enable_msix failed: req:%d err:%d\n", vectors,
+			   err);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+		return -EINVAL;
+	}
+	/*
+	 * Adjust for only the vectors we'll use, which is minimum
+	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+	 * vectors we were allocated.
+	 */
+	vectors -= adapter->num_other_vectors;
+	adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
+	/* in dcb we use max 32 q-vectors */
+	/* each vectors for max 4 tcs */
+	if (adapter->flags & RNP_FLAG_DCB_ENABLED)
+		adapter->num_q_vectors = min(32, adapter->num_q_vectors);
+
+	return 0;
+}
+
+static void rnpgbe_add_ring(struct rnpgbe_ring *ring,
+			    struct rnpgbe_ring_container *head)
+{
+	ring->next = head->ring;
+	head->ring = ring;
+	head->count++;
+}
+
+/**
+ * rnpgbe_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @adapter: board private structure to initialize
+ * @eth_queue_idx: queue_index idx for this q_vector
+ * @v_idx: index of vector used for this q_vector
+ * @r_idx: first hw ring index assigned to this q_vector
+ * @r_count: ring count
+ * @step: ring step
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int rnpgbe_alloc_q_vector(struct rnpgbe_adapter *adapter,
+				 int eth_queue_idx, int v_idx, int r_idx,
+				 int r_count, int step)
+{
+	struct rnpgbe_q_vector *q_vector;
+	struct rnpgbe_ring *ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	int node = NUMA_NO_NODE;
+	int cpu = -1;
+	int ring_count, size;
+	int txr_count, rxr_count, idx;
+	int rxr_idx = r_idx, txr_idx = r_idx;
+	int cpu_offset = 0;
+
+	DPRINTK(PROBE, INFO,
+		"eth_queue_idx:%d v_idx:%d(off:%d) ring:%d ring_cnt:%d, "
+		"step:%d\n",
+		eth_queue_idx, v_idx, adapter->q_vector_off, r_idx, r_count,
+		step);
+
+	txr_count = rxr_count = r_count;
+
+	ring_count = txr_count + rxr_count;
+	size = sizeof(struct rnpgbe_q_vector) +
+	       (sizeof(struct rnpgbe_ring) * ring_count);
+
+	/* should subtract adapter->q_vector_off */
+	if (cpu_online(cpu_offset + v_idx - adapter->q_vector_off)) {
+		/* cpu 1 - 7 */
+		cpu = cpu_offset + v_idx - adapter->q_vector_off;
+		node = cpu_to_node(cpu);
+	}
+
+	/* allocate q_vector and rings */
+	q_vector = kzalloc_node(size, GFP_KERNEL, node);
+	if (!q_vector)
+		q_vector = kzalloc(size, GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	/* setup affinity mask and node */
+	if (cpu != -1)
+		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+	q_vector->numa_node = node;
+
+	netif_napi_add_weight(adapter->netdev, &q_vector->napi, rnpgbe_poll,
+			      adapter->napi_budge);
+	/* tie q_vector and adapter together */
+	adapter->q_vector[v_idx - adapter->q_vector_off] = q_vector;
+	q_vector->adapter = adapter;
+	q_vector->v_idx = v_idx;
+
+	/* initialize work limits */
+	q_vector->tx.work_limit = adapter->tx_work_limit;
+
+	/* initialize pointer to rings */
+	ring = q_vector->ring;
+
+	for (idx = 0; idx < txr_count; idx++) {
+		/* assign generic ring traits */
+		ring->dev = pci_dev_to_dev(adapter->pdev);
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		rnpgbe_add_ring(ring, &q_vector->tx);
+
+		/* apply Tx specific ring traits */
+		ring->count = adapter->tx_ring_item_count;
+		if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
+			int rss_i;
+
+			rss_i = adapter->ring_feature[RING_F_RSS].indices;
+			/* in dcb mode should assign rss */
+			ring->queue_index = eth_queue_idx + idx * rss_i;
+		} else {
+			ring->queue_index = eth_queue_idx + idx;
+		}
+		/* rnpgbe_queue_idx can be changed later */
+		/* it is used to locate the hw reg */
+		ring->rnpgbe_queue_idx = txr_idx;
+		ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(txr_idx);
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+		ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+		ring->device_id = adapter->pdev->device;
+		ring->pfvfnum = hw->pfvfnum;
+		/* n10 should skip tx start control */
+		if (hw->hw_type == rnpgbe_hw_n10)
+			ring->ring_flags |= RNP_RING_SKIP_TX_START;
+
+		if (hw->hw_type == rnpgbe_hw_n400)
+			ring->ring_flags |= RNP_RING_SKIP_TX_START;
+
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210)) {
+			/* n500 does not support tunnel */
+			ring->ring_flags |= RNP_RING_NO_TUNNEL_SUPPORT;
+			/* n500 fixed ring size change from large to small */
+			ring->ring_flags |= RNP_RING_SIZE_CHANGE_FIX;
+			/* n500 fixed veb bug */
+			ring->ring_flags |= RNP_RING_VEB_MULTI_FIX;
+			ring->ring_flags |= RNP_RING_OUTER_VLAN_FIX;
+		}
+		/* assign ring to adapter */
+		adapter->tx_ring[ring->queue_index] = ring;
+
+		/* update count and index */
+		txr_idx += step;
+
+		rnpgbe_dbg("\t\t%s:vector[%d] <--RNP TxRing:%d, eth_queue:%d\n",
+			   adapter->name, v_idx, ring->rnpgbe_queue_idx,
+			   ring->queue_index);
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	for (idx = 0; idx < rxr_count; idx++) {
+		/* assign generic ring traits */
+		ring->dev = pci_dev_to_dev(adapter->pdev);
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Rx values */
+		rnpgbe_add_ring(ring, &q_vector->rx);
+
+		/* apply Rx specific ring traits */
+		ring->count = adapter->rx_ring_item_count;
+		/* rnpgbe_queue_idx can be changed later */
+		/* it is used to locate the hw reg */
+		if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
+			int rss_i;
+
+			rss_i = adapter->ring_feature[RING_F_RSS].indices;
+			/* in dcb mode should assign rss */
+			ring->queue_index = eth_queue_idx + idx * rss_i;
+		} else {
+			ring->queue_index = eth_queue_idx + idx;
+		}
+		ring->rnpgbe_queue_idx = rxr_idx;
+		ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(rxr_idx);
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->ring_addr + RNP_DMA_INT_MASK;
+		ring->dma_int_clr = ring->ring_addr + RNP_DMA_INT_CLR;
+		ring->device_id = adapter->pdev->device;
+		ring->pfvfnum = hw->pfvfnum;
+
+		/* n500 fixed ring size change from large to small */
+		ring->ring_flags |= RNP_RING_SIZE_CHANGE_FIX;
+		ring->ring_flags |= RNP_RING_SCATER_SETUP;
+		ring->ring_flags |= RNP_RING_NO_TUNNEL_SUPPORT;
+		ring->ring_flags |= RNP_RING_STAGS_SUPPORT;
+		ring->ring_flags |= RNP_RING_DOUBLE_VLAN_SUPPORT;
+		ring->ring_flags |= RNP_RING_IRQ_MISS_FIX;
+		/* n500 fixed veb bug */
+		ring->ring_flags |= RNP_RING_VEB_MULTI_FIX;
+		ring->ring_flags |= RNP_RING_CHKSM_FIX;
+		adapter->flags2 |= RNP_FLAG2_CHKSM_FIX;
+
+		/* assign ring to adapter */
+		adapter->rx_ring[ring->queue_index] = ring;
+		rnpgbe_dbg("\t\t%s:vector[%d] <--RNP RxRing:%d, eth_queue:%d\n",
+			   adapter->name, v_idx, ring->rnpgbe_queue_idx,
+			   ring->queue_index);
+
+		/* update count and index */
+		rxr_idx += step;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	if ((hw->hw_type == rnpgbe_hw_n500) ||
+	    (hw->hw_type == rnpgbe_hw_n210)) {
+		q_vector->vector_flags |= RNP_QVECTOR_FLAG_ITR_FEATURE;
+		q_vector->new_rx_count = adapter->rx_frames;
+		q_vector->old_rx_count = adapter->rx_frames;
+		q_vector->itr_rx = adapter->rx_usecs;
+		q_vector->rx.itr = adapter->rx_usecs;
+	}
+
+	return 0;
+}
+
+/**
+ * rnpgbe_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void rnpgbe_free_q_vector(struct rnpgbe_adapter *adapter, int v_idx)
+{
+	struct rnpgbe_q_vector *q_vector = adapter->q_vector[v_idx];
+	struct rnpgbe_ring *ring;
+
+	rnpgbe_dbg("v_idx:%d\n", v_idx);
+
+	rnpgbe_for_each_ring(ring, q_vector->tx)
+		adapter->tx_ring[ring->queue_index] = NULL;
+
+	rnpgbe_for_each_ring(ring, q_vector->rx)
+		adapter->rx_ring[ring->queue_index] = NULL;
+
+	adapter->q_vector[v_idx] = NULL;
+	netif_napi_del(&q_vector->napi);
+
+	if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK)
+		hrtimer_cancel(&q_vector->irq_miss_check_timer);
+
+	/*
+	 * rnpgbe_get_stats64() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
+	 */
+	kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * rnpgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int rnpgbe_alloc_q_vectors(struct rnpgbe_adapter *adapter)
+{
+	int v_idx = adapter->q_vector_off;
+	int ring_idx = 0;
+	int r_remaing =
+		min_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	int ring_step = 1;
+	int err, ring_cnt, v_remaing = adapter->num_q_vectors;
+	int q_vector_nums = 0;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		ring_idx = 0;
+		/* only sriov_ring_limit rings when sriov is enabled,
+		 * allocated from the back unless VF_FIXED is set
+		 */
+		if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) {
+			/* in this mode the pf uses vf 0's rings */
+			ring_idx = 0;
+			r_remaing = hw->sriov_ring_limit;
+
+		} else {
+			ring_idx = adapter->max_ring_pair_counts -
+				   ring_step * hw->sriov_ring_limit;
+			r_remaing = hw->sriov_ring_limit;
+		}
+	}
+
+	adapter->eth_queue_idx = 0;
+	BUG_ON(adapter->num_q_vectors == 0);
+
+	if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
+		rnpgbe_dbg("in dcb mode r_remaing %d, num_q_vectors %d\n",
+			   r_remaing, v_remaing);
+	}
+
+	rnpgbe_dbg("r_remaing:%d, ring_step:%d num_q_vectors:%d\n", r_remaing,
+		   ring_step, v_remaing);
+
+	/* multiple rings can share one q_vector */
+	for (; r_remaing > 0 && v_remaing > 0; v_remaing--) {
+		ring_cnt = DIV_ROUND_UP(r_remaing, v_remaing);
+		if (adapter->flags & RNP_FLAG_DCB_ENABLED)
+			BUG_ON(ring_cnt != adapter->num_tc);
+
+		err = rnpgbe_alloc_q_vector(adapter, adapter->eth_queue_idx,
+					    v_idx, ring_idx, ring_cnt,
+					    ring_step);
+		if (err)
+			goto err_out;
+		ring_idx += ring_step * ring_cnt;
+		r_remaing -= ring_cnt;
+		v_idx++;
+		q_vector_nums++;
+		/* dcb mode only add 1 */
+		if (adapter->flags & RNP_FLAG_DCB_ENABLED)
+			adapter->eth_queue_idx += 1;
+		else
+			adapter->eth_queue_idx += ring_cnt;
+	}
+	/* should fix the real used q_vectors_nums */
+	adapter->num_q_vectors = q_vector_nums;
+
+	return 0;
+
+err_out:
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		rnpgbe_free_q_vector(adapter, v_idx);
+
+	return -ENOMEM;
+}
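The even ring-to-vector spread done above is just repeated DIV_ROUND_UP over the remaining counts; a standalone sketch with arbitrary queue/vector numbers (not driver values):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int r_remaining = 6, v_remaining = 4, ring_idx = 0, ring_step = 1;

	while (r_remaining > 0 && v_remaining > 0) {
		int ring_cnt = DIV_ROUND_UP(r_remaining, v_remaining);

		printf("vector gets %d ring(s) starting at hw ring %d\n",
		       ring_cnt, ring_idx);
		ring_idx += ring_step * ring_cnt;
		r_remaining -= ring_cnt;
		v_remaining--;
	}
	return 0;	/* 6 rings over 4 vectors -> 2, 2, 1, 1 */
}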
+
+/**
+ * rnpgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void rnpgbe_free_q_vectors(struct rnpgbe_adapter *adapter)
+{
+	int v_idx = adapter->num_q_vectors;
+
+	adapter->num_rx_queues = 0;
+	adapter->num_tx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		rnpgbe_free_q_vector(adapter, v_idx);
+}
+
+static void rnpgbe_reset_interrupt_capability(struct rnpgbe_adapter *adapter)
+{
+	if (adapter->flags & RNP_FLAG_MSIX_ENABLED)
+		pci_disable_msix(adapter->pdev);
+	else if (adapter->flags & RNP_FLAG_MSI_CAPABLE)
+		pci_disable_msi(adapter->pdev);
+
+	kfree(adapter->msix_entries);
+	adapter->msix_entries = NULL;
+	adapter->q_vector_off = 0;
+
+	/* first clear the msix flags */
+	adapter->flags &= (~RNP_FLAG_MSIX_ENABLED);
+	adapter->flags &= (~RNP_FLAG_MSI_ENABLED);
+}
+
+/**
+ * rnpgbe_set_interrupt_capability - set MSI-X or MSI if supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int rnpgbe_set_interrupt_capability(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int vector, v_budget, err = 0;
+	int irq_mode_back = adapter->irq_mode;
+
+	v_budget = min_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+
+	/* in one ring mode should reset v_budget */
+	v_budget = min_t(int, v_budget, num_online_cpus());
+	v_budget += adapter->num_other_vectors;
+	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
+
+	if (adapter->irq_mode == irq_mode_msix) {
+		adapter->msix_entries = kcalloc(
+			v_budget, sizeof(struct msix_entry), GFP_KERNEL);
+
+		if (!adapter->msix_entries) {
+			rnpgbe_err("alloc msix_entries failed!\n");
+			return -EINVAL;
+		}
+		dbg("[%s] adapter:%p msix_entry:%p\n", __func__, adapter,
+		    adapter->msix_entries);
+
+		for (vector = 0; vector < v_budget; vector++)
+			adapter->msix_entries[vector].entry = vector;
+
+		err = rnpgbe_acquire_msix_vectors(adapter, v_budget);
+		if (!err) {
+			if (adapter->num_other_vectors)
+				adapter->q_vector_off = 1;
+			rnpgbe_dbg(
+				"adapter%d alloc vectors: cnt:%d [%d~%d] num_q_vectors:%d\n",
+				adapter->bd_number, v_budget,
+				adapter->q_vector_off,
+				adapter->q_vector_off + v_budget - 1,
+				adapter->num_q_vectors);
+			adapter->flags |= RNP_FLAG_MSIX_ENABLED;
+
+			goto out;
+		}
+		/* if msi capability is present, try it */
+		if (adapter->flags & RNP_FLAG_MSI_CAPABLE)
+			adapter->irq_mode = irq_mode_msi;
+		kfree(adapter->msix_entries);
+		pr_info("acquire msix failed, try to use msi\n");
+	} else {
+		rnpgbe_dbg("adapter%d not in msix mode\n", adapter->bd_number);
+	}
+	/* if msi capability is present or irq_mode was set to msi */
+	if (adapter->irq_mode == irq_mode_msi) {
+		err = pci_enable_msi(adapter->pdev);
+		if (err) {
+			pr_info("Failed to allocate MSI interrupt (err %d), falling back to legacy\n", err);
+		} else {
+			/* msi mode use only 1 irq */
+			adapter->flags |= RNP_FLAG_MSI_ENABLED;
+		}
+	}
+	/* write back origin irq_mode */
+	adapter->irq_mode = irq_mode_back;
+	/* legacy and msi only 1 vectors */
+	adapter->num_q_vectors = 1;
+	err = 0;
+
+out:
+	return err;
+}
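The v_budget clamp at the top of the function reduces to three min operations plus the extra (non-queue) vector; the sketch below uses made-up counts and assumes num_other_vectors covers the mailbox interrupt, which is not spelled out here:

#include <stdio.h>

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int num_tx = 8, num_rx = 8, online_cpus = 4;
	int num_other_vectors = 1, max_msix_vectors = 32;
	int v_budget;

	v_budget = min_int(num_tx, num_rx);
	v_budget = min_int(v_budget, online_cpus);	/* no more than cpus */
	v_budget += num_other_vectors;			/* non-queue vector(s) */
	v_budget = min_int(v_budget, max_msix_vectors);

	printf("v_budget = %d\n", v_budget);		/* 5 with these numbers */
	return 0;
}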
+
+static void rnpgbe_print_ring_info(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	struct rnpgbe_ring *ring;
+	struct rnpgbe_q_vector *q_vector;
+
+	rnpgbe_dbg("tx_queue count %d\n", adapter->num_tx_queues);
+	rnpgbe_dbg("queue-mapping :\n");
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		ring = adapter->tx_ring[i];
+		rnpgbe_dbg(" queue %d , physical ring %d\n", i,
+			   ring->rnpgbe_queue_idx);
+	}
+	rnpgbe_dbg("rx_queue count %d\n", adapter->num_rx_queues);
+	rnpgbe_dbg("queue-mapping :\n");
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		ring = adapter->rx_ring[i];
+		rnpgbe_dbg(" queue %d , physical ring %d\n", i,
+			   ring->rnpgbe_queue_idx);
+	}
+	rnpgbe_dbg("q_vector count %d\n", adapter->num_q_vectors);
+	rnpgbe_dbg("vector-queue mapping:\n");
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		q_vector = adapter->q_vector[i];
+		rnpgbe_dbg("vector %d\n", i);
+		rnpgbe_for_each_ring(ring, q_vector->tx) {
+			rnpgbe_dbg(" tx physical ring %d\n",
+				   ring->rnpgbe_queue_idx);
+		}
+		rnpgbe_for_each_ring(ring, q_vector->rx) {
+			rnpgbe_dbg(" rx physical ring %d\n",
+				   ring->rnpgbe_queue_idx);
+		}
+	}
+}
+
+static void update_ring_count(struct rnpgbe_adapter *adapter)
+{
+	if (adapter->flags2 & RNP_FLAG2_INSMOD)
+		return;
+
+	adapter->flags2 |= RNP_FLAG2_INSMOD;
+
+	/* limit ring count if in msi or legacy mode */
+	if (!(adapter->flags & RNP_FLAG_MSIX_ENABLED)) {
+		adapter->num_tx_queues = 1;
+		adapter->num_rx_queues = 1;
+		adapter->ring_feature[RING_F_RSS].limit = 1;
+		adapter->ring_feature[RING_F_FDIR].limit = 1;
+		adapter->ring_feature[RING_F_RSS].indices = 1;
+	}
+}
+
+/**
+ * rnpgbe_init_interrupt_scheme - Determine proper interrupt scheme
+ * @adapter: board private structure to initialize
+ *
+ * We determine which interrupt scheme to use based on...
+ * - Hardware queue count (num_*_queues)
+ *   - defined by miscellaneous hardware support/features (RSS, etc.)
+ **/
+int rnpgbe_init_interrupt_scheme(struct rnpgbe_adapter *adapter)
+{
+	int err;
+
+	/* Number of supported queues */
+	rnpgbe_set_num_queues(adapter);
+
+	/* Set interrupt mode */
+	err = rnpgbe_set_interrupt_capability(adapter);
+	if (err) {
+		e_dev_err("Unable to get interrupt\n");
+		goto err_set_interrupt;
+	}
+	/* update ring num only init */
+	update_ring_count(adapter);
+
+	err = rnpgbe_alloc_q_vectors(adapter);
+	if (err) {
+		e_dev_err("Unable to allocate memory for queue vectors\n");
+		goto err_alloc_q_vectors;
+	}
+	rnpgbe_cache_ring_register(adapter);
+
+	DPRINTK(PROBE, INFO,
+		"Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n\n",
+		(adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+		adapter->num_rx_queues, adapter->num_tx_queues);
+	rnpgbe_print_ring_info(adapter);
+
+	set_bit(__RNP_DOWN, &adapter->state);
+
+	return 0;
+
+err_alloc_q_vectors:
+	rnpgbe_reset_interrupt_capability(adapter);
+err_set_interrupt:
+	return err;
+}
+
+/**
+ * rnpgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void rnpgbe_clear_interrupt_scheme(struct rnpgbe_adapter *adapter)
+{
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+
+	rnpgbe_free_q_vectors(adapter);
+	rnpgbe_reset_interrupt_capability(adapter);
+}
+
+/**
+ * rnpgbe_tx_ctxtdesc - Send a control desc to hw
+ * @tx_ring: target ring of this control desc
+ * @mss_len_vf_num: packed mss length and vf number
+ * @inner_vlan_tunnel_len: packed inner vlan tag and tunnel length
+ * @ignore_vlan: vlan-ignore flag used in sriov mode
+ * @crc_pad: padding/crc control flag from the tx buffer
+ *
+ **/
+static void rnpgbe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring,
+			       u32 mss_len_vf_num,
+			       u32 inner_vlan_tunnel_len,
+			       int ignore_vlan,
+			       bool crc_pad)
+{
+	struct rnpgbe_tx_ctx_desc *context_desc;
+	u16 i = tx_ring->next_to_use;
+	struct rnpgbe_adapter *adapter = RING2ADAPT(tx_ring);
+	u32 type_tucmd = 0;
+
+	context_desc = RNP_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= RNP_TXD_CTX_CTRL_DESC;
+
+	/* set mac padding status if set priv_flags */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) {
+		if (!crc_pad)
+			type_tucmd |= RNP_TXD_MTI_CRC_PAD_CTRL;
+	}
+
+	if (tx_ring->ring_flags & RNP_RING_OUTER_VLAN_FIX) {
+#define VLAN_MASK (0x0000ffff)
+#define VLAN_INSERT (0x00800000)
+		if (inner_vlan_tunnel_len & VLAN_MASK)
+			type_tucmd |= VLAN_INSERT;
+	} else {
+		if (inner_vlan_tunnel_len & 0x00ffff00) {
+			/* if an inner vlan is present */
+			type_tucmd |= RNP_TXD_CMD_INNER_VLAN;
+		}
+	}
+
+	context_desc->mss_len_vf_num = cpu_to_le32(mss_len_vf_num);
+	context_desc->inner_vlan_tunnel_len =
+		cpu_to_le32(inner_vlan_tunnel_len);
+	context_desc->resv_cmd = cpu_to_le32(type_tucmd);
+	context_desc->resv = 0;
+	if (tx_ring->q_vector->adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		if (ignore_vlan)
+			context_desc->inner_vlan_tunnel_len |=
+				VF_VEB_IGNORE_VLAN;
+	}
+	buf_dump_line("ctx  ", __LINE__, context_desc, sizeof(*context_desc));
+}
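How the control-descriptor command word is built above can be shown with placeholder bits; the EX_* values below are invented for illustration and are not the driver's real RNP_TXD_* definitions:

#include <stdint.h>
#include <stdio.h>

#define EX_CTX_CTRL_DESC	(1u << 31)	/* placeholder bit positions */
#define EX_CTX_CRC_PAD_CTRL	(1u << 30)
#define EX_CTX_INNER_VLAN	(1u << 23)

static uint32_t build_ctx_cmd(int tx_padding_flag, int crc_pad,
			      uint32_t inner_vlan_tunnel_len)
{
	uint32_t cmd = EX_CTX_CTRL_DESC;	/* mark as context descriptor */

	/* only override hw padding when the priv flag asks for it */
	if (tx_padding_flag && !crc_pad)
		cmd |= EX_CTX_CRC_PAD_CTRL;

	if (inner_vlan_tunnel_len & 0x00ffff00)	/* inner vlan present */
		cmd |= EX_CTX_INNER_VLAN;

	return cmd;
}

int main(void)
{
	printf("cmd = 0x%08x\n", build_ctx_cmd(1, 0, 0x00123400));
	return 0;
}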
+
+void rnpgbe_maybe_tx_ctxtdesc(struct rnpgbe_ring *tx_ring,
+			      struct rnpgbe_tx_buffer *first, u32 ignore_vlan)
+{
+	/* sriov mode pf use the last vf */
+	if (first->ctx_flag) {
+		rnpgbe_tx_ctxtdesc(tx_ring, first->mss_len_vf_num,
+				   first->inner_vlan_tunnel_len, ignore_vlan,
+				   first->gso_need_padding);
+	}
+}
+
+void rnpgbe_store_reta(struct rnpgbe_adapter *adapter)
+{
+	u32 i, reta_entries = rnpgbe_rss_indir_tbl_entries(adapter);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 reta = 0;
+	/* related to the rss table */
+	struct rnpgbe_ring *rx_ring;
+
+	/* Write redirection table to HW */
+	for (i = 0; i < reta_entries; i++) {
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+			reta = adapter->rss_indir_tbl[i];
+		else {
+			rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]];
+			reta = rx_ring->rnpgbe_queue_idx;
+		}
+		hw->rss_indir_tbl[i] = reta;
+	}
+	hw->ops.set_rss_table(hw);
+}
+
+void rnpgbe_store_key(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	hw->ops.set_rss_key(hw, sriov_flag);
+}
+
+int rnpgbe_init_rss_key(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	/* only init rss key once */
+	/* no change rss key if user input one */
+	if (!adapter->rss_key_setup_flag) {
+		netdev_rss_key_fill(adapter->rss_key, RNP_RSS_KEY_SIZE);
+		adapter->rss_key_setup_flag = 1;
+	}
+	hw->ops.set_rss_key(hw, sriov_flag);
+
+	return 0;
+}
+
+int rnpgbe_init_rss_table(struct rnpgbe_adapter *adapter)
+{
+	int rx_nums = adapter->num_rx_queues;
+	int i, j;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_ring *rx_ring;
+	u32 reta = 0;
+	u32 reta_entries = rnpgbe_rss_indir_tbl_entries(adapter);
+
+	if (adapter->flags & RNP_FLAG_DCB_ENABLED) {
+		rx_nums = rx_nums / adapter->num_tc;
+		for (i = 0, j = 0; i < 8; i++) {
+			adapter->rss_tc_tbl[i] = j;
+			hw->rss_tc_tbl[i] = j;
+			j = (j + 1) % adapter->num_tc;
+		}
+	} else {
+		for (i = 0, j = 0; i < 8; i++) {
+			hw->rss_tc_tbl[i] = 0;
+			adapter->rss_tc_tbl[i] = 0;
+		}
+	}
+
+	/* adapter->num_q_vectors is not correct */
+	for (i = 0, j = 0; i < reta_entries; i++) {
+		/* init with default value */
+		if (!adapter->rss_tbl_setup_flag)
+			adapter->rss_indir_tbl[i] = j;
+
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+			/* in sriov mode reta in [0, rx_nums] */
+			reta = j;
+		} else {
+			/* in no sriov, reta is real ring number */
+			rx_ring = adapter->rx_ring[adapter->rss_indir_tbl[i]];
+			reta = rx_ring->rnpgbe_queue_idx;
+		}
+		/* store rss_indir_tbl */
+		hw->rss_indir_tbl[i] = reta;
+
+		j = (j + 1) % rx_nums;
+	}
+	/* tbl only init once */
+	adapter->rss_tbl_setup_flag = 1;
+	hw->ops.set_rss_table(hw);
+	return 0;
+}
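The default table fill above is a plain round-robin of the active rx queues into the indirection buckets (with the stored value being the ring's hardware index in the non-SR-IOV case); a minimal sketch of the round-robin part with arbitrary sizes:

#include <stdio.h>

int main(void)
{
	unsigned int reta_entries = 128, rx_nums = 6, i, j = 0;
	unsigned int rss_indir_tbl[128];

	for (i = 0; i < reta_entries; i++) {
		rss_indir_tbl[i] = j;		/* logical rx queue for bucket i */
		j = (j + 1) % rx_nums;
	}

	printf("bucket 0 -> queue %u, bucket 7 -> queue %u\n",
	       rss_indir_tbl[0], rss_indir_tbl[7]);	/* 0 and 1 */
	return 0;
}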
+
+/* setup to the hw  */
+s32 rnpgbe_fdir_write_perfect_filter(int fdir_mode, struct rnpgbe_hw *hw,
+				     union rnpgbe_atr_input *filter, u16 hw_id,
+				     u8 queue, bool prio_flag)
+{
+	if (filter->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER)
+		hw->ops.set_layer2_remapping(hw, filter, hw_id, queue,
+					     prio_flag);
+	else
+		hw->ops.set_tuple5_remapping(hw, filter, hw_id, queue,
+					     prio_flag);
+
+	return 0;
+}
+
+s32 rnpgbe_fdir_erase_perfect_filter(int fdir_mode, struct rnpgbe_hw *hw,
+				     union rnpgbe_atr_input *input, u16 pri_id)
+{
+	/* just disable the filter */
+	if (input->formatted.flow_type == RNP_ATR_FLOW_TYPE_ETHER) {
+		hw->ops.clr_layer2_remapping(hw, pri_id);
+		dbg("disable layer2 %d\n", pri_id);
+	} else {
+		hw->ops.clr_tuple5_remapping(hw, pri_id);
+		dbg("disable tuple5 %d\n", pri_id);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
new file mode 100644
index 0000000000000..d67f1bc4c7204
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -0,0 +1,7727 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe_common.h"
+#include "rnpgbe.h"
+#include "rnpgbe_sriov.h"
+#include "rnpgbe_ptp.h"
+#include "rnpgbe_ethtool.h"
+
+/* for test only */
+#ifdef CONFIG_ARM64
+#define NO_BQL_TEST
+#endif
+
+char rnpgbe_driver_name[] = "rnpgbe";
+static const char rnpgbe_driver_string[] =
+	"mucse 1 Gigabit PCI Express Network Driver";
+#define DRV_VERSION "0.2.3-rc10"
+static u32 driver_version = 0x0002030a;
+#include "version.h"
+
+const char rnpgbe_driver_version[] = DRV_VERSION;
+static const char rnpgbe_copyright[] =
+	"Copyright (c) 2020-2024 mucse Corporation.";
+
+static struct rnpgbe_info *rnpgbe_info_tbl[] = {
+	[board_n500] = &rnpgbe_n500_info,
+	[board_n210] = &rnpgbe_n210_info,
+};
+
+static int register_mbx_irq(struct rnpgbe_adapter *adapter);
+static void remove_mbx_irq(struct rnpgbe_adapter *adapter);
+
+static void rnpgbe_pull_tail(struct sk_buff *skb);
+#ifdef OPTM_WITH_LPAGE
+static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring,
+				     struct rnpgbe_rx_buffer *bi,
+				     union rnpgbe_rx_desc *rx_desc, u16 bufsz,
+				     u64 fun_id);
+static void rnpgbe_put_rx_buffer(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *rx_buffer);
+#else /* OPTM_WITH_LPAGE */
+static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring,
+				     struct rnpgbe_rx_buffer *bi);
+static void rnpgbe_put_rx_buffer(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *rx_buffer,
+				 struct sk_buff *skb);
+#endif /* OPTM_WITH_LPAGE */
+
+static struct pci_device_id rnpgbe_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N500_QUAD_PORT),
+	  .driver_data = board_n500 }, /* n500 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N500_DUAL_PORT),
+	  .driver_data = board_n500 }, /* n500 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N210),
+	  .driver_data = board_n210 }, /* n210 */
+	/* required last entry */
+	{
+		0,
+	},
+};
+MODULE_DEVICE_TABLE(pci, rnpgbe_pci_tbl);
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0000);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static unsigned int fix_eth_name;
+module_param(fix_eth_name, uint, 0000);
+MODULE_PARM_DESC(fix_eth_name, "set eth adapter name to rnpgbeXX");
+
+static unsigned int module_enable_ptp = 1;
+module_param(module_enable_ptp, uint, 0000);
+MODULE_PARM_DESC(module_enable_ptp, "enable ptp, enabled by default");
+
+MODULE_AUTHOR("Mucse Corporation, ");
+MODULE_DESCRIPTION("Mucse(R) 1 Gigabit PCI Express Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static struct workqueue_struct *rnpgbe_wq;
+
+static int enable_hi_dma;
+
+#define RNP_LPI_T(x) (jiffies + msecs_to_jiffies(x))
+
+static void rnpgbe_service_timer(struct timer_list *t);
+static void rnpgbe_setup_eee_mode(struct rnpgbe_adapter *adapter, bool status);
+
+void rnpgbe_service_event_schedule(struct rnpgbe_adapter *adapter)
+{
+	if (!test_bit(__RNP_DOWN, &adapter->state) &&
+	    !test_and_set_bit(__RNP_SERVICE_SCHED, &adapter->state))
+		queue_work(rnpgbe_wq, &adapter->service_task);
+}
+
+static void rnpgbe_service_event_complete(struct rnpgbe_adapter *adapter)
+{
+	BUG_ON(!test_bit(__RNP_SERVICE_SCHED, &adapter->state));
+
+	/* flush memory to make sure state is correct before next watchdog */
+	smp_mb__before_atomic();
+	clear_bit(__RNP_SERVICE_SCHED, &adapter->state);
+}
+
+/**
+ * rnpgbe_set_ring_vector - set the ring_vector registers,
+ * mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @rnpgbe_queue: queue to map the corresponding interrupt to
+ * @rnpgbe_msix_vector: the vector to map to the corresponding queue
+ *
+ */
+static void rnpgbe_set_ring_vector(struct rnpgbe_adapter *adapter,
+				   u8 rnpgbe_queue, u8 rnpgbe_msix_vector)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 data = 0;
+
+	data = hw->pfvfnum << 24;
+	data |= (rnpgbe_msix_vector << 8);
+	data |= (rnpgbe_msix_vector << 0);
+
+	DPRINTK(IFUP, INFO,
+		"Set Ring-Vector queue:%d (reg:0x%x) <-- Rx-MSIX:%d, Tx-MSIX:%d\n",
+		rnpgbe_queue, RING_VECTOR(rnpgbe_queue), rnpgbe_msix_vector,
+		rnpgbe_msix_vector);
+
+	rnpgbe_wr_reg(hw->ring_msix_base + RING_VECTOR(rnpgbe_queue), data);
+}
+
+void rnpgbe_unmap_and_free_tx_resource(struct rnpgbe_ring *ring,
+				       struct rnpgbe_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_single(ring->dev,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
+					 DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+	}
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
+}
+
+static u64 rnpgbe_get_tx_completed(struct rnpgbe_ring *ring)
+{
+	return ring->stats.packets;
+}
+
+static u64 rnpgbe_get_tx_pending(struct rnpgbe_ring *ring)
+{
+	u32 head = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+	u32 tail = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL);
+
+	if (head != tail)
+		return (head < tail) ? tail - head :
+				       (tail + ring->count - head);
+
+	return 0;
+}
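The pending count above is the usual head/tail distance on a circular descriptor ring; the same arithmetic in a standalone form (ring size chosen arbitrarily):

#include <stdio.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	/* tail has wrapped past the end of a 512-entry ring */
	printf("%u\n", tx_pending(500, 10, 512));	/* prints 22 */
	return 0;
}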
+
+static inline bool rnpgbe_check_tx_hang(struct rnpgbe_ring *tx_ring)
+{
+	u32 tx_done = rnpgbe_get_tx_completed(tx_ring);
+	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+	u32 tx_pending = rnpgbe_get_tx_pending(tx_ring);
+	bool ret = false;
+
+	clear_check_for_tx_hang(tx_ring);
+
+	/*
+	 * Check for a hung queue, but be thorough. This verifies
+	 * that a transmit has been completed since the previous
+	 * check AND there is at least one packet pending. The
+	 * ARMED bit is set to indicate a potential hang. The
+	 * bit is cleared if a pause frame is received to remove
+	 * false hang detection due to PFC or 802.3x frames. By
+	 * requiring this to fail twice we avoid races with
+	 * pfc clearing the ARMED bit and conditions where we
+	 * run the check_tx_hang logic with a transmit completion
+	 * pending but without time to complete it yet.
+	 */
+	if ((tx_done_old == tx_done) && tx_pending) {
+		/* make sure it is true for two checks in a row */
+		ret = test_and_set_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state);
+	} else {
+		/* update completed stats and continue */
+		tx_ring->tx_stats.tx_done_old = tx_done;
+		/* reset the countdown */
+		clear_bit(__RNP_HANG_CHECK_ARMED, &tx_ring->state);
+	}
+	return ret;
+}
+
+/**
+ * rnpgbe_tx_timeout_reset - initiate reset due to Tx timeout
+ * @adapter: driver private struct
+ **/
+static void rnpgbe_tx_timeout_reset(struct rnpgbe_adapter *adapter)
+{
+	/* Do the reset outside of interrupt context */
+	if (!test_bit(__RNP_DOWN, &adapter->state)) {
+		adapter->flags2 |= RNP_FLAG2_RESET_REQUESTED;
+		e_warn(drv, "initiating reset due to tx timeout\n");
+		rnpgbe_service_event_schedule(adapter);
+	}
+}
+
+/**
+ * rnpgbe_enable_eee_mode - check and enter in LPI mode
+ * @adapter: driver private structure
+ * Description: this function is to verify and enter in LPI mode in case of
+ * EEE.
+ */
+static void rnpgbe_enable_eee_mode(struct rnpgbe_adapter *adapter)
+{
+	int i = 0;
+	struct rnpgbe_ring *tx_ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	for (i = 0; i < (adapter->num_tx_queues); i++) {
+		tx_ring = adapter->tx_ring[i];
+		if (tx_ring->next_to_use != tx_ring->next_to_clean)
+			return;
+	}
+	/* Check and enter in LPI mode */
+	if (!adapter->tx_path_in_lpi_mode) {
+		if (hw->ops.set_eee_mode)
+			hw->ops.set_eee_mode(hw,
+					     adapter->en_tx_lpi_clockgating);
+	}
+	adapter->tx_path_in_lpi_mode = true;
+}
+
+/**
+ * rnpgbe_disable_eee_mode - disable and exit from LPI mode
+ * @adapter: driver private structure
+ * Description: this function is to exit and disable EEE in case of
+ * LPI state is true. This is called by the xmit.
+ */
+void rnpgbe_disable_eee_mode(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (hw->ops.reset_eee_mode)
+		hw->ops.reset_eee_mode(hw);
+
+	if (!test_bit(__RNP_EEE_REMOVE, &adapter->state))
+		mod_timer(&adapter->eee_ctrl_timer,
+			  RNP_LPI_T(adapter->eee_timer));
+}
+
+/**
+ * rnpgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ * @napi_budget: budget from the napi poll loop, used for skb freeing
+ **/
+static bool rnpgbe_clean_tx_irq(struct rnpgbe_q_vector *q_vector,
+				struct rnpgbe_ring *tx_ring, int napi_budget)
+{
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	struct rnpgbe_tx_buffer *tx_buffer;
+	struct rnpgbe_tx_desc *tx_desc;
+	u64 total_bytes = 0, total_packets = 0;
+	int budget = q_vector->tx.work_limit;
+	int i = tx_ring->next_to_clean;
+
+	if (test_bit(__RNP_DOWN, &adapter->state))
+		return true;
+
+	tx_ring->tx_stats.poll_count++;
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = RNP_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
+
+	do {
+		struct rnpgbe_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		rmb();
+
+		/* if eop DD is not set pending work has not been completed */
+		if (!(eop_desc->vlan_cmd & cpu_to_le32(RNP_TXD_STAT_DD)))
+			break;
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
+
+		/* free the skb */
+		napi_consume_skb(tx_buffer->skb, napi_budget);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+
+		/* clear tx_buffer data */
+		tx_buffer->skb = NULL;
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = RNP_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buffer, len, 0);
+			}
+			budget--;
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = RNP_TX_DESC(tx_ring, 0);
+		}
+
+		/* issue prefetch for next Tx descriptor */
+		prefetch(tx_desc);
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget > 0));
+#ifndef NO_BQL_TEST
+	netdev_tx_completed_queue(txring_txq(tx_ring), total_packets,
+				  total_bytes);
+#endif
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	tx_ring->tx_stats.tx_clean_count += total_packets;
+	tx_ring->tx_stats.tx_clean_times++;
+	if (tx_ring->tx_stats.tx_clean_times > 10) {
+		tx_ring->tx_stats.tx_clean_times = 0;
+		tx_ring->tx_stats.tx_clean_count = 0;
+	}
+
+	u64_stats_update_end(&tx_ring->syncp);
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
+	tx_ring->tx_stats.send_done_bytes += total_bytes;
+
+	if (!(q_vector->vector_flags & RNP_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS)) {
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
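+		/* only wake the queue once there is room for at least two
+		 * worst-case frames worth of descriptors, to avoid
+		 * stop/start thrashing
+		 */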
+		if (likely(netif_carrier_ok(tx_ring->netdev) &&
+			   (rnpgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+			/* Make sure that anybody stopping the queue after this
+			 * sees the new next_to_clean.
+			 */
+			smp_mb();
+			if (__netif_subqueue_stopped(tx_ring->netdev,
+						     tx_ring->queue_index) &&
+			    !test_bit(__RNP_DOWN, &adapter->state)) {
+				netif_wake_subqueue(tx_ring->netdev,
+						    tx_ring->queue_index);
+				++tx_ring->tx_stats.restart_queue;
+			}
+		}
+	}
+
+	if (adapter->eee_active && total_packets) {
+		if (!adapter->tx_path_in_lpi_mode) {
+			if (!test_bit(__RNP_EEE_REMOVE, &adapter->state))
+				mod_timer(&adapter->eee_ctrl_timer,
+					  RNP_LPI_T(adapter->eee_timer));
+		}
+	}
+
+	return !!budget;
+}
+
+static inline void rnpgbe_rx_hash(struct rnpgbe_ring *ring,
+				  union rnpgbe_rx_desc *rx_desc,
+				  struct sk_buff *skb)
+{
+	int rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+#define RNP_RSS_TYPE_MASK 0xc0
+	rss_type = rx_desc->wb.cmd & RNP_RSS_TYPE_MASK;
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash),
+		     rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
+/**
+ * rnpgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
+ * @skb: skb currently being received and modified
+ **/
+static inline void rnpgbe_rx_checksum(struct rnpgbe_ring *ring,
+				      union rnpgbe_rx_desc *rx_desc,
+				      struct sk_buff *skb)
+{
+	skb_checksum_none_assert(skb);
+	/* Rx csum disabled */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* outer L3/L4 error reported by hardware; such frames only reach
+	 * the stack in promisc or rx-all mode
+	 */
+	if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_ERR_MASK))
+		return;
+	ring->rx_stats.csum_good++;
+	/* at least it is a ip packet which has ip checksum */
+	/* at least it is an IP packet whose IP checksum was checked */
+	/* It must be a TCP or UDP packet with a valid checksum */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void rnpgbe_update_rx_tail(struct rnpgbe_ring *rx_ring, u32 val)
+{
+	rx_ring->next_to_use = val;
+	/* update next to alloc since we have filled the ring */
+	rx_ring->next_to_alloc = val;
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	rnpgbe_wr_reg(rx_ring->tail, val);
+
+	/* if the rx_ring is in delayed-setup mode the tail written to
+	 * hardware must not advance next_to_use beyond RNP_MIN_RXD
+	 */
+}
+
+#if (PAGE_SIZE < 8192)
+#define RNP_MAX_2K_FRAME_BUILD_SKB (RNP_RXBUFFER_1536 - NET_IP_ALIGN)
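+/* true when NET_SKB_PAD plus a 1536-byte frame no longer fits in a 2K
+ * buffer once the skb_shared_info overhead is accounted for
+ */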
+#define RNP_2K_TOO_SMALL_WITH_PADDING                                          \
+	((NET_SKB_PAD + RNP_RXBUFFER_1536) > SKB_WITH_OVERHEAD(RNP_RXBUFFER_2K))
+
+static inline int rnpgbe_compute_pad(int rx_buf_len)
+{
+	int page_size, pad_size;
+
+	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+	return pad_size;
+}
+
+static inline int rnpgbe_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (RNP_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len = RNP_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = RNP_RXBUFFER_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+	return rnpgbe_compute_pad(rx_buf_len);
+}
+
+#define RNP_SKB_PAD rnpgbe_skb_pad()
+#else /* PAGE_SIZE < 8192 */
+#define RNP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
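+
+/* With build_skb each receive buffer is laid out as
+ * [RNP_SKB_PAD headroom][packet data][skb_shared_info]; rnpgbe_build_skb()
+ * builds the skb from the start of that headroom and then reserves it
+ * before handing the skb up the stack.
+ */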
+/**
+ * rnpgbe_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void rnpgbe_process_skb_fields(struct rnpgbe_ring *rx_ring,
+				      union rnpgbe_rx_desc *rx_desc,
+				      struct sk_buff *skb)
+{
+	struct net_device *dev = rx_ring->netdev;
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	rnpgbe_rx_hash(rx_ring, rx_desc, skb);
+
+	rnpgbe_rx_checksum(rx_ring, rx_desc, skb);
+
+	if (hw->ncsi_en) {
+		/* NC-SI with outer stags enabled */
+		if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) {
+			/* strip the outer stag if it matches the one we set */
+			u8 header[ETH_ALEN + ETH_ALEN];
+			u8 *data = skb->data;
+			struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
+			u16 vlan_tci;
+
+			switch (adapter->outer_vlan_type) {
+			case outer_vlan_type_88a8:
+				vlan_tci = htons(ETH_P_8021AD);
+				break;
+			case outer_vlan_type_9100:
+				vlan_tci = htons(ETH_P_QINQ1);
+				break;
+			case outer_vlan_type_9200:
+				vlan_tci = htons(ETH_P_QINQ2);
+				break;
+			default:
+				vlan_tci = htons(ETH_P_8021AD);
+				break;
+			}
+
+			if (veth->h_vlan_proto != vlan_tci)
+				goto skip_vlan;
+
+			if (veth->h_vlan_TCI != htons(adapter->stags_vid))
+				goto skip_vlan;
+
+			memcpy(header, data, ETH_ALEN + ETH_ALEN);
+			memcpy(skb->data + 4, header, ETH_ALEN + ETH_ALEN);
+			skb->len -= 4;
+			skb->data += 4;
+			goto skip_vlan;
+
+		}
+	}
+
+	if (((dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+	    || (dev->features & NETIF_F_HW_VLAN_STAG_RX)) &&
+	    rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_VLAN_VALID) &&
+	    !ignore_veb_vlan(rx_ring->q_vector->adapter, rx_desc)) {
+		if (rnpgbe_test_ext_cmd(rx_desc, REV_OUTER_VLAN)) {
+			u16 vid_inner = le16_to_cpu(rx_desc->wb.vlan);
+			u16 vid_outer;
+			u16 vlan_tci = htons(ETH_P_8021Q);
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					       vid_inner);
+			if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_STAG)) {
+				switch (rx_ring->q_vector->adapter
+						->outer_vlan_type) {
+				case outer_vlan_type_88a8:
+					vlan_tci = htons(ETH_P_8021AD);
+					break;
+				case outer_vlan_type_9100:
+					vlan_tci = htons(ETH_P_QINQ1);
+					break;
+				case outer_vlan_type_9200:
+					vlan_tci = htons(ETH_P_QINQ2);
+					break;
+				default:
+					vlan_tci = htons(ETH_P_8021AD);
+					break;
+				}
+			} else {
+				vlan_tci = htons(ETH_P_8021Q);
+			}
+			vid_outer = le16_to_cpu(rx_desc->wb.mark);
+			/* in stags mode only the configured stag is ignored */
+			if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) {
+				/* push the outer tag unless it matches the
+				 * configured stag; a cvlan outer tag is
+				 * always pushed
+				 */
+				if ((vid_outer != adapter->stags_vid) ||
+				    (vlan_tci == htons(ETH_P_8021Q))) {
+					/* push the inner tag into the packet,
+					 * then record the outer tag
+					 */
+					skb = __vlan_hwaccel_push_inside(skb);
+					__vlan_hwaccel_put_tag(skb, vlan_tci,
+							       vid_outer);
+					/* TODO: if the outer type is not 88a8,
+					 * push again to avoid a kernel crash
+					 */
+				}
+				/* if vid_outer is stags_vid do nothing */
+			} else {
+				/* push outer */
+				skb = __vlan_hwaccel_push_inside(skb);
+				__vlan_hwaccel_put_tag(skb, vlan_tci,
+						       vid_outer);
+			}
+
+		} else {
+			/* only inner vlan */
+			u16 vid = le16_to_cpu(rx_desc->wb.vlan);
+			/* check vlan type */
+			if (rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_STAG)) {
+				if ((adapter->flags2 &
+				     RNP_FLAG2_VLAN_STAGS_ENABLED) &&
+				    (vid == adapter->stags_vid)) {
+					/* ignore this stag, nothing to do */
+				} else {
+					/* handle other stag types */
+					switch (rx_ring->q_vector->adapter
+							->outer_vlan_type) {
+					case outer_vlan_type_88a8:
+						__vlan_hwaccel_put_tag(
+							skb,
+							htons(ETH_P_8021AD),
+							vid);
+						break;
+					case outer_vlan_type_9100:
+						__vlan_hwaccel_put_tag(
+							skb, htons(ETH_P_QINQ1),
+							vid);
+						break;
+					case outer_vlan_type_9200:
+						__vlan_hwaccel_put_tag(
+							skb, htons(ETH_P_QINQ2),
+							vid);
+						break;
+					default:
+						__vlan_hwaccel_put_tag(
+							skb,
+							htons(ETH_P_8021AD),
+							vid);
+						break;
+					}
+				}
+			} else {
+				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+						       vid);
+			}
+		}
+		rx_ring->rx_stats.vlan_remove++;
+	}
+skip_vlan:
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	skb->protocol = eth_type_trans(skb, dev);
+}
+
+static void rnpgbe_rx_skb(struct rnpgbe_q_vector *q_vector, struct sk_buff *skb)
+{
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+
+	if (!(adapter->flags & RNP_FLAG_IN_NETPOLL))
+		napi_gro_receive(&q_vector->napi, skb);
+	else
+		netif_rx(skb);
+}
+
+/* drop this packet if it has errors */
+static bool rnpgbe_check_csum_error(struct rnpgbe_ring *rx_ring,
+				    union rnpgbe_rx_desc *rx_desc,
+				    unsigned int size,
+				    unsigned int *driver_drop_packets)
+{
+	bool err = false;
+
+	struct net_device *netdev = rx_ring->netdev;
+
+	if (netdev->features & NETIF_F_RXCSUM) {
+		if (unlikely(rnpgbe_test_staterr(rx_desc,
+						 RNP_RXD_STAT_ERR_MASK))) {
+			rx_debug_printk("rx error: VEB:%s mark:0x%x cmd:0x%x\n",
+					(rx_ring->q_vector->adapter->flags &
+					 RNP_FLAG_SRIOV_ENABLED) ?
+						"On" :
+						"Off",
+					rx_desc->wb.mark, rx_desc->wb.cmd);
+			/* push this packet to stack if in promisc mode */
+			rx_ring->rx_stats.csum_err++;
+
+			if ((!(netdev->flags & IFF_PROMISC) &&
+			     (!(netdev->features & NETIF_F_RXALL)))) {
+				/* if we fixed in hw */
+				if (rx_ring->ring_flags & RNP_RING_CHKSM_FIX) {
+					err = true;
+					goto skip_fix;
+				}
+				/* ignore the L4 checksum error if the packet
+				 * is not IPv4
+				 */
+				if (unlikely(rnpgbe_test_staterr(
+						     rx_desc,
+						     RNP_RXD_STAT_L4_MASK) &&
+					     (!(rx_desc->wb.rev1 &
+						RNP_RX_L3_TYPE_MASK)))) {
+					rx_ring->rx_stats.csum_err--;
+					goto skip_fix;
+				}
+				/* ignore SCTP checksum errors on frames
+				 * shorter than 60 bytes
+				 */
+				if (unlikely(rnpgbe_test_staterr(
+					    rx_desc, RNP_RXD_STAT_SCTP_MASK))) {
+					if ((size > 60) &&
+					    (rx_desc->wb.rev1 &
+					     RNP_RX_L3_TYPE_MASK)) {
+						err = true;
+					} else {
+						/* hw falsely flags SCTP
+						 * frames shorter than 60
+						 */
+						rx_ring->rx_stats.csum_err--;
+					}
+				} else {
+					err = true;
+				}
+			}
+		}
+	}
+skip_fix:
+	if (err) {
+		u32 ntc = rx_ring->next_to_clean + 1;
+		struct rnpgbe_rx_buffer *rx_buffer;
+#if (PAGE_SIZE < 8192)
+		unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+#else
+		unsigned int truesize =
+			ring_uses_build_skb(rx_ring) ?
+				SKB_DATA_ALIGN(RNP_SKB_PAD + size) :
+				SKB_DATA_ALIGN(size);
+#endif
+
+		if (likely(rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+			*driver_drop_packets = *driver_drop_packets + 1;
+
+		/* we are reusing so sync this buffer for CPU use */
+		rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset, size,
+					      DMA_FROM_DEVICE);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+#ifdef OPTM_WITH_LPAGE
+		rnpgbe_put_rx_buffer(rx_ring, rx_buffer);
+#else
+		rnpgbe_put_rx_buffer(rx_ring, rx_buffer, NULL);
+#endif
+		/* update to the next desc */
+		ntc = (ntc < rx_ring->count) ? ntc : 0;
+		rx_ring->next_to_clean = ntc;
+	}
+	return err;
+}
+
+/**
+ * rnpgbe_rx_ring_reinit - reinit an rx ring using the count in ->reset_count
+ * @adapter: board private structure
+ * @rx_ring: rx descriptor ring to reinitialize
+ */
+int rnpgbe_rx_ring_reinit(struct rnpgbe_adapter *adapter,
+			  struct rnpgbe_ring *rx_ring)
+{
+	struct rnpgbe_ring *temp_ring;
+	int err = 0;
+
+	if (rx_ring->count == rx_ring->reset_count)
+		return 0;
+	temp_ring = vmalloc(sizeof(struct rnpgbe_ring));
+	if (!temp_ring)
+		return -ENOMEM;
+
+	/* stop the rx queue before swapping in the resized ring */
+	rnpgbe_disable_rx_queue(adapter, rx_ring);
+	memset(temp_ring, 0x00, sizeof(struct rnpgbe_ring));
+	/* reinit for this ring */
+	memcpy(temp_ring, rx_ring, sizeof(struct rnpgbe_ring));
+	/* setup new count */
+	temp_ring->count = rx_ring->reset_count;
+	err = rnpgbe_setup_rx_resources(temp_ring, adapter);
+	if (err) {
+		rnpgbe_free_rx_resources(temp_ring);
+		goto err_setup;
+	}
+	rnpgbe_free_rx_resources(rx_ring);
+	memcpy(rx_ring, temp_ring, sizeof(struct rnpgbe_ring));
+	rnpgbe_configure_rx_ring(adapter, rx_ring);
+err_setup:
+	vfree(temp_ring);
+	/* start rx */
+	ring_wr32(rx_ring, RNP_DMA_RX_START, 1);
+	return err;
+}
+
+#ifndef OPTM_WITH_LPAGE
+/**
+ * rnpgbe_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count)
+{
+	union rnpgbe_rx_desc *rx_desc;
+	struct rnpgbe_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+	u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24));
+	u16 bufsz;
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = RNP_RX_DESC(rx_ring, i);
+
+	BUG_ON(rx_desc == NULL);
+
+	bi = &rx_ring->rx_buffer_info[i];
+
+	BUG_ON(bi == NULL);
+
+	i -= rx_ring->count;
+	bufsz = rnpgbe_rx_bufsz(rx_ring);
+
+	do {
+		if (!rnpgbe_alloc_mapped_page(rx_ring, bi))
+			break;
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE);
+
+		/*
+		 * Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->pkt_addr =
+			cpu_to_le64(bi->dma + bi->page_offset + fun_id);
+
+		/* clean dd */
+		rx_desc->resv_cmd = 0;
+
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = RNP_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		cleaned_count--;
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i)
+		rnpgbe_update_rx_tail(rx_ring, i);
+}
+
+#endif
+
+static inline unsigned int rnpgbe_rx_offset(struct rnpgbe_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? RNP_SKB_PAD : 0;
+}
+
+#ifdef OPTM_WITH_LPAGE
+/**
+ * rnpgbe_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void rnpgbe_alloc_rx_buffers(struct rnpgbe_ring *rx_ring, u16 cleaned_count)
+{
+	union rnpgbe_rx_desc *rx_desc;
+	struct rnpgbe_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+	u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24));
+	u16 bufsz;
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = RNP_RX_DESC(rx_ring, i);
+
+	BUG_ON(rx_desc == NULL);
+
+	bi = &rx_ring->rx_buffer_info[i];
+
+	BUG_ON(bi == NULL);
+
+	i -= rx_ring->count;
+	bufsz = rnpgbe_rx_bufsz(rx_ring);
+
+	do {
+		int count = 1;
+		struct page *page;
+
+		if (!rnpgbe_alloc_mapped_page(rx_ring, bi, rx_desc, bufsz,
+					      fun_id))
+			break;
+		page = bi->page;
+
+		rx_desc->resv_cmd = 0;
+
+		rx_desc++;
+		i++;
+		bi++;
+
+		if (unlikely(!i)) {
+			rx_desc = RNP_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		rx_desc->resv_cmd = 0;
+
+		cleaned_count--;
+
+		while (count < rx_ring->rx_page_buf_nums && cleaned_count) {
+			dma_addr_t dma;
+
+			bi->page_offset = rx_ring->rx_per_buf_mem * count +
+					  rnpgbe_rx_offset(rx_ring);
+			/* map page for use */
+			dma = dma_map_page_attrs(rx_ring->dev, page,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE,
+						 RNP_RX_DMA_ATTR);
+
+			if (dma_mapping_error(rx_ring->dev, dma)) {
+				netdev_dbg(rx_ring->netdev,
+					   "map second error\n");
+				rx_ring->rx_stats.alloc_rx_page_failed++;
+				break;
+			}
+
+			bi->dma = dma;
+			bi->page = page;
+
+			page_ref_add(page, USHRT_MAX);
+			bi->pagecnt_bias = USHRT_MAX;
+
+			/* sync the buffer for use by the device */
+			dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+							 0, bufsz,
+							 DMA_FROM_DEVICE);
+
+			/*
+			 * Refresh the desc even if buffer_addrs didn't change
+			 * because each write-back erases this info.
+			 */
+			rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id);
+			/* clean dd */
+			rx_desc->resv_cmd = 0;
+
+			rx_desc++;
+			bi++;
+			i++;
+			if (unlikely(!i)) {
+				rx_desc = RNP_RX_DESC(rx_ring, 0);
+				bi = rx_ring->rx_buffer_info;
+				i -= rx_ring->count;
+			}
+			count++;
+			cleaned_count--;
+		}
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i)
+		rnpgbe_update_rx_tail(rx_ring, i);
+}
+#endif /* OPTM_WITH_LPAGE */
+/**
+ * rnpgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int rnpgbe_get_headlen(unsigned char *data,
+				       unsigned int max_len)
+{
+	union {
+		unsigned char *network;
+		/* l2 headers */
+		struct ethhdr *eth;
+		struct vlan_hdr *vlan;
+		/* l3 headers */
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	__be16 protocol;
+	u8 nexthdr = 0; /* default to not TCP */
+	u8 hlen;
+
+	/* this should never happen, but better safe than sorry */
+	if (max_len < ETH_HLEN)
+		return max_len;
+
+	/* initialize network frame pointer */
+	hdr.network = data;
+
+	/* set first protocol and move network header forward */
+	protocol = hdr.eth->h_proto;
+	hdr.network += ETH_HLEN;
+
+	/* handle any vlan tag if present */
+	if (protocol == htons(ETH_P_8021Q)) {
+		if ((hdr.network - data) > (max_len - VLAN_HLEN))
+			return max_len;
+
+		protocol = hdr.vlan->h_vlan_encapsulated_proto;
+		hdr.network += VLAN_HLEN;
+	}
+
+	/* handle L3 protocols */
+	if (protocol == htons(ETH_P_IP)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+			return max_len;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct iphdr))
+			return hdr.network - data;
+
+		/* record next protocol if header is present */
+		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+			nexthdr = hdr.ipv4->protocol;
+	} else if (protocol == htons(ETH_P_IPV6)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hlen = sizeof(struct ipv6hdr);
+	} else {
+		return hdr.network - data;
+	}
+
+	/* relocate pointer to start of L4 header */
+	hdr.network += hlen;
+
+	/* finally sort out TCP/UDP */
+	if (nexthdr == IPPROTO_TCP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+			return max_len;
+
+		/* access doff as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[12] & 0xF0) >> 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct tcphdr))
+			return hdr.network - data;
+
+		hdr.network += hlen;
+	} else if (nexthdr == IPPROTO_UDP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+			return max_len;
+
+		hdr.network += sizeof(struct udphdr);
+	}
+
+	/*
+	 * If everything has gone correctly hdr.network should be the
+	 * data section of the packet and will be the end of the header.
+	 * If not then it probably represents the end of the last recognized
+	 * header.
+	 */
+	if ((hdr.network - data) < max_len)
+		return hdr.network - data;
+	else
+		return max_len;
+}
+
+#ifdef OPTM_WITH_LPAGE
+
+/**
+ * rnpgbe_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it clears the descriptor
+ * for reuse and returns true to indicate that this is in fact a non-EOP
+ * buffer.
+ **/
+static bool rnpgbe_is_non_eop(struct rnpgbe_ring *rx_ring,
+			      union rnpgbe_rx_desc *rx_desc)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(RNP_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+		return false;
+	/* place skb in next buffer to be received */
+
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+
+	return true;
+}
+
+static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring,
+				     struct rnpgbe_rx_buffer *bi,
+				     union rnpgbe_rx_desc *rx_desc, u16 bufsz,
+				     u64 fun_id)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	page = dev_alloc_pages(RNP_ALLOC_PAGE_ORDER);
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	bi->page_offset = rnpgbe_rx_offset(rx_ring);
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, bi->page_offset, bufsz,
+				 DMA_FROM_DEVICE,
+				 RNP_RX_DMA_ATTR);
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, RNP_ALLOC_PAGE_ORDER);
+		netdev_dbg(rx_ring->netdev, "map failed\n");
+
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = rnpgbe_rx_offset(rx_ring);
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
+	rx_ring->rx_stats.alloc_rx_page++;
+
+	/* sync the buffer for use by the device */
+	dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, bufsz,
+					 DMA_FROM_DEVICE);
+	/*
+	 * Refresh the desc even if buffer_addrs didn't change
+	 * because each write-back erases this info.
+	 */
+	rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id);
+
+	return true;
+}
+
+#else
+static bool rnpgbe_alloc_mapped_page(struct rnpgbe_ring *rx_ring,
+				     struct rnpgbe_rx_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	page = dev_alloc_pages(rnpgbe_rx_pg_order(rx_ring));
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 rnpgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+				 RNP_RX_DMA_ATTR);
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, rnpgbe_rx_pg_order(rx_ring));
+		netdev_dbg(rx_ring->netdev, "map failed\n");
+
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = rnpgbe_rx_offset(rx_ring);
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
+	rx_ring->rx_stats.alloc_rx_page++;
+
+	return true;
+}
+
+/**
+ * rnpgbe_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool rnpgbe_is_non_eop(struct rnpgbe_ring *rx_ring,
+			      union rnpgbe_rx_desc *rx_desc,
+			      struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(RNP_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+		return false;
+	/* place skb in next buffer to be received */
+	rx_ring->rx_buffer_info[ntc].skb = skb;
+	rx_ring->rx_stats.non_eop_descs++;
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+
+	return true;
+}
+
+#endif
+/**
+ * rnpgbe_pull_tail - rnpgbe specific version of skb_pull_tail
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an rnpgbe specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void rnpgbe_pull_tail(struct sk_buff *skb)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+	unsigned char *va;
+	unsigned int pull_len;
+
+	/*
+	 * it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lowmem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	/*
+	 * we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = rnpgbe_get_headlen(va, RNP_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	skb_frag_off_add(frag, pull_len);
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+static bool rnpgbe_check_src_mac(struct sk_buff *skb, struct net_device *netdev)
+{
+	u8 *data = (u8 *)skb->data;
+	struct netdev_hw_addr *ha;
+
+	if (!is_multicast_ether_addr(data))
+		return false;
+
+	/* drop the frame if its source mac matches one of our own addresses */
+	if (memcmp(data + netdev->addr_len, netdev->dev_addr,
+		   netdev->addr_len) == 0) {
+		dev_kfree_skb_any(skb);
+		return true;
+	}
+	netdev_for_each_uc_addr(ha, netdev) {
+		if (memcmp(data + netdev->addr_len, ha->addr,
+			   netdev->addr_len) == 0) {
+			dev_kfree_skb_any(skb);
+			return true;
+		}
+	}
+	return false;
+}
+
+/**
+ * rnpgbe_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check if the skb is valid. In the XDP case it will be an error pointer.
+ * Return true in this case to abort processing and advance to next
+ * descriptor.
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right.  These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool rnpgbe_cleanup_headers(struct rnpgbe_ring *rx_ring,
+				   union rnpgbe_rx_desc *rx_desc,
+				   struct sk_buff *skb)
+{
+	struct net_device *netdev = rx_ring->netdev;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+#ifndef OPTM_WITH_LPAGE
+	/* XDP packets use error pointer so abort at this point */
+	if (IS_ERR(skb))
+		return true;
+#endif
+	/* place header in linear portion of buffer */
+	if (!skb_headlen(skb))
+		rnpgbe_pull_tail(skb);
+	/* pad skb->len up to 60 bytes; if eth_skb_pad returns an error
+	 * the skb was already freed
+	 */
+	if (eth_skb_pad(skb))
+		return true;
+
+	if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+	    (!(rx_ring->ring_flags & RNP_RING_VEB_MULTI_FIX)))
+		return rnpgbe_check_src_mac(skb, rx_ring->netdev);
+	else
+		return false;
+}
+
+/**
+ * rnpgbe_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void rnpgbe_reuse_rx_page(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *old_buff)
+{
+	struct rnpgbe_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_buffer_info[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/*
+	 * Transfer page from old buffer to new buffer.
+	 * Move each member individually to avoid possible store
+	 * forwarding stalls and unnecessary copy of skb.
+	 */
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static inline bool rnpgbe_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
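+/* Page reuse: each allocated rx page takes a large up-front reference
+ * (pagecnt_bias starts at USHRT_MAX) so the page refcount does not need
+ * to be touched per packet.  With 4K pages the two halves of the page are
+ * alternated by XOR-ing page_offset; with larger pages the offset simply
+ * advances.  A buffer is only recycled while the driver still owns every
+ * reference except those handed to the stack.
+ */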
+static bool rnpgbe_can_reuse_rx_page(struct rnpgbe_rx_buffer *rx_buffer)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+	struct page *page = rx_buffer->page;
+
+#ifdef OPTM_WITH_LPAGE
+	return false;
+#endif
+	/* avoid re-using remote pages */
+	if (unlikely(rnpgbe_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+		return false;
+#else
+
+	/*
+	 * The last offset is a bit aggressive in that we assume the
+	 * worst case of FCoE being enabled and using a 3K buffer.
+	 * However this should have minimal impact as the 1K extra is
+	 * still less than one buffer in size.
+	 */
+#define RNP_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE) - RNP_RXBUFFER_2K)
+	if (rx_buffer->page_offset > RNP_LAST_OFFSET)
+		return false;
+#endif
+
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
+}
+
+/**
+ * rnpgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: size of data
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static void rnpgbe_add_rx_frag(struct rnpgbe_ring *rx_ring,
+			       struct rnpgbe_rx_buffer *rx_buffer,
+			       struct sk_buff *skb, unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+					SKB_DATA_ALIGN(RNP_SKB_PAD + size) :
+					SKB_DATA_ALIGN(size);
+#endif
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+			rx_buffer->page_offset, size, truesize);
+
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
+#ifdef OPTM_WITH_LPAGE
+static struct rnpgbe_rx_buffer *
+rnpgbe_get_rx_buffer(struct rnpgbe_ring *rx_ring, union rnpgbe_rx_desc *rx_desc,
+		     const unsigned int size)
+{
+	struct rnpgbe_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+
+	rx_buf_dump("rx buf",
+		    page_address(rx_buffer->page) + rx_buffer->page_offset,
+		    rx_desc->wb.len);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, 0, size,
+				      DMA_FROM_DEVICE);
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+#else
+
+static struct rnpgbe_rx_buffer *
+rnpgbe_get_rx_buffer(struct rnpgbe_ring *rx_ring, union rnpgbe_rx_desc *rx_desc,
+		     struct sk_buff **skb, const unsigned int size)
+{
+	struct rnpgbe_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+	*skb = rx_buffer->skb;
+
+	rx_buf_dump("rx buf",
+		    page_address(rx_buffer->page) + rx_buffer->page_offset,
+		    rx_desc->wb.len);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+				      rx_buffer->page_offset, size,
+				      DMA_FROM_DEVICE);
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+#endif
+
+#ifdef OPTM_WITH_LPAGE
+static void rnpgbe_put_rx_buffer(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *rx_buffer)
+{
+	if (rnpgbe_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		rnpgbe_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE,
+				     RNP_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+}
+
+#else
+static void rnpgbe_put_rx_buffer(struct rnpgbe_ring *rx_ring,
+				 struct rnpgbe_rx_buffer *rx_buffer,
+				 struct sk_buff *skb)
+{
+	if (rnpgbe_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		rnpgbe_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     RNP_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+	rx_buffer->skb = NULL;
+}
+#endif
+
+#ifdef OPTM_WITH_LPAGE
+static struct sk_buff *rnpgbe_construct_skb(struct rnpgbe_ring *rx_ring,
+					    struct rnpgbe_rx_buffer *rx_buffer,
+					    union rnpgbe_rx_desc *rx_desc,
+					    unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int headlen;
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	prefetchw(skb->data);
+
+	/* Determine available headroom for copy */
+	headlen = size;
+	if (headlen > RNP_RX_HDR_SIZE)
+		headlen = rnpgbe_get_headlen(va, RNP_RX_HDR_SIZE);
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+	/* update all of the pointers */
+	size -= headlen;
+
+	if (size) {
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				(va + headlen) - page_address(rx_buffer->page),
+				size, truesize);
+		rx_buffer->page_offset += truesize;
+	} else {
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+static struct sk_buff *rnpgbe_build_skb(struct rnpgbe_ring *rx_ring,
+					struct rnpgbe_rx_buffer *rx_buffer,
+					union rnpgbe_rx_desc *rx_desc,
+					unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(size + RNP_SKB_PAD);
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(va - RNP_SKB_PAD, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, RNP_SKB_PAD);
+	__skb_put(skb, size);
+
+	return skb;
+}
+
+#else
+
+static struct sk_buff *rnpgbe_construct_skb(struct rnpgbe_ring *rx_ring,
+					    struct rnpgbe_rx_buffer *rx_buffer,
+					    struct xdp_buff *xdp,
+					    union rnpgbe_rx_desc *rx_desc)
+{
+	unsigned int size = xdp->data_end - xdp->data;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize =
+		SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(xdp->data);
+#if L1_CACHE_BYTES < 128
+	prefetch(xdp->data + L1_CACHE_BYTES);
+#endif
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNP_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	prefetchw(skb->data);
+
+	if (size > RNP_RX_HDR_SIZE) {
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				xdp->data - page_address(rx_buffer->page), size,
+				truesize);
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+	} else {
+		memcpy(__skb_put(skb, size), xdp->data,
+		       ALIGN(size, sizeof(long)));
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+static struct sk_buff *rnpgbe_build_skb(struct rnpgbe_ring *rx_ring,
+					struct rnpgbe_rx_buffer *rx_buffer,
+					struct xdp_buff *xdp,
+					union rnpgbe_rx_desc *rx_desc)
+{
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	void *va = xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize =
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+		SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(xdp->data_hard_start, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	__skb_put(skb, xdp->data_end - xdp->data);
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+	/* update buffer offset */
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+
+	return skb;
+}
+
+#endif
+
+#define RNP_XDP_PASS 0
+#define RNP_XDP_CONSUMED 1
+#define RNP_XDP_TX 2
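+/* XDP verdict codes: an attached XDP program would be reported back to
+ * rnpgbe_clean_rx_irq() as a negative error pointer (see the
+ * PTR_ERR(skb) == -RNP_XDP_TX handling below); the hook itself is not
+ * wired up in this version.
+ */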
+
+#ifndef OPTM_WITH_LPAGE
+static void rnpgbe_rx_buffer_flip(struct rnpgbe_ring *rx_ring,
+				  struct rnpgbe_rx_buffer *rx_buffer,
+				  unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbe_rx_pg_size(rx_ring) / 2;
+
+	rx_buffer->page_offset ^= truesize;
+#else
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+					SKB_DATA_ALIGN(RNP_SKB_PAD + size) :
+					SKB_DATA_ALIGN(size);
+
+	rx_buffer->page_offset += truesize;
+#endif
+}
+#endif
+
+#ifdef OPTM_WITH_LPAGE
+
+/**
+ * rnpgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector,
+			       struct rnpgbe_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int err_packets = 0;
+	unsigned int driver_drop_packets = 0;
+	struct sk_buff *skb = rx_ring->skb;
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	u16 cleaned_count = rnpgbe_desc_unused_rx(rx_ring);
+
+	while (likely(total_rx_packets < budget)) {
+		union rnpgbe_rx_desc *rx_desc;
+		struct rnpgbe_rx_buffer *rx_buffer;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNP_RX_BUFFER_WRITE) {
+			rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+		rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc));
+		rx_debug_printk("  dd set: %s\n",
+				(rx_desc->wb.cmd & RNP_RXD_STAT_DD) ? "Yes" :
+								      "No");
+
+		if (!rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_DD))
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+		rx_debug_printk(
+			"queue:%d  rx-desc:%d has-data len:%d next_to_clean %d\n",
+			rx_ring->rnpgbe_queue_idx, rx_ring->next_to_clean,
+			rx_desc->wb.len, rx_ring->next_to_clean);
+
+		/* handle padding */
+		if ((adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING) &&
+		    (!(adapter->priv_flags & RNP_PRIV_FLAG_PADDING_DEBUG))) {
+			if (likely(rnpgbe_test_staterr(rx_desc,
+						       RNP_RXD_STAT_EOP))) {
+				size = le16_to_cpu(rx_desc->wb.len) -
+				       le16_to_cpu(rx_desc->wb.padding_len);
+			} else {
+				size = le16_to_cpu(rx_desc->wb.len);
+			}
+		} else {
+			/* size should not be zero */
+			size = le16_to_cpu(rx_desc->wb.len);
+		}
+
+		if (!size)
+			break;
+
+		/*
+		 * check for checksum errors here; a packet may span several
+		 * descriptors and hardware sets csum_err in each of them,
+		 * but the last SCTP descriptor of a frame shorter than 60
+		 * bytes may still be flagged by mistake
+		 */
+		if (rnpgbe_check_csum_error(rx_ring, rx_desc, size,
+					    &driver_drop_packets)) {
+			cleaned_count++;
+			err_packets++;
+			if (err_packets + total_rx_packets > budget)
+				break;
+			continue;
+		}
+
+		rx_buffer = rnpgbe_get_rx_buffer(rx_ring, rx_desc, size);
+
+		if (skb) {
+			rnpgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		} else if (ring_uses_build_skb(rx_ring)) {
+			skb = rnpgbe_build_skb(rx_ring, rx_buffer, rx_desc,
+					       size);
+		} else {
+			skb = rnpgbe_construct_skb(rx_ring, rx_buffer, rx_desc,
+						   size);
+		}
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
+			break;
+		}
+		if (module_enable_ptp && adapter->ptp_rx_en &&
+		    adapter->flags2 & RNP_FLAG2_PTP_ENABLED)
+			rnpgbe_ptp_get_rx_hwstamp(adapter, rx_desc, skb);
+
+		rnpgbe_put_rx_buffer(rx_ring, rx_buffer);
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (rnpgbe_is_non_eop(rx_ring, rx_desc))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (rnpgbe_cleanup_headers(rx_ring, rx_desc, skb)) {
+			/* we should clean it since we used all info in it */
+			rx_desc->wb.cmd = 0;
+			skb = NULL;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpgbe_process_skb_fields(rx_ring, rx_desc, skb);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+		rnpgbe_rx_skb(q_vector, skb);
+		skb = NULL;
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	rx_ring->skb = skb;
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	rx_ring->rx_stats.rx_clean_count += total_rx_packets;
+	rx_ring->rx_stats.rx_clean_times++;
+	if (rx_ring->rx_stats.rx_clean_times > 10) {
+		rx_ring->rx_stats.rx_clean_times = 0;
+		rx_ring->rx_stats.rx_clean_count = 0;
+	}
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+
+	return total_rx_packets;
+}
+
+#else
+
+/**
+ * rnpgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int rnpgbe_clean_rx_irq(struct rnpgbe_q_vector *q_vector,
+			       struct rnpgbe_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int err_packets = 0;
+	unsigned int driver_drop_packets = 0;
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	u16 cleaned_count = rnpgbe_desc_unused_rx(rx_ring);
+	bool xdp_xmit = false;
+	struct xdp_buff xdp;
+
+	xdp.data = NULL;
+	xdp.data_end = NULL;
+
+	while (likely(total_rx_packets < budget)) {
+		union rnpgbe_rx_desc *rx_desc;
+		struct rnpgbe_rx_buffer *rx_buffer;
+		struct sk_buff *skb;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNP_RX_BUFFER_WRITE) {
+			rnpgbe_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+		rx_desc = RNP_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc));
+		rx_debug_printk("  dd set: %s\n",
+				(rx_desc->wb.cmd & RNP_RXD_STAT_DD) ? "Yes" :
+								      "No");
+
+		if (!rnpgbe_test_staterr(rx_desc, RNP_RXD_STAT_DD))
+			break;
+
+		rx_debug_printk(
+			"queue:%d  rx-desc:%d has-data len:%d next_to_clean %d\n",
+			rx_ring->rnpgbe_queue_idx, rx_ring->next_to_clean,
+			rx_desc->wb.len, rx_ring->next_to_clean);
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+		size = le16_to_cpu(rx_desc->wb.len);
+		if (!size)
+			break;
+
+		/*
+		 * check for checksum errors here; a packet may span several
+		 * descriptors and hardware sets csum_err in each of them,
+		 * but the last SCTP descriptor of a frame shorter than 60
+		 * bytes may still be flagged by mistake
+		 */
+		if (rnpgbe_check_csum_error(rx_ring, rx_desc, size,
+					    &driver_drop_packets)) {
+			cleaned_count++;
+			err_packets++;
+			if (err_packets + total_rx_packets > budget)
+				break;
+			continue;
+		}
+
+		rx_buffer = rnpgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
+		if (!skb) {
+			xdp.data = page_address(rx_buffer->page) +
+				   rx_buffer->page_offset;
+			xdp.data_meta = xdp.data;
+			xdp.data_hard_start =
+				xdp.data - rnpgbe_rx_offset(rx_ring);
+			xdp.data_end = xdp.data + size;
+			/* an XDP hook could be invoked here to add XDP support */
+		}
+
+		if (IS_ERR(skb)) {
+			if (PTR_ERR(skb) == -RNP_XDP_TX) {
+				xdp_xmit = true;
+				rnpgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
+			} else {
+				rx_buffer->pagecnt_bias++;
+			}
+			total_rx_packets++;
+			total_rx_bytes += size;
+		} else if (skb) {
+			rnpgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		} else if (ring_uses_build_skb(rx_ring)) {
+			skb = rnpgbe_build_skb(rx_ring, rx_buffer, &xdp,
+					       rx_desc);
+		} else {
+			skb = rnpgbe_construct_skb(rx_ring, rx_buffer, &xdp,
+						   rx_desc);
+		}
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
+			break;
+		}
+		if (module_enable_ptp && adapter->ptp_rx_en &&
+		    adapter->flags2 & RNP_FLAG2_PTP_ENABLED)
+			rnpgbe_ptp_get_rx_hwstamp(adapter, rx_desc, skb);
+
+		rnpgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (rnpgbe_is_non_eop(rx_ring, rx_desc, skb))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (rnpgbe_cleanup_headers(rx_ring, rx_desc, skb)) {
+			/* we should clean it since we used all info in it */
+			rx_desc->wb.cmd = 0;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpgbe_process_skb_fields(rx_ring, rx_desc, skb);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
+		rnpgbe_rx_skb(q_vector, skb);
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	rx_ring->rx_stats.rx_clean_count += total_rx_packets;
+	rx_ring->rx_stats.rx_clean_times++;
+	if (rx_ring->rx_stats.rx_clean_times > 10) {
+		rx_ring->rx_stats.rx_clean_times = 0;
+		rx_ring->rx_stats.rx_clean_count = 0;
+	}
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+	return total_rx_packets;
+}
+#endif
+
+/**
+ * rnpgbe_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * rnpgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void rnpgbe_configure_msix(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_q_vector *q_vector;
+	int i;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/*
+	 * configure ring-msix Registers table
+	 */
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbe_ring *ring;
+
+		q_vector = adapter->q_vector[i];
+		rnpgbe_for_each_ring(ring, q_vector->rx) {
+			rnpgbe_set_ring_vector(adapter, ring->rnpgbe_queue_idx,
+					       q_vector->v_idx);
+		}
+	}
+	/* n500/n210 must mask the "other" interrupt vectors */
+	if ((hw->hw_type == rnpgbe_hw_n500) ||
+	    (hw->hw_type == rnpgbe_hw_n210)) {
+		/*
+		 *  8  lpi | PMT
+		 *  9  BMC_RX_IRQ |
+		 *  10 PHY_IRQ | LPI_IRQ
+		 *  11 BMC_TX_IRQ |
+		 *  leaving these unmasked may cause DMAR errors when the PF
+		 *  is assigned to a VM
+		 */
+#define OTHER_VECTOR_START (8)
+#define OTHER_VECTOR_STOP (11)
+#define MSIX_UNUSED (0x0f0f)
+		for (i = OTHER_VECTOR_START; i <= OTHER_VECTOR_STOP; i++) {
+			if (hw->feature_flags & RNP_HW_SOFT_MASK_OTHER_IRQ)
+				rnpgbe_wr_reg(hw->ring_msix_base +
+						      RING_VECTOR(i),
+					      MSIX_UNUSED);
+			else
+				rnpgbe_wr_reg(
+					hw->ring_msix_base + RING_VECTOR(i), 0);
+		}
+		if (hw->feature_flags & RNP_HW_FEATURE_EEE) {
+#define LPI_IRQ (8)
+			/* only open lpi irq */
+			if (hw->feature_flags & RNP_HW_SOFT_MASK_OTHER_IRQ)
+				rnpgbe_wr_reg(hw->ring_msix_base +
+						      RING_VECTOR(LPI_IRQ),
+					      0x000f);
+			else
+				rnpgbe_wr_reg(hw->ring_msix_base +
+						      RING_VECTOR(LPI_IRQ),
+					      0x0000);
+		}
+	}
+}
+
+
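+/* Dynamic rx ITR: the moderation value is derived from the average wire
+ * size seen since the last update.  For example an average of 800 bytes
+ * becomes 824 after adding 24 bytes for CRC/preamble/gap, lands in the
+ * mid-size range (300..1200) and yields 824 / 3, i.e. roughly 274.
+ */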
+static void rnpgbe_update_ring_itr_rx(struct rnpgbe_q_vector *q_vector)
+{
+	int new_val = q_vector->itr_rx;
+	int avg_wire_size = 0;
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	unsigned int packets;
+	/* For non-gigabit speeds, just fix the interrupt rate at 4000
+	 * ints/sec - ITR timer value of 120 ticks.
+	 */
+	switch (adapter->link_speed) {
+	case RNP_LINK_SPEED_10_FULL:
+	case RNP_LINK_SPEED_100_FULL:
+		new_val = RNP_4K_ITR;
+		goto set_itr_val;
+	default:
+		break;
+	}
+
+	packets = q_vector->rx.total_packets;
+	if (packets)
+		avg_wire_size = max_t(u32, avg_wire_size,
+				      q_vector->rx.total_bytes / packets);
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
+
+	/* Add 24 bytes to size to account for CRC, preamble, and gap */
+	avg_wire_size += 24;
+
+	/* Don't starve jumbo frames */
+	avg_wire_size = min(avg_wire_size, 3000);
+
+	/* Give a little boost to mid-size frames */
+	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
+		new_val = avg_wire_size / 3;
+	else
+		new_val = avg_wire_size / 2;
+
+	if (new_val < RNP_LOWEREST_ITR)
+		new_val = RNP_LOWEREST_ITR;
+
+set_itr_val:
+	if (q_vector->rx.itr != new_val) {
+		q_vector->rx.update_count++;
+		if (q_vector->rx.update_count >= 2) {
+			q_vector->rx.itr = new_val;
+			q_vector->rx.update_count = 0;
+		}
+	} else {
+		q_vector->rx.update_count = 0;
+	}
+
+clear_counts:
+	q_vector->rx.total_bytes = 0;
+	q_vector->rx.total_packets = 0;
+}
+
+static void rnpgbe_write_eitr_rx(struct rnpgbe_q_vector *q_vector)
+{
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 new_itr_rx = q_vector->rx.itr;
+	u32 old_itr_rx = q_vector->rx.itr;
+	struct rnpgbe_ring *ring;
+
+	new_itr_rx = new_itr_rx * hw->usecstocount;
+	/* if we are in auto mode write to hw */
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE)) {
+		rnpgbe_for_each_ring(ring, q_vector->rx) {
+			ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER,
+				  new_itr_rx);
+			if (ring->ring_flags & RNP_RING_LOWER_ITR) {
+				/* skip if we are already in this mode */
+				if (q_vector->itr_rx == RNP_LOWEREST_ITR)
+					continue;
+				ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT,
+					  1);
+				ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER,
+					  RNP_LOWEREST_ITR);
+				q_vector->itr_rx = RNP_LOWEREST_ITR;
+			} else {
+				if (new_itr_rx == q_vector->itr_rx)
+					continue;
+				ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER,
+					  new_itr_rx);
+				ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT,
+					  adapter->rx_frames);
+				q_vector->itr_rx = old_itr_rx;
+			}
+		}
+	}
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+static inline void rnpgbe_irq_enable_queues(struct rnpgbe_adapter *adapter,
+					    struct rnpgbe_q_vector *q_vector)
+{
+	struct rnpgbe_ring *ring;
+
+	rnpgbe_for_each_ring(ring, q_vector->rx) {
+#ifdef CONFIG_RNP_DISABLE_TX_IRQ
+		rnpgbe_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK));
+#else
+		rnpgbe_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK));
+		ring_wr32(ring, RNP_DMA_INT_TRIG,
+			  (0x3 << 16) | TX_INT_MASK | RX_INT_MASK);
+#endif
+	}
+}
+
+static inline void rnpgbe_irq_disable_queues(struct rnpgbe_q_vector *q_vector)
+{
+	struct rnpgbe_ring *ring;
+
+	rnpgbe_for_each_ring(ring, q_vector->tx) {
+		ring_wr32(ring, RNP_DMA_INT_TRIG,
+			  (0x3 << 16) | (~(TX_INT_MASK | RX_INT_MASK)));
+		rnpgbe_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK));
+	}
+}
+/**
+ * rnpgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void rnpgbe_irq_enable(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		rnpgbe_irq_enable_queues(adapter, adapter->q_vector[i]);
+}
+
+static void rnpgbe_lpi_task(struct rnpgbe_adapter *adapter)
+{
+	int status;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (hw->feature_flags & RNP_HW_FEATURE_EEE) {
+		status = hw->ops.get_lpi_status(hw);
+
+		if (status) {
+			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
+				adapter->tx_path_in_lpi_mode = true;
+			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
+				adapter->tx_path_in_lpi_mode = false;
+		}
+	}
+}
+
+static irqreturn_t rnpgbe_msix_other(int irq, void *data)
+{
+	struct rnpgbe_adapter *adapter = data;
+
+	set_bit(__RNP_IN_IRQ, &adapter->state);
+	rnpgbe_lpi_task(adapter);
+	rnpgbe_msg_task(adapter);
+	clear_bit(__RNP_IN_IRQ, &adapter->state);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t rnpgbe_msix_clean_rings(int irq, void *data)
+{
+	struct rnpgbe_q_vector *q_vector = data;
+
+	rnpgbe_irq_disable_queues(q_vector);
+
+	rnpgbe_write_eitr_rx(q_vector);
+	/* interrupts on this vector are already disabled for us */
+
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * rnpgbe_poll - NAPI Rx polling callback
+ * @napi: structure for representing this polling device
+ * @budget: how many packets driver is allowed to clean
+ *
+ * This function is used for legacy and MSI, NAPI mode
+ **/
+int rnpgbe_poll(struct napi_struct *napi, int budget)
+{
+	struct rnpgbe_q_vector *q_vector =
+		container_of(napi, struct rnpgbe_q_vector, napi);
+	struct rnpgbe_adapter *adapter = q_vector->adapter;
+	struct rnpgbe_ring *ring;
+	int per_ring_budget, work_done = 0;
+	bool clean_complete = true;
+	int cleaned_total = 0;
+
+	rnpgbe_for_each_ring(ring, q_vector->tx) {
+		clean_complete = rnpgbe_clean_tx_irq(q_vector, ring, budget);
+	}
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling
+	 */
+	if (q_vector->rx.count > 1)
+		per_ring_budget = max(budget / q_vector->rx.count, 1);
+	else
+		per_ring_budget = budget;
+
+	rnpgbe_for_each_ring(ring, q_vector->rx) {
+		int cleaned = 0;
+		/* this ring is waiting for its rx_len to be reset;
+		 * avoid handling it until the reset is done
+		 */
+		if (likely(!(ring->ring_flags & RNP_RING_FLAG_DO_RESET_RX_LEN)))
+			cleaned = rnpgbe_clean_rx_irq(q_vector, ring,
+						      per_ring_budget);
+		work_done += cleaned;
+		cleaned_total += cleaned;
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
+	}
+
+	/* force close irq */
+	if (test_bit(__RNP_DOWN, &adapter->state))
+		clean_complete = true;
+
+	if (!clean_complete) {
+		int cpu_id = smp_processor_id();
+
+		/* It is possible that the interrupt affinity has changed but,
+		 * if the cpu is pegged at 100%, polling will never exit while
+		 * traffic continues and the interrupt will be stuck on this
+		 * cpu.  We check to make sure affinity is correct before we
+		 * continue to poll, otherwise we must stop polling so the
+		 * interrupt can move to the correct cpu.
+		 */
+		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
+			/* Tell napi that we are done polling */
+			if (likely(napi_complete_done(napi, work_done))) {
+				if (!test_bit(__RNP_DOWN, &adapter->state))
+					rnpgbe_irq_enable_queues(adapter, q_vector);
+			}
+			return min(work_done, budget - 1);
+		}
+		return budget;
+	}
+
+	if (likely(napi_complete_done(napi, work_done))) {
+		/* try to do itr handle */
+		if (!(adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE))
+			rnpgbe_update_ring_itr_rx(q_vector);
+
+		if (!test_bit(__RNP_DOWN, &adapter->state))
+			rnpgbe_irq_enable_queues(adapter, q_vector);
+	}
+
+	return min(work_done, budget - 1);
+}
+
+/**
+ * rnpgbe_irq_affinity_notify - Callback for affinity changes
+ * @notify: context as to what irq was changed
+ * @mask: the new affinity mask
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * so that we may register to receive changes to the irq affinity masks.
+ **/
+static void rnpgbe_irq_affinity_notify(struct irq_affinity_notify *notify,
+				       const cpumask_t *mask)
+{
+	struct rnpgbe_q_vector *q_vector =
+		container_of(notify, struct rnpgbe_q_vector, affinity_notify);
+
+	cpumask_copy(&q_vector->affinity_mask, mask);
+}
+
+/**
+ * rnpgbe_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * This is a callback function used by the irq_set_affinity_notifier function
+ * to inform the current notification subscriber that they will no longer
+ * receive notifications.
+ **/
+static void rnpgbe_irq_affinity_release(struct kref *ref)
+{
+}
+
+static irqreturn_t rnpgbe_intr(int irq, void *data)
+{
+	struct rnpgbe_adapter *adapter = data;
+	struct rnpgbe_q_vector *q_vector = adapter->q_vector[0];
+
+	/* disabled interrupts (on this vector) for us */
+	rnpgbe_irq_disable_queues(q_vector);
+
+	rnpgbe_write_eitr_rx(q_vector);
+
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
+	rnpgbe_msg_task(adapter);
+	rnpgbe_lpi_task(adapter);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * rnpgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * rnpgbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int rnpgbe_request_msix_irqs(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+	int i = 0;
+
+	DPRINTK(IFUP, INFO, "[%s] num_q_vectors:%d\n", __func__,
+		adapter->num_q_vectors);
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbe_q_vector *q_vector = adapter->q_vector[i];
+		struct msix_entry *entry =
+			&adapter->msix_entries[i + adapter->q_vector_off];
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-%s-%d-%d", netdev->name, "TxRx", i,
+				 q_vector->v_idx);
+		} else {
+			WARN(!(q_vector->tx.ring && q_vector->rx.ring),
+			     "%s vector%d tx rx is null, v_idx:%d\n",
+			     netdev->name, i, q_vector->v_idx);
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(entry->vector, &rnpgbe_msix_clean_rings, 0,
+				  q_vector->name, q_vector);
+		if (err) {
+			e_err(probe,
+			      "%s:request_irq failed for MSIX interrupt:%d "
+			      "Error: %d\n",
+			      netdev->name, entry->vector, err);
+			goto free_queue_irqs;
+		}
+		/* register for affinity change notifications */
+		q_vector->affinity_notify.notify = rnpgbe_irq_affinity_notify;
+		q_vector->affinity_notify.release = rnpgbe_irq_affinity_release;
+		irq_set_affinity_notifier(entry->vector,
+					  &q_vector->affinity_notify);
+		DPRINTK(IFUP, INFO, "[%s] set %s affinity_mask\n", __func__,
+			q_vector->name);
+
+		irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (i) {
+		i--;
+		irq_set_affinity_notifier(
+			adapter->msix_entries[i + adapter->q_vector_off].vector,
+			NULL);
+		irq_set_affinity_hint(
+			adapter->msix_entries[i + adapter->q_vector_off].vector,
+			NULL);
+		free_irq(
+			adapter->msix_entries[i + adapter->q_vector_off].vector,
+			adapter->q_vector[i]);
+	}
+	return err;
+}
+
+static int rnpgbe_free_msix_irqs(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbe_q_vector *q_vector = adapter->q_vector[i];
+		struct msix_entry *entry =
+			&adapter->msix_entries[i + adapter->q_vector_off];
+
+		/* free only the irqs that were actually requested */
+		if (!q_vector->rx.ring && !q_vector->tx.ring)
+			continue;
+		/* clear the affinity notifier in the IRQ descriptor */
+		irq_set_affinity_notifier(entry->vector, NULL);
+		/* clear the affinity_mask in the IRQ descriptor */
+		irq_set_affinity_hint(entry->vector, NULL);
+		DPRINTK(IFDOWN, INFO, "free irq %s\n", q_vector->name);
+		free_irq(entry->vector, q_vector);
+	}
+
+	return 0;
+}
+
+#ifdef DISABLE_RX_IRQ
+int rx_poll_thread_handler(void *data)
+{
+	int i;
+	struct rnpgbe_adapter *adapter = data;
+
+	dbg("%s  %s running...\n", __func__, adapter->name);
+
+	do {
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			rnpgbe_msix_clean_rings(0, adapter->q_vector[i]);
+
+		msleep(30);
+	} while (!kthread_should_stop() && adapter->quit_poll_thread != true);
+
+	dbg("%s  %s stopped\n", __func__, adapter->name);
+
+	return 0;
+}
+#endif
+
+/**
+ * rnpgbe_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int rnpgbe_request_irq(struct rnpgbe_adapter *adapter)
+{
+	int err;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+#ifdef DISABLE_RX_IRQ
+	adapter->rx_poll_thread =
+		kthread_run(rx_poll_thread_handler, adapter, adapter->name);
+	if (IS_ERR(adapter->rx_poll_thread)) {
+		rnpgbe_err("kthread_run failed!\n");
+		return PTR_ERR(adapter->rx_poll_thread);
+	}
+	return 0;
+#endif
+	if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+		pr_info("msix mode is used\n");
+		err = rnpgbe_request_msix_irqs(adapter);
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210))
+			wr32(hw, RNP500_LEGANCY_ENABLE, 0);
+	} else if (adapter->flags & RNP_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		pr_info("msi mode is used\n");
+		err = request_irq(adapter->pdev->irq, rnpgbe_intr, 0,
+				  adapter->netdev->name, adapter);
+		adapter->hw.mbx.other_irq_enabled = true;
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210))
+			wr32(hw, RNP500_LEGANCY_ENABLE, 0);
+	} else {
+		pr_info("legacy mode is used\n");
+		err = request_irq(adapter->pdev->irq, rnpgbe_intr, IRQF_SHARED,
+				  adapter->netdev->name, adapter);
+		adapter->hw.mbx.other_irq_enabled = true;
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210)) {
+			wr32(hw, RNP500_LEGANCY_ENABLE, 1);
+			wr32(hw, RNP500_LEGANCY_TIME, 0x200);
+		}
+	}
+
+	if (err)
+		e_err(probe, "request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+static void rnpgbe_free_irq(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+#ifdef DISABLE_RX_IRQ
+	return;
+#endif
+	if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+		rnpgbe_free_msix_irqs(adapter);
+	} else if (adapter->flags & RNP_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		free_irq(adapter->pdev->irq, adapter);
+		adapter->hw.mbx.other_irq_enabled = false;
+	} else {
+		free_irq(adapter->pdev->irq, adapter);
+		adapter->hw.mbx.other_irq_enabled = false;
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210))
+			wr32(hw, RNP500_LEGANCY_ENABLE, 0);
+	}
+}
+
+/**
+ * rnpgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void rnpgbe_irq_disable(struct rnpgbe_adapter *adapter)
+{
+	int i, j;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		rnpgbe_irq_disable_queues(adapter->q_vector[i]);
+		j = i + adapter->q_vector_off;
+
+		if (adapter->flags & RNP_FLAG_MSIX_ENABLED)
+			synchronize_irq(adapter->msix_entries[j].vector);
+		else
+			synchronize_irq(adapter->pdev->irq);
+	}
+}
+
+int rnpgbe_setup_tx_maxrate(struct rnpgbe_ring *tx_ring, u64 max_rate,
+			    int samples_1sec)
+{
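+	/* The TM register appears to set the sampling window (in internal
+	 * clock ticks) and TH the byte budget within that window; a budget
+	 * of 0 disables rate limiting. Inferred from the callers, not from
+	 * documentation.
+	 */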
+	/* set the hardware sampling interval to 1s */
+	ring_wr32(tx_ring, RNP_DMA_REG_TX_FLOW_CTRL_TM, samples_1sec);
+	ring_wr32(tx_ring, RNP_DMA_REG_TX_FLOW_CTRL_TH, max_rate);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_tx_maxrate_own - set the maximum per-queue bitrate
+ * @adapter: board private structure
+ * @queue_index: Tx queue to set
+ *
+ * The desired rate in Mbps is taken from adapter->max_rate[queue_index].
+ **/
+static int rnpgbe_tx_maxrate_own(struct rnpgbe_adapter *adapter,
+				 int queue_index)
+{
+	struct rnpgbe_ring *tx_ring = adapter->tx_ring[queue_index];
+	u64 real_rate = 0;
+	u32 maxrate = adapter->max_rate[queue_index];
+
+	if (!maxrate)
+		return rnpgbe_setup_tx_maxrate(
+			tx_ring, 0, adapter->hw.usecstocount * 100000);
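+	/* the 85% and 94% factors below look like an allowance for framing
+	 * overhead (preamble/IFG/FCS) at low and high rates respectively;
+	 * this is an assumption, the scaling is not documented
+	 */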
+	/* convert Mbit/s to bytes/s */
+	if (maxrate < 50)
+		real_rate = ((u64)maxrate * 1000 * 85) >> 3;
+	else
+		real_rate = ((u64)maxrate * 1000 * 94) >> 3;
+	rnpgbe_setup_tx_maxrate(tx_ring, real_rate,
+				adapter->hw.usecstocount * 100000);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_configure_tx_ring - Configure a Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+void rnpgbe_configure_tx_ring(struct rnpgbe_adapter *adapter,
+			      struct rnpgbe_ring *ring)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* disable queue to avoid issues while updating state */
+
+	if (!(ring->ring_flags & RNP_RING_SKIP_TX_START))
+		ring_wr32(ring, RNP_DMA_TX_START, 0);
+
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO, (u32)ring->dma);
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI,
+		  (u32)(((u64)ring->dma) >> 32) | (hw->pfvfnum << 24));
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_LEN, ring->count);
+	ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+	ring->next_to_use = ring->next_to_clean;
+	ring->tail = ring->ring_addr + RNP_DMA_REG_TX_DESC_BUF_TAIL;
+	rnpgbe_wr_reg(ring->tail, ring->next_to_use);
+
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_FETCH_CTRL,
+		  (8 << 0) /* max_water_flow */
+			  | (TSRN10_TX_DEFAULT_BURST << 16)
+		  /* max-num_descs_peer_read */
+	);
+
+	ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER,
+		  adapter->tx_usecs * hw->usecstocount);
+	ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_PKTCNT, adapter->tx_frames);
+
+	rnpgbe_tx_maxrate_own(adapter, ring->queue_index);
+	/* flow control: bytes-peer-ctrl-tm-clk. 0:no-control */
+	if (adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) {
+		ring->atr_sample_rate = adapter->atr_sample_rate;
+		ring->atr_count = 0;
+		set_bit(__RNP_TX_FDIR_INIT_DONE, &ring->state);
+	} else {
+		ring->atr_sample_rate = 0;
+	}
+
+	clear_bit(__RNP_HANG_CHECK_ARMED, &ring->state);
+
+	if (!(ring->ring_flags & RNP_RING_SKIP_TX_START)) {
+		/* n500 must wait for tx_ready before enabling tx start */
+		int timeout = 0;
+		u32 status = 0;
+
+		do {
+			status = ring_rd32(ring, RNP_DMA_TX_READY);
+			usleep_range(100, 200);
+			timeout++;
+			rnpgbe_dbg("wait %d tx ready to 1\n",
+				   ring->rnpgbe_queue_idx);
+		} while ((status != 1) && (timeout < 100));
+
+		if (timeout >= 100)
+			rnpgbe_dbg("wait tx ready timeout\n");
+		ring_wr32(ring, RNP_DMA_TX_START, 1);
+	}
+}
+
+/**
+ * rnpgbe_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void rnpgbe_configure_tx(struct rnpgbe_adapter *adapter)
+{
+	u32 i, dma_axi_ctl;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	/* dma_axi_en.tx_en must be set before the Tx queues are enabled */
+	dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN);
+	dma_axi_ctl |= TX_AXI_RW_EN;
+	dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl);
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < (adapter->num_tx_queues); i++)
+		rnpgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+void rnpgbe_disable_rx_queue(struct rnpgbe_adapter *adapter,
+			     struct rnpgbe_ring *ring)
+{
+	ring_wr32(ring, RNP_DMA_RX_START, 0);
+}
+
+void rnpgbe_configure_rx_ring(struct rnpgbe_adapter *adapter,
+			      struct rnpgbe_ring *ring)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u64 desc_phy = ring->dma;
+	u16 q_idx = ring->queue_index;
+
+	/* disable queue to avoid issues while updating state */
+	rnpgbe_disable_rx_queue(adapter, ring);
+
+	/* set descriptor ring registers */
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO, (u32)desc_phy);
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI,
+		  ((u32)(desc_phy >> 32)) | (hw->pfvfnum << 24));
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_LEN, ring->count);
+
+	ring->tail = ring->ring_addr + RNP_DMA_REG_RX_DESC_BUF_TAIL;
+	ring->next_to_clean = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD);
+	ring->next_to_use = ring->next_to_clean;
+
+	if (ring->ring_flags & RNP_RING_SCATER_SETUP) {
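+		/* program the rx scatter (split) length; the >> 4 below
+		 * suggests the register counts 16-byte units (assumption),
+		 * and with PAGE_SIZE >= 8K a fixed 96 units is used
+		 */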
+#if (PAGE_SIZE < 8192)
+		{
+			int split_size;
+
+			split_size = rnpgbe_rx_pg_size(ring) / 2 -
+				     rnpgbe_rx_offset(ring) -
+				     sizeof(struct skb_shared_info);
+			split_size = split_size >> 4;
+			ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH,
+				  split_size);
+		}
+#else
+		ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH, 96);
+#endif
+	}
+
+	/* same fetch settings with or without SRIOV */
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_FETCH_CTRL,
+		  (TSRN10_RX_DEFAULT_LINE << 0) /* rx-desc-flow */
+			  | (TSRN10_RX_DEFAULT_BURST << 16)
+		  /* max-read-desc-cnt */
+	);
+	/* setup rx drop */
+	if (adapter->rx_drop_status & BIT(q_idx)) {
+		ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH,
+			  adapter->drop_time);
+	} else {
+		/* for NCSI cards, drop packets if no rx descriptor is
+		 * available within 800 ms (e.g. when the OS has crashed)
+		 */
+		if (hw->ncsi_en)
+			ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 100000);
+		else
+			ring_wr32(ring, PCI_DMA_REG_RX_DESC_TIMEOUT_TH, 0);
+	}
+
+	ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER,
+		  adapter->rx_usecs * hw->usecstocount);
+	ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT, adapter->rx_frames);
+	rnpgbe_alloc_rx_buffers(ring, rnpgbe_desc_unused_rx(ring));
+}
+
+static void rnpgbe_configure_virtualization(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	u32 ring, vfnum;
+	int i, vf_ring;
+	u64 real_rate = 0;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) {
+		hw->ops.set_sriov_status(hw, false);
+		return;
+	}
+
+	/* Enable only the PF's pool for Tx/Rx */
+
+	if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB) {
+		dma_wr32(dma, RNP_DMA_CONFIG,
+			 dma_rd32(dma, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS));
+		adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB;
+	}
+	ring = adapter->tx_ring[0]->rnpgbe_queue_idx;
+	hw->ops.set_sriov_status(hw, true);
+
+	/* store vfnum */
+	vfnum = hw->max_vfs - 1;
+	hw->veb_ring = ring;
+	hw->vfnum = vfnum;
+	/* use the last VF's table entry */
+	adapter->vf_num_for_pf = 0x80 | vfnum;
+
+	/* set up per-VF tx rate limits */
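+	/* tx_rate is in Mbit/s; 1024 * 128 (= 2^17) looks like a
+	 * power-of-two approximation of the Mbit/s -> bytes/s
+	 * conversion (1000000 / 8 = 125000)
+	 */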
+	for (i = 0; i < adapter->num_vfs; i++) {
+		vf_ring = rnpgbe_get_vf_ringnum(hw, i, 0);
+		real_rate = (adapter->vfinfo[i].tx_rate * 1024 * 128);
+		rnpgbe_setup_ring_maxrate(adapter, vf_ring, real_rate);
+	}
+}
+
+static void rnpgbe_set_rx_buffer_len(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN * 3;
+	struct rnpgbe_ring *rx_ring;
+	int i;
+
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		clear_bit(__RNP_RX_3K_BUFFER, &rx_ring->state);
+		clear_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+		set_bit(__RNP_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#ifdef OPTM_WITH_LPAGE
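+		/* with the large-page option each rx buffer's footprint
+		 * (headroom + data + skb_shared_info + hw timestamp) is
+		 * rounded up to a 1KB multiple, presumably so that
+		 * rx_page_buf_nums of them can be packed into one page
+		 */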
+		rx_ring->rx_page_buf_nums = RNP_PAGE_BUFFER_NUMS(rx_ring);
+		rx_ring->rx_per_buf_mem = ALIGN(
+			(rnpgbe_rx_offset(rx_ring) + rnpgbe_rx_bufsz(rx_ring) +
+			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+			 RNP_RX_HWTS_OFFSET),
+			1024);
+#endif
+	}
+}
+
+/**
+ * rnpgbe_configure_rx - Configure the Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void rnpgbe_configure_rx(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	int i;
+	u32 dma_axi_ctl;
+
+	/* set_rx_buffer_len must be called before ring initialization */
+	rnpgbe_set_rx_buffer_len(adapter);
+
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
+
+	if (adapter->num_rx_queues > 0) {
+		wr32(hw, RNP_ETH_DEFAULT_RX_RING,
+		     adapter->rx_ring[0]->rnpgbe_queue_idx);
+	}
+
+	/* enable receive DMA on the AXI bus */
+	dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN);
+	dma_axi_ctl |= RX_AXI_RW_EN;
+	dma_wr32(dma, RNP_DMA_AXI_EN, dma_axi_ctl);
+}
+
+static int rnpgbe_vlan_rx_add_vid(struct net_device *netdev,
+				  __always_unused __be16 proto, u16 vid)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool veb_setup = true;
+
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	if (sriov_flag) {
+		if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) {
+			if (hw->ops.set_veb_vlan_mask) {
+				if (hw->ops.set_veb_vlan_mask(
+					    hw, vid, hw->vfnum, true) != 0) {
+					netdev_dbg(netdev,
+						   "out of vlan entries\n");
+					return -EACCES;
+				}
+			}
+		} else {
+			/* in sriov mode */
+			if ((vid) && (adapter->vf_vlan) &&
+			    (vid != adapter->vf_vlan)) {
+				netdev_dbg(netdev,
+					   "only 1 vlan in sriov mode\n");
+				return -EACCES;
+			}
+
+			/* record the single vlan used in sriov mode */
+			if (vid) {
+				adapter->vf_vlan = vid;
+				if (hw->ops.set_vf_vlan_mode) {
+					if (hw->feature_flags &
+					    RNP_NET_FEATURE_VF_FIXED)
+						hw->ops.set_vf_vlan_mode(
+							hw, vid, 0, true);
+					else
+						hw->ops.set_vf_vlan_mode(
+							hw, vid, hw->vfnum,
+							true);
+				}
+			}
+		}
+	}
+
+	if (vid) {
+		if (proto == htons(ETH_P_8021Q))
+			adapter->vlan_count++;
+	}
+
+	if (vid < VLAN_N_VID) {
+		if (proto != htons(ETH_P_8021Q)) {
+			set_bit(vid, adapter->active_vlans_stags);
+			veb_setup = false;
+		} else {
+			set_bit(vid, adapter->active_vlans);
+		}
+	}
+	/* never set up the VEB for vid 0 */
+	if (vid == 0)
+		veb_setup = false;
+
+	/* only ctags program the VEB, and only in sriov mode */
+	if (hw->ops.set_vlan_filter) {
+		hw->ops.set_vlan_filter(hw, vid, true,
+					(sriov_flag && veb_setup));
+	}
+	return 0;
+}
+
+static int rnpgbe_vlan_rx_kill_vid(struct net_device *netdev,
+				   __always_unused __be16 proto, u16 vid)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+	bool veb_setup = true;
+
+	if (!vid)
+		return 0;
+	if (sriov_flag) {
+		if (vid) {
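+			/* only drop the hardware VLAN filter when no VF and
+			 * no remaining ctag/stag user still needs this vid;
+			 * the VEB entry itself is always cleared below
+			 */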
+			int true_remove = 1;
+			adapter->vf_vlan = 0;
+			for (i = 0; i < adapter->num_vfs; i++) {
+				if (vid == adapter->vfinfo[i].vf_vlan)
+					true_remove = 0;
+			}
+			/* if no VF uses this vid */
+			if (true_remove) {
+				if (proto != htons(ETH_P_8021Q)) {
+					veb_setup = false;
+					if (!test_bit(vid,
+						      adapter->active_vlans))
+						true_remove = 1;
+				} else {
+					if (!test_bit(
+						    vid,
+						    adapter->active_vlans_stags))
+						true_remove = 1;
+				}
+				/* if no other tags use this vid */
+				if (true_remove) {
+					hw->ops.set_vlan_filter(hw, vid, false,
+								veb_setup);
+				}
+			}
+			/* always clean veb */
+			hw->ops.set_vlan_filter(hw, vid, false, true);
+
+			if (hw->ops.set_vf_vlan_mode) {
+				if (hw->feature_flags &
+				    RNP_NET_FEATURE_VF_FIXED)
+					hw->ops.set_vf_vlan_mode(hw, vid, 0,
+								 false);
+				else
+					hw->ops.set_vf_vlan_mode(
+						hw, vid, hw->vfnum, false);
+			}
+
+			if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) {
+				if (hw->ops.set_veb_vlan_mask) {
+					hw->ops.set_veb_vlan_mask(
+						hw, vid, hw->vfnum, false);
+				}
+			}
+		}
+	} else {
+		int true_remove = 0;
+
+		if (proto != htons(ETH_P_8021Q)) {
+			veb_setup = false;
+			if (!test_bit(vid, adapter->active_vlans))
+				true_remove = 1;
+		} else {
+			if (!test_bit(vid, adapter->active_vlans_stags))
+				true_remove = 1;
+		}
+		if (true_remove) {
+			if ((adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) &&
+			    (vid == adapter->stags_vid))
+				goto SKIP_REMOVE;
+			hw->ops.set_vlan_filter(hw, vid, false, false);
+		}
+	}
+SKIP_REMOVE:;
+	if (vid) {
+		if (proto == htons(ETH_P_8021Q)) {
+			/* should check proto todo */
+			adapter->vlan_count--;
+		}
+	}
+	if (proto == htons(ETH_P_8021Q))
+		clear_bit(vid, adapter->active_vlans);
+	if (proto != htons(ETH_P_8021Q))
+		clear_bit(vid, adapter->active_vlans_stags);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_vlan_strip_disable - helper to disable hw vlan stripping
+ * @adapter: driver data
+ */
+static void rnpgbe_vlan_strip_disable(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	struct rnpgbe_ring *rx_ring;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		hw->ops.set_vlan_strip(hw, rx_ring->rnpgbe_queue_idx, false);
+	}
+}
+
+/**
+ * rnpgbe_vlan_strip_enable - helper to enable hw vlan stripping
+ * @adapter: driver data
+ */
+static void rnpgbe_vlan_strip_enable(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_ring *rx_ring;
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		hw->ops.set_vlan_strip(hw, rx_ring->rnpgbe_queue_idx, true);
+	}
+}
+
+static void rnpgbe_remove_vlan(struct rnpgbe_adapter *adapter)
+{
+	adapter->vlan_count = 0;
+}
+
+static void rnpgbe_restore_vlan(struct rnpgbe_adapter *adapter)
+{
+	u16 vid;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+
+	/* in stags open, set stags_vid to vlan filter */
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED)
+		eth->ops.set_vfta(eth, adapter->stags_vid, true);
+
+	rnpgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) {
+		rnpgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q),
+				       vid);
+	}
+
+	for_each_set_bit(vid, adapter->active_vlans_stags, VLAN_N_VID)
+		rnpgbe_vlan_rx_add_vid(adapter->netdev, htons(0x88a8), vid);
+}
+
+/**
+ * rnpgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the unicast/multicast
+ * address list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper unicast, multicast and
+ * promiscuous mode.
+ **/
+void rnpgbe_set_rx_mode(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	netdev_features_t features;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	hw->ops.set_rx_mode(hw, netdev, sriov_flag);
+
+	if (sriov_flag) {
+		if (!test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) {
+			rnpgbe_restore_vf_macvlans(adapter);
+
+			rnpgbe_restore_vf_macs(adapter);
+			clear_bit(__RNP_USE_VFINFI, &adapter->state);
+		}
+	}
+
+	features = netdev->features;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		rnpgbe_vlan_strip_enable(adapter);
+	else
+		rnpgbe_vlan_strip_disable(adapter);
+	/* only do this if hw supports stags */
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (features & NETIF_F_HW_VLAN_STAG_RX)
+			rnpgbe_vlan_strip_enable(adapter);
+		else
+			rnpgbe_vlan_strip_disable(adapter);
+	}
+}
+
+static void rnpgbe_napi_enable_all(struct rnpgbe_adapter *adapter)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_enable(&adapter->q_vector[q_idx]->napi);
+}
+
+static void rnpgbe_napi_disable_all(struct rnpgbe_adapter *adapter)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_disable(&adapter->q_vector[q_idx]->napi);
+}
+
+static void rnpgbe_fdir_filter_restore(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *filter;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+
+	/* setup ntuple */
+	hlist_for_each_entry_safe(filter, node2, &adapter->fdir_filter_list,
+			fdir_node) {
+		if ((!filter->vf_num) && (filter->action != ACTION_TO_MPE)) {
+			rnpgbe_fdir_write_perfect_filter(
+					adapter->fdir_mode, hw, &filter->filter, filter->hw_idx,
+					(filter->action == RNP_FDIR_DROP_QUEUE) ?
+					RNP_FDIR_DROP_QUEUE :
+					adapter->rx_ring[filter->action]->rnpgbe_queue_idx,
+					(adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO) ?
+					true :
+					false);
+		} else {
+			rnpgbe_fdir_write_perfect_filter(
+					adapter->fdir_mode, hw, &filter->filter,
+					filter->hw_idx,
+					(filter->action == RNP_FDIR_DROP_QUEUE) ?
+					RNP_FDIR_DROP_QUEUE :
+					filter->action,
+					(adapter->priv_flags &
+					 RNP_PRIV_FLAG_REMAP_PRIO) ?
+					true :
+					false);
+		}
+	}
+
+	spin_unlock(&adapter->fdir_perfect_lock);
+}
+
+static void rnpgbe_configure_pause(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	hw->ops.set_pause_mode(hw);
+}
+
+static void rnpgbe_vlan_stags_flag(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* stags is added */
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED)
+		hw->ops.set_txvlan_mode(hw, false);
+	else
+		hw->ops.set_txvlan_mode(hw, true);
+}
+
+static void rnpgbe_configure(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	/*
+	 * We must restore virtualization before VLANs or else
+	 * the VLVF registers will not be populated
+	 */
+	rnpgbe_configure_virtualization(adapter);
+
+	rnpgbe_set_rx_mode(adapter->netdev);
+	/* reconfigure hw */
+	hw->ops.set_mac(hw, hw->mac.addr, sriov_flag);
+
+	/* in sriov mode vlan is not reset */
+	rnpgbe_restore_vlan(adapter);
+
+	hw->ops.update_hw_info(hw);
+
+	/* init setup pause */
+	rnpgbe_configure_pause(adapter);
+
+	rnpgbe_vlan_stags_flag(adapter);
+
+	rnpgbe_init_rss_key(adapter);
+	rnpgbe_init_rss_table(adapter);
+
+	if (!(adapter->flags & RNP_FLAG_FDIR_HASH_CAPABLE) &&
+	    (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE))
+		rnpgbe_fdir_filter_restore(adapter);
+
+	rnpgbe_configure_tx(adapter);
+	rnpgbe_configure_rx(adapter);
+}
+
+/**
+ * rnpgbe_sfp_link_config - set up SFP+ link
+ * @adapter: pointer to private adapter struct
+ **/
+static void rnpgbe_sfp_link_config(struct rnpgbe_adapter *adapter)
+{
+	/*
+	 * We are assuming the worst case scenario here, and that
+	 * is that an SFP was inserted/removed after the reset
+	 * but before SFP detection was enabled.  As such the best
+	 * solution is to just start searching as soon as we start
+	 */
+	adapter->flags2 |= RNP_FLAG2_SFP_NEEDS_RESET;
+}
+
+static void rnpgbe_up_complete(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i;
+
+	rnpgbe_configure_msix(adapter);
+
+	/* enable the optics for n10 SFP+ fiber */
+	if (hw->ops.enable_tx_laser)
+		hw->ops.enable_tx_laser(hw);
+
+	/* ensure prior configuration writes are ordered before clearing __RNP_DOWN */
+	smp_mb__before_atomic();
+	clear_bit(__RNP_DOWN, &adapter->state);
+	rnpgbe_napi_enable_all(adapter);
+
+	rnpgbe_sfp_link_config(adapter);
+	/* clear any pending interrupts */
+	rnpgbe_irq_enable(adapter);
+
+	/* enable transmits */
+	netif_tx_start_all_queues(adapter->netdev);
+
+	/* start the rx rings */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		ring_wr32(adapter->rx_ring[i], RNP_DMA_RX_START, 1);
+
+	/* bring the link up in the watchdog, this could race with our first
+	 * link up interrupt but shouldn't be a problem
+	 */
+	adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE;
+	adapter->link_check_timeout = jiffies;
+	mod_timer(&adapter->service_timer, jiffies);
+
+	/* Set PF Reset Done bit so PF/VF Mail Ops can work */
+	/* maybe differ in n500 */
+	hw->link = 0;
+	hw->ops.set_mbx_link_event(hw, 1);
+	hw->ops.set_mbx_ifup(hw, 1);
+}
+
+void rnpgbe_reinit_locked(struct rnpgbe_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+
+	while (test_and_set_bit(__RNP_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+	rnpgbe_down(adapter);
+	/*
+	 * If SR-IOV enabled then wait a bit before bringing the adapter
+	 * back up to give the VFs time to respond to the reset.  The
+	 * two second wait is based upon the watchdog timer cycle in
+	 * the VF driver.
+	 */
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		msleep(2000);
+	rnpgbe_up(adapter);
+
+	clear_bit(__RNP_RESETTING, &adapter->state);
+}
+
+void rnpgbe_up(struct rnpgbe_adapter *adapter)
+{
+	/* hardware has been reset, we need to reload some things */
+	rnpgbe_configure(adapter);
+	rnpgbe_up_complete(adapter);
+}
+
+void rnpgbe_reset(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err;
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	rnpgbe_logd(LOG_ADPT_STAT, "%s\n", __func__);
+
+	/* lock SFP init bit to prevent race conditions with the watchdog */
+	while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state))
+		usleep_range(1000, 2000);
+
+	/* clear all SFP and link config related flags while holding SFP_INIT */
+	adapter->flags2 &=
+		~(RNP_FLAG2_SEARCH_FOR_SFP | RNP_FLAG2_SFP_NEEDS_RESET);
+	adapter->flags &= ~RNP_FLAG_NEED_LINK_CONFIG;
+	err = hw->ops.init_hw(hw);
+	if (err) {
+		e_dev_err("init_hw: Hardware Error: err:%d. line:%d\n", err,
+			  __LINE__);
+	}
+
+	clear_bit(__RNP_IN_SFP_INIT, &adapter->state);
+
+	/* reprogram the RAR[0] in case user changed it. */
+	hw->ops.set_mac(hw, hw->mac.addr, sriov_flag);
+
+	if (module_enable_ptp) {
+		if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED &&
+		    (adapter->ptp_rx_en || adapter->ptp_tx_en))
+			rnpgbe_ptp_reset(adapter);
+	}
+}
+
+#ifdef OPTM_WITH_LPAGE
+/**
+ * rnpgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void rnpgbe_clean_rx_ring(struct rnpgbe_ring *rx_ring)
+{
+	u16 i = rx_ring->next_to_clean;
+	struct rnpgbe_rx_buffer *rx_buffer;
+
+	if (!rx_ring->rx_buffer_info)
+		return;
+
+	if (rx_ring->skb)
+		dev_kfree_skb(rx_ring->skb);
+
+	rx_ring->skb = NULL;
+	rx_buffer = &rx_ring->rx_buffer_info[i];
+
+	/* Free all the Rx ring sk_buffs */
+	while (i != rx_ring->next_to_alloc) {
+		if (!rx_buffer->page)
+			goto next_buffer;
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      rnpgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     RNP_RX_DMA_ATTR);
+
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+		/* now this page is not used */
+		rx_buffer->page = NULL;
+	next_buffer:
+		i++;
+		rx_buffer++;
+		if (i == rx_ring->count) {
+			i = 0;
+			rx_buffer = rx_ring->rx_buffer_info;
+		}
+	}
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+#else
+
+/**
+ * rnpgbe_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void rnpgbe_clean_rx_ring(struct rnpgbe_ring *rx_ring)
+{
+	u16 i = rx_ring->next_to_clean;
+	struct rnpgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+
+	/* Free all the Rx ring sk_buffs */
+	while (i != rx_ring->next_to_alloc) {
+		if (rx_buffer->skb) {
+			struct sk_buff *skb = rx_buffer->skb;
+
+			dev_kfree_skb(skb);
+			rx_buffer->skb = NULL;
+		}
+
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      rnpgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbe_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     RNP_RX_DMA_ATTR);
+
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+		/* now this page is not used */
+		rx_buffer->page = NULL;
+		i++;
+		rx_buffer++;
+		if (i == rx_ring->count) {
+			i = 0;
+			rx_buffer = rx_ring->rx_buffer_info;
+		}
+	}
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+#endif
+
+/**
+ * rnpgbe_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void rnpgbe_clean_tx_ring(struct rnpgbe_ring *tx_ring)
+{
+	unsigned long size;
+	u16 i = tx_ring->next_to_clean;
+	struct rnpgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
+
+	BUG_ON(tx_ring == NULL);
+
+	/* ring already cleared, nothing to do */
+	if (!tx_ring->tx_buffer_info)
+		return;
+
+	while (i != tx_ring->next_to_use) {
+		struct rnpgbe_tx_desc *eop_desc, *tx_desc;
+
+		dev_kfree_skb_any(tx_buffer->skb);
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+
+		eop_desc = tx_buffer->next_to_watch;
+		tx_desc = RNP_TX_DESC(tx_ring, i);
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = RNP_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len))
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+		}
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		i++;
+		if (unlikely(i == tx_ring->count)) {
+			i = 0;
+			tx_buffer = tx_ring->tx_buffer_info;
+		}
+	}
+
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+	size = sizeof(struct rnpgbe_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+}
+
+/**
+ * rnpgbe_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void rnpgbe_clean_all_rx_rings(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpgbe_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * rnpgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void rnpgbe_clean_all_tx_rings(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		rnpgbe_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+static void rnpgbe_fdir_filter_exit(struct rnpgbe_adapter *adapter)
+{
+	struct hlist_node *node2;
+	struct rnpgbe_fdir_filter *filter;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	spin_lock(&adapter->fdir_perfect_lock);
+
+	hlist_for_each_entry_safe (filter, node2, &adapter->fdir_filter_list,
+				   fdir_node) {
+		/* erase the filter from hardware */
+		rnpgbe_fdir_erase_perfect_filter(adapter->fdir_mode, hw,
+						 &filter->filter,
+						 filter->hw_idx);
+
+		hlist_del(&filter->fdir_node);
+		kfree(filter);
+	}
+	adapter->fdir_filter_count = 0;
+
+	adapter->layer2_count = hw->layer2_count;
+	adapter->tuple_5_count = hw->tuple5_count;
+
+	spin_unlock(&adapter->fdir_perfect_lock);
+}
+
+static int rnpgbe_xmit_nop_frame_ring(struct rnpgbe_adapter *adapter,
+			       struct rnpgbe_ring *tx_ring)
+{
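+	/* post a single zero-length descriptor and pull the tail back to 0;
+	 * rnpgbe_down() uses this to make the hardware wrap its head pointer
+	 * back to 0 when a full reset is not possible (SR-IOV enabled)
+	 */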
+	u16 i = tx_ring->next_to_use;
+	struct rnpgbe_tx_desc *tx_desc;
+
+	tx_desc = RNP_TX_DESC(tx_ring, i);
+
+	/* set length to 0 */
+	tx_desc->blen_mac_ip_len = 0;
+	tx_desc->vlan_cmd = cpu_to_le32(RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS);
+	/* update tail */
+	rnpgbe_wr_reg(tx_ring->tail, 0);
+	return 0;
+}
+
+static void print_status(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	int i;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	struct net_device *netdev = adapter->netdev;
+
+	netdev_dbg(netdev, "eth 0x120 %x\n", eth_rd32(eth, 0x120));
+	netdev_dbg(netdev, "eth 0x124 %x\n", eth_rd32(eth, 0x124));
+
+	for (i = 0x200; i < 0x220; i = i + 4)
+		netdev_dbg(netdev, "eth 0x%x %x\n", i, eth_rd32(eth, i));
+
+	for (i = 0x300; i < 0x318; i = i + 4)
+		netdev_dbg(netdev, "eth 0x%x %x\n", i, eth_rd32(eth, i));
+
+	netdev_dbg(netdev, "eth 0x%x %x\n", 0x98, eth_rd32(eth, 0x98));
+	netdev_dbg(netdev, "eth 0x%x %x\n", 0x220, eth_rd32(eth, 0x220));
+
+	for (i = 0x138; i < 0x158; i = i + 4)
+		netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x170;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x174;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	for (i = 0x214; i < 0x220; i = i + 4)
+		netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	for (i = 0x234; i < 0x270; i = i + 4)
+		netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x1018;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x101c;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+	i = 0x1084;
+	netdev_dbg(netdev, "dma 0x%x %x\n", i, dma_rd32(dma, i));
+}
+
+void rnpgbe_down(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i;
+	int free_tx_early = 0;
+	int err = 0;
+	/* signal that we are down to the interrupt handler */
+	set_bit(__RNP_DOWN, &adapter->state);
+
+	if (!hw->ncsi_en)
+		hw->ops.set_mac_rx(hw, false);
+
+	hw->ops.set_mbx_link_event(hw, 0);
+	hw->ops.set_mbx_ifup(hw, 0);
+
+	rnpgbe_setup_eee_mode(adapter, false);
+
+	if (hw->ops.clean_link)
+		hw->ops.clean_link(hw);
+
+	if (netif_carrier_ok(netdev))
+		e_info(drv, "NIC Link is Down\n");
+
+	rnpgbe_remove_vlan(adapter);
+
+	netif_tx_stop_all_queues(netdev);
+
+	netif_carrier_off(netdev);
+
+	usleep_range(5000, 10000);
+	/* if we have tx desc to clean */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbe_ring *tx_ring = adapter->tx_ring[i];
+
+		if (!(tx_ring->ring_flags & RNP_RING_SKIP_TX_START)) {
+			int head, tail;
+			int timeout = 0;
+
+			free_tx_early = 1;
+			if (tx_ring->next_to_use == tx_ring->next_to_clean)
+				continue;
+
+			head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+			tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL);
+
+			while (head != tail) {
+				usleep_range(30000, 50000);
+
+				head = ring_rd32(tx_ring,
+						 RNP_DMA_REG_TX_DESC_BUF_HEAD);
+				tail = ring_rd32(tx_ring,
+						 RNP_DMA_REG_TX_DESC_BUF_TAIL);
+				timeout++;
+				if (timeout == 100) {
+					e_info(drv,
+					       "wait ring %d tx done timeout %x %x\n",
+					       i, head, tail);
+					adapter->priv_flags |=
+						RNP_PRIV_FLGA_TEST_TX_HANG;
+					print_status(adapter);
+					err = 1;
+				}
+				if (timeout >= 200) {
+					e_info(drv,
+					       "200 wait tx done timeout %x %x\n",
+					       head, tail);
+					print_status(adapter);
+					break;
+				}
+			}
+		}
+	}
+
+	{
+		int time = 0;
+
+		while (test_bit(__RNP_SERVICE_CHECK, &adapter->state)) {
+			usleep_range(100, 200);
+			time++;
+			if (time > 100)
+				break;
+		}
+	}
+
+	if (free_tx_early)
+		rnpgbe_clean_all_tx_rings(adapter);
+
+	usleep_range(2000, 5000);
+
+	rnpgbe_irq_disable(adapter);
+
+	usleep_range(5000, 10000);
+
+	netif_tx_disable(netdev);
+
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rnpgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+		/* only handle this when sriov is enabled and the rx length changed */
+		if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+		    (adapter->rx_ring[i]->ring_flags &
+		     RNP_RING_FLAG_CHANGE_RX_LEN)) {
+			int head;
+			struct rnpgbe_ring *ring = adapter->rx_ring[i];
+
+			head = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD);
+			adapter->rx_ring[i]->ring_flags &=
+				(~RNP_RING_FLAG_CHANGE_RX_LEN);
+			/* delay the rx length setup until the
+			 * rx head has returned to 0
+			 */
+			if (head >= adapter->rx_ring[i]->reset_count) {
+				adapter->rx_ring[i]->ring_flags |=
+					RNP_RING_FLAG_DELAY_SETUP_RX_LEN;
+				/* set sw count to head + 1 */
+				adapter->rx_ring[i]->temp_count = head + 1;
+			}
+		}
+		/* a plain down without an rx_len change needs no handling */
+	}
+	/* call carrier off first to avoid false dev_watchdog timeouts */
+
+	rnpgbe_napi_disable_all(adapter);
+
+	adapter->flags2 &=
+		~(RNP_FLAG2_FDIR_REQUIRES_REINIT | RNP_FLAG2_RESET_REQUESTED);
+	adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE;
+
+	/* ping all the active vfs to let them know we are going down */
+	if (adapter->num_vfs)
+		rnpgbe_ping_all_vfs(adapter);
+
+	/* disable transmits in the hardware now that interrupts are off */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbe_ring *tx_ring = adapter->tx_ring[i];
+		int count = tx_ring->count;
+		int head;
+		int timeout = 0;
+
+		/* 1. stop queue */
+		if (!err) {
+			if (!(tx_ring->ring_flags & RNP_RING_SKIP_TX_START))
+				ring_wr32(tx_ring, RNP_DMA_TX_START, 0);
+		}
+		/* 2. try to set tx head to 0 in sriov mode
+		 * since we don't reset
+		 */
+		if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+		    (!(tx_ring->ring_flags & RNP_RING_SIZE_CHANGE_FIX))) {
+			/* only needed if hw does not auto-reset the tx head to zero */
+
+			/* n10 should wait tx_ready */
+			u32 status = 0;
+
+			timeout = 0;
+			do {
+				status = ring_rd32(tx_ring, RNP_DMA_TX_READY);
+				usleep_range(100, 200);
+				timeout++;
+				rnpgbe_dbg("wait %d tx ready to 1\n",
+					   tx_ring->rnpgbe_queue_idx);
+			} while ((status != 1) && (timeout < 100));
+
+			if (timeout >= 100)
+				rnpgbe_dbg("wait tx ready timeout\n");
+
+			head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+			if (head != 0) {
+				u16 next_to_use = tx_ring->next_to_use;
+
+				if (head != (count - 1)) {
+					/* 3 set len head + 1 */
+					ring_wr32(tx_ring,
+						  RNP_DMA_REG_TX_DESC_BUF_LEN,
+						  head + 1);
+				}
+				/* set to use head */
+				tx_ring->next_to_use = head;
+				/* 4 send a len zero packet */
+				rnpgbe_xmit_nop_frame_ring(adapter, tx_ring);
+				if (!(tx_ring->ring_flags &
+				      RNP_RING_SKIP_TX_START))
+					ring_wr32(tx_ring, RNP_DMA_TX_START, 1);
+				/* 5 wait head to zero */
+				while ((head != 0) && (timeout < 1000)) {
+					head = ring_rd32(
+						tx_ring,
+						RNP_DMA_REG_TX_DESC_BUF_HEAD);
+					usleep_range(10000, 20000);
+					timeout++;
+				}
+				if (timeout >= 1000) {
+					rnpgbe_dbg(
+						"[%s] Wait Tx-ring %d head to zero time out\n",
+						netdev->name,
+						tx_ring->rnpgbe_queue_idx);
+				}
+				/* 6 stop queue again */
+				if (!(tx_ring->ring_flags &
+				      RNP_RING_SKIP_TX_START))
+					ring_wr32(tx_ring, RNP_DMA_TX_START, 0);
+				/* 7 restore next_to_use; otherwise the hw may hang */
+				tx_ring->next_to_use = next_to_use;
+			}
+		}
+	}
+
+	if (!err) {
+		if (!pci_channel_offline(adapter->pdev)) {
+			if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+				rnpgbe_reset(adapter);
+			else if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE))
+				rnpgbe_reset(adapter);
+		}
+	}
+	/* power down the optics for n10 SFP+ fiber */
+	if (hw->ops.disable_tx_laser)
+		hw->ops.disable_tx_laser(hw);
+
+	if (!free_tx_early)
+		rnpgbe_clean_all_tx_rings(adapter);
+
+	rnpgbe_clean_all_rx_rings(adapter);
+}
+
+/**
+ * rnpgbe_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
+ **/
+static void rnpgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	/* Do the reset outside of interrupt context */
+	int i;
+	bool real_tx_hang = false;
+
+#define TX_TIMEO_LIMIT 16000
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbe_ring *tx_ring = adapter->tx_ring[i];
+
+		if (check_for_tx_hang(tx_ring) &&
+		    rnpgbe_check_tx_hang(tx_ring)) {
+			real_tx_hang = true;
+		}
+	}
+
+	if (real_tx_hang) {
+		e_info(drv, "real hw Tx hang detected\n");
+		/* Do the reset outside of interrupt context */
+		rnpgbe_tx_timeout_reset(adapter);
+	} else {
+		e_info(drv,
+		       "Fake Tx hang detected with timeout of %d "
+		       "seconds\n",
+		       netdev->watchdog_timeo / HZ);
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			struct rnpgbe_ring *temp_ring = adapter->tx_ring[i];
+			u32 head, tail;
+			struct rnpgbe_hw *hw = &adapter->hw;
+
+			head = ring_rd32(temp_ring,
+					 RNP_DMA_REG_TX_DESC_BUF_HEAD);
+			tail = ring_rd32(temp_ring,
+					 RNP_DMA_REG_TX_DESC_BUF_TAIL);
+			e_info(drv, "sw ring %d ---- %d %d\n",
+			       temp_ring->rnpgbe_queue_idx,
+			       temp_ring->next_to_use,
+			       temp_ring->next_to_clean);
+			e_info(drv, "hw ring %d ---- %d %d\n",
+			       temp_ring->rnpgbe_queue_idx, head, tail);
+			e_info(drv, "dma version %d\n",
+			       rnpgbe_rd_reg(hw->hw_addr));
+		}
+		print_status(adapter);
+		/* fake Tx hang - increase the kernel timeout */
+		if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
+			netdev->watchdog_timeo *= 2;
+	}
+}
+
+/**
+ * rnpgbe_sw_init - Initialize general software structures (struct rnpgbe_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * rnpgbe_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int rnpgbe_sw_init(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned int rss = 0, fdir;
+	int rss_limit = num_online_cpus();
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	rss = min_t(int, adapter->max_ring_pair_counts, rss_limit);
+	rss = min_t(int, rss,
+		    hw->mac.max_msix_vectors - adapter->num_other_vectors);
+	adapter->ring_feature[RING_F_RSS].limit =
+		min_t(int, rss, adapter->max_ring_pair_counts);
+
+	adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_CAPABLE;
+	adapter->flags |= RNP_FLAG_VXLAN_OFFLOAD_ENABLE;
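+	/* one MSI-X vector is reserved for the non-queue (mailbox/LPI)
+	 * interrupt handled by rnpgbe_msix_other(); assumption based on
+	 * the num_other_vectors accounting above
+	 */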
+	adapter->max_q_vectors = hw->max_msix_vectors - 1;
+	adapter->atr_sample_rate = 20;
+
+	fdir = min_t(int, adapter->max_q_vectors, rss_limit);
+	adapter->ring_feature[RING_F_FDIR].limit = fdir;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER) {
+		spin_lock_init(&adapter->fdir_perfect_lock);
+		adapter->fdir_filter_count = 0;
+		adapter->fdir_mode = hw->fdir_mode;
+		/* fdir_pballoc does not start from zero, so add 2 */
+		adapter->fdir_pballoc = 2 + hw->layer2_count + hw->tuple5_count;
+		adapter->layer2_count = hw->layer2_count;
+		adapter->tuple_5_count = hw->tuple5_count;
+	}
+
+	mutex_init(&adapter->eee_lock);
+	adapter->tx_lpi_timer = RNP_DEFAULT_TWT_LS;
+
+	/* itr sw setup here */
+	adapter->sample_interval = 1;
+	adapter->adaptive_rx_coal = 1;
+	adapter->adaptive_tx_coal = 1;
+	adapter->auto_rx_coal = 0;
+	adapter->napi_budge = 64;
+	/* set default work limits */
+	adapter->tx_work_limit = RNP_DEFAULT_TX_WORK;
+	adapter->rx_usecs = RNP_PKT_TIMEOUT;
+	adapter->rx_frames = RNP_RX_PKT_POLL_BUDGET;
+	adapter->priv_flags &= ~RNP_PRIV_FLAG_RX_COALESCE;
+	adapter->priv_flags &= ~RNP_PRIV_FLAG_TX_COALESCE;
+	adapter->tx_usecs = RNP_PKT_TIMEOUT_TX;
+	adapter->tx_frames = RNP_TX_PKT_POLL_BUDGET;
+
+	/* n-tuple support exists, always init our spinlock */
+	/* init fdir count */
+	/* enable itr by default in dynamic mode */
+	/* set default ring sizes */
+	adapter->tx_ring_item_count = RNP_DEFAULT_TXD;
+	adapter->rx_ring_item_count = RNP_DEFAULT_RXD;
+	set_bit(__RNP_DOWN, &adapter->state);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int rnpgbe_setup_tx_resources(struct rnpgbe_ring *tx_ring,
+			      struct rnpgbe_adapter *adapter)
+{
+	struct device *dev = tx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size;
+
+	size = sizeof(struct rnpgbe_tx_buffer) * tx_ring->count;
+
+	if (tx_ring->q_vector)
+		numa_node = tx_ring->q_vector->numa_node;
+	tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
+	if (!tx_ring->tx_buffer_info)
+		tx_ring->tx_buffer_info = vzalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct rnpgbe_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+					   GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!tx_ring->desc)
+		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+						   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc)
+		goto err;
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	DPRINTK(IFUP, INFO,
+		"TxRing:%d, vector:%d ItemCounts:%d "
+		"desc:%p(0x%llx) node:%d\n",
+		tx_ring->rnpgbe_queue_idx, tx_ring->q_vector->v_idx,
+		tx_ring->count, tx_ring->desc, tx_ring->dma, numa_node);
+	return 0;
+
+err:
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * rnpgbe_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int rnpgbe_setup_all_tx_resources(struct rnpgbe_adapter *adapter)
+{
+	int i, err = 0;
+
+	tx_dbg("adapter->num_tx_queues:%d, adapter->tx_ring[0]:%p\n",
+	       adapter->num_tx_queues, adapter->tx_ring[0]);
+
+	for (i = 0; i < (adapter->num_tx_queues); i++) {
+		BUG_ON(adapter->tx_ring[i] == NULL);
+		err = rnpgbe_setup_tx_resources(adapter->tx_ring[i], adapter);
+		if (!err)
+			continue;
+
+		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
+		goto err_setup_tx;
+	}
+
+	return 0;
+err_setup_tx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		rnpgbe_free_tx_resources(adapter->tx_ring[i]);
+	return err;
+}
+
+/**
+ * rnpgbe_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int rnpgbe_setup_rx_resources(struct rnpgbe_ring *rx_ring,
+			      struct rnpgbe_adapter *adapter)
+{
+	struct device *dev = rx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size;
+
+	BUG_ON(rx_ring == NULL);
+
+	size = sizeof(struct rnpgbe_rx_buffer) * rx_ring->count;
+
+	if (rx_ring->q_vector)
+		numa_node = rx_ring->q_vector->numa_node;
+
+	rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
+	if (!rx_ring->rx_buffer_info)
+		rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
+		goto err;
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union rnpgbe_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
+					   GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!rx_ring->desc)
+		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+						   &rx_ring->dma, GFP_KERNEL);
+	if (!rx_ring->desc)
+		goto err;
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	DPRINTK(IFUP, INFO,
+		"RxRing:%d, vector:%d ItemCounts:%d "
+		"desc:%p(0x%llx) node:%d\n",
+		rx_ring->rnpgbe_queue_idx, rx_ring->q_vector->v_idx,
+		rx_ring->count, rx_ring->desc, rx_ring->dma, numa_node);
+
+	return 0;
+err:
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * rnpgbe_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int rnpgbe_setup_all_rx_resources(struct rnpgbe_adapter *adapter)
+{
+	int i, err = 0;
+	u32 head;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		BUG_ON(adapter->rx_ring[i] == NULL);
+
+		/* Check head against count: in SR-IOV mode the hardware head
+		 * pointer may be larger than the ring count.  If so, shrink
+		 * the ring to head + 1 for now and restore the real length
+		 * later via RNP_RING_FLAG_DELAY_SETUP_RX_LEN / reset_count.
+		 */
+		head = ring_rd32(adapter->rx_ring[i],
+				 RNP_DMA_REG_RX_DESC_BUF_HEAD);
+		if (unlikely(head >= adapter->rx_ring[i]->count)) {
+			dbg("[%s] Ring %d head large than count",
+			    adapter->netdev->name,
+			    adapter->rx_ring[i]->rnpgbe_queue_idx);
+			adapter->rx_ring[i]->ring_flags |=
+				RNP_RING_FLAG_DELAY_SETUP_RX_LEN;
+			adapter->rx_ring[i]->reset_count =
+				adapter->rx_ring[i]->count;
+			adapter->rx_ring[i]->count = head + 1;
+		}
+		err = rnpgbe_setup_rx_resources(adapter->rx_ring[i], adapter);
+		if (!err)
+			continue;
+
+		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
+		goto err_setup_rx;
+	}
+
+	return 0;
+err_setup_rx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		rnpgbe_free_rx_resources(adapter->rx_ring[i]);
+	return err;
+}
+
+/**
+ * rnpgbe_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void rnpgbe_free_tx_resources(struct rnpgbe_ring *tx_ring)
+{
+	BUG_ON(tx_ring == NULL);
+
+	rnpgbe_clean_tx_ring(tx_ring);
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * rnpgbe_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void rnpgbe_free_all_tx_resources(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < (adapter->num_tx_queues); i++)
+		rnpgbe_free_tx_resources(adapter->tx_ring[i]);
+}
+
+/**
+ * rnpgbe_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void rnpgbe_free_rx_resources(struct rnpgbe_ring *rx_ring)
+{
+	BUG_ON(rx_ring == NULL);
+
+	rnpgbe_clean_rx_ring(rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * rnpgbe_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void rnpgbe_free_all_rx_resources(struct rnpgbe_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < (adapter->num_rx_queues); i++) {
+		if (adapter->rx_ring[i]->desc)
+			rnpgbe_free_rx_resources(adapter->rx_ring[i]);
+	}
+}
+
+/**
+ * rnpgbe_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int rnpgbe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN * 2;
+
+	/* MTU < 68 is an error and causes problems on some kernels */
+	if ((new_mtu < hw->min_length) || (max_frame > hw->max_length))
+		return -EINVAL;
+
+	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+
+	if (netdev->mtu == new_mtu)
+		return 0;
+
+	/* must set new MTU before calling down or up */
+	netdev->mtu = new_mtu;
+
+	rnpgbe_msg_post_status(adapter, PF_SET_MTU);
+
+	if (netif_running(netdev))
+		rnpgbe_reinit_locked(adapter);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set
+ * @maxrate: desired maximum transmit bitrate Mbps
+ **/
+__maybe_unused static int rnpgbe_tx_maxrate(struct net_device *netdev,
+					    int queue_index, u32 maxrate)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_ring *tx_ring = adapter->tx_ring[queue_index];
+	u64 real_rate = 0;
+
+	adapter->max_rate[queue_index] = maxrate;
+	rnpgbe_dbg("%s: queue:%d maxrate:%d\n", __func__, queue_index, maxrate);
+	if (!maxrate)
+		return rnpgbe_setup_tx_maxrate(
+			tx_ring, 0, adapter->hw.usecstocount * 100000);
+	/* convert the Mbps value to a bytes/s rate for the hardware
+	 * (the 1000 * 94 / 8 factor appears to be hardware-specific scaling)
+	 */
+	real_rate = ((u64)maxrate * 1000 * 94) >> 3;
+	rnpgbe_setup_tx_maxrate(tx_ring, real_rate,
+				adapter->hw.usecstocount * 100000);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int rnpgbe_open(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err;
+
+	DPRINTK(IFUP, INFO, "ifup\n");
+
+	/* disallow open during test */
+	if (test_bit(__RNP_TESTING, &adapter->state))
+		return -EBUSY;
+
+	netif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = rnpgbe_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = rnpgbe_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	rnpgbe_configure(adapter);
+
+	err = rnpgbe_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+	if (err)
+		goto err_set_queues;
+
+	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+	if (err)
+		goto err_set_queues;
+
+	if (module_enable_ptp)
+		rnpgbe_ptp_register(adapter);
+
+	rnpgbe_up_complete(adapter);
+
+	return 0;
+
+err_set_queues:
+	rnpgbe_free_irq(adapter);
+err_req_irq:
+	rnpgbe_free_all_rx_resources(adapter);
+err_setup_rx:
+	rnpgbe_free_all_tx_resources(adapter);
+err_setup_tx:
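+	/* roll back: tell the firmware over the mailbox that the interface
+	 * did not come up, then reset the hardware
+	 */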
+	hw->ops.set_mbx_ifup(hw, 0);
+	rnpgbe_reset(adapter);
+
+	return err;
+}
+
+/**
+ * rnpgbe_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int rnpgbe_close(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	DPRINTK(IFDOWN, INFO, "ifdown\n");
+
+#ifdef DISABLE_RX_IRQ
+	adapter->quit_poll_thread = true;
+#endif
+
+	if (module_enable_ptp)
+		rnpgbe_ptp_unregister(adapter);
+
+	rnpgbe_down(adapter);
+
+	rnpgbe_free_irq(adapter);
+
+	rnpgbe_free_all_tx_resources(adapter);
+	rnpgbe_free_all_rx_resources(adapter);
+
+	/* if in sriov mode send link down to all vfs */
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		adapter->link_up = 0;
+		adapter->link_up_old = 0;
+		rnpgbe_msg_post_status(adapter, PF_SET_LINK_STATUS);
+		/* wait for all VFs to see this status */
+		usleep_range(5000, 10000);
+	}
+
+	return 0;
+}
+
+static int rnpgbe_resume(struct device *dev)
+{
+	struct rnpgbe_adapter *adapter;
+	struct net_device *netdev;
+	int err;
+	struct rnpgbe_hw *hw;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	adapter = pci_get_drvdata(pdev);
+	hw = &adapter->hw;
+	netdev = adapter->netdev;
+	pr_info("call rnpgbe_resume\n");
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/*
+	 * pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		e_dev_err("Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_wake_from_d3(pdev, false);
+
+	rtnl_lock();
+
+	err = rnpgbe_init_interrupt_scheme(adapter);
+	if (!err)
+		err = register_mbx_irq(adapter);
+
+	if (hw->ops.driver_status) {
+		hw->ops.driver_status(hw, false, rnpgbe_driver_suspuse);
+		hw->ops.driver_status(hw, true, rnpgbe_driver_insmod);
+	}
+
+	rnpgbe_reset(adapter);
+
+	/* we should setup link in default */
+	hw->ops.setup_link(hw, DEFAULT_ADV, 1, 0, 0);
+	hw->advertised_link = DEFAULT_ADV;
+
+	if (!err) {
+		if (netif_running(netdev)) {
+			err = rnpgbe_open(netdev);
+		} else {
+			hw->ops.set_mbx_link_event(hw, 0);
+			hw->ops.set_mbx_ifup(hw, 0);
+		}
+	}
+
+	rtnl_unlock();
+
+	if (err)
+		return err;
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_freeze - quiesce the device (no IRQ's or DMA)
+ * @dev: generic device pointer
+ */
+static int rnpgbe_freeze(struct device *dev)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev));
+	struct net_device *netdev = adapter->netdev;
+
+	rtnl_lock();
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		rnpgbe_down(adapter);
+		rnpgbe_free_irq(adapter);
+
+		rnpgbe_free_all_tx_resources(adapter);
+		rnpgbe_free_all_rx_resources(adapter);
+	}
+
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+	rtnl_unlock();
+
+	return 0;
+}
+
+/**
+ * rnpgbe_thaw - un-quiesce the device
+ * @dev: generic device pointer
+ */
+static int rnpgbe_thaw(struct device *dev)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev));
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	rtnl_lock();
+	err = rnpgbe_init_interrupt_scheme(adapter);
+
+	if (netif_running(netdev))
+		err = rnpgbe_open(netdev);
+
+	rtnl_unlock();
+
+	if (err)
+		return err;
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+
+static int __rnpgbe_shutdown_suspuse(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 wufc = adapter->wol;
+	int retval = 0;
+
+	netif_device_detach(netdev);
+
+	rtnl_lock();
+	if (netif_running(netdev)) {
+		rnpgbe_down(adapter);
+		rnpgbe_free_irq(adapter);
+		rnpgbe_free_all_tx_resources(adapter);
+		rnpgbe_free_all_rx_resources(adapter);
+		/* should consider sriov mode ? */
+	}
+	rtnl_unlock();
+
+	/* if WoL or NCSI is enabled, we must notify the firmware */
+	if ((hw->ncsi_en || adapter->wol) && hw->ops.driver_status)
+		hw->ops.driver_status(hw, true, rnpgbe_driver_suspuse);
+
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	if (wufc) {
+		rnpgbe_set_rx_mode(netdev);
+
+		/* enable the optics for n10 SFP+ fiber as we can WoL */
+		if (hw->ops.enable_tx_laser)
+			hw->ops.enable_tx_laser(hw);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+	}
+
+	if (hw->ops.setup_wol)
+		hw->ops.setup_wol(hw, adapter->wol);
+
+	pci_wake_from_d3(pdev, !!wufc);
+	*enable_wake = !!wufc;
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+static int __rnpgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 wufc = adapter->wol;
+	int retval = 0;
+
+	netif_device_detach(netdev);
+
+	rtnl_lock();
+	if (netif_running(netdev)) {
+		rnpgbe_down(adapter);
+		rnpgbe_free_irq(adapter);
+		rnpgbe_free_all_tx_resources(adapter);
+		rnpgbe_free_all_rx_resources(adapter);
+		/* should consider sriov mode ? */
+	}
+	rtnl_unlock();
+
+	/* only send the mailbox message if NCSI or WoL is enabled */
+	if ((hw->ncsi_en || adapter->wol) && hw->ops.driver_status)
+		hw->ops.driver_status(hw, false, rnpgbe_driver_insmod);
+
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	if (wufc) {
+		rnpgbe_set_rx_mode(netdev);
+
+		/* enable the optics for n10 SFP+ fiber as we can WoL */
+		if (hw->ops.enable_tx_laser)
+			hw->ops.enable_tx_laser(hw);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+	}
+
+	if (hw->ops.setup_wol)
+		hw->ops.setup_wol(hw, adapter->wol);
+
+	pci_wake_from_d3(pdev, !!wufc);
+	*enable_wake = !!wufc;
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+static int rnpgbe_suspend(struct device *dev)
+{
+	int retval;
+	bool wake;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	pr_info("call rnpgbe_suspend\n");
+
+	retval = __rnpgbe_shutdown_suspuse(pdev, &wake);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+__maybe_unused static void rnpgbe_shutdown(struct pci_dev *pdev)
+{
+	bool wake = false;
+
+	pr_info("call rnpgbe_shutdown\n");
+
+	__rnpgbe_shutdown(pdev, &wake);
+
+	pr_info("call rnpgbe_shutdown wake %x\n", wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+	pr_info("call rnpgbe_shutdown done\n");
+}
+
+/**
+ * rnpgbe_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void rnpgbe_update_stats(struct rnpgbe_adapter *adapter)
+{
+	struct net_device_stats *net_stats = &adapter->netdev->stats;
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_hw_stats *hw_stats = &adapter->hw_stats;
+
+	int i;
+	struct rnpgbe_ring *ring;
+	u64 hw_csum_rx_error = 0;
+	u64 hw_csum_rx_good = 0;
+	net_stats->tx_packets = 0;
+	net_stats->tx_bytes = 0;
+	net_stats->rx_packets = 0;
+	net_stats->rx_bytes = 0;
+	net_stats->rx_dropped = 0;
+	net_stats->rx_errors = 0;
+	hw_stats->vlan_strip_cnt = 0;
+	hw_stats->vlan_add_cnt = 0;
+
+	if (test_bit(__RNP_DOWN, &adapter->state) ||
+	    test_bit(__RNP_RESETTING, &adapter->state))
+		return;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		rnpgbe_for_each_ring(ring, adapter->q_vector[i]->rx) {
+			hw_csum_rx_error += ring->rx_stats.csum_err;
+			hw_csum_rx_good += ring->rx_stats.csum_good;
+			hw_stats->vlan_strip_cnt += ring->rx_stats.vlan_remove;
+			net_stats->rx_packets += ring->stats.packets;
+			net_stats->rx_bytes += ring->stats.bytes;
+		}
+
+		rnpgbe_for_each_ring(ring, adapter->q_vector[i]->tx) {
+			hw_stats->vlan_add_cnt += ring->tx_stats.vlan_add;
+			net_stats->tx_packets += ring->stats.packets;
+			net_stats->tx_bytes += ring->stats.bytes;
+		}
+	}
+
+	net_stats->rx_errors += hw_csum_rx_error;
+	hw->ops.update_hw_status(hw, hw_stats, net_stats);
+	adapter->hw_csum_rx_error = hw_csum_rx_error;
+	adapter->hw_csum_rx_good = hw_csum_rx_good;
+	net_stats->rx_errors = hw_csum_rx_error;
+}
+
+/**
+ * rnpgbe_watchdog_update_link - update the link status
+ * @adapter: pointer to the device adapter structure
+ **/
+static void rnpgbe_watchdog_update_link(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 link_speed = adapter->link_speed;
+	bool link_up = adapter->link_up;
+	bool duplex = adapter->duplex_old;
+	bool flow_rx = true, flow_tx = true;
+
+	if (!(adapter->flags & RNP_FLAG_NEED_LINK_UPDATE))
+		return;
+
+	if (hw->ops.check_link) {
+		hw->ops.check_link(hw, &link_speed, &link_up, &duplex, false);
+	} else {
+		/* assume the link is up if there is no check_link function */
+		link_speed = RNP_LINK_SPEED_10GB_FULL;
+		link_up = true;
+	}
+
+	if (link_up || time_after(jiffies, (adapter->link_check_timeout +
+					    RNP_TRY_LINK_TIMEOUT))) {
+		adapter->flags &= ~RNP_FLAG_NEED_LINK_UPDATE;
+	}
+	adapter->link_up = link_up;
+	adapter->link_speed = link_speed;
+	adapter->duplex_old = duplex;
+
+	if (hw->ops.get_pause_mode)
+		hw->ops.get_pause_mode(hw);
+	switch (hw->fc.current_mode) {
+	case rnpgbe_fc_none:
+		flow_rx = false;
+		flow_tx = false;
+		break;
+	case rnpgbe_fc_tx_pause:
+		flow_rx = false;
+		flow_tx = true;
+		break;
+	case rnpgbe_fc_rx_pause:
+		flow_rx = true;
+		flow_tx = false;
+		break;
+	case rnpgbe_fc_full:
+		flow_rx = true;
+		flow_tx = true;
+		break;
+	default:
+		hw_dbg(hw, "Flow control param set incorrectly\n");
+	}
+
+	if (adapter->link_up) {
+		const char *speed_str;
+
+		if (hw->ops.set_mac_speed)
+			hw->ops.set_mac_speed(hw, true, link_speed, duplex);
+		if (hw->ops.set_pause_mode)
+			hw->ops.set_pause_mode(hw);
+
+		switch (link_speed) {
+		case RNP_LINK_SPEED_40GB_FULL:
+			speed_str = "40 Gbps";
+			break;
+		case RNP_LINK_SPEED_25GB_FULL:
+			speed_str = "25 Gbps";
+			break;
+		case RNP_LINK_SPEED_10GB_FULL:
+			speed_str = "10 Gbps";
+			break;
+		case RNP_LINK_SPEED_1GB_FULL:
+			speed_str = "1000 Mbps";
+			break;
+		case RNP_LINK_SPEED_100_FULL:
+			speed_str = "100 Mbps";
+			break;
+		case RNP_LINK_SPEED_10_FULL:
+			speed_str = "10 Mbps";
+			break;
+		default:
+			speed_str = "unknown speed";
+			break;
+		}
+
+		e_info(drv, "NIC Link is Up %s, %s Duplex, Flow Control: %s\n",
+		       speed_str, duplex ? "Full" : "Half",
+		       (flow_rx && flow_tx) ?
+			       "RX/TX" :
+			       (flow_rx ? "RX" : (flow_tx ? "TX" : "None")));
+	} else {
+		if (hw->ops.set_mac_speed)
+			hw->ops.set_mac_speed(hw, false, 0, false);
+	}
+}
+
+/**
+ * rnpgbe_eee_ctrl_timer - EEE TX SW timer.
+ * @t: pointer to the eee_ctrl_timer timer_list
+ * Description:
+ *  If there is no data transfer and we are not already in the LPI state,
+ *  then the MAC transmitter can be moved to the LPI state.
+ */
+static void rnpgbe_eee_ctrl_timer(struct timer_list *t)
+{
+	struct rnpgbe_adapter *adapter = from_timer(adapter, t, eee_ctrl_timer);
+
+	rnpgbe_enable_eee_mode(adapter);
+	if (!test_bit(__RNP_EEE_REMOVE, &adapter->state))
+		mod_timer(&adapter->eee_ctrl_timer,
+			  RNP_LPI_T(adapter->eee_timer));
+}
+
+static bool rnpgbe_eee_init(struct rnpgbe_adapter *adapter)
+{
+	int tx_lpi_timer = adapter->tx_lpi_timer;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	mutex_lock(&adapter->eee_lock);
+
+	/* Check if it needs to be deactivated */
+	if (!adapter->eee_active) {
+		set_bit(__RNP_EEE_REMOVE, &adapter->state);
+		netdev_dbg(adapter->netdev, "disable EEE\n");
+		del_timer_sync(&adapter->eee_ctrl_timer);
+		hw->ops.set_eee_timer(hw, 0, tx_lpi_timer);
+	} else {
+		clear_bit(__RNP_EEE_REMOVE, &adapter->state);
+		timer_setup(&adapter->eee_ctrl_timer, rnpgbe_eee_ctrl_timer, 0);
+		mod_timer(&adapter->eee_ctrl_timer,
+			  RNP_LPI_T(adapter->eee_timer));
+		hw->ops.set_eee_timer(hw, RNP_DEFAULT_LIT_LS, tx_lpi_timer);
+	}
+
+	mutex_unlock(&adapter->eee_lock);
+	netdev_dbg(adapter->netdev, "Energy-Efficient Ethernet initialized\n");
+	return true;
+}
+
+static int rnpgbe_phy_init_eee(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* no hw EEE capability, or EEE disabled via ethtool */
+	if ((!hw->eee_capability) || (!adapter->eee_enabled))
+		return -EIO;
+	/* only init EEE in full-duplex mode */
+	if (!hw->duplex)
+		return -EIO;
+	/* do not init EEE at 10 Mbps */
+	if (hw->speed == 10)
+		return -EIO;
+	/* init EEE only if both local and link partner advertise EEE */
+	if (!(adapter->local_eee & adapter->partner_eee))
+		return -EIO;
+	if ((hw->hw_type == rnpgbe_hw_n500) ||
+	    (hw->hw_type == rnpgbe_hw_n210)) {
+		/* n500 only supports EEE at 100/1000 Mbps full duplex */
+		if (!hw->duplex)
+			return -EIO;
+
+		if ((adapter->speed != RNP_LINK_SPEED_100_FULL) &&
+		    (adapter->speed != RNP_LINK_SPEED_1GB_FULL))
+			return -EIO;
+	}
+	/* EEE cannot be enabled in SR-IOV mode */
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+		return -EIO;
+
+	return 0;
+}
+
+static void rnpgbe_setup_eee_mode(struct rnpgbe_adapter *adapter, bool status)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (status) {
+		/* if EEE was active before, disable it first */
+		if (adapter->eee_active) {
+			adapter->eee_active = 0;
+			rnpgbe_eee_init(adapter);
+		}
+		/* link is up, try to activate EEE */
+		adapter->eee_active = rnpgbe_phy_init_eee(adapter) >= 0;
+		/* if EEE could be activated, initialize it */
+		if (adapter->eee_active) {
+			rnpgbe_eee_init(adapter);
+			if (hw->ops.set_eee_pls)
+				hw->ops.set_eee_pls(hw, true);
+		}
+	} else {
+		/* if EEE was active before, disable it */
+		if (adapter->eee_active) {
+			adapter->eee_active = 0;
+			rnpgbe_eee_init(adapter);
+		}
+		if (hw->ops.set_eee_pls)
+			hw->ops.set_eee_pls(hw, false);
+	}
+}
+
+/**
+ * rnpgbe_watchdog_link_is_up - update netif_carrier status and
+ *                             print link up message
+ * @adapter: pointer to the device adapter structure
+ **/
+static void rnpgbe_watchdog_link_is_up(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* only continue if link was previously down */
+	if (netif_carrier_ok(netdev))
+		return;
+
+	adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP;
+	switch (hw->mac.type) {
+	default:
+		break;
+	}
+
+	netif_carrier_on(netdev);
+
+	netif_tx_wake_all_queues(netdev);
+
+	hw->ops.set_mac_rx(hw, true);
+
+	rnpgbe_setup_eee_mode(adapter, true);
+}
+
+/**
+ * rnpgbe_watchdog_link_is_down - update netif_carrier status and
+ *                               print link down message
+ * @adapter: pointer to the adapter structure
+ **/
+static void rnpgbe_watchdog_link_is_down(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	adapter->link_up = false;
+	adapter->link_speed = 0;
+
+	/* only continue if link was up previously */
+	if (!netif_carrier_ok(netdev))
+		return;
+
+	/* poll for SFP+ cable when link is down */
+	adapter->flags2 |= RNP_FLAG2_SEARCH_FOR_SFP;
+
+	e_info(drv, "NIC Link is Down\n");
+
+	netif_carrier_off(netdev);
+
+	netif_tx_stop_all_queues(netdev);
+
+	hw->ops.set_mac_rx(hw, false);
+
+	rnpgbe_setup_eee_mode(adapter, false);
+}
+
+static void rnpgbe_update_link_to_vf(struct rnpgbe_adapter *adapter)
+{
+	if (!(adapter->flags & RNP_FLAG_VF_INIT_DONE))
+		return;
+
+	if ((adapter->link_up_old != adapter->link_up) ||
+	    (adapter->link_speed_old != adapter->link_speed)) {
+		if (!test_bit(__RNP_IN_IRQ, &adapter->state)) {
+			if (rnpgbe_msg_post_status(adapter,
+						   PF_SET_LINK_STATUS) == 0) {
+				adapter->link_up_old = adapter->link_up;
+				adapter->link_speed_old = adapter->link_speed;
+			}
+		}
+	}
+}
+
+/**
+ * rnpgbe_watchdog_subtask - check and bring link up
+ * @adapter: pointer to the device adapter structure
+ **/
+static void rnpgbe_watchdog_subtask(struct rnpgbe_adapter *adapter)
+{
+	/* if the interface is down, do nothing */
+	/* link status should still be reported when in SR-IOV mode */
+	if (test_bit(__RNP_DOWN, &adapter->state) ||
+	    test_bit(__RNP_RESETTING, &adapter->state))
+		return;
+
+	rnpgbe_watchdog_update_link(adapter);
+
+	if (adapter->link_up)
+		rnpgbe_watchdog_link_is_up(adapter);
+	else
+		rnpgbe_watchdog_link_is_down(adapter);
+
+	rnpgbe_update_link_to_vf(adapter);
+
+	rnpgbe_update_stats(adapter);
+}
+
+/**
+ * rnpgbe_service_timer - Timer Call-back
+ * @t: pointer to the service_timer timer_list
+ **/
+void rnpgbe_service_timer(struct timer_list *t)
+{
+	struct rnpgbe_adapter *adapter = from_timer(adapter, t, service_timer);
+	unsigned long next_event_offset;
+	bool ready = true;
+
+	/* poll faster when waiting for link */
+	if (adapter->flags & RNP_FLAG_NEED_LINK_UPDATE)
+		next_event_offset = HZ / 10;
+	else
+		next_event_offset = HZ;
+	/* Reset the timer */
+	if (!test_bit(__RNP_REMOVE, &adapter->state))
+		mod_timer(&adapter->service_timer, next_event_offset + jiffies);
+
+	if (ready)
+		rnpgbe_service_event_schedule(adapter);
+}
+
+static void rnpgbe_reset_pf_subtask(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	if (!(adapter->flags2 & RNP_FLAG2_RESET_PF))
+		return;
+
+	rtnl_lock();
+	netif_device_detach(netdev);
+	if (netif_running(netdev)) {
+		rnpgbe_down(adapter);
+		rnpgbe_free_irq(adapter);
+		rnpgbe_free_all_tx_resources(adapter);
+		rnpgbe_free_all_rx_resources(adapter);
+	}
+	rtnl_unlock();
+	adapter->link_up = 0;
+	adapter->link_up_old = 0;
+	rnpgbe_msg_post_status(adapter, PF_SET_LINK_STATUS);
+	/* wait for all VFs to see this status */
+	usleep_range(500, 1000);
+	rnpgbe_reset(adapter);
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+	rtnl_lock();
+	err = rnpgbe_init_interrupt_scheme(adapter);
+	register_mbx_irq(adapter);
+
+	if (!err && netif_running(netdev))
+		err = rnpgbe_open(netdev);
+
+	rtnl_unlock();
+	rnpgbe_msg_post_status(adapter, PF_SET_RESET);
+	netif_device_attach(netdev);
+	adapter->flags2 &= (~RNP_FLAG2_RESET_PF);
+}
+
+static void rnpgbe_reset_subtask(struct rnpgbe_adapter *adapter)
+{
+	if (!(adapter->flags2 & RNP_FLAG2_RESET_REQUESTED))
+		return;
+
+	adapter->flags2 &= ~RNP_FLAG2_RESET_REQUESTED;
+
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__RNP_DOWN, &adapter->state) ||
+	    test_bit(__RNP_RESETTING, &adapter->state))
+		return;
+
+	netdev_err(adapter->netdev, "Reset adapter\n");
+	adapter->tx_timeout_count++;
+	rtnl_lock();
+	rnpgbe_reinit_locked(adapter);
+	rtnl_unlock();
+}
+
+static void rnpgbe_rx_len_reset_subtask(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	struct rnpgbe_ring *rx_ring;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		if (unlikely(rx_ring->ring_flags &
+			     RNP_RING_FLAG_DO_RESET_RX_LEN)) {
+			dbg("[%s] Rx-ring %d count reset\n",
+			    adapter->netdev->name, rx_ring->rnpgbe_queue_idx);
+			rnpgbe_rx_ring_reinit(adapter, rx_ring);
+			rx_ring->ring_flags &= (~RNP_RING_FLAG_DO_RESET_RX_LEN);
+		}
+	}
+}
+
+/* just modify rx itr */
+static void rnpgbe_auto_itr_moderation(struct rnpgbe_adapter *adapter)
+{
+	int i;
+	struct rnpgbe_ring *rx_ring;
+	u64 period = (u64)(jiffies - adapter->last_moder_jiffies);
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_COALESCE)
+		return;
+
+	if (!adapter->adaptive_rx_coal ||
+	    period < adapter->sample_interval * HZ) {
+		return;
+	}
+
+	adapter->last_moder_jiffies = jiffies;
+
+	/* it is time to check moderation */
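+	/* Compute each ring's rx packet rate over the sample period; rings
+	 * below roughly 20k packets/s are flagged for a lower interrupt rate.
+	 */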
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		u64 x, rate;
+		u64 rx_packets, packets, rx_pkt_diff;
+
+		rx_ring = adapter->rx_ring[i];
+		rx_packets = READ_ONCE(rx_ring->stats.packets);
+		rx_pkt_diff = rx_packets -
+			      adapter->last_moder_packets[rx_ring->queue_index];
+		packets = rx_pkt_diff;
+
+		x = packets * HZ;
+		do_div(x, period);
+		rate = x;
+
+		if (packets) {
+			if (rate < 20000)
+				rx_ring->ring_flags |= RNP_RING_LOWER_ITR;
+			else
+				rx_ring->ring_flags &= ~RNP_RING_LOWER_ITR;
+		}
+
+		/* write back new count */
+		adapter->last_moder_packets[rx_ring->queue_index] = rx_packets;
+	}
+}
+
+/**
+ * rnpgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void rnpgbe_service_task(struct work_struct *work)
+{
+	struct rnpgbe_adapter *adapter =
+		container_of(work, struct rnpgbe_adapter, service_task);
+
+	rnpgbe_reset_subtask(adapter);
+	rnpgbe_reset_pf_subtask(adapter);
+	rnpgbe_watchdog_subtask(adapter);
+	rnpgbe_rx_len_reset_subtask(adapter);
+	rnpgbe_auto_itr_moderation(adapter);
+	rnpgbe_service_event_complete(adapter);
+}
+
+static int rnpgbe_tso(struct rnpgbe_ring *tx_ring,
+		      struct rnpgbe_tx_buffer *first, u32 *mac_ip_len,
+		      u8 *hdr_len, u32 *tx_flags)
+{
+	struct sk_buff *skb = first->skb;
+	struct net_device *netdev = tx_ring->netdev;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
+	int err;
+	u8 *inner_mac;
+	u16 gso_segs, gso_size;
+	u16 gso_need_pad;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
+
+	inner_mac = skb->data;
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->tot_len = 0;
+		ip.v4->check = 0x0000;
+	} else {
+		ip.v6->payload_len = 0;
+	}
+
+	if (skb_shinfo(skb)->gso_type &
+	    (SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
+	     SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) {
+		/* we should always do this */
+		inner_mac = skb_inner_mac_header(skb);
+
+		first->tunnel_hdr_len = (inner_mac - skb->data);
+
+		if (skb_shinfo(skb)->gso_type &
+		    (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) {
+			*tx_flags |= RNP_TXD_TUNNEL_VXLAN;
+			l4.udp->check = 0;
+			tx_dbg("set outer l4.udp to 0\n");
+		} else {
+			*tx_flags |= RNP_TXD_TUNNEL_NVGRE;
+		}
+
+		/* reset pointers to inner headers */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+	}
+
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the inner IP header
+		 */
+		ip.v4->tot_len = 0;
+		ip.v4->check = 0x0000;
+
+	} else {
+		ip.v6->payload_len = 0;
+		/* set ipv6 type */
+		*tx_flags |= RNP_TXD_FLAG_IPv6;
+	}
+
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	paylen = skb->len - l4_offset;
+	tx_dbg("before l4 checksum is %x\n", l4.tcp->check);
+
+	if (skb->csum_offset == offsetof(struct tcphdr, check)) {
+		tx_dbg("tcp before l4 checksum is %x\n", l4.tcp->check);
+		*tx_flags |= RNP_TXD_L4_TYPE_TCP;
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+		l4.tcp->psh = 0;
+		tx_dbg("tcp l4 checksum is %x\n", l4.tcp->check);
+	} else {
+		tx_dbg("paylen is %x\n", paylen);
+		*tx_flags |= RNP_TXD_L4_TYPE_UDP;
+		/* compute length of segmentation header */
+		tx_dbg("udp before l4 checksum is %x\n", l4.udp->check);
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+		tx_dbg("udp l4 checksum is %x\n", l4.udp->check);
+	}
+
+	tx_dbg("l4 checksum is %x\n", l4.tcp->check);
+	*mac_ip_len = (l4.hdr - ip.hdr) | ((ip.hdr - inner_mac) << 9);
+
+	/* compute header lengths */
+	/* pull values out of skb_shinfo */
+	gso_size = skb_shinfo(skb)->gso_size;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
+	/* when tx padding is enabled, check whether the gso segments need padding */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) {
+		gso_need_pad = (first->skb->len - *hdr_len) % gso_size;
+		if (gso_need_pad) {
+			if ((gso_need_pad + *hdr_len) <= 60) {
+				gso_need_pad = 60 - (gso_need_pad + *hdr_len);
+				first->gso_need_padding = !!gso_need_pad;
+			}
+		}
+	}
+
+	/* update gso segs and bytecount with header size
+	 * so that tx stats are reported correctly
+	 */
+	first->gso_segs = gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
+	if (skb->csum_offset == offsetof(struct tcphdr, check)) {
+		first->mss_len_vf_num |=
+			(gso_size | ((l4.tcp->doff * 4) << 24));
+	} else {
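+		/* non-TCP (UDP) segmentation: the L4 header length field in
+		 * the upper byte is the fixed 8-byte UDP header
+		 */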
+		first->mss_len_vf_num |= (gso_size | ((8) << 24));
+	}
+
+	*tx_flags |= RNP_TXD_FLAG_TSO | RNP_TXD_IP_CSUM | RNP_TXD_L4_CSUM;
+	first->ctx_flag = true;
+
+	return 1;
+}
+
+static int rnpgbe_tx_csum(struct rnpgbe_ring *tx_ring,
+			  struct rnpgbe_tx_buffer *first, u32 *mac_ip_len,
+			  u32 *tx_flags)
+{
+	struct sk_buff *skb = first->skb;
+	u8 l4_proto = 0;
+	u8 ip_len = 0;
+	u8 mac_len = 0;
+	u8 *inner_mac = skb->data;
+	u8 *exthdr;
+	__be16 frag_off;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	inner_mac = skb->data;
+
+	/* outer protocol */
+	if (skb->encapsulation) {
+		/* define outer network header type */
+		if (ip.v4->version == 4) {
+			l4_proto = ip.v4->protocol;
+		} else {
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		}
+
+		/* define outer transport */
+		switch (l4_proto) {
+		case IPPROTO_UDP:
+			l4.udp->check = 0;
+			break;
+		default:
+			skb_checksum_help(skb);
+			return -1;
+		}
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+
+		inner_mac = skb_inner_mac_header(skb);
+		first->tunnel_hdr_len = inner_mac - skb->data;
+		first->ctx_flag = true;
+		tx_dbg("tunnel length is %d\n", first->tunnel_hdr_len);
+	}
+
+	mac_len = (ip.hdr - inner_mac); /* mac header length */
+	*mac_ip_len = (ip.hdr - inner_mac) << 9;
+	tx_dbg("inner checksum needed %d", skb_checksum_start_offset(skb));
+	tx_dbg("skb->encapsulation %d\n", skb->encapsulation);
+	ip_len = (l4.hdr - ip.hdr);
+	if (ip.v4->version == 4) {
+		l4_proto = ip.v4->protocol;
+	} else {
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
+					 &frag_off);
+		*tx_flags |= RNP_TXD_FLAG_IPv6;
+	}
+	/* Enable L4 checksum offloads */
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		*tx_flags |= RNP_TXD_L4_TYPE_TCP | RNP_TXD_L4_CSUM;
+		break;
+	case IPPROTO_SCTP:
+		tx_dbg("sctp checksum packet\n");
+		*tx_flags |= RNP_TXD_L4_TYPE_SCTP | RNP_TXD_L4_CSUM;
+		break;
+	case IPPROTO_UDP:
+		*tx_flags |= RNP_TXD_L4_TYPE_UDP | RNP_TXD_L4_CSUM;
+		break;
+	default:
+		skb_checksum_help(skb);
+		return 0;
+	}
+
+	if ((tx_ring->ring_flags & RNP_RING_NO_TUNNEL_SUPPORT) &&
+	    (first->ctx_flag)) {
+		/* tunnel offload not supported: fold tunnel header into mac len */
+		if (!(first->priv_tags)) {
+			first->ctx_flag = false;
+			mac_len += first->tunnel_hdr_len;
+			first->tunnel_hdr_len = 0;
+		}
+	}
+	tx_dbg("mac length is %d\n", mac_len);
+	tx_dbg("ip length is %d\n", ip_len);
+	*mac_ip_len = (mac_len << 9) | ip_len;
+
+	return 0;
+}
+
+static int __rnpgbe_maybe_stop_tx(struct rnpgbe_ring *tx_ring, u16 size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (likely(rnpgbe_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+
+	return 0;
+}
+
+static inline int rnpgbe_maybe_stop_tx(struct rnpgbe_ring *tx_ring, u16 size)
+{
+	if (likely(rnpgbe_desc_unused(tx_ring) >= size))
+		return 0;
+	return __rnpgbe_maybe_stop_tx(tx_ring, size);
+}
+
+static int rnpgbe_tx_map(struct rnpgbe_ring *tx_ring,
+			 struct rnpgbe_tx_buffer *first, u32 mac_ip_len,
+			 u32 tx_flags)
+{
+	struct sk_buff *skb = first->skb;
+	struct rnpgbe_tx_buffer *tx_buffer;
+	struct rnpgbe_tx_desc *tx_desc;
+	skb_frag_t *frag;
+	dma_addr_t dma;
+	unsigned int data_len, size;
+	u16 i = tx_ring->next_to_use;
+	u64 fun_id = ((u64)(tx_ring->pfvfnum) << (56));
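+	/* fun_id is OR'ed into the high bits (bit 56 and above) of each
+	 * descriptor's packet address below, presumably so the hardware can
+	 * tell which PF/VF queued the buffer
+	 */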
+
+	tx_desc = RNP_TX_DESC(tx_ring, i);
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+	tx_buffer = first;
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		/* 1st desc */
+		tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+
+		while (unlikely(size > RNP_MAX_DATA_PER_TXD)) {
+			tx_desc->vlan_cmd_bsz = build_ctob(
+				tx_flags, mac_ip_len, RNP_MAX_DATA_PER_TXD);
+			buf_dump_line("tx0  ", __LINE__, tx_desc,
+				      sizeof(*tx_desc));
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = RNP_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+			dma += RNP_MAX_DATA_PER_TXD;
+			size -= RNP_MAX_DATA_PER_TXD;
+
+			tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+		}
+
+		buf_dump_line("tx1  ", __LINE__, tx_desc, sizeof(*tx_desc));
+		if (likely(!data_len))
+			break;
+		tx_desc->vlan_cmd_bsz = build_ctob(tx_flags, mac_ip_len, size);
+		buf_dump_line("tx2  ", __LINE__, tx_desc, sizeof(*tx_desc));
+
+		/* ==== frag== */
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = RNP_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+
+		size = skb_frag_size(frag);
+		data_len -= size;
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+	}
+
+	/* write last descriptor with RS and EOP bits */
+	tx_desc->vlan_cmd_bsz = build_ctob(
+		tx_flags | RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS, mac_ip_len, size);
+	buf_dump_line("tx3  ", __LINE__, tx_desc, sizeof(*tx_desc));
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	tx_ring->tx_stats.send_bytes += first->bytecount;
+
+	/*
+	 * Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	/* timestamp the skb as late as possible, just prior to notifying
+	 * the MAC that it should transmit this packet
+	 */
+	wmb();
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	buf_dump_line("tx4  ", __LINE__, tx_desc, sizeof(*tx_desc));
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+	tx_ring->next_to_use = i;
+
+	skb_tx_timestamp(skb);
+#ifdef SIMULATE_TX
+	napi_consume_skb(first->skb, 64);
+	dma_unmap_single(tx_ring->dev, dma_unmap_addr(first, dma),
+			 dma_unmap_len(first, len), DMA_TO_DEVICE);
+
+	tx_ring->stats.bytes += skb->len;
+	tx_ring->stats.packets += 1;
+	first->skb = NULL;
+#else
+
+#ifdef NO_BQL_TEST
+#else
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+#endif
+	/* notify HW of packet */
+	rnpgbe_wr_reg(tx_ring->tail, i);
+#endif
+	return 0;
+dma_error:
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		rnpgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i += tx_ring->count;
+		i--;
+	}
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+	tx_ring->next_to_use = i;
+
+	return -1;
+}
+
+static void rnpgbe_force_src_mac(struct sk_buff *skb, struct net_device *netdev)
+{
+	u8 *data = skb->data;
+	bool ret = false;
+	struct netdev_hw_addr *ha;
+
+	if (is_multicast_ether_addr(data)) {
+		if (memcmp(data + netdev->addr_len, netdev->dev_addr,
+			   netdev->addr_len) == 0) {
+			ret = true;
+			goto DONE;
+		}
+		netdev_for_each_uc_addr (ha, netdev) {
+			if (memcmp(data + netdev->addr_len, ha->addr,
+				   netdev->addr_len) == 0) {
+				ret = true;
+				/* if it is the src mac, nothing to do */
+				goto DONE;
+			}
+		}
+		/* if not src mac, force to src mac */
+		if (!ret)
+			memcpy(data + netdev->addr_len, netdev->dev_addr,
+			       netdev->addr_len);
+	}
+DONE:
+	return;
+}
+
+static netdev_tx_t rnpgbe_xmit_frame_ring(struct sk_buff *skb,
+					  struct rnpgbe_adapter *adapter,
+					  struct rnpgbe_ring *tx_ring,
+					  bool tx_padding)
+{
+	struct rnpgbe_tx_buffer *first;
+	int tso;
+	u32 tx_flags = 0;
+	unsigned short f;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+	__be16 protocol = skb->protocol;
+	u8 hdr_len = 0;
+	int ignore_vlan = 0;
+	/* default len must not be 0 (hw requirement) */
+	u32 mac_ip_len = 20;
+
+	tx_dbg("=== begin ====\n");
+	tx_dbg("rnp skb:%p, skb->len:%d  headlen:%d, data_len:%d\n", skb,
+	       skb->len, skb_headlen(skb), skb->data_len);
+	tx_dbg("next_to_clean %d, next_to_use %d\n", tx_ring->next_to_clean,
+	       tx_ring->next_to_use);
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/RNP_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/RNP_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+	if (adapter->tx_path_in_lpi_mode)
+		rnpgbe_disable_eee_mode(adapter);
+
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+		skb_frag_t *frag_temp = &skb_shinfo(skb)->frags[f];
+
+		count += TXD_USE_COUNT(skb_frag_size(frag_temp));
+		tx_dbg(" rnp #%d frag: size:%d\n", f, skb_frag_size(frag_temp));
+	}
+
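+	/* +3 covers the context descriptor plus the two-descriptor gap
+	 * described above
+	 */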
+	if (rnpgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) &&
+	    (!(tx_ring->ring_flags & RNP_RING_VEB_MULTI_FIX)))
+		rnpgbe_force_src_mac(skb, tx_ring->netdev);
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	/* account for the 60-byte minimum frame size */
+	first->bytecount = (skb->len > 60) ? skb->len : 60;
+	first->gso_segs = 1;
+	first->priv_tags = 0;
+	first->mss_len_vf_num = 0;
+	first->inner_vlan_tunnel_len = 0;
+	first->ctx_flag =
+		(adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true : false;
+
+	/* if we have a HW VLAN tag being added default to the HW one */
+	/* RNP_TXD_VLAN_VALID is used for veb */
+	/* setup padding flag */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) {
+		first->ctx_flag = true;
+		first->gso_need_padding = tx_padding;
+	}
+
+	/* RNP_FLAG2_VLAN_STAGS_ENABLED and
+	 * tx-stags-offload not support together
+	 */
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) {
+		/* always add a stags for any packets out */
+		if (tx_ring->ring_flags & RNP_RING_OUTER_VLAN_FIX) {
+			/* n500 set outer_vlan to ctx */
+			first->inner_vlan_tunnel_len |= (adapter->stags_vid);
+			first->priv_tags = 1;
+			first->ctx_flag = true;
+
+			if (skb_vlan_tag_present(skb)) {
+				tx_flags |= RNP_TXD_VLAN_VALID |
+					    RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+				tx_flags |= skb_vlan_tag_get(skb);
+				/* else if it is a SW VLAN check the next
+				 * protocol and store the tag
+				 */
+			} else if (protocol == htons(ETH_P_8021Q)) {
+				struct vlan_hdr *vhdr, _vhdr;
+
+				vhdr = skb_header_pointer(
+					skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+				if (!vhdr)
+					goto out_drop;
+
+				protocol = vhdr->h_vlan_encapsulated_proto;
+				tx_flags |= ntohs(vhdr->h_vlan_TCI);
+				tx_flags |= RNP_TXD_VLAN_VALID;
+			}
+
+		} else {
+			/* not supported in sriov mode */
+			tx_flags |= adapter->stags_vid;
+			tx_flags |= RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+			if (skb_vlan_tag_present(skb)) {
+				tx_flags |= RNP_TXD_VLAN_VALID;
+				first->inner_vlan_tunnel_len |=
+					(skb_vlan_tag_get(skb) << 8);
+				first->ctx_flag = true;
+				/* else if it is a SW VLAN check the next
+				 * protocol and store the tag
+				 */
+			} else if (protocol == htons(ETH_P_8021Q)) {
+				struct vlan_hdr *vhdr, _vhdr;
+
+				vhdr = skb_header_pointer(
+					skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+				if (!vhdr)
+					goto out_drop;
+
+				protocol = vhdr->h_vlan_encapsulated_proto;
+				tx_flags |= RNP_TXD_VLAN_VALID;
+			}
+		}
+	} else {
+		/* normal mode */
+		if (skb_vlan_tag_present(skb)) {
+			if (skb->vlan_proto != htons(ETH_P_8021Q)) {
+				/* veb only use ctags */
+				tx_flags |= skb_vlan_tag_get(skb);
+				tx_flags |= RNP_TXD_SVLAN_TYPE |
+					    RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+			} else {
+				tx_flags |= skb_vlan_tag_get(skb);
+				tx_flags |= RNP_TXD_VLAN_VALID |
+					    RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+			}
+			tx_ring->tx_stats.vlan_add++;
+			/* else if it is a SW VLAN check the next
+			 * protocol and store the tag
+			 */
+			/* veb only use ctags */
+		} else if (protocol == htons(ETH_P_8021Q)) {
+			struct vlan_hdr *vhdr, _vhdr;
+
+			vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr),
+						  &_vhdr);
+			if (!vhdr)
+				goto out_drop;
+
+			protocol = vhdr->h_vlan_encapsulated_proto;
+			tx_flags |= ntohs(vhdr->h_vlan_TCI);
+			tx_flags |= RNP_TXD_VLAN_VALID;
+			ignore_vlan = 1;
+		}
+	}
+	protocol = vlan_get_protocol(skb);
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+	    adapter->flags2 & RNP_FLAG2_PTP_ENABLED && adapter->ptp_tx_en) {
+		if (!test_and_set_bit_lock(__RNP_PTP_TX_IN_PROGRESS,
+					   &adapter->state)) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			tx_flags |= RNP_TXD_FLAG_PTP;
+			adapter->ptp_tx_skb = skb_get(skb);
+			adapter->tx_hwtstamp_start = jiffies;
+			schedule_work(&adapter->tx_hwtstamp_work);
+		} else {
+			netdev_dbg(tx_ring->netdev, "ptp_tx_skb miss\n");
+		}
+	}
+	/* record initial flags and protocol */
+	tso = rnpgbe_tso(tx_ring, first, &mac_ip_len, &hdr_len, &tx_flags);
+	if (tso < 0)
+		goto out_drop;
+	else if (!tso)
+		rnpgbe_tx_csum(tx_ring, first, &mac_ip_len, &tx_flags);
+	/* check sriov mode */
+	/* in this mode pf send msg should with vf_num */
+	if (unlikely(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) {
+		first->ctx_flag = true;
+		first->mss_len_vf_num |= (adapter->vf_num_for_pf << 16);
+	}
+
+	/* add control desc */
+	rnpgbe_maybe_tx_ctxtdesc(tx_ring, first, ignore_vlan);
+
+	if (rnpgbe_tx_map(tx_ring, first, mac_ip_len, tx_flags))
+		goto cleanup_tx_tstamp;
+	/* stop the queue if we are running low on descriptors */
+	rnpgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	tx_dbg("=== end ====\n\n\n\n");
+	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+cleanup_tx_tstamp:
+	if (unlikely(tx_flags & RNP_TXD_FLAG_PTP)) {
+		dev_kfree_skb_any(adapter->ptp_tx_skb);
+		adapter->ptp_tx_skb = NULL;
+		cancel_work_sync(&adapter->tx_hwtstamp_work);
+		clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static bool check_sctp_no_padding(struct sk_buff *skb)
+{
+	bool no_padding = false;
+	u8 l4_proto = 0;
+	u8 *exthdr;
+	__be16 frag_off;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	if (ip.v4->version == 4) {
+		l4_proto = ip.v4->protocol;
+	} else {
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
+					 &frag_off);
+	}
+	switch (l4_proto) {
+	case IPPROTO_SCTP:
+		no_padding = true;
+		break;
+	default:
+
+		break;
+	}
+
+	return no_padding;
+}
+
+static netdev_tx_t rnpgbe_xmit_frame(struct sk_buff *skb,
+				     struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_ring *tx_ring;
+	bool tx_padding = false;
+
+	if (!netif_carrier_ok(netdev)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/*
+	 * The hardware requires a minimum frame length, so pad short
+	 * skbs in order to meet this minimum size requirement.
+	 */
+	if ((adapter->priv_flags & RNP_PRIV_FLAG_TX_PADDING) &&
+	    (!(adapter->priv_flags & RNP_PRIV_FLAG_SOFT_TX_PADDING))) {
+		if (skb->len < 60) {
+			if (!check_sctp_no_padding(skb)) {
+				if (skb_put_padto(skb, 60))
+					return NETDEV_TX_OK;
+
+			} else {
+				tx_padding = true;
+			}
+		}
+	} else {
+		if (skb->len < 33) {
+			if (skb_padto(skb, 33))
+				return NETDEV_TX_OK;
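+			/* skb_padto() zero-pads the buffer but does not
+			 * update skb->len, so bump it to the padded size
+			 */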
+			skb->len = 33;
+		}
+	}
+	tx_ring = adapter->tx_ring[skb->queue_mapping];
+
+	return rnpgbe_xmit_frame_ring(skb, adapter, tx_ring, tx_padding);
+}
+
+/**
+ * rnpgbe_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int rnpgbe_set_mac(struct net_device *netdev, void *p)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+	u8 target_addr[ETH_ALEN];
+	bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED);
+
+	dbg("[%s] call set mac\n", netdev->name);
+
+	memcpy(target_addr, addr->sa_data, netdev->addr_len);
+
+	if (!is_valid_ether_addr(target_addr))
+		return -EADDRNOTAVAIL;
+
+	eth_hw_addr_set(netdev, target_addr);
+	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+	hw->ops.set_mac(hw, hw->mac.addr, sriov_flag);
+	/* reset veb table */
+	rnpgbe_configure_virtualization(adapter);
+	return 0;
+}
+
+static int rnpgbe_mdio_read(struct net_device *netdev, int prtad, int devad,
+			    u32 addr, u32 *phy_value)
+{
+	int rc = -EIO;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u16 value;
+
+	rc = hw->ops.phy_read_reg(hw, addr, 0, &value);
+	*phy_value = value;
+
+	return rc;
+}
+
+static int rnpgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
+			     u16 addr, u16 value)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	return hw->ops.phy_write_reg(hw, addr, 0, value);
+}
+
+static int rnpgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			    int cmd)
+{
+	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr->ifr_data;
+	int prtad, devad, ret;
+	u32 phy_value;
+
+	prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5;
+	devad = (mii->phy_id & MDIO_PHY_ID_DEVAD);
+
+	if (cmd == SIOCGMIIREG) {
+		ret = rnpgbe_mdio_read(netdev, prtad, devad, mii->reg_num,
+				       &phy_value);
+		if (ret < 0)
+			return ret;
+		mii->val_out = phy_value;
+		return 0;
+	} else {
+		return rnpgbe_mdio_write(netdev, prtad, devad, mii->reg_num,
+					 mii->val_in);
+	}
+}
+
+static int rnpgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	/* ptp 1588 used this */
+	switch (cmd) {
+	case SIOCGHWTSTAMP:
+		if (module_enable_ptp)
+			return rnpgbe_ptp_get_ts_config(adapter, req);
+		break;
+	case SIOCSHWTSTAMP:
+		if (module_enable_ptp)
+			return rnpgbe_ptp_set_ts_config(adapter, req);
+		break;
+	case SIOCGMIIPHY:
+		return 0;
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return rnpgbe_mii_ioctl(netdev, req, cmd);
+	}
+	return -EINVAL;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void rnpgbe_netpoll(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	/* if interface is down do nothing */
+	if (test_bit(__RNP_DOWN, &adapter->state))
+		return;
+
+	adapter->flags |= RNP_FLAG_IN_NETPOLL;
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		rnpgbe_msix_clean_rings(0, adapter->q_vector[i]);
+	adapter->flags &= ~RNP_FLAG_IN_NETPOLL;
+}
+
+#endif
+
+static void rnpgbe_get_stats64(struct net_device *netdev,
+			       struct rtnl_link_stats64 *stats)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct rnpgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry(&ring->syncp, start));
+			stats->rx_packets += packets;
+			stats->rx_bytes += bytes;
+		}
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes += bytes;
+		}
+	}
+	rcu_read_unlock();
+	/* following stats updated by rnpgbe_watchdog_task() */
+	stats->multicast = netdev->stats.multicast;
+	stats->rx_errors = netdev->stats.rx_errors;
+	stats->rx_length_errors = netdev->stats.rx_length_errors;
+	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+}
+
+/**
+ * rnpgbe_setup_tc - configure net_device for multiple traffic classes
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int rnpgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = 0;
+
+	if ((hw->hw_type != rnpgbe_hw_n10) && (tc))
+		return -EINVAL;
+
+	/* if we are already in force mode, no need to force again; otherwise force it */
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+		hw->ops.set_mac_rx(hw, false);
+		if (hw->ops.driver_status)
+			hw->ops.driver_status(hw, true,
+					      rnpgbe_driver_force_control_phy);
+	}
+
+	/* Hardware supports up to 8 traffic classes */
+	if ((tc > RNP_MAX_TCS_NUM) || (tc == 1))
+		return -EINVAL;
+	/* we cannot support tc together with sriov mode */
+	if ((tc) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return -EINVAL;
+
+	/* Hardware has to reinitialize queues and interrupts to
+	 * match packet buffer alignment. Unfortunately, the
+	 * hardware is not flexible enough to do this dynamically.
+	 */
+	while (test_and_set_bit(__RNP_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+
+	if (netif_running(dev))
+		rnpgbe_close(dev);
+
+	rnpgbe_fdir_filter_exit(adapter);
+	adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC);
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+	adapter->num_tc = tc;
+
+	if (tc) {
+		netdev_set_num_tc(dev, tc);
+		adapter->flags |= RNP_FLAG_DCB_ENABLED;
+
+	} else {
+		netdev_reset_tc(dev);
+
+		adapter->flags &= ~RNP_FLAG_DCB_ENABLED;
+	}
+
+	rnpgbe_init_interrupt_scheme(adapter);
+
+	register_mbx_irq(adapter);
+	/* rss table must reset */
+	adapter->rss_tbl_setup_flag = 0;
+
+	if (netif_running(dev))
+		ret = rnpgbe_open(dev);
+	if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) {
+		if (hw->ops.driver_status)
+			hw->ops.driver_status(hw, false,
+					      rnpgbe_driver_force_control_phy);
+	}
+
+	clear_bit(__RNP_RESETTING, &adapter->state);
+	return ret;
+}
+
+#ifdef CONFIG_PCI_IOV
+void rnpgbe_sriov_reinit(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	rtnl_lock();
+	rnpgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+	rtnl_unlock();
+	usleep_range(10000, 20000);
+}
+#endif
+
+static void rnpgbe_do_reset(struct net_device *netdev)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		rnpgbe_reinit_locked(adapter);
+	else
+		rnpgbe_reset(adapter);
+}
+
+static netdev_features_t rnpgbe_fix_features(struct net_device *netdev,
+					     netdev_features_t features)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(features & NETIF_F_RXCSUM))
+		features &= ~NETIF_F_LRO;
+
+	/* disable rx csum when rx fcs is on */
+	if (!(adapter->flags2 & RNP_FLAG2_CHKSM_FIX)) {
+		if (features & NETIF_F_RXFCS)
+			features &= (~NETIF_F_RXCSUM);
+	}
+	/* Turn off LRO if not RSC capable */
+	if (!(adapter->flags2 & RNP_FLAG2_RSC_CAPABLE))
+		features &= ~NETIF_F_LRO;
+
+	if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+		if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER)
+			features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
+	}
+
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER) {
+		if (!(features & NETIF_F_HW_VLAN_STAG_FILTER))
+			features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+	}
+
+	if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) {
+		if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD)
+			features &= ~NETIF_F_HW_VLAN_STAG_RX;
+	}
+
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (!(features & NETIF_F_HW_VLAN_STAG_RX))
+			features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+	}
+
+	if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) {
+		if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD)
+			features &= ~NETIF_F_HW_VLAN_STAG_TX;
+	}
+
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (!(features & NETIF_F_HW_VLAN_STAG_TX)) {
+			features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+		}
+	}
+
+	return features;
+}
+
+static int rnpgbe_set_features(struct net_device *netdev,
+			       netdev_features_t features)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	netdev_features_t changed = netdev->features ^ features;
+	bool need_reset = false;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	netdev->features = features;
+
+	/* if ntuple was just disabled, remove all flow director filters */
+	if (changed & NETIF_F_NTUPLE) {
+		if (!(features & NETIF_F_NTUPLE))
+			rnpgbe_fdir_filter_exit(adapter);
+	}
+
+	switch (features & NETIF_F_NTUPLE) {
+	case NETIF_F_NTUPLE:
+		/* turn off ATR, enable perfect filters and reset */
+		if (!(adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE))
+			need_reset = true;
+
+		adapter->flags &= ~RNP_FLAG_FDIR_HASH_CAPABLE;
+		adapter->flags |= RNP_FLAG_FDIR_PERFECT_CAPABLE;
+		break;
+	default:
+		/* turn off perfect filters, enable ATR and reset */
+		if (adapter->flags & RNP_FLAG_FDIR_PERFECT_CAPABLE)
+			need_reset = true;
+
+		adapter->flags &= ~RNP_FLAG_FDIR_PERFECT_CAPABLE;
+
+		/* We cannot enable ATR if SR-IOV is enabled */
+		if (adapter->flags & RNP_FLAG_SRIOV_ENABLED)
+			break;
+
+		/* We cannot enable ATR if we have 2 or more traffic classes */
+		if (netdev_get_num_tc(netdev) > 1)
+			break;
+
+		/* A sample rate of 0 indicates ATR disabled */
+		if (!adapter->atr_sample_rate)
+			break;
+
+		adapter->flags |= RNP_FLAG_FDIR_HASH_CAPABLE;
+		break;
+	}
+
+	/* vlan filter changed */
+	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+		if (features & (NETIF_F_HW_VLAN_CTAG_FILTER)) {
+			/* do not enable the filter while in promiscuous mode */
+			if (!(netdev->flags & IFF_PROMISC))
+				hw->ops.set_vlan_filter_en(hw, true);
+		} else {
+			hw->ops.set_vlan_filter_en(hw, false);
+		}
+		rnpgbe_msg_post_status(adapter, PF_VLAN_FILTER_STATUS);
+	}
+
+	/* rss hash changed */
+	if (changed & (NETIF_F_RXHASH)) {
+		bool iov_en = (adapter->flags & RNP_FLAG_SRIOV_ENABLED) ? true :
+									  false;
+
+		if (netdev->features & (NETIF_F_RXHASH))
+			hw->ops.set_rx_hash(hw, true, iov_en);
+		else
+			hw->ops.set_rx_hash(hw, false, iov_en);
+	}
+
+	/* rx fcs changed */
+	/* in this mode rx l4/sctp checksum will get error */
+	if (changed & NETIF_F_RXFCS) {
+		if (features & NETIF_F_RXFCS) {
+			adapter->priv_flags |= RNP_PRIV_FLAG_RX_FCS;
+			hw->ops.set_fcs_mode(hw, true);
+		} else {
+			adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_FCS);
+			hw->ops.set_fcs_mode(hw, false);
+		}
+		rnpgbe_msg_post_status(adapter, PF_FCS_STATUS);
+	}
+
+	if (changed & NETIF_F_RXALL)
+		need_reset = true;
+
+	if (features & NETIF_F_RXALL)
+		adapter->priv_flags |= RNP_PRIV_FLAG_RX_ALL;
+	else
+		adapter->priv_flags &= (~RNP_PRIV_FLAG_RX_ALL);
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		rnpgbe_vlan_strip_enable(adapter);
+	else
+		rnpgbe_vlan_strip_disable(adapter);
+
+	if (need_reset)
+		rnpgbe_do_reset(netdev);
+
+	return 0;
+}
+
+static int
+rnpgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+			  __always_unused u16 flags,
+			  struct netlink_ext_ack __always_unused *ext)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct nlattr *attr, *br_spec;
+	int rem;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return -EOPNOTSUPP;
+
+	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+	if (!br_spec)
+		return -EINVAL;
+
+	nla_for_each_nested(attr, br_spec, rem) {
+		__u16 mode;
+
+		if (nla_type(attr) != IFLA_BRIDGE_MODE)
+			continue;
+
+		mode = nla_get_u16(attr);
+		if (mode == BRIDGE_MODE_VEPA) {
+			adapter->flags2 &= ~RNP_FLAG2_BRIDGE_MODE_VEB;
+			wr32(hw, RNP_DMA_CONFIG,
+			     rd32(hw, RNP_DMA_CONFIG) | DMA_VEB_BYPASS);
+		} else if (mode == BRIDGE_MODE_VEB) {
+			adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB;
+			wr32(hw, RNP_DMA_CONFIG,
+			     rd32(hw, RNP_DMA_CONFIG) & (~DMA_VEB_BYPASS));
+		} else {
+			return -EINVAL;
+		}
+
+		e_info(drv, "enabling bridge mode: %s\n",
+		       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+	}
+
+	return 0;
+}
+
+static int rnpgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				     struct net_device *dev,
+				     u32 __maybe_unused filter_mask,
+				     int nlflags)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(dev);
+	u16 mode;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return 0;
+
+	if (adapter->flags2 & RNP_FLAG2_BRIDGE_MODE_VEB)
+		mode = BRIDGE_MODE_VEB;
+	else
+		mode = BRIDGE_MODE_VEPA;
+
+	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags,
+				       filter_mask, NULL);
+}
+
+#define RNP_MAX_TUNNEL_HDR_LEN 80
+#define RNP_MAX_MAC_HDR_LEN 127
+#define RNP_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t rnpgbe_features_check(struct sk_buff *skb,
+					       struct net_device *dev,
+					       netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > RNP_MAX_MAC_HDR_LEN))
+		return features &
+		       ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC |
+			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len > RNP_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO | NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+
+const struct net_device_ops rnpgbe_netdev_ops = {
+	.ndo_open = rnpgbe_open,
+	.ndo_stop = rnpgbe_close,
+	.ndo_start_xmit = rnpgbe_xmit_frame,
+	.ndo_set_rx_mode = rnpgbe_set_rx_mode,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_eth_ioctl = rnpgbe_ioctl,
+	.ndo_change_mtu = rnpgbe_change_mtu,
+	.ndo_get_stats64 = rnpgbe_get_stats64,
+	.ndo_tx_timeout = rnpgbe_tx_timeout,
+	.ndo_set_tx_maxrate = rnpgbe_tx_maxrate,
+	.ndo_set_mac_address = rnpgbe_set_mac,
+	.ndo_vlan_rx_add_vid = rnpgbe_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = rnpgbe_vlan_rx_kill_vid,
+	.ndo_set_vf_mac = rnpgbe_ndo_set_vf_mac,
+	.ndo_set_vf_vlan = rnpgbe_ndo_set_vf_vlan,
+	.ndo_set_vf_rate = rnpgbe_ndo_set_vf_bw,
+	.ndo_set_vf_spoofchk = rnpgbe_ndo_set_vf_spoofchk,
+	.ndo_set_vf_link_state = rnpgbe_ndo_set_vf_link_state,
+	.ndo_set_vf_trust = rnpgbe_ndo_set_vf_trust,
+	.ndo_get_vf_config = rnpgbe_ndo_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = rnpgbe_netpoll,
+#endif
+	.ndo_bridge_setlink = rnpgbe_ndo_bridge_setlink,
+	.ndo_bridge_getlink = rnpgbe_ndo_bridge_getlink,
+	.ndo_features_check = rnpgbe_features_check,
+	.ndo_set_features = rnpgbe_set_features,
+	.ndo_fix_features = rnpgbe_fix_features,
+};
+
+void rnpgbe_assign_netdev_ops(struct net_device *dev)
+{
+	/* different hw types may assign different netdev ops here */
+	dev->netdev_ops = &rnpgbe_netdev_ops;
+	dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * rnpgbe_wol_supported - Check whether device supports WoL
+ * @adapter: board private structure containing hw specific details
+ * @device_id: the device ID
+ *
+ * This function is used by probe and ethtool to determine
+ * which devices have WoL support
+ *
+ **/
+int rnpgbe_wol_supported(struct rnpgbe_adapter *adapter, u16 device_id)
+{
+	int is_wol_supported = 0;
+
+	switch (device_id) {
+	case PCI_DEVICE_ID_N210:
+	case PCI_DEVICE_ID_N500_QUAD_PORT:
+	case PCI_DEVICE_ID_N500_DUAL_PORT:
+		is_wol_supported = 1;
+		break;
+	default:
+		is_wol_supported = 0;
+		break;
+	}
+
+	return is_wol_supported;
+}
+
+static inline unsigned long rnpgbe_tso_features(struct rnpgbe_hw *hw)
+{
+	unsigned long features = 0;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_TSO)
+		features |= NETIF_F_TSO;
+	if (hw->feature_flags & RNP_NET_FEATURE_TSO)
+		features |= NETIF_F_TSO6;
+	features |= NETIF_F_GSO_PARTIAL;
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL)
+		features |= RNP_GSO_PARTIAL_FEATURES;
+
+	return features;
+}
+
+static void remove_mbx_irq(struct rnpgbe_adapter *adapter)
+{
+	/* mbx */
+	if (adapter->num_other_vectors) {
+		/* only MSI-X mode uses an independent mailbox interrupt */
+		if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+			adapter->hw.mbx.ops.configure(
+				&adapter->hw, adapter->msix_entries[0].entry,
+				false);
+			if (adapter->hw.mbx.other_irq_enabled) {
+				free_irq(adapter->msix_entries[0].vector, adapter);
+				adapter->hw.mbx.other_irq_enabled = false;
+			}
+		}
+	}
+}
+
+static int register_mbx_irq(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	/* for mbx:vector0 */
+	if (adapter->num_other_vectors) {
+		/* only do this in msix mode */
+		if (adapter->flags & RNP_FLAG_MSIX_ENABLED) {
+			err = request_irq(adapter->msix_entries[0].vector,
+					  rnpgbe_msix_other, 0, netdev->name,
+					  adapter);
+			if (err) {
+				e_err(probe,
+				      "request_irq for msix_other failed: %d\n",
+				      err);
+				goto err_mbx;
+			}
+			hw->mbx.ops.configure(
+				hw, adapter->msix_entries[0].entry, true);
+			adapter->hw.mbx.other_irq_enabled = true;
+		}
+	}
+
+err_mbx:
+	return err;
+}
+
+static int rnpgbe_rm_adpater(struct rnpgbe_adapter *adapter)
+{
+	struct net_device *netdev;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	netdev = adapter->netdev;
+	pr_info("= remove adapter:%s =\n", netdev->name);
+
+	rnpgbe_dbg_adapter_exit(adapter);
+
+	netif_carrier_off(netdev);
+
+	set_bit(__RNP_DOWN, &adapter->state);
+	set_bit(__RNP_REMOVE, &adapter->state);
+	if (module_enable_ptp) {
+		while (test_bit(__RNP_PTP_TX_IN_PROGRESS, &adapter->state))
+			usleep_range(10000, 20000);
+		cancel_work_sync(&adapter->tx_hwtstamp_work);
+	}
+	if (adapter->eee_active) {
+		adapter->eee_active = 0;
+		rnpgbe_eee_init(adapter);
+	}
+	cancel_work_sync(&adapter->service_task);
+	del_timer_sync(&adapter->service_timer);
+	rnpgbe_sysfs_exit(adapter);
+
+	rnpgbe_fdir_filter_exit(adapter);
+	adapter->priv_flags &= (~RNP_PRIV_FLAG_TCP_SYNC);
+
+	if (netdev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(netdev);
+
+	adapter->netdev = NULL;
+
+	if (hw->ops.driver_status)
+		hw->ops.driver_status(hw, false, rnpgbe_driver_insmod);
+
+	remove_mbx_irq(adapter);
+
+	rnpgbe_clear_interrupt_scheme(adapter);
+
+	if (adapter->io_addr)
+		iounmap(adapter->io_addr);
+
+	if (adapter->io_addr_bar0)
+		iounmap(adapter->io_addr_bar0);
+
+	free_netdev(netdev);
+
+	pr_info("remove complete\n");
+
+	return 0;
+}
+
+static int rnpgbe_init_firmware(struct rnpgbe_hw *hw, struct file *file,
+				int file_size)
+{
+	struct device *dev = &(hw->pdev->dev);
+	loff_t old_pos = 0;
+	loff_t pos = 0;
+	loff_t end_pos = file_size;
+	u32 rd_len = 0x1000;
+	int get_len = 0;
+	u32 iter = 0;
+	int err = 0;
+	u32 fw_off = 0;
+	u32 old_data = 0;
+	u32 new_data = 0;
+	char *buf = kzalloc(0x1000, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	dev_info(dev, "initializing firmware, this will take some time\n");
+	/* copy the firmware image from the file into the BAR */
+	while (pos < end_pos) {
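+		/* offsets 0x1f000-0x1ffff are skipped; this window is assumed
+		 * to hold device data that must not be overwritten
+		 */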
+		if ((pos >= 0x1f000) && (pos < 0x20000)) {
+			pos += rd_len;
+			continue;
+		}
+
+		old_pos = pos;
+		get_len = kernel_read(file, buf, rd_len, &pos);
+		if (((get_len < rd_len) && ((old_pos + get_len) != end_pos)) ||
+		    (get_len < 0)) {
+			dev_err(dev, "read err, pos 0x%x, get len %d",
+				(u32)old_pos, get_len);
+			err = -EIO;
+			return err;
+		}
+
+		for (iter = 0; iter < get_len; iter += 4) {
+			old_data = *((u32 *)(buf + iter));
+			fw_off = (u32)old_pos + iter + 0x1000;
+			iowrite32(old_data, (hw->hw_addr + fw_off));
+		}
+
+		if (pos == old_pos)
+			pos += get_len;
+	}
+
+	dev_info(dev, "Checking for firmware. Wait a moment, please.");
+	/* check */
+	pos = 0x0;
+	while (pos < end_pos) {
+		if ((pos >= 0x1f000) && (pos < 0x20000)) {
+			pos += rd_len;
+			continue;
+		}
+
+		old_pos = pos;
+		get_len = kernel_read(file, buf, rd_len, &pos);
+		if (((get_len < rd_len) && ((old_pos + get_len) != end_pos)) ||
+		    (get_len < 0)) {
+			dev_err(dev, "read err, pos 0x%x, get len %d",
+				(u32)old_pos, get_len);
+			kfree(buf);
+			err = -EIO;
+			return err;
+		}
+
+		for (iter = 0; iter < get_len; iter += 4) {
+			old_data = *((u32 *)(buf + iter));
+			fw_off = (u32)old_pos + iter + 0x1000;
+			new_data = ioread32(hw->hw_addr + fw_off);
+			if (old_data != new_data) {
+				dev_err(dev,
+					"Err at 0x%08x write:0x%08x read:0x%08x\n",
+					fw_off, old_data, new_data);
+				err = -EIO;
+			}
+		}
+
+		if (pos == old_pos)
+			pos += get_len;
+	}
+
+	kfree(buf);
+	return err;
+}
+
+static int rnpgbe_add_adpater(struct pci_dev *pdev, struct rnpgbe_info *ii,
+			      struct rnpgbe_adapter **padapter)
+{
+	int i, err = 0;
+	struct rnpgbe_adapter *adapter = NULL;
+	struct net_device *netdev;
+	struct rnpgbe_hw *hw;
+	u8 __iomem *hw_addr = NULL;
+	u8 __iomem *hw_addr_bar0 = NULL;
+
+	u32 dma_version = 0;
+	u32 nic_version = 0;
+	u32 queues = ii->total_queue_pair_cnts;
+	static int bd_number;
+
+	pr_info("====  add adapter queues:%d ====", queues);
+	netdev = alloc_etherdev_mq(sizeof(struct rnpgbe_adapter), queues);
+	if (!netdev)
+		return -ENOMEM;
+
+	if (!fix_eth_name)
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	adapter = netdev_priv(netdev);
+
+	memset((char *)adapter, 0x00, sizeof(struct rnpgbe_adapter));
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->max_ring_pair_counts = queues;
+	if (padapter)
+		*padapter = adapter;
+
+	adapter->bd_number = bd_number++;
+	adapter->port = 0;
+	snprintf(adapter->name, sizeof(netdev->name), "%s%d",
+		 rnpgbe_driver_name, adapter->bd_number);
+	pci_set_drvdata(pdev, adapter);
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+	/* first setup hw type */
+	hw->pdev = pdev;
+	hw->rss_type = ii->rss_type;
+	hw->hw_type = ii->hw_type;
+	switch (hw->hw_type) {
+	case rnpgbe_hw_n500:
+		/* n500 use bar2 */
+#define RNP_NIC_BAR_N500 2
+		hw_addr = ioremap(pci_resource_start(pdev, RNP_NIC_BAR_N500),
+				  pci_resource_len(pdev, RNP_NIC_BAR_N500));
+		if (!hw_addr) {
+			dev_err(&pdev->dev, "pcim_iomap bar%d failed!\n",
+				RNP_NIC_BAR_N500);
+			return -EIO;
+		}
+		pr_info("[bar%d]: %p %llx len=%d kB\n", RNP_NIC_BAR_N500,
+			hw_addr,
+			(unsigned long long)pci_resource_start(
+				pdev, RNP_NIC_BAR_N500),
+			(int)pci_resource_len(pdev, RNP_NIC_BAR_N500) / 1024);
+		/* get dma version */
+		dma_version = rnpgbe_rd_reg(hw_addr);
+
+		hw->hw_addr = hw_addr;
+		/* setup msix base */
+		hw->ring_msix_base = hw->hw_addr + 0x28700;
+		hw->pfvfnum_system = PF_NUM_N500(rnpgbe_get_fuc(pdev));
+		nic_version = rd32(hw, RNP500_TOP_NIC_VERSION);
+		adapter->irq_mode = irq_mode_msix;
+		adapter->flags |= RNP_FLAG_MSI_CAPABLE | RNP_FLAG_MSIX_CAPABLE |
+				  RNP_FLAG_LEGACY_CAPABLE;
+		/* when FW_UART_SHOW_TSTAMPS is set, map BAR0 so time-of-day
+		 * can be shown on the firmware UART
+		 */
+#ifdef FW_UART_SHOW_TSTAMPS
+		hw_addr_bar0 = ioremap(pci_resource_start(pdev, 0),
+				       pci_resource_len(pdev, 0));
+#endif
+		break;
+
+	case rnpgbe_hw_n210:
+#define RNP_NIC_BAR_N210 2
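+		/* A 1 MiB BAR0 is assumed to indicate firmware-recovery mode:
+		 * flash the update image from /lib/firmware and ask the user
+		 * to reboot instead of probing the device.
+		 */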
+		if (pci_resource_len(pdev, 0) == 0x100000) {
+			char *path = "/lib/firmware/n210_driver_update.bin";
+			struct file *file = NULL;
+			int file_size = 0;
+
+			hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+					      pci_resource_len(pdev, 0));
+			if (!(hw->hw_addr)) {
+				dev_err(&pdev->dev, "pci_iomap bar%d failed",
+					RNP_NIC_BAR_N210);
+				return -EIO;
+			}
+
+			file = filp_open(path, O_RDONLY, 0);
+			if (IS_ERR(file)) {
+				dev_err(&pdev->dev,
+					"filp_open(%s) failed with err %ld",
+					path, PTR_ERR(file));
+				err = PTR_ERR(file);
+				return err;
+			}
+			file_size = file->f_inode->i_size;
+			dev_info(&pdev->dev, "%s size %u", path, file_size);
+
+			err = rsp_hal_sfc_flash_erase(hw, file_size);
+			if (err) {
+				dev_err(&pdev->dev, "erase flash failed!");
+				fput(file);
+				return err;
+			}
+
+			err = rnpgbe_init_firmware(hw, file, file_size);
+			if (err) {
+				dev_err(&pdev->dev, "init firmware failed!");
+				fput(file);
+				return err;
+			}
+			dev_info(&pdev->dev, "init firmware successfully.");
+			dev_info(&pdev->dev,
+				 "Please reboot. Then you can use the device.");
+			fput(file);
+			return 0;
+		}
+		hw_addr = ioremap(pci_resource_start(pdev, RNP_NIC_BAR_N210),
+				  pci_resource_len(pdev, RNP_NIC_BAR_N210));
+		if (!hw_addr) {
+			dev_err(&pdev->dev, "pcim_iomap bar%d failed!\n",
+				RNP_NIC_BAR_N210);
+			return -EIO;
+		}
+		pr_info("[bar%d]:%p %llx len=%d MB\n", RNP_NIC_BAR_N210,
+			hw_addr,
+			(unsigned long long)pci_resource_start(
+				pdev, RNP_NIC_BAR_N210),
+			(int)pci_resource_len(pdev, RNP_NIC_BAR_N210) / 1024 /
+				1024);
+		/* get dma version */
+		dma_version = rnpgbe_rd_reg(hw_addr);
+
+		hw->hw_addr = hw_addr;
+		/* setup msix base */
+		hw->ring_msix_base = hw->hw_addr + 0x29000;
+
+		hw->pfvfnum_system = PF_NUM_N500(rnpgbe_get_fuc(pdev));
+		nic_version = rd32(hw, RNP500_TOP_NIC_VERSION);
+		adapter->irq_mode = irq_mode_msix;
+		adapter->flags |= RNP_FLAG_MSI_CAPABLE | RNP_FLAG_MSIX_CAPABLE |
+				  RNP_FLAG_LEGACY_CAPABLE;
+		break;
+	default:
+		hw_addr = ioremap(pci_resource_start(pdev, 0),
+				  pci_resource_len(pdev, 0));
+		goto err_free_net;
+	}
+
+	/* assign to adapter */
+	adapter->io_addr = hw_addr;
+	adapter->io_addr_bar0 = hw_addr_bar0;
+
+	hw->dma_version = dma_version;
+	adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV
+#ifdef MSG_PROBE_ENABLE
+			| NETIF_MSG_PROBE
+#endif
+#ifdef MSG_IFUP_ENABLE
+			| NETIF_MSG_IFUP
+#endif
+#ifdef MSG_IFDOWN_ENABLE
+			| NETIF_MSG_IFDOWN
+#endif
+	);
+
+	/* reserve one extra vector for the mailbox interrupt */
+	adapter->num_other_vectors = 1;
+	/* get software info */
+	ii->get_invariants(hw);
+
+	spin_lock_init(&adapter->link_stat_lock);
+
+	if (adapter->num_other_vectors) {
+		/* Mailbox */
+		rnpgbe_init_mbx_params_pf(hw);
+		memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
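+		/* firmware capability negotiation over the mailbox is only
+		 * attempted on DMA engine versions 0x20210111 and newer
+		 */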
+		if (dma_version >= 0x20210111) {
+			rnpgbe_mbx_link_event_enable(hw, 0);
+			if ((hw->hw_type == rnpgbe_hw_n10) ||
+			    (hw->hw_type == rnpgbe_hw_n400))
+				rnpgbe_mbx_force_speed(hw, 0);
+			/* call driver status */
+			if (hw->ops.driver_status)
+				hw->ops.driver_status(hw, true, rnpgbe_driver_insmod);
+			if (rnpgbe_mbx_get_capability(hw, ii)) {
+				dev_err(&pdev->dev,
+					"rnpgbe_mbx_get_capability failed!\n");
+				err = -EIO;
+				goto err_free_net;
+			}
+
+			/* get lldp status if firmware version is 0.1.1.40 or newer */
+			if ((hw->fw_version >= 0x00010128) &&
+			    ((hw->fw_version & 0xff000000) == 0))
+				rnpgbe_mbx_lldp_get(hw);
+
+			if (hw->lldp_status.enable)
+				adapter->priv_flags |= RNP_PRIV_FLAG_LLDP;
+
+			hw->usecstocount = hw->axi_mhz;
+
+			{
+				struct rnpgbe_eee_cap eee_cap;
+
+				memset(&eee_cap, 0x00,
+				       sizeof(struct rnpgbe_eee_cap));
+
+				if (hw->feature_flags & RNP_HW_FEATURE_EEE) {
+					rnpgbe_mbx_get_eee_capability(hw,
+								      &eee_cap);
+					if (eee_cap.local_capability) {
+						hw->eee_capability =
+							eee_cap.local_capability;
+						adapter->eee_enabled = 1;
+						adapter->local_eee =
+							eee_cap.local_eee;
+						adapter->partner_eee =
+							eee_cap.partner_eee;
+						rnpgbe_mbx_phy_eee_set(
+							hw,
+							adapter->tx_lpi_timer,
+							hw->eee_capability);
+					}
+				}
+			}
+			{
+				/* pf0 cannot be detected from inside a VM, so
+				 * disable the soft mask by default
+				 */
+				if (!hw->pfvfnum) {
+					hw->feature_flags &=
+						(~RNP_HW_SOFT_MASK_OTHER_IRQ);
+
+#ifdef FW_UART_SHOW_TSTAMPS
+					rnpgbe_wr_reg(hw_addr_bar0 + 0xc04,
+						      0xffff0101);
+#endif
+				} else {
+					if (hw->pfvfnum == hw->pfvfnum_system)
+						hw->feature_flags |=
+							RNP_HW_SOFT_MASK_OTHER_IRQ;
+				}
+			}
+
+			adapter->portid_of_card = hw->port_id[0];
+			adapter->portid_of_card = hw->pfvfnum >> 5;
+			adapter->wol = hw->wol;
+		}
+	}
+
+	if (hw->force_en)
+		adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE;
+	hw->driver_version = driver_version;
+
+	hw->default_rx_queue = 0;
+	pr_info("%s %s: dma version:0x%x, nic version:0x%x, pfvfnum:0x%x\n",
+		adapter->name, pci_name(pdev), hw->dma_version, nic_version,
+		hw->pfvfnum);
+
+	/* Setup hw api */
+	hw->mac.type = ii->mac;
+	/* EEPROM */
+	if (ii->eeprom_ops)
+		memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+
+	hw->phy.sfp_type = rnpgbe_sfp_type_unknown;
+	hw->ops.setup_ethtool(netdev);
+	rnpgbe_assign_netdev_ops(netdev);
+
+	rnpgbe_check_options(adapter);
+	/* set up the private structure (initialized only once) */
+	err = rnpgbe_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = hw->ops.reset_hw(hw);
+	hw->phy.reset_if_overtemp = false;
+	if (err) {
+		e_dev_err("HW Init failed: %d\n", err);
+		goto err_sw_init;
+	}
+	hw->ops.setup_link(hw, DEFAULT_ADV, 1, 0, 0);
+	hw->advertised_link = DEFAULT_ADV;
+	/* should force phy down first */
+	hw->ops.set_mbx_link_event(hw, 0);
+	hw->ops.set_mbx_ifup(hw, 0);
+
+#if defined(CONFIG_PCI_IOV)
+	if (adapter->num_other_vectors) {
+		rnpgbe_enable_sriov(adapter);
+		pci_sriov_set_totalvfs(pdev, hw->max_vfs - 1);
+	}
+#endif
+
+	/* MTU range: 68 - 9710 */
+	netdev->min_mtu = hw->min_length;
+	netdev->max_mtu = hw->max_length - (ETH_HLEN + 2 * ETH_FCS_LEN);
+
+	if (hw->feature_flags & RNP_NET_FEATURE_SG)
+		netdev->features |= NETIF_F_SG;
+	if (hw->feature_flags & RNP_NET_FEATURE_TSO)
+		netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_HASH)
+		netdev->features |= NETIF_F_RXHASH;
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_CHECKSUM)
+		netdev->features |= NETIF_F_RXCSUM;
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_CHECKSUM)
+		netdev->features |= NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_USO)
+		netdev->features |= NETIF_F_GSO_UDP_L4;
+
+	if (enable_hi_dma)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_TX_UDP_TUNNEL) {
+		netdev->gso_partial_features = RNP_GSO_PARTIAL_FEATURES;
+		netdev->features |=
+			NETIF_F_GSO_PARTIAL | RNP_GSO_PARTIAL_FEATURES;
+	}
+
+	netdev->hw_features |= netdev->features;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+		}
+	}
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+			netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+		}
+	}
+	netdev->hw_features |= NETIF_F_RXALL;
+	if (hw->feature_flags & RNP_NET_FEATURE_RX_NTUPLE_FILTER)
+		netdev->hw_features |= NETIF_F_NTUPLE;
+	/* only expose rx-fcs when NC-SI is not enabled */
+	if ((hw->feature_flags & RNP_NET_FEATURE_RX_FCS) && (!hw->ncsi_en))
+		netdev->hw_features |= NETIF_F_RXFCS;
+	if (hw->feature_flags & RNP_NET_FEATURE_HW_TC)
+		netdev->hw_features |= NETIF_F_HW_TC;
+
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->hw_enc_features |= netdev->vlan_features;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+
+	if (hw->feature_flags & RNP_NET_FEATURE_VLAN_FILTER)
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_FILTER)
+		netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+	if (hw->feature_flags & RNP_NET_FEATURE_VLAN_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+			netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+		}
+	}
+	if (hw->feature_flags & RNP_NET_FEATURE_STAG_OFFLOAD) {
+		if (!hw->ncsi_en) {
+			netdev->features |= NETIF_F_HW_VLAN_STAG_RX;
+			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
+		}
+	}
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+	if (adapter->flags2 & RNP_FLAG2_RSC_CAPABLE)
+		netdev->hw_features |= NETIF_F_LRO;
+
+	if (adapter->flags2 & RNP_FLAG2_RSC_ENABLED)
+		netdev->features |= NETIF_F_LRO;
+
+	eth_hw_addr_set(netdev, hw->mac.perm_addr);
+	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
+	pr_info("dev mac:%pM\n", netdev->dev_addr);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		e_dev_err("invalid MAC address\n");
+		err = -EIO;
+		goto err_sw_init;
+	}
+	ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
+
+	timer_setup(&adapter->service_timer, rnpgbe_service_timer, 0);
+
+	if (module_enable_ptp) {
+		/* setup ptp_addr according to mac type */
+		switch (adapter->hw.mac.mac_type) {
+		case mac_dwc_xlg:
+			adapter->ptp_addr = adapter->hw.mac.mac_addr + 0xd00;
+			adapter->gmac4 = 1;
+			break;
+		case mac_dwc_g:
+			adapter->ptp_addr = adapter->hw.mac.mac_addr + 0x700;
+			adapter->gmac4 = 0;
+			break;
+		}
+		adapter->flags2 |= RNP_FLAG2_PTP_ENABLED;
+		if (adapter->flags2 & RNP_FLAG2_PTP_ENABLED) {
+			adapter->tx_timeout_factor = 10;
+			INIT_WORK(&adapter->tx_hwtstamp_work,
+				  rnpgbe_tx_hwtstamp_work);
+		}
+	}
+
+	INIT_WORK(&adapter->service_task, rnpgbe_service_task);
+	clear_bit(__RNP_SERVICE_SCHED, &adapter->state);
+
+	if (fix_eth_name)
+		strscpy(netdev->name, adapter->name, sizeof(netdev->name));
+	else
+		strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+
+	err = rnpgbe_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = register_mbx_irq(adapter);
+	if (err)
+		goto err_register;
+
+#ifdef CONFIG_PCI_IOV
+	rnpgbe_enable_sriov_true(adapter);
+#endif
+
+	/* WOL not supported for all devices */
+	{
+		struct ethtool_wolinfo wol;
+
+		if (rnpgbe_wol_exclusion(adapter, &wol) ||
+		    !device_can_wakeup(&adapter->pdev->dev))
+			adapter->wol = 0;
+
+		device_set_wakeup_enable(&adapter->pdev->dev, !!adapter->wol);
+	}
+	/* reset the hardware with the new settings */
+	err = hw->ops.start_hw(hw);
+
+	/* start with the interface in the down state */
+	set_bit(__RNP_DOWN, &adapter->state);
+
+	if (!fix_eth_name)
+		strscpy(netdev->name, "eth%d", sizeof(netdev->name));
+	err = register_netdev(netdev);
+	if (err) {
+		e_dev_err("register_netdev failed!\n");
+		goto err_register;
+	}
+
+	/* power down the optics for n10 SFP+ fiber */
+	if (hw->ops.disable_tx_laser)
+		hw->ops.disable_tx_laser(hw);
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
+	if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) {
+		DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+			adapter->num_vfs);
+		for (i = 0; i < adapter->num_vfs; i++)
+			rnpgbe_vf_configuration(pdev, (i | 0x10000000));
+	}
+
+	if (rnpgbe_sysfs_init(adapter))
+		e_err(probe, "failed to allocate sysfs resources\n");
+
+	rnpgbe_dbg_adapter_init(adapter);
+
+	return 0;
+err_register:
+	remove_mbx_irq(adapter);
+	rnpgbe_clear_interrupt_scheme(adapter);
+err_sw_init:
+	rnpgbe_disable_sriov(adapter);
+	adapter->flags2 &= ~RNP_FLAG2_SEARCH_FOR_SFP;
+err_free_net:
+	free_netdev(netdev);
+	return err;
+}
+
+/**
+ * rnpgbe_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @id: entry in rnpgbe_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * rnpgbe_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct rnpgbe_adapter *adapter;
+	struct rnpgbe_info *ii = rnpgbe_info_tbl[id->driver_data];
+	int err;
+
+	/* Catch broken hardware that put the wrong VF device ID in
+	 * the PCIe SR-IOV capability.
+	 */
+	if (pdev->is_virtfn) {
+		WARN(1, "%s (%hx:%hx) should not be a VF!\n", pci_name(pdev),
+		     pdev->vendor, pdev->device);
+		return -EINVAL;
+	}
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
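+	/* prefer 56-bit DMA addressing; fall back to 32-bit and clear the
+	 * high-DMA flag if the wider mask cannot be set
+	 */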
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(56)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56))) {
+		enable_hi_dma = 1;
+	} else {
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev,
+					"No usable DMA configuration, aborting\n");
+				goto err_dma;
+			}
+		}
+		enable_hi_dma = 0;
+	}
+
+	err = pci_request_mem_regions(pdev, rnpgbe_driver_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_selected_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+
+	err = rnpgbe_add_adpater(pdev, ii, &adapter);
+	if (err)
+		goto err_regions;
+
+	return 0;
+err_regions:
+	pci_release_mem_regions(pdev);
+err_dma:
+err_pci_reg:
+	return err;
+}
+
+/**
+ * rnpgbe_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * rnpgbe_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void rnpgbe_remove(struct pci_dev *pdev)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+
+#ifdef CONFIG_PCI_IOV
+	/* make sure SR-IOV is disabled before the adapter is removed */
+	rnpgbe_disable_sriov(adapter);
+#endif
+	rnpgbe_rm_adpater(adapter);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * rnpgbe_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t rnpgbe_io_error_detected(struct pci_dev *pdev,
+						 pci_channel_state_t state)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+#ifdef CONFIG_PCI_IOV
+	struct pci_dev *bdev, *vfdev;
+	u32 dw0, dw1, dw2, dw3;
+	int vf, pos;
+	u16 req_id, pf_func;
+
+	if (adapter->num_vfs == 0)
+		goto skip_bad_vf_detection;
+
+	bdev = pdev->bus->self;
+	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
+		bdev = bdev->bus->self;
+
+	if (!bdev)
+		goto skip_bad_vf_detection;
+
+	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
+	if (!pos)
+		goto skip_bad_vf_detection;
+
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
+	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+
+	req_id = dw1 >> 16;
+	/* On the n500, bit 7 of the requester ID being set indicates a VF */
+	if (!(req_id & 0x0080))
+		goto skip_bad_vf_detection;
+
+	pf_func = req_id & 0x01;
+	if ((pf_func & 1) == (pdev->devfn & 1)) {
+		unsigned int device_id;
+
+		vf = (req_id & 0x7F) >> 1;
+		e_dev_err("VF %d has caused a PCIe error\n", vf);
+		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
+			  "%8.8x\tdw3: %8.8x\n",
+			  dw0, dw1, dw2, dw3);
+
+		device_id = PCI_DEVICE_ID_N500_VF;
+
+		/* Find the pci device of the offending VF */
+		vfdev = pci_get_device(PCI_VENDOR_ID_MUCSE, device_id, NULL);
+		while (vfdev) {
+			if (vfdev->devfn == (req_id & 0xFF))
+				break;
+			vfdev = pci_get_device(PCI_VENDOR_ID_MUCSE, device_id,
+					       vfdev);
+		}
+		/*
+		 * There's a slim chance the VF could have been hot plugged,
+		 * so if it is no longer present we don't need to issue the
+		 * VFLR.  Just clean up the AER in that case.
+		 */
+		if (vfdev) {
+			e_dev_err("Issuing VFLR to VF %d\n", vf);
+			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+			/* Free device reference count */
+			pci_dev_put(vfdev);
+		}
+
+		pci_aer_clear_nonfatal_status(pdev);
+	}
+
+	/*
+	 * Even though the error may have occurred on the other port
+	 * we still need to increment the vf error reference count for
+	 * both ports because the I/O resume function will be called
+	 * for both of them.
+	 */
+	adapter->vferr_refcount++;
+
+	return PCI_ERS_RESULT_RECOVERED;
+
+skip_bad_vf_detection:
+#endif /* CONFIG_PCI_IOV */
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		rnpgbe_down(adapter);
+	pci_disable_device(pdev);
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * rnpgbe_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t rnpgbe_io_slot_reset(struct pci_dev *pdev)
+{
+	pci_ers_result_t result = PCI_ERS_RESULT_NONE;
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+
+	if (pci_enable_device_mem(pdev)) {
+		e_err(probe, "Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		/* we need this */
+		smp_mb__before_atomic();
+
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+		pci_wake_from_d3(pdev, false);
+		rnpgbe_reset(adapter);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	pci_aer_clear_nonfatal_status(pdev);
+	return result;
+}
+
+/**
+ * rnpgbe_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation.
+ */
+static void rnpgbe_io_resume(struct pci_dev *pdev)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+#ifdef CONFIG_PCI_IOV
+	if (adapter->vferr_refcount) {
+		e_info(drv, "Resuming after VF err\n");
+		adapter->vferr_refcount--;
+		return;
+	}
+
+#endif
+	if (netif_running(netdev))
+		rnpgbe_up(adapter);
+
+	netif_device_attach(netdev);
+}
+
+static void pci_io_reset_prepare(struct pci_dev *pdev)
+{
+	struct device *dev = pci_dev_to_dev(pdev);
+
+	rnpgbe_suspend(dev);
+}
+
+static void pci_io_reset_done(struct pci_dev *pdev)
+{
+	struct device *dev = pci_dev_to_dev(pdev);
+
+	rnpgbe_resume(dev);
+}
+
+static const struct pci_error_handlers rnpgbe_err_handler = {
+	.error_detected = rnpgbe_io_error_detected,
+	.slot_reset = rnpgbe_io_slot_reset,
+	.reset_prepare = pci_io_reset_prepare,
+	.reset_done = pci_io_reset_done,
+	.resume = rnpgbe_io_resume,
+};
+
+static const struct dev_pm_ops rnpgbe_pm_ops = {
+	.suspend = rnpgbe_suspend,
+	.resume = rnpgbe_resume,
+	.freeze = rnpgbe_freeze,
+	.thaw = rnpgbe_thaw,
+	.poweroff = rnpgbe_suspend,
+	.restore = rnpgbe_resume,
+};
+
+static struct pci_driver rnpgbe_driver = {
+	.name = rnpgbe_driver_name,
+	.id_table = rnpgbe_pci_tbl,
+	.probe = rnpgbe_probe,
+	.remove = rnpgbe_remove,
+	.driver = {
+		.pm = &rnpgbe_pm_ops,
+	},
+	.shutdown = rnpgbe_shutdown,
+	.sriov_configure = rnpgbe_pci_sriov_configure,
+	.err_handler = &rnpgbe_err_handler
+};
+
+static int __init rnpgbe_init_module(void)
+{
+	int ret;
+
+	pr_info("%s - version %s\n", rnpgbe_driver_string,
+		rnpgbe_driver_version);
+	pr_info("%s\n", rnpgbe_copyright);
+	rnpgbe_wq = create_singlethread_workqueue(rnpgbe_driver_name);
+
+	if (!rnpgbe_wq) {
+		pr_err("%s: Failed to create workqueue\n", rnpgbe_driver_name);
+		return -ENOMEM;
+	}
+
+	rnpgbe_dbg_init();
+
+	ret = pci_register_driver(&rnpgbe_driver);
+	if (ret) {
+		destroy_workqueue(rnpgbe_wq);
+		rnpgbe_dbg_exit();
+		return ret;
+	}
+
+	return 0;
+}
+
+module_init(rnpgbe_init_module);
+
+static void __exit rnpgbe_exit_module(void)
+{
+	pci_unregister_driver(&rnpgbe_driver);
+
+	destroy_workqueue(rnpgbe_wq);
+
+	rnpgbe_dbg_exit();
+
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+}
+
+module_exit(rnpgbe_exit_module);
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
new file mode 100644
index 0000000000000..63a62a697b7a4
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include "rnpgbe.h"
+#include "rnpgbe_type.h"
+#include "rnpgbe_common.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+#define VF2PF_MBOX_VEC(mbx, vf) (mbx->vf2pf_mbox_vec_base + 4 * (vf))
+#define CPU2PF_MBOX_VEC(mbx) (mbx->cpu2pf_mbox_vec)
+
+/* == PF <--> VF mailbox ==== */
+#define SHARE_MEM_BYTES 64
+#define PF_VF_SHM(mbx, vf)                                                     \
+	(mbx->pf_vf_shm_base +                                                 \
+	 mbx->mbx_mem_size * vf)
+#define PF2VF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 0)
+#define VF2PF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 4)
+#define PF_VF_SHM_DATA(mbx, vf) (PF_VF_SHM(mbx, vf) + 8)
+#define PF2VF_MBOX_CTRL(mbx, vf) (mbx->pf2vf_mbox_ctrl_base + 4 * vf)
+#define PF_VF_MBOX_MASK_LO(mbx) (mbx->pf_vf_mbox_mask_lo)
+#define PF_VF_MBOX_MASK_HI(mbx) (mbx->pf_vf_mbox_mask_hi)
+/* === CPU <--> PF === */
+#define CPU_PF_SHM(mbx) (mbx->cpu_pf_shm_base)
+#define CPU2PF_COUNTER(mbx) (CPU_PF_SHM(mbx) + 0)
+#define PF2CPU_COUNTER(mbx) (CPU_PF_SHM(mbx) + 4)
+#define CPU_PF_SHM_DATA(mbx) (CPU_PF_SHM(mbx) + 8)
+#define PF2CPU_MBOX_CTRL(mbx) (mbx->pf2cpu_mbox_ctrl)
+#define CPU_PF_MBOX_MASK(mbx) (mbx->cpu_pf_mbox_mask)
+#define MBOX_CTRL_REQ (1 << 0) /* WO */
+#define MBOX_CTRL_PF_HOLD_SHM (1 << 3) /* VF:RO, PF:WR */
+#define MBOX_IRQ_EN 0
+#define MBOX_IRQ_DISABLE 1
+#define mbx_prd32(hw, reg) prnpgbe_rd_reg((hw)->hw_addr + (reg))
+#define mbx_rd32(hw, reg) rnpgbe_rd_reg((hw)->hw_addr + (reg))
+#define mbx_pwr32(hw, reg, val) p_rnpgbe_wr_reg((hw)->hw_addr + (reg), (val))
+#define mbx_wr32(hw, reg, val) rnpgbe_wr_reg((hw)->hw_addr + (reg), (val))
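+/* Layout of one mailbox shared-memory block (PF<->VF and PF<->firmware):
+ * a 32-bit counter word per direction (offsets 0 and 4), each holding the
+ * request count in its low 16 bits and the ack count in its high 16 bits,
+ * message data starting at offset 8, and a control register used to claim
+ * ownership of the buffer.
+ */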
+
+/**
+ *  rnpgbe_read_mbx - Reads a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox/vfnum to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+s32 rnpgbe_read_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+		    enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	/* limit read to size of mailbox */
+	if (size > mbx->size)
+		size = mbx->size;
+
+	if (mbx->ops.read)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_write_mbx - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 rnpgbe_write_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+		     enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = 0;
+
+	if (size > mbx->size)
+		ret_val = RNP_ERR_MBX;
+	else if (mbx->ops.write)
+		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+static inline u16 rnpgbe_mbx_get_req(struct rnpgbe_hw *hw, int reg)
+{
+	mb();
+	return ioread32(hw->hw_addr + reg) & 0xffff;
+}
+
+static inline u16 rnpgbe_mbx_get_ack(struct rnpgbe_hw *hw, int reg)
+{
+	mb();
+	return (mbx_rd32(hw, reg) >> 16);
+}
+
+static inline void rnpgbe_mbx_inc_pf_req(struct rnpgbe_hw *hw,
+					 enum MBX_ID mbx_id)
+{
+	u16 req;
+	int reg;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	u32 v;
+
+	reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) :
+				       PF2VF_COUNTER(mbx, mbx_id);
+	v = mbx_rd32(hw, reg);
+
+	req = (v & 0xffff);
+	req++;
+	v &= ~(0x0000ffff);
+	v |= req;
+	mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+}
+
+static inline void rnpgbe_mbx_inc_pf_ack(struct rnpgbe_hw *hw,
+					 enum MBX_ID mbx_id)
+{
+	u16 ack;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) :
+					   PF2VF_COUNTER(mbx, mbx_id);
+	u32 v = mbx_rd32(hw, reg);
+
+	ack = (v >> 16) & 0xffff;
+	ack++;
+	v &= ~(0xffff0000);
+	v |= (ack << 16);
+	mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+}
+
+/**
+ *  rnpgbe_check_for_msg - checks to see if someone sent us mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 rnpgbe_check_for_msg(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	if (mbx->ops.check_for_msg)
+		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_check_for_ack - checks to see if someone sent us ACK
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 rnpgbe_check_for_ack(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	if (mbx->ops.check_for_ack)
+		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ **/
+static s32 rnpgbe_poll_for_msg(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_msg)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : -ETIME;
+}
+
+/**
+ *  rnpgbe_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 rnpgbe_poll_for_ack(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_ack)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown) {
+			printk("wait ack timeout\n");
+			break;
+		}
+		udelay(mbx->usec_delay);
+	}
+
+out:
+	return countdown ? 0 : RNP_ERR_MBX;
+}
+
+/**
+ *  rnpgbe_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static s32 rnpgbe_read_posted_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+				  enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	if (!mbx->ops.read)
+		goto out;
+
+	ret_val = rnpgbe_poll_for_msg(hw, mbx_id);
+
+	/* if ack received read message, otherwise we timed out */
+	if (!ret_val)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static s32 rnpgbe_write_posted_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+				   enum MBX_ID mbx_id)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
+		goto out;
+
+	/* send msg and hold buffer lock */
+	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = rnpgbe_poll_for_ack(hw, mbx_id);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox (VF index or MBX_CM3CPU)
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 rnpgbe_check_for_msg_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	s32 ret_val = RNP_ERR_MBX;
+	u16 hw_req_count = 0;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	if (mbx_id == MBX_CM3CPU) {
+		hw_req_count = rnpgbe_mbx_get_req(hw, CPU2PF_COUNTER(mbx));
+		if (mbx->mbx_feature & MBX_FEATURE_NO_ZERO) {
+			if ((hw_req_count != 0) &&
+			    (hw_req_count != hw->mbx.cpu_req)) {
+				ret_val = 0;
+				hw->mbx.stats.reqs++;
+			}
+
+		} else {
+			if (hw_req_count != hw->mbx.cpu_req) {
+				ret_val = 0;
+				hw->mbx.stats.reqs++;
+			}
+		}
+	} else {
+		if (rnpgbe_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)) !=
+		    hw->mbx.vf_req[mbx_id]) {
+			ret_val = 0;
+			hw->mbx.stats.reqs++;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox (VF index or MBX_CM3CPU)
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 rnpgbe_check_for_ack_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	s32 ret_val = RNP_ERR_MBX;
+	u16 hw_cpu_ack = 0;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	if (mbx_id == MBX_CM3CPU) {
+		hw_cpu_ack = rnpgbe_mbx_get_ack(hw, CPU2PF_COUNTER(mbx));
+		if ((hw_cpu_ack != 0) &&
+		   (hw_cpu_ack != hw->mbx.cpu_ack)) {
+			ret_val = 0;
+			hw->mbx.stats.acks++;
+		}
+	} else {
+		if (rnpgbe_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id)) !=
+		    hw->mbx.vf_ack[mbx_id]) {
+			ret_val = 0;
+			hw->mbx.stats.acks++;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: the VF index or CPU
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 rnpgbe_obtain_mbx_lock_pf(struct rnpgbe_hw *hw, enum MBX_ID mbx_id)
+{
+	int try_cnt = 5000;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) :
+						PF2VF_MBOX_CTRL(mbx, mbx_id);
+
+	while (try_cnt-- > 0) {
+		/* Take ownership of the buffer */
+		mbx_wr32(hw, CTRL_REG, MBOX_CTRL_PF_HOLD_SHM);
+		wmb();
+		/* reserve mailbox for cm3 use */
+		if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_PF_HOLD_SHM)
+			return 0;
+		udelay(100);
+	}
+
+	rnpgbe_err("%s: faild to get:%d lock\n", __func__, mbx_id);
+	return -EPERM;
+}
+
+/**
+ *  rnpgbe_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox (VF index or MBX_CM3CPU)
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 rnpgbe_write_mbx_pf(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+			       enum MBX_ID mbx_id)
+{
+	s32 ret_val = 0;
+	u16 i;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	u32 DATA_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) :
+						PF_VF_SHM_DATA(mbx, mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) :
+						PF2VF_MBOX_CTRL(mbx, mbx_id);
+
+	if (size > RNP_VFMAILBOX_SIZE) {
+		printk(KERN_DEBUG "%s: size:%d should <%d\n", __func__, size,
+		       RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+
+	/* lock the mailbox to prevent pf/vf/cpu race condition */
+	ret_val = rnpgbe_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val) {
+		printk(KERN_DEBUG
+		       "%s: get mbx:%d wlock failed. ret:%d. req:0x%08x-0x%08x\n",
+		       __func__, mbx_id, ret_val, msg[0], msg[1]);
+		goto out_no_write;
+	}
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+		mbx_wr32(hw, DATA_REG + i * 4, msg[i]);
+		rnpgbe_logd(LOG_MBX_OUT, "  w-mbx:0x%x <= 0x%x\n",
+			    DATA_REG + i * 4, msg[i]);
+	}
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	if (mbx_id == MBX_CM3CPU) {
+		hw->mbx.cpu_ack = rnpgbe_mbx_get_ack(hw, CPU2PF_COUNTER(mbx));
+	} else {
+		hw->mbx.vf_ack[mbx_id] =
+			rnpgbe_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id));
+	}
+	rnpgbe_mbx_inc_pf_req(hw, mbx_id);
+
+	/* Interrupt VF/CM3 to tell it a message
+	 * has been sent and release buffer
+	 */
+	if (mbx->mbx_feature & MBX_FEATURE_WRITE_DELAY)
+		udelay(300);
+	mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ);
+
+out_no_write:
+	/* reached when the mailbox lock could not be obtained */
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbe_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox (VF index or MBX_CM3CPU)
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF/CPU request so no polling for message is needed.
+ **/
+static s32 rnpgbe_read_mbx_pf(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+			      enum MBX_ID mbx_id)
+{
+	s32 ret_val = -EIO;
+	u32 i;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	u32 BUF_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) :
+					       PF_VF_SHM_DATA(mbx, mbx_id);
+	u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) :
+						PF2VF_MBOX_CTRL(mbx, mbx_id);
+	if (size > RNP_VFMAILBOX_SIZE) {
+		printk(KERN_DEBUG "%s: size:%d should <%d\n", __func__, size,
+		       RNP_VFMAILBOX_SIZE);
+		return -EINVAL;
+	}
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = rnpgbe_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val)
+		goto out_no_read;
+
+	/* we need this */
+	mb();
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++) {
+		msg[i] = mbx_rd32(hw, BUF_REG + 4 * i);
+		rnpgbe_logd(LOG_MBX_IN, "  r-mbx:0x%x => 0x%x\n",
+			    BUF_REG + 4 * i, msg[i]);
+	}
+	mbx_wr32(hw, BUF_REG, 0);
+
+	/* update the cached req counter used by rnpgbe_check_for_msg_pf */
+	if (mbx_id == MBX_CM3CPU) {
+		hw->mbx.cpu_req = rnpgbe_mbx_get_req(hw, CPU2PF_COUNTER(mbx));
+	} else {
+		hw->mbx.vf_req[mbx_id] =
+			rnpgbe_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id));
+	}
+	/* Acknowledge receipt and release the mailbox (note: this ack may be
+	 * sent before the caller has consumed the message)
+	 */
+	rnpgbe_mbx_inc_pf_ack(hw, mbx_id);
+
+	/* free ownership of the buffer */
+	mbx_wr32(hw, CTRL_REG, 0);
+
+out_no_read:
+
+	return ret_val;
+}
+
+static void rnpgbe_mbx_reset(struct rnpgbe_hw *hw)
+{
+	int idx, v;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	for (idx = 0; idx < hw->max_vfs; idx++) {
+		v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx));
+		hw->mbx.vf_req[idx] = v & 0xffff;
+		hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff;
+		mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0);
+	}
+	v = mbx_rd32(hw, CPU2PF_COUNTER(mbx));
+	hw->mbx.cpu_req = v & 0xffff;
+	hw->mbx.cpu_ack = (v >> 16) & 0xffff;
+
+	printk(KERN_DEBUG "now mbx.cpu_req %d mbx.cpu_ack %d\n",
+	       hw->mbx.cpu_req, hw->mbx.cpu_ack);
+	mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0);
+
+	if (PF_VF_MBOX_MASK_LO(mbx))
+		wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0);
+	if (PF_VF_MBOX_MASK_HI(mbx))
+		wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0);
+
+	wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffff0000);
+}
+
+static int rnpgbe_mbx_configure_pf(struct rnpgbe_hw *hw, int nr_vec,
+				   bool enable)
+{
+	int idx = 0;
+	u32 v;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
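+	/* enable: snapshot the current request/ack counters, clear mailbox
+	 * ownership, route VF->PF and CM3->PF mailbox interrupts to nr_vec
+	 * and unmask them; disable: mask all mailbox interrupts and clear
+	 * the control registers.
+	 */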
+	if (enable) {
+		for (idx = 0; idx < hw->max_vfs; idx++) {
+			v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx));
+			hw->mbx.vf_req[idx] = v & 0xffff;
+			hw->mbx.vf_ack[idx] = (v >> 16) & 0xffff;
+
+			mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0);
+		}
+		v = mbx_rd32(hw, CPU2PF_COUNTER(mbx));
+		hw->mbx.cpu_req = v & 0xffff;
+		hw->mbx.cpu_ack = (v >> 16) & 0xffff;
+		mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0);
+
+		for (idx = 0; idx < hw->max_vfs; idx++) {
+			mbx_wr32(hw, VF2PF_MBOX_VEC(mbx, idx),
+				 nr_vec);
+			/* vf to pf req interrupt */
+		}
+
+		/* unmask VF-to-PF mailbox interrupts */
+		if (PF_VF_MBOX_MASK_LO(mbx))
+			wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0);
+
+		if (PF_VF_MBOX_MASK_HI(mbx))
+			wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0);
+		/* enable irq */
+
+		/* bind cm3cpu mbx to irq */
+		wr32(hw, CPU2PF_MBOX_VEC(mbx), nr_vec);
+		/* cm3 and VF63 share #63 irq */
+		/* allow CM3CPU to PF MBX IRQ */
+		wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xffff0000);
+
+		rnpgbe_dbg("[%s] mbx-vector:%d\n", __func__, nr_vec);
+
+	} else {
+		if (PF_VF_MBOX_MASK_LO(mbx))
+			wr32(hw, PF_VF_MBOX_MASK_LO(mbx),
+			     0xffffffff);
+		/* disable irq */
+		if (PF_VF_MBOX_MASK_HI(mbx))
+			wr32(hw, PF_VF_MBOX_MASK_HI(mbx),
+			     0xffffffff);
+
+		/* disable CM3CPU to PF MBX IRQ */
+		wr32(hw, CPU_PF_MBOX_MASK(mbx), 0xfffffffe);
+
+		/* reset vf->pf status/ctrl */
+		for (idx = 0; idx < hw->max_vfs; idx++)
+			mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0);
+		/* reset pf->cm3 ctrl */
+		mbx_wr32(hw, PF2CPU_MBOX_CTRL(mbx), 0);
+		/* used to sync link status */
+		wr32(hw, RNP_DMA_DUMY, 0);
+	}
+	return 0;
+}
+
+/**
+ *  rnpgbe_init_mbx_params_pf - set initial values for pf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+s32 rnpgbe_init_mbx_params_pf(struct rnpgbe_hw *hw)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
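+	/* poll every 100 us for up to roughly 4 seconds */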
+	mbx->usec_delay = 100;
+	mbx->timeout = (4 * 1000 * 1000) / mbx->usec_delay;
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+	mbx->size = RNP_VFMAILBOX_SIZE;
+
+	mutex_init(&mbx->lock);
+	rnpgbe_mbx_reset(hw);
+	return 0;
+}
+
+struct rnpgbe_mbx_operations rnpgbe_mbx_ops_generic = {
+	.init_params = rnpgbe_init_mbx_params_pf,
+	.read = rnpgbe_read_mbx_pf,
+	.write = rnpgbe_write_mbx_pf,
+	.read_posted = rnpgbe_read_posted_mbx,
+	.write_posted = rnpgbe_write_posted_mbx,
+	.check_for_msg = rnpgbe_check_for_msg_pf,
+	.check_for_ack = rnpgbe_check_for_ack_pf,
+	.configure = rnpgbe_mbx_configure_pf,
+};
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
new file mode 100644
index 0000000000000..e0a2ca9abb168
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_MBX_H_
+#define _RNPGBE_MBX_H_
+
+#include "rnpgbe_type.h"
+#include "rnpgbe_mbx_fw.h"
+
+#define RNP_VFMAILBOX_SIZE 14 /* 16 32 bit words - 64 bytes */
+#define RNP_ERR_MBX -100
+#define RNP_VT_MSGTYPE_ACK 0x80000000 /* messages OR'd with this are ACKs */
+#define RNP_VT_MSGTYPE_NACK 0x40000000 /* messages OR'd with this are NACKs */
+#define RNP_VT_MSGTYPE_CTS 0x20000000 /* VF is still clear to send requests */
+#define RNP_VT_MSGINFO_SHIFT 14
+/* bits 20:14 are used for extra info for certain messages */
+#define RNP_VT_MSGINFO_MASK (0x7F << RNP_VT_MSGINFO_SHIFT)
+/* VLAN pool filtering masks */
+#define RNP_VLVF_VIEN 0x80000000 /* filter is valid */
+#define RNP_VLVF_ENTRIES 64
+#define RNP_VLVF_VLANID_MASK 0x00000FFF
+/* mailbox msg_data */
+#define RNP_VNUM_OFFSET (21)
+#define RNP_VF_MASK (0x7f << 21)
+#define RNP_MAIL_CMD_MASK 0x3fff
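+/* msg_data layout: the low 14 bits carry the mailbox command, bits 27:21
+ * carry the VF number
+ */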
+/* mailbox API, legacy requests */
+#define RNP_VF_RESET 0x01 /* VF requests reset */
+#define RNP_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define RNP_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define RNP_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+/* mailbox API, version 1.0 VF requests */
+#define RNP_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define RNP_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define RNP_VF_GET_MACADDR 0x07 /* get vf macaddr */
+#define RNP_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+/* mailbox API, version 1.1 VF requests */
+#define RNP_VF_GET_QUEUES 0x09 /* get queue configuration */
+#define RNP_VF_SET_VLAN_STRIP 0x0a /* VF requests PF to set VLAN STRIP */
+#define RNP_VF_REG_RD 0x0b /* vf read reg */
+#define RNP_VF_GET_MTU 0x0c /* VF gets mtu setup from PF */
+#define RNP_VF_SET_MTU 0x0d /* VF requests PF to set mtu */
+#define RNP_VF_GET_FW 0x0e /* vf get firmware version */
+#define RNP_VF_GET_LINK 0x10 /* get link status */
+#define RNP_VF_RESET_PF 0x11
+#define RNP_VF_GET_DMA_FRAG 0x12
+#define RNP_VF_SET_DMA_FRAG 0x13
+#define RNP_VF_SET_STATS_CLR 0x14 /* vf set stats status */
+#define RNP_VF_GET_STATS_CLR 0x15
+#define RNP_PF_SET_FCS 0x10 /* PF set fcs status */
+#define RNP_PF_SET_PAUSE 0x11 /* PF set pause status */
+#define RNP_PF_SET_FT_PADDING 0x12 /* PF set ft padding status */
+#define RNP_PF_SET_VLAN_FILTER 0x13 /* PF set vlan filter status */
+#define RNP_PF_SET_VLAN 0x14 /* PF set vlan */
+#define RNP_PF_SET_LINK 0x15 /* PF set link status */
+#define RNP_PF_SET_MTU 0x16 /* PF set mtu */
+#define RNP_PF_SET_RESET 0x17 /* PF reset notification */
+#define RNP_PF_LINK_UP (1 << 31)
+#define RNP_PF_REMOVE 0x0f
+/* GET_QUEUES return data indices within the mailbox */
+#define RNP_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define RNP_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define RNP_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define RNP_VF_DEF_QUEUE 4 /* Default queue offset */
+#define RNP_VF_QUEUE_START 5 /* first queue index */
+#define RNP_VF_QUEUE_DEPTH 6 /* ring depth */
+/* length of permanent address message returned from PF */
+#define RNP_VF_PERMADDR_MSG_LEN 11
+/* word in permanent address message with the current multicast type */
+#define RNP_VF_MC_TYPE_WORD 3
+#define RNP_VF_DMA_VERSION_WORD 4
+#define RNP_VF_VLAN_WORD 5
+#define RNP_VF_PHY_TYPE_WORD 6
+#define RNP_VF_FW_VERSION_WORD 7
+#define RNP_VF_LINK_STATUS_WORD 8
+#define RNP_VF_AXI_MHZ 9
+#define PF_FEATRURE_VLAN_FILTER BIT(0)
+#define PF_NCSI_EN BIT(1)
+#define RNP_VF_FEATURE 10
+#define RNP_PF_CONTROL_PRING_MSG 0x0100 /* PF control message */
+#define RNP_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define RNP_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+enum MBX_ID {
+	MBX_VF0 = 0,
+	MBX_VF1,
+	MBX_VF2,
+	MBX_VF3,
+	MBX_VF4,
+	MBX_VF5,
+	MBX_VF6,
+	MBX_VF7,
+	MBX_VF8,
+	MBX_VF9,
+	MBX_VF10,
+	MBX_VF11,
+	MBX_VF12,
+	MBX_VF13,
+	MBX_VF14,
+	MBX_VF15,
+	MBX_VF16,
+	MBX_VF17,
+	MBX_VF18,
+	MBX_VF19,
+	MBX_VF20,
+	MBX_VF21,
+	MBX_VF22,
+	MBX_VF23,
+	MBX_VF24,
+	MBX_VF25,
+	MBX_VF26,
+	MBX_VF27,
+	MBX_VF28,
+	MBX_VF29,
+	MBX_VF30,
+	MBX_VF31,
+	MBX_VF32,
+	MBX_VF33,
+	MBX_VF34,
+	MBX_VF35,
+	MBX_VF36,
+	MBX_VF37,
+	MBX_VF38,
+	MBX_VF39,
+	MBX_VF40,
+	MBX_VF41,
+	MBX_VF42,
+	MBX_VF43,
+	MBX_VF44,
+	MBX_VF45,
+	MBX_VF46,
+	MBX_VF47,
+	MBX_VF48,
+	MBX_VF49,
+	MBX_VF50,
+	MBX_VF51,
+	MBX_VF52,
+	MBX_VF53,
+	MBX_VF54,
+	MBX_VF55,
+	MBX_VF56,
+	MBX_VF57,
+	MBX_VF58,
+	MBX_VF59,
+	MBX_VF60,
+	MBX_VF61,
+	MBX_VF62,
+	MBX_VF63,
+	MBX_CM3CPU,
+	MBX_FW = MBX_CM3CPU,
+	MBX_VFCNT
+};
+
+enum PF_STATUS {
+	PF_FCS_STATUS,
+	PF_PAUSE_STATUS,
+	PF_FT_PADDING_STATUS,
+	PF_VLAN_FILTER_STATUS,
+	PF_SET_VLAN_STATUS,
+	PF_SET_LINK_STATUS,
+	PF_SET_MTU,
+	PF_SET_RESET,
+};
+
+s32 rnpgbe_read_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size, enum MBX_ID);
+s32 rnpgbe_write_mbx(struct rnpgbe_hw *hw, u32 *msg, u16 size, enum MBX_ID);
+s32 rnpgbe_check_for_msg(struct rnpgbe_hw *hw, enum MBX_ID);
+s32 rnpgbe_check_for_ack(struct rnpgbe_hw *hw, enum MBX_ID);
+s32 rnpgbe_check_for_rst(struct rnpgbe_hw *hw, enum MBX_ID);
+s32 rnpgbe_init_mbx_params_pf(struct rnpgbe_hw *hw);
+extern struct rnpgbe_mbx_operations rnpgbe_mbx_ops_generic;
+int rnpgbe_fw_get_macaddr(struct rnpgbe_hw *hw, int pfvfnum, u8 *mac_addr,
+			  int lane);
+int rnpgbe_mbx_fw_reset_phy(struct rnpgbe_hw *hw);
+unsigned int rnpgbe_mbx_change_timeout(struct rnpgbe_hw *hw, int timeout_ms);
+struct rnpgbe_info;
+int rnpgbe_mbx_get_capability(struct rnpgbe_hw *hw, struct rnpgbe_info *info);
+int rnpgbe_mbx_get_eee_capability(struct rnpgbe_hw *hw,
+				  struct rnpgbe_eee_cap *eee_cap);
+int rnpgbe_mbx_link_event_enable(struct rnpgbe_hw *hw, int enable);
+int rnpgbe_mbx_ifup_down(struct rnpgbe_hw *hw, int up);
+int rnpgbe_mbx_tstamps_show(struct rnpgbe_hw *hw, u32 sec, u32 nanosec);
+int rnpgbe_mbx_led_set(struct rnpgbe_hw *hw, int value);
+int rnpgbe_mbx_get_dump(struct rnpgbe_hw *hw, int flags, u32 *data_out,
+			int buflen);
+int rnpgbe_mbx_get_dump_flags(struct rnpgbe_hw *hw);
+int rnpgbe_mbx_set_dump(struct rnpgbe_hw *hw, int flag);
+int rnpgbe_mbx_sfp_write(struct rnpgbe_hw *hw, int sfp_addr, int reg, short v);
+int rnpgbe_mbx_sfp_module_eeprom_info(struct rnpgbe_hw *hw, int sfp_addr,
+				      int reg, int data_len, u8 *buf);
+int rnpgbe_mbx_get_temp(struct rnpgbe_hw *hw, int *voltage);
+int rnpgbe_mbx_phy_pause_set(struct rnpgbe_hw *hw, u32 pause_mode);
+int rnpgbe_mbx_phy_link_set(struct rnpgbe_hw *hw, int adv, int autoneg,
+			    int speed, int duplex, int mdix_ctrl);
+int rnpgbe_mbx_phy_pause_get(struct rnpgbe_hw *hw, u32 *pause_mode);
+int rnpgbe_mbx_phy_eee_set(struct rnpgbe_hw *hw, u32 tx_lpi_timer,
+			   u32 local_eee);
+int rnpgbe_maintain_req(struct rnpgbe_hw *hw, int cmd, int arg0,
+			int req_data_bytes, int reply_bytes,
+			dma_addr_t dma_phy_addr);
+int rnpgbe_mbx_get_lane_stat(struct rnpgbe_hw *hw);
+int rnpgbe_mbx_wol_set(struct rnpgbe_hw *hw, u32 mode);
+int rnpgbe_mbx_gephy_test_set(struct rnpgbe_hw *hw, u32 mode);
+int rnpgbe_mbx_lldp_set(struct rnpgbe_hw *hw, u32 enable);
+int rnpgbe_mbx_lldp_get(struct rnpgbe_hw *hw);
+int rnpgbe_mbx_ifsuspuse(struct rnpgbe_hw *hw, int status);
+int rnpgbe_mbx_ifinsmod(struct rnpgbe_hw *hw, int status);
+int rnpgbe_mbx_ifforce_control_mac(struct rnpgbe_hw *hw, int status);
+int rnpgbe_set_lane_fun(struct rnpgbe_hw *hw, int fun, int value0, int value1,
+			int value2, int value3);
+int rnpgbe_mbx_reg_write(struct rnpgbe_hw *hw, int fw_reg, int value);
+int rnpgbe_mbx_fw_reg_read(struct rnpgbe_hw *hw, int fw_reg);
+int rnpgbe_mbx_force_speed(struct rnpgbe_hw *hw, int speed);
+
+#endif /* _RNPGBE_MBX_H_ */
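As a reading aid for the msg_data layout defined above, here is a minimal sketch of how the fields packed into one mailbox data word line up with these masks; the helper below is illustrative only and is not part of the driver.

static void example_decode_mbx_word(u32 word)
{
	/* bits 27:21 carry the VF number, bits 13:0 the command id */
	u32 vf = (word & RNP_VF_MASK) >> RNP_VNUM_OFFSET;
	u32 cmd = word & RNP_MAIL_CMD_MASK;	/* e.g. RNP_VF_SET_MAC_ADDR */

	if (word & RNP_VT_MSGTYPE_ACK)
		pr_debug("vf %u: cmd 0x%x acked\n", vf, cmd);
	else if (word & RNP_VT_MSGTYPE_NACK)
		pr_debug("vf %u: cmd 0x%x nacked\n", vf, cmd);
}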
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
new file mode 100644
index 0000000000000..ae17b1f964c87
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
@@ -0,0 +1,1587 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+#define RNP_FW_MAILBOX_SIZE RNP_VFMAILBOX_SIZE
+
+static struct mbx_req_cookie *mbx_cookie_zalloc(int priv_len)
+{
+	struct mbx_req_cookie *cookie =
+		kzalloc(sizeof(*cookie) + priv_len, GFP_KERNEL);
+
+	if (cookie) {
+		cookie->timeout_jiffes = 30 * HZ;
+		cookie->magic = COOKIE_MAGIC;
+		cookie->priv_len = priv_len;
+	}
+
+	return cookie;
+}
+
+static int rnpgbe_mbx_write_posted_locked(struct rnpgbe_hw *hw,
+					  struct mbx_fw_cmd_req *req)
+{
+	int err = 0;
+	int retry = 3;
+
+	if (mutex_lock_interruptible(&hw->mbx.lock)) {
+		rnpgbe_err("[%s] get mbx lock failed opcode:0x%x\n", __func__,
+			   req->opcode);
+		return -EAGAIN;
+	}
+
+	rnpgbe_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__,
+		    hw->pfvfnum, &hw->mbx.lock, hw, req->opcode);
+
+try_again:
+	retry--;
+	if (retry < 0) {
+		mutex_unlock(&hw->mbx.lock);
+		rnpgbe_err("%s: write_posted failed! err:0x%x opcode:0x%x\n",
+			   __func__, err, req->opcode);
+		return -EIO;
+	}
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+	if (err)
+		goto try_again;
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+/* force firmware to report link events to the driver */
+static void rnpgbe_link_stat_mark_reset(struct rnpgbe_hw *hw)
+{
+	wr32(hw, RNP_DMA_DUMY, 0xa0000000);
+}
+
+static void rnpgbe_link_stat_mark_disable(struct rnpgbe_hw *hw)
+{
+	wr32(hw, RNP_DMA_DUMY, 0);
+}
+
+static int rnpgbe_mbx_fw_post_req(struct rnpgbe_hw *hw,
+				  struct mbx_fw_cmd_req *req,
+				  struct mbx_req_cookie *cookie)
+{
+	int err = 0;
+	struct rnpgbe_adapter *adpt = hw->back;
+
+	cookie->errcode = 0;
+	cookie->done = 0;
+	init_waitqueue_head(&cookie->wait);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock)) {
+		rnpgbe_err("[%s] get mbx lock failed opcode:0x%x\n", __func__,
+			   req->opcode);
+		return -EAGAIN;
+	}
+
+	rnpgbe_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__,
+		    hw->pfvfnum, &hw->mbx.lock, hw, req->opcode);
+
+	err = rnpgbe_write_mbx(hw, (u32 *)req,
+			       (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+	if (err) {
+		rnpgbe_err("rnpgbe_write_mbx failed! err:%d opcode:0x%x\n", err,
+			   req->opcode);
+		mutex_unlock(&hw->mbx.lock);
+		return err;
+	}
+
+	if (cookie->timeout_jiffes != 0) {
+	retry:
+		err = wait_event_interruptible_timeout(cookie->wait,
+						       cookie->done == 1,
+						       cookie->timeout_jiffes);
+		if (err == -ERESTARTSYS)
+			goto retry;
+		if (err == 0) {
+			rnpgbe_err(
+				"[%s] %s failed! pfvfnum:0x%x hw:%p timeout err:%d opcode:%x\n",
+				adpt->name, __func__, hw->pfvfnum, hw, err,
+				req->opcode);
+			err = -ETIME;
+		} else {
+			err = 0;
+		}
+	} else {
+		wait_event_interruptible(cookie->wait, cookie->done == 1);
+	}
+
+	mutex_unlock(&hw->mbx.lock);
+
+	if (cookie->errcode)
+		err = cookie->errcode;
+
+	return err;
+}
+
+static int rnpgbe_fw_send_cmd_wait(struct rnpgbe_hw *hw,
+				   struct mbx_fw_cmd_req *req,
+				   struct mbx_fw_cmd_reply *reply)
+{
+	int err;
+	int retry_cnt = 3;
+
+	if (!hw || !req || !reply || !hw->mbx.ops.read_posted) {
+		printk(KERN_DEBUG "error: hw:%p req:%p reply:%p\n", hw, req,
+		       reply);
+		return -EINVAL;
+	}
+
+	if (mutex_lock_interruptible(&hw->mbx.lock)) {
+		rnpgbe_err("[%s] get mbx lock failed opcode:0x%x\n", __func__,
+			   req->opcode);
+		return -EAGAIN;
+	}
+
+	rnpgbe_logd(LOG_MBX_LOCK, "%s %d lock:%p hw:%p opcode:0x%x\n", __func__,
+		    hw->pfvfnum, &hw->mbx.lock, hw, req->opcode);
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)req, (req->datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+	if (err) {
+		rnpgbe_err("%s: write_posted failed! err:0x%x opcode:0x%x\n",
+			   __func__, err, req->opcode);
+		mutex_unlock(&hw->mbx.lock);
+		return err;
+	}
+
+retry:
+	retry_cnt--;
+	if (retry_cnt < 0) {
+		mutex_unlock(&hw->mbx.lock);
+		return -EIO;
+	}
+
+	err = hw->mbx.ops.read_posted(hw, (u32 *)reply, sizeof(*reply) / 4,
+				      MBX_FW);
+	if (err) {
+		rnpgbe_err("%s: read_posted failed! err:0x%x opcode:0x%x\n",
+			   __func__, err, req->opcode);
+		mutex_unlock(&hw->mbx.lock);
+		return err;
+	}
+	if (reply->opcode != req->opcode)
+		goto retry;
+
+	mutex_unlock(&hw->mbx.lock);
+
+	if (reply->error_code) {
+		rnpgbe_err("%s: reply err:0x%x req:0x%x\n", __func__,
+			   reply->error_code, req->opcode);
+		return -reply->error_code;
+	}
+	return 0;
+}
+
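Every firmware query in this file follows the same two-path shape built on the helpers above: an interrupt-driven path that parks a cookie and sleeps, and a polled path that matches the reply to the request by opcode. A condensed sketch of that pattern using the GET_TEMP request, not the driver's exact implementation (the wrapper name is hypothetical; the real exported version is rnpgbe_mbx_get_temp() further down):

static int example_fw_get_temp(struct rnpgbe_hw *hw, int *temp)
{
	struct mbx_fw_cmd_req req;
	struct mbx_fw_cmd_reply reply;
	int err;

	memset(&req, 0, sizeof(req));
	memset(&reply, 0, sizeof(reply));

	if (hw->mbx.other_irq_enabled) {
		/* async path: the mailbox interrupt completes the cookie
		 * and wakes the waiter inside rnpgbe_mbx_fw_post_req() */
		struct mbx_req_cookie *cookie =
			mbx_cookie_zalloc(sizeof(struct get_temp));

		if (!cookie)
			return -ENOMEM;

		build_get_temp(&req, cookie);
		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
		if (!err)
			*temp = ((struct get_temp *)cookie->priv)->temp;
		kfree(cookie);
	} else {
		/* polled path: the cookie slot is unused; the reply is
		 * matched to the request by opcode in
		 * rnpgbe_fw_send_cmd_wait() */
		build_get_temp(&req, &req);
		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
		if (!err)
			*temp = reply.get_temp.temp;
	}

	return err;
}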
+int rnpgbe_mbx_get_lane_stat(struct rnpgbe_hw *hw)
+{
+	int err = 0;
+	struct mbx_fw_cmd_req req;
+	struct rnpgbe_adapter *adpt = hw->back;
+	struct lane_stat_data *st;
+	struct mbx_req_cookie *cookie = NULL;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+
+	if (hw->mbx.other_irq_enabled) {
+		cookie = mbx_cookie_zalloc(sizeof(struct lane_stat_data));
+
+		if (!cookie) {
+			rnpgbe_err("%s: no memory\n", __func__);
+			return -ENOMEM;
+		}
+
+		st = (struct lane_stat_data *)cookie->priv;
+
+		build_get_lane_status_req(&req, hw->nr_lane, cookie);
+
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+
+		if (err) {
+			rnpgbe_err("%s: error:%d\n", __func__, err);
+			goto quit;
+		}
+	} else {
+		memset(&reply, 0, sizeof(reply));
+
+		build_get_lane_status_req(&req, hw->nr_lane, &req);
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err) {
+			rnpgbe_err("%s: 1 error:%d\n", __func__, err);
+			goto quit;
+		}
+		st = (struct lane_stat_data *)&(reply.data);
+	}
+
+	hw->phy_type = st->phy_type;
+	hw->speed = adpt->speed = st->speed;
+	if (st->is_sgmii) {
+		adpt->phy_addr = st->phy_addr;
+	} else {
+		adpt->sfp.fault = st->sfp.fault;
+		adpt->sfp.los = st->sfp.los;
+		adpt->sfp.mod_abs = st->sfp.mod_abs;
+		adpt->sfp.tx_dis = st->sfp.tx_dis;
+	}
+	adpt->si.main = st->si_main;
+	adpt->si.pre = st->si_pre;
+	adpt->si.post = st->si_post;
+	adpt->si.tx_boost = st->si_tx_boost;
+	adpt->link_traing = st->link_traing;
+	adpt->fec = st->fec;
+	hw->is_sgmii = st->is_sgmii;
+	hw->pci_gen = st->pci_gen;
+	hw->pci_lanes = st->pci_lanes;
+	adpt->speed = st->speed;
+	adpt->hw.link = st->linkup;
+	hw->is_backplane = st->is_backplane;
+	hw->supported_link = st->supported_link;
+	hw->advertised_link = st->advertised_link;
+	hw->tp_mdx = st->tp_mdx;
+
+	if ((hw->hw_type == rnpgbe_hw_n10) || (hw->hw_type == rnpgbe_hw_n400)) {
+		if (hw->fw_version >= 0x00050000) {
+			hw->sfp_connector = st->sfp_connector;
+			hw->duplex = st->duplex;
+			adpt->an = st->autoneg;
+		} else {
+			hw->sfp_connector = 0xff;
+			hw->duplex = 1;
+			adpt->an = st->an;
+		}
+		if (hw->fw_version <= 0x00050000) {
+			hw->supported_link |= RNP_LINK_SPEED_10GB_FULL |
+					      RNP_LINK_SPEED_1GB_FULL;
+		}
+	}
+
+	rnpgbe_logd(
+		LOG_MBX_LINK_STAT,
+		"%s:pma_type:0x%x phy_type:0x%x,linkup:%d duplex:%d auton:%d "
+		"fec:%d an:%d lt:%d is_sgmii:%d supported_link:0x%x, backplane:%d "
+		"speed:%d sfp_connector:0x%x adv %0xx\n",
+		adpt->name, st->pma_type, st->phy_type, st->linkup, st->duplex,
+		st->autoneg, st->fec, st->an, st->link_traing, st->is_sgmii,
+		hw->supported_link, hw->is_backplane, st->speed,
+		st->sfp_connector, hw->advertised_link);
+quit:
+	if (cookie)
+		kfree(cookie);
+	return err;
+}
+
+int rnpgbe_mbx_fw_reset_phy(struct rnpgbe_hw *hw)
+{
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	if (hw->mbx.other_irq_enabled) {
+		struct mbx_req_cookie *cookie = mbx_cookie_zalloc(0);
+
+		if (!cookie)
+			return -ENOMEM;
+
+		build_reset_phy_req(&req, cookie);
+
+		ret = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		kfree(cookie);
+		return ret;
+
+	} else {
+		build_reset_phy_req(&req, &req);
+		return rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+	}
+}
+
+int rnpgbe_maintain_req(struct rnpgbe_hw *hw, int cmd, int arg0,
+			int req_data_bytes, int reply_bytes,
+			dma_addr_t dma_phy_addr)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	cookie = mbx_cookie_zalloc(0);
+	if (!cookie)
+		return -ENOMEM;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+	cookie->timeout_jiffes = 60 * HZ;
+
+	build_maintain_req(&req, cookie, cmd, arg0, req_data_bytes, reply_bytes,
+			   dma_phy_addr & 0xffffffff,
+			   (dma_phy_addr >> 32) & 0xffffffff);
+
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		int old_mbx_timeout = hw->mbx.timeout;
+
+		hw->mbx.timeout =
+			(60 * 1000 * 1000) / hw->mbx.usec_delay; /* wait 60s */
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		hw->mbx.timeout = old_mbx_timeout;
+	}
+
+	if (cookie)
+		kfree(cookie);
+
+	return (err) ? -EIO : 0;
+}
+
+int rnpgbe_fw_get_macaddr(struct rnpgbe_hw *hw, int pfvfnum, u8 *mac_addr,
+			  int nr_lane)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	rnpgbe_dbg("%s: pfvfnum:0x%x nr_lane:%d\n", __func__, pfvfnum, nr_lane);
+
+	if (!mac_addr) {
+		rnpgbe_err("%s: mac_addr is null\n", __func__);
+		return -EINVAL;
+	}
+
+	if (hw->mbx.other_irq_enabled) {
+		struct mbx_req_cookie *cookie =
+			mbx_cookie_zalloc(sizeof(reply.mac_addr));
+		struct mac_addr *mac;
+
+		if (!cookie)
+			return -ENOMEM;
+
+		mac = (struct mac_addr *)cookie->priv;
+
+		build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, cookie);
+
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		if (err) {
+			kfree(cookie);
+			return err;
+		}
+
+		if ((1 << nr_lane) & mac->lanes)
+			memcpy(mac_addr, mac->addrs[nr_lane].mac, 6);
+
+		kfree(cookie);
+		return 0;
+
+	} else {
+		build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, &req);
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err) {
+			rnpgbe_err("%s: failed, err:%d\n", __func__, err);
+			return err;
+		}
+
+		if ((1 << nr_lane) & reply.mac_addr.lanes) {
+			memcpy(mac_addr, reply.mac_addr.addrs[nr_lane].mac, 6);
+			return 0;
+		}
+	}
+
+	return -ENODATA;
+}
+
+static int rnpgbe_mbx_sfp_read(struct rnpgbe_hw *hw, int sfp_i2c_addr, int reg,
+			       int cnt, u8 *out_buf)
+{
+	struct mbx_fw_cmd_req req;
+	int err = -EIO;
+	int nr_lane = hw->nr_lane;
+
+	if ((cnt > MBX_SFP_READ_MAX_CNT) || !out_buf) {
+		rnpgbe_err("%s: cnt:%d should <= %d out_buf:%p\n", __func__,
+			   cnt, MBX_SFP_READ_MAX_CNT, out_buf);
+		return -EINVAL;
+	}
+
+	memset(&req, 0, sizeof(req));
+
+	if (hw->mbx.other_irq_enabled) {
+		struct mbx_req_cookie *cookie = mbx_cookie_zalloc(cnt);
+
+		if (!cookie)
+			return -ENOMEM;
+
+		build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt,
+				   cookie);
+
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		if (err) {
+			kfree(cookie);
+			return err;
+		}
+		memcpy(out_buf, cookie->priv, cnt);
+		kfree(cookie);
+	} else {
+		struct mbx_fw_cmd_reply reply;
+
+		memset(&reply, 0, sizeof(reply));
+		build_mbx_sfp_read(&req, nr_lane, sfp_i2c_addr, reg, cnt,
+				   &reply);
+
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err == 0)
+			memcpy(out_buf, reply.sfp_read.value, cnt);
+	}
+
+	return err;
+}
+
+int rnpgbe_mbx_sfp_module_eeprom_info(struct rnpgbe_hw *hw, int sfp_addr,
+				      int reg, int data_len, u8 *buf)
+{
+	int left = data_len;
+	int cnt, err;
+
+	do {
+		cnt = (left > MBX_SFP_READ_MAX_CNT) ? MBX_SFP_READ_MAX_CNT :
+						      left;
+		err = rnpgbe_mbx_sfp_read(hw, sfp_addr, reg, cnt, buf);
+		if (err) {
+			rnpgbe_err("%s: error:%d\n", __func__, err);
+			return err;
+		}
+		reg += cnt;
+		buf += cnt;
+		left -= cnt;
+	} while (left > 0);
+
+	return 0;
+}
+
+int rnpgbe_mbx_sfp_write(struct rnpgbe_hw *hw, int sfp_addr, int reg, short v)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+	int nr_lane = hw->nr_lane;
+
+	memset(&req, 0, sizeof(req));
+
+	build_mbx_sfp_write(&req, nr_lane, sfp_addr, reg, v);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	return err;
+}
+
+int rnpgbe_mbx_fw_reg_read(struct rnpgbe_hw *hw, int fw_reg)
+{
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+	int err, ret = 0xffffffff;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	if (hw->fw_version < 0x00050200)
+		return -EOPNOTSUPP;
+
+	if (hw->mbx.other_irq_enabled) {
+		struct mbx_req_cookie *cookie =
+			mbx_cookie_zalloc(sizeof(reply.r_reg));
+
+		if (!cookie)
+			return -ENOMEM;
+
+		build_readreg_req(&req, fw_reg, cookie);
+
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		if (err) {
+			kfree(cookie);
+			return ret;
+		}
+		ret = ((int *)(cookie->priv))[0];
+		kfree(cookie);
+	} else {
+		build_readreg_req(&req, fw_reg, &reply);
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err) {
+			rnpgbe_err("%s: failed, err:%d\n", __func__, err);
+			return err;
+		}
+		ret = reply.r_reg.value[0];
+	}
+	return ret;
+}
+
+int rnpgbe_mbx_reg_write(struct rnpgbe_hw *hw, int fw_reg, int value)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+
+	memset(&req, 0, sizeof(req));
+	if (hw->fw_version < 0x00050200)
+		return -EOPNOTSUPP;
+
+	build_writereg_req(&req, NULL, fw_reg, 4, &value);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	return err;
+}
+
+int rnpgbe_mbx_wol_set(struct rnpgbe_hw *hw, u32 mode)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+	int nr_lane = hw->nr_lane;
+
+	memset(&req, 0, sizeof(req));
+
+	build_mbx_wol_set(&req, nr_lane, mode);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	return err;
+}
+
+int rnpgbe_mbx_gephy_test_set(struct rnpgbe_hw *hw, u32 mode)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+	int nr_lane = hw->nr_lane;
+
+	memset(&req, 0, sizeof(req));
+
+	build_mbx_gephy_test_set(&req, nr_lane, mode);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	return err;
+}
+
+int rnpgbe_mbx_lldp_set(struct rnpgbe_hw *hw, u32 enable)
+{
+	struct mbx_fw_cmd_req req;
+	int err;
+	int nr_lane = hw->nr_lane;
+
+	memset(&req, 0, sizeof(req));
+
+	build_mbx_lldp_set(&req, nr_lane, enable);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+
+	return err;
+}
+
+int rnpgbe_mbx_lldp_get(struct rnpgbe_hw *hw)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	struct get_lldp_reply *get_lldp;
+
+	cookie = mbx_cookie_zalloc(sizeof(*get_lldp));
+	if (!cookie)
+		return -ENOMEM;
+	get_lldp = (struct get_lldp_reply *)cookie->priv;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_get_lldp_req(&req, cookie, hw->nr_lane);
+
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		get_lldp = &reply.get_lldp;
+	}
+
+	if (err == 0) {
+		hw->lldp_status.enable = get_lldp->value;
+		hw->lldp_status.inteval = get_lldp->inteval;
+	}
+
+	if (cookie)
+		kfree(cookie);
+
+	return err ? -err : 0;
+}
+
+int rnpgbe_mbx_set_dump(struct rnpgbe_hw *hw, int flag)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+
+	memset(&req, 0, sizeof(req));
+	build_set_dump(&req, hw->nr_lane, flag);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+
+	return err;
+}
+
+/*
+ * @speed :
+ * 0 : disable force speed
+ * 1000 : force 1000Mbps
+ * 10000 : force 10000Mbps
+ */
+int rnpgbe_mbx_force_speed(struct rnpgbe_hw *hw, int speed)
+{
+	int cmd = 0x01150000;
+
+	if (hw->force_10g_1g_speed_ablity == 0)
+		return -EINVAL;
+
+	if (speed == RNP_LINK_SPEED_10GB_FULL) {
+		cmd = 0x01150002;
+		hw->force_speed_stat = FORCE_SPEED_STAT_10G;
+	} else if (speed == RNP_LINK_SPEED_1GB_FULL) {
+		cmd = 0x01150001;
+		hw->force_speed_stat = FORCE_SPEED_STAT_1G;
+	} else {
+		cmd = 0x01150000;
+		hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED;
+	}
+	return rnpgbe_mbx_set_dump(hw, cmd);
+}
+
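The block comment before rnpgbe_mbx_force_speed() describes its speed argument; in the code the forced speed is selected with the RNP_LINK_SPEED_* flags and cancelled with 0. As a usage illustration, a caller such as an ethtool set-link hook might drive it like this (the wrapper and its autoneg flag are hypothetical, not part of this patch):

static int example_apply_forced_speed(struct rnpgbe_hw *hw, bool autoneg,
				      u32 speed)
{
	/* re-enabling autoneg cancels any previously forced speed */
	if (autoneg)
		return rnpgbe_mbx_force_speed(hw, 0);

	/* only 1G and 10G can be forced, and only on parts that report
	 * force_10g_1g_speed_ablity */
	if (speed == RNP_LINK_SPEED_1GB_FULL ||
	    speed == RNP_LINK_SPEED_10GB_FULL)
		return rnpgbe_mbx_force_speed(hw, speed);

	return -EINVAL;
}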
+int rnpgbe_mbx_get_dump_flags(struct rnpgbe_hw *hw)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	struct get_dump_reply *get_dump;
+
+	cookie = mbx_cookie_zalloc(sizeof(*get_dump));
+	if (!cookie)
+		return -ENOMEM;
+	get_dump = (struct get_dump_reply *)cookie->priv;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_get_dump_req(&req, cookie, hw->nr_lane, 0, 0, 0);
+
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		get_dump = &reply.get_dump;
+	}
+
+	if (err == 0) {
+		hw->dump.version = get_dump->version;
+		hw->dump.flag = get_dump->flags;
+		hw->dump.len = get_dump->bytes;
+	}
+	if (cookie)
+		kfree(cookie);
+
+	return err ? -err : 0;
+}
+
+int rnpgbe_mbx_get_dump(struct rnpgbe_hw *hw, int flags, u32 *data_out,
+			int bytes)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	struct get_dump_reply *get_dump;
+	int ram_size = mbx->share_size;
+	int i, offset = 0;
+
+	cookie = mbx_cookie_zalloc(sizeof(*get_dump));
+	if (!cookie)
+		return -ENOMEM;
+
+	get_dump = (struct get_dump_reply *)cookie->priv;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	do {
+		build_get_dump_req(&req, cookie, hw->nr_lane, offset, 0,
+				   ram_size);
+
+		if (hw->mbx.other_irq_enabled) {
+			err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		} else {
+			err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+			get_dump = &reply.get_dump;
+		}
+
+		if (err == 0 && data_out) {
+			int len = ram_size;
+
+			if ((bytes - offset) < ram_size)
+				len = bytes - offset;
+
+			for (i = 0; i < len; i = i + 4)
+				*(data_out + offset / 4 + i / 4) =
+					rnpgbe_rd_reg(hw->hw_addr +
+						      mbx->cpu_vf_share_ram +
+						      i);
+		}
+
+		offset += ram_size;
+
+	} while (offset < bytes);
+
+	if (cookie)
+		kfree(cookie);
+
+	return err ? -err : 0;
+}
+
+int rnp500_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin,
+		     int bytes)
+{
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int err = 0;
+	int offset = 0, ram_size = mbx->share_size;
+	struct mbx_req_cookie *cookie = NULL;
+
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	int i;
+	u32 *msg = (u32 *)fw_bin;
+
+	cookie = mbx_cookie_zalloc(0);
+	if (!cookie) {
+		dev_err(&hw->pdev->dev, "%s: no memory!\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* if bytes is larger than ram_size, write the header block last */
+	if (bytes > ram_size) {
+		offset += ram_size;
+
+		/* TODO: n210 should clean the header region first */
+		if (hw->hw_type == rnpgbe_hw_n210) {
+			memset(&req, 0, sizeof(req));
+			memset(&reply, 0, sizeof(reply));
+
+			for (i = 0; i < ram_size; i = i + 4)
+				rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+						0xffffffff);
+
+			build_fw_update_n500_req(&req, cookie, partition, 0);
+			if (hw->mbx.other_irq_enabled) {
+				err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+			} else {
+				int old_mbx_timeout = hw->mbx.timeout;
+
+				hw->mbx.timeout = (20 * 1000 * 1000) /
+					hw->mbx.usec_delay;
+				err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+				hw->mbx.timeout = old_mbx_timeout;
+			}
+		}
+	}
+
+	while (offset < bytes) {
+		memset(&req, 0, sizeof(req));
+		memset(&reply, 0, sizeof(reply));
+
+		for (i = 0; i < ram_size; i = i + 4)
+			rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+				      *(msg + offset / 4 + i / 4));
+
+		build_fw_update_n500_req(&req, cookie, partition, offset);
+		if (hw->mbx.other_irq_enabled) {
+			err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		} else {
+			int old_mbx_timeout = hw->mbx.timeout;
+
+			hw->mbx.timeout = (20 * 1000 * 1000) /
+					  hw->mbx.usec_delay;
+			err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+			hw->mbx.timeout = old_mbx_timeout;
+		}
+
+		if (err)
+			goto out;
+		offset += ram_size;
+	}
+	/* write the header last */
+	if (bytes > ram_size) {
+		offset = 0;
+		memset(&req, 0, sizeof(req));
+		memset(&reply, 0, sizeof(reply));
+
+		for (i = 0; i < ram_size; i = i + 4)
+			rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+				      *(msg + offset / 4 + i / 4));
+
+		build_fw_update_n500_req(&req, cookie, partition, offset);
+		if (hw->mbx.other_irq_enabled) {
+			err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		} else {
+			int old_mbx_timeout = hw->mbx.timeout;
+
+			hw->mbx.timeout = (20 * 1000 * 1000) /
+					  hw->mbx.usec_delay;
+			err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+			hw->mbx.timeout = old_mbx_timeout;
+		}
+	}
+out:
+	return err ? -err : 0;
+}
+
+int rnpgbe_fw_update(struct rnpgbe_hw *hw, int partition, const u8 *fw_bin,
+		     int bytes)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	void *dma_buf = NULL;
+	dma_addr_t dma_phy;
+
+	cookie = mbx_cookie_zalloc(0);
+	if (!cookie) {
+		dev_err(&hw->pdev->dev, "%s: no memory!\n", __func__);
+		return -ENOMEM;
+	}
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	dma_buf =
+		dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy, GFP_ATOMIC);
+	if (!dma_buf) {
+		err = -ENOMEM;
+		goto quit;
+	}
+
+	memcpy(dma_buf, fw_bin, bytes);
+
+	build_fw_update_req(&req, cookie, partition, dma_phy & 0xffffffff,
+			    (dma_phy >> 32) & 0xffffffff, bytes);
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		int old_mbx_timeout = hw->mbx.timeout;
+
+		hw->mbx.timeout = (20 * 1000 * 1000) / hw->mbx.usec_delay;
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		hw->mbx.timeout = old_mbx_timeout;
+	}
+
+quit:
+	if (dma_buf)
+		dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy);
+
+	if (cookie)
+		kfree(cookie);
+
+	printk(KERN_DEBUG "%s: %s (errcode:%d)\n", __func__,
+	       err ? "failed" : "success", err);
+	return (err) ? -EIO : 0;
+}
+
+int rnpgbe_mbx_link_event_enable(struct rnpgbe_hw *hw, int enable)
+{
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	int err;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	if (enable)
+		wr32(hw, RNP_DMA_DUMY, 0xa0000000);
+
+	build_link_set_event_mask(&req, BIT(EVT_LINK_UP),
+				  (enable & 1) << EVT_LINK_UP, &req);
+
+	err = rnpgbe_mbx_write_posted_locked(hw, &req);
+	if (!enable)
+		wr32(hw, RNP_DMA_DUMY, 0);
+
+	return err;
+}
+
+int rnpgbe_fw_get_capablity(struct rnpgbe_hw *hw, struct phy_abilities *abil)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+	build_phy_abalities_req(&req, &req);
+	err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+
+	if (err == 0)
+		memcpy(abil, &reply.phy_abilities, sizeof(*abil));
+
+	return err;
+}
+
+static int to_mac_type(struct phy_abilities *ability)
+{
+	int lanes = hweight_long(ability->lane_mask);
+
+	if ((ability->phy_type == PHY_TYPE_40G_BASE_KR4) ||
+	    (ability->phy_type == PHY_TYPE_40G_BASE_LR4) ||
+	    (ability->phy_type == PHY_TYPE_40G_BASE_CR4) ||
+	    (ability->phy_type == PHY_TYPE_40G_BASE_SR4)) {
+		if (lanes == 1)
+			return rnpgbe_mac_n10g_x8_40G;
+		else
+			return rnpgbe_mac_n10g_x8_10G;
+
+	} else if ((ability->phy_type == PHY_TYPE_10G_BASE_KR) ||
+		   (ability->phy_type == PHY_TYPE_10G_BASE_LR) ||
+		   (ability->phy_type == PHY_TYPE_10G_BASE_ER) ||
+		   (ability->phy_type == PHY_TYPE_10G_BASE_SR)) {
+		if (lanes == 1)
+			return rnpgbe_mac_n10g_x2_10G;
+		else if (lanes == 2)
+			return rnpgbe_mac_n10g_x4_10G;
+		else
+			return rnpgbe_mac_n10g_x8_10G;
+
+	} else if (ability->phy_type == PHY_TYPE_1G_BASE_KX) {
+		return rnpgbe_mac_n10l_x8_1G;
+
+	} else if (ability->phy_type == PHY_TYPE_SGMII) {
+		return rnpgbe_mac_n10l_x8_1G;
+	}
+	return rnpgbe_mac_unknown;
+}
+
+int rnpgbe_set_lane_fun(struct rnpgbe_hw *hw, int fun, int value0, int value1,
+			int value2, int value3)
+{
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+	build_set_lane_fun(&req, hw->nr_lane, fun, value0, value1, value2,
+			   value3);
+
+	return rnpgbe_mbx_write_posted_locked(hw, &req);
+}
+
+int rnpgbe_mbx_ifinsmod(struct rnpgbe_hw *hw, int status)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_ifinsmod(&req, hw->driver_version, status);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	if (status) {
+		err = hw->mbx.ops.write_posted(
+			hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4,
+			MBX_FW);
+	} else {
+		err = hw->mbx.ops.write(hw, (u32 *)&req,
+					(req.datalen + MBX_REQ_HDR_LEN) / 4,
+					MBX_FW);
+	}
+
+	mutex_unlock(&hw->mbx.lock);
+
+	rnpgbe_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__,
+		    hw->nr_lane, status);
+
+	return err;
+}
+
+int rnpgbe_mbx_ifsuspuse(struct rnpgbe_hw *hw, int status)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_ifsuspuse(&req, hw->nr_lane, status);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	rnpgbe_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__,
+		    hw->nr_lane, status);
+
+	return err;
+}
+
+int rnpgbe_mbx_ifforce_control_mac(struct rnpgbe_hw *hw, int status)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_ifforce(&req, hw->nr_lane, status);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	rnpgbe_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__,
+		    hw->nr_lane, status);
+
+	return err;
+}
+
+int rnpgbe_mbx_tstamps_show(struct rnpgbe_hw *hw, u32 sec, u32 nanosec)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_tstamp_show(&req, sec, nanosec);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+int rnpgbe_mbx_ifup_down(struct rnpgbe_hw *hw, int up)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_ifup_down(&req, hw->nr_lane, up);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	rnpgbe_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d up:%d\n", __func__,
+		    hw->nr_lane, up);
+
+	if (up)
+		rnpgbe_link_stat_mark_reset(hw);
+
+	return err;
+}
+
+int rnpgbe_mbx_led_set(struct rnpgbe_hw *hw, int value)
+{
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_led_set(&req, hw->nr_lane, value, &reply);
+
+	return rnpgbe_mbx_write_posted_locked(hw, &req);
+}
+
+int rnpgbe_mbx_get_eee_capability(struct rnpgbe_hw *hw,
+				  struct rnpgbe_eee_cap *eee_cap)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_phy_eee_abalities_req(&req, &req);
+
+	err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+
+	if (err == 0) {
+		memcpy(eee_cap, &reply.phy_eee_abilities, sizeof(*eee_cap));
+		return 0;
+	}
+
+	return -EIO;
+}
+
+int rnpgbe_mbx_phy_eee_set(struct rnpgbe_hw *hw, u32 tx_lpi_timer,
+			   u32 local_eee)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+
+	memset(&req, 0, sizeof(req));
+
+	build_phy_eee_set(&req, local_eee, tx_lpi_timer, hw->nr_lane);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+int rnpgbe_mbx_get_capability(struct rnpgbe_hw *hw, struct rnpgbe_info *info)
+{
+	int err;
+	struct phy_abilities ablity;
+	int try_cnt = 3;
+
+	memset(&ablity, 0, sizeof(ablity));
+	rnpgbe_link_stat_mark_disable(hw);
+
+	while (try_cnt--) {
+		err = rnpgbe_fw_get_capablity(hw, &ablity);
+		if (err == 0 && info) {
+			hw->lane_mask = ablity.lane_mask & 0xf;
+			info->mac = to_mac_type(&ablity);
+			info->adapter_cnt = hweight_long(hw->lane_mask);
+			hw->sfc_boot = (ablity.nic_mode & 0x1) ? 1 : 0;
+			hw->pxe_en = (ablity.nic_mode & 0x2) ? 1 : 0;
+			hw->ncsi_en = (ablity.nic_mode & 0x4) ? 1 : 0;
+			hw->pfvfnum = ablity.pfnum;
+			hw->speed = ablity.speed;
+			hw->nr_lane = 0;
+			hw->fw_version = ablity.fw_version;
+			hw->mac_type = info->mac;
+			hw->phy_type = ablity.phy_type;
+			hw->axi_mhz = ablity.axi_mhz;
+			hw->port_ids = ablity.port_ids;
+			hw->bd_uid = ablity.bd_uid;
+			hw->phy_id = ablity.phy_id;
+
+			if ((hw->fw_version >= 0x00050201) &&
+			    (ablity.speed == SPEED_10000)) {
+				hw->force_speed_stat =
+					FORCE_SPEED_STAT_DISABLED;
+				hw->force_10g_1g_speed_ablity = 1;
+			}
+			if (hw->fw_version >= 0x0001012C) {
+				/* this version can get wol_en from hw */
+				hw->wol = ablity.wol_status & 0xff;
+				hw->wol_en = ablity.wol_status & 0x100;
+			} else {
+				/* older versions: only pf0 or ncsi-enabled ports can use wol */
+				hw->wol = ablity.wol_status & 0xff;
+				if ((hw->ncsi_en) || (!ablity.pfnum))
+					hw->wol_en = 1;
+			}
+			/* 0.1.5.0 can get force status from fw */
+			if (hw->fw_version >= 0x00010500) {
+				hw->force_en = ablity.e.force_down_en;
+				hw->force_cap = 1;
+			}
+
+			/* 0.1.6.0 can get trim valid from hw */
+			if (hw->fw_version >= 0x00010600)
+				hw->trim_valid = (ablity.nic_mode & 0x8) ? 1 : 0;
+
+			pr_info("%s: nic-mode:%d mac:%d adpt_cnt:%d lane_mask:0x%x, "
+				"phy_type:0x%x, pfvfnum:0x%x, fw-version:0x%08x, "
+				"axi:%d Mhz, port_id:%d bd_uid:0x%08x 0x%x "
+				"ex-ablity:0x%x fs:%d speed:%d\n",
+				__func__, hw->mode, info->mac,
+				info->adapter_cnt, hw->lane_mask, hw->phy_type,
+				hw->pfvfnum, ablity.fw_version, ablity.axi_mhz,
+				ablity.port_id[0], hw->bd_uid, ablity.phy_id,
+				ablity.ext_ablity,
+				hw->force_10g_1g_speed_ablity, ablity.speed);
+			if (info->adapter_cnt != 0)
+				return 0;
+		}
+	}
+
+	dev_err(&hw->pdev->dev, "%s: error!\n", __func__);
+	return -EIO;
+}
+
+int rnpgbe_mbx_get_temp(struct rnpgbe_hw *hw, int *voltage)
+{
+	int err;
+	struct mbx_req_cookie *cookie = NULL;
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	struct get_temp *temp;
+	int temp_v = 0;
+
+	cookie = mbx_cookie_zalloc(sizeof(*temp));
+	if (!cookie)
+		return -ENOMEM;
+	temp = (struct get_temp *)cookie->priv;
+
+	memset(&req, 0, sizeof(req));
+
+	build_get_temp(&req, cookie);
+
+	if (hw->mbx.other_irq_enabled) {
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		memset(&reply, 0, sizeof(reply));
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		temp = &reply.get_temp;
+	}
+
+	if (voltage)
+		*voltage = temp->volatage;
+	temp_v = temp->temp;
+
+	if (cookie)
+		kfree(cookie);
+	return temp_v;
+}
+
+enum speed_enum {
+	speed_10,
+	speed_100,
+	speed_1000,
+	speed_10000,
+	speed_25000,
+	speed_40000,
+
+};
+
+static void rnpgbe_link_stat_mark(struct rnpgbe_hw *hw, int up)
+{
+	u32 v;
+	struct rnpgbe_adapter *adapter = (struct rnpgbe_adapter *)hw->back;
+
+	v = rd32(hw, RNP_DMA_DUMY);
+	if ((hw->hw_type == rnpgbe_hw_n10) || (hw->hw_type == rnpgbe_hw_n400)) {
+		v &= ~(0xffff0000);
+		v |= 0xa5a40000;
+		if (up)
+			v |= BIT(0);
+		else
+			v &= ~BIT(0);
+
+	} else if ((hw->hw_type == rnpgbe_hw_n500) ||
+		   (hw->hw_type == rnpgbe_hw_n210)) {
+		v &= ~(0x0f000f11);
+		v |= 0xa0000000;
+		if (up) {
+			v |= BIT(0);
+			switch (hw->speed) {
+			case 10:
+				v |= (speed_10 << 8);
+				break;
+			case 100:
+				v |= (speed_100 << 8);
+				break;
+			case 1000:
+				v |= (speed_1000 << 8);
+				break;
+			case 10000:
+				v |= (speed_10000 << 8);
+				break;
+			case 25000:
+				v |= (speed_25000 << 8);
+				break;
+			case 40000:
+				v |= (speed_40000 << 8);
+				break;
+			}
+			v |= (hw->duplex << 4);
+			v |= (hw->fc.current_mode << 24);
+		} else {
+			v &= ~BIT(0);
+		}
+		/* we should update lldp_status */
+		if (hw->fw_version >= 0x00010500) {
+			if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP)
+				v |= BIT(6);
+			else
+				v &= (~BIT(6));
+		}
+	}
+	wr32(hw, RNP_DMA_DUMY, v);
+}
+
+static inline int rnpgbe_mbx_fw_req_handler(struct rnpgbe_adapter *adapter,
+					    struct mbx_fw_cmd_req *req)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	switch (req->opcode) {
+	case LINK_STATUS_EVENT:
+		rnpgbe_logd(
+			LOG_LINK_EVENT,
+			"[LINK_STATUS_EVENT:0x%x] %s:link changed: changed_lane:0x%x, "
+			"status:0x%x, speed:%d, duplex:%d\n",
+			req->opcode, adapter->name,
+			req->link_stat.changed_lanes,
+			req->link_stat.lane_status, req->link_stat.st[0].speed,
+			req->link_stat.st[0].duplex);
+
+		if (req->link_stat.lane_status)
+			adapter->hw.link = 1;
+		else
+			adapter->hw.link = 0;
+
+		if ((hw->hw_type == rnpgbe_hw_n500) ||
+		    (hw->hw_type == rnpgbe_hw_n210)) {
+			adapter->local_eee = req->link_stat.st[0].local_eee;
+			adapter->partner_eee = req->link_stat.st[0].partner_eee;
+			/* fw 0.1.5.0 and later report lldp_status */
+			if (hw->fw_version >= 0x00010500) {
+				if (req->link_stat.st[0].lldp_status)
+					adapter->priv_flags |= RNP_PRIV_FLAG_LLDP;
+				else
+					adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP);
+			}
+		}
+
+		if (req->link_stat.port_st_magic == SPEED_VALID_MAGIC) {
+			hw->speed = req->link_stat.st[0].speed;
+			hw->duplex = req->link_stat.st[0].duplex;
+			/* n500 can update pause and tp */
+			if ((hw->hw_type == rnpgbe_hw_n500) ||
+			    (hw->hw_type == rnpgbe_hw_n210)) {
+				hw->fc.current_mode =
+					req->link_stat.st[0].pause;
+				hw->tp_mdx = req->link_stat.st[0].tp_mdx;
+			}
+
+			switch (hw->speed) {
+			case 10:
+				adapter->speed = RNP_LINK_SPEED_10_FULL;
+				break;
+			case 100:
+				adapter->speed = RNP_LINK_SPEED_100_FULL;
+				break;
+			case 1000:
+				adapter->speed = RNP_LINK_SPEED_1GB_FULL;
+				break;
+			case 10000:
+				adapter->speed = RNP_LINK_SPEED_10GB_FULL;
+				break;
+			case 25000:
+				adapter->speed = RNP_LINK_SPEED_25GB_FULL;
+				break;
+			case 40000:
+				adapter->speed = RNP_LINK_SPEED_40GB_FULL;
+				break;
+			}
+		}
+		if (req->link_stat.lane_status)
+			rnpgbe_link_stat_mark(hw, 1);
+		else
+			rnpgbe_link_stat_mark(hw, 0);
+
+		adapter->flags |= RNP_FLAG_NEED_LINK_UPDATE;
+		break;
+	}
+	rnpgbe_service_event_schedule(adapter);
+
+	return 0;
+}
+
+static inline int rnpgbe_mbx_fw_reply_handler(struct rnpgbe_adapter *adapter,
+					      struct mbx_fw_cmd_reply *reply)
+{
+	struct mbx_req_cookie *cookie;
+
+	cookie = reply->cookie;
+	if (!cookie || cookie->magic != COOKIE_MAGIC)
+		return -EIO;
+
+	if (cookie->priv_len > 0)
+		memcpy(cookie->priv, reply->data, cookie->priv_len);
+
+	cookie->done = 1;
+
+	if (reply->flags & FLAGS_ERR)
+		cookie->errcode = reply->error_code;
+	else
+		cookie->errcode = 0;
+
+	wake_up_interruptible(&cookie->wait);
+
+	return 0;
+}
+
+static inline int rnpgbe_rcv_msg_from_fw(struct rnpgbe_adapter *adapter)
+{
+	u32 msgbuf[RNP_FW_MAILBOX_SIZE];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	s32 retval;
+
+	retval = rnpgbe_read_mbx(hw, msgbuf, RNP_FW_MAILBOX_SIZE, MBX_FW);
+	if (retval) {
+		printk(KERN_DEBUG "Error receiving message from FW:%d\n",
+		       retval);
+		return retval;
+	}
+
+	rnpgbe_logd(LOG_MBX_MSG_IN,
+		    "msg from fw: msg[0]=0x%08x_0x%08x_0x%08x_0x%08x\n",
+		    msgbuf[0], msgbuf[1], msgbuf[2], msgbuf[3]);
+
+	/* DD set means this is a reply to a request we posted */
+	if (((unsigned short *)msgbuf)[0] & FLAGS_DD) {
+		return rnpgbe_mbx_fw_reply_handler(
+			adapter, (struct mbx_fw_cmd_reply *)msgbuf);
+	} else {
+		return rnpgbe_mbx_fw_req_handler(
+			adapter, (struct mbx_fw_cmd_req *)msgbuf);
+	}
+}
+
+static void rnpgbe_rcv_ack_from_fw(struct rnpgbe_adapter *adapter)
+{
+	/* do-nothing */
+}
+
+int rnpgbe_fw_msg_handler(struct rnpgbe_adapter *adapter)
+{
+	/* == check fw-req */
+	if (!rnpgbe_check_for_msg(&adapter->hw, MBX_FW))
+		rnpgbe_rcv_msg_from_fw(adapter);
+
+	/* process any acks */
+	if (!rnpgbe_check_for_ack(&adapter->hw, MBX_FW))
+		rnpgbe_rcv_ack_from_fw(adapter);
+
+	return 0;
+}
+
+int rnpgbe_mbx_phy_link_set(struct rnpgbe_hw *hw, int adv, int autoneg,
+			    int speed, int duplex, int mdix_ctrl)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+
+	memset(&req, 0, sizeof(req));
+
+	build_phy_link_set(&req, adv, hw->nr_lane, autoneg, speed, duplex,
+			   mdix_ctrl);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+int rnpgbe_mbx_phy_pause_set(struct rnpgbe_hw *hw, u32 pause_mode)
+{
+	int err;
+	struct mbx_fw_cmd_req req;
+
+	memset(&req, 0, sizeof(req));
+
+	build_phy_pause_set(&req, pause_mode, hw->nr_lane);
+
+	if (mutex_lock_interruptible(&hw->mbx.lock))
+		return -EAGAIN;
+	err = hw->mbx.ops.write_posted(
+		hw, (u32 *)&req, (req.datalen + MBX_REQ_HDR_LEN) / 4, MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
+int rnpgbe_mbx_phy_pause_get(struct rnpgbe_hw *hw, u32 *pause_mode)
+{
+	struct mbx_fw_cmd_req req;
+	int err = -EIO;
+	struct mbx_req_cookie *cookie = NULL;
+	struct phy_pause_data *st;
+	struct mbx_fw_cmd_reply reply;
+
+	memset(&req, 0, sizeof(req));
+
+	if (hw->mbx.other_irq_enabled) {
+		cookie = mbx_cookie_zalloc(sizeof(struct phy_pause_data));
+
+		if (!cookie) {
+			rnpgbe_err("%s: no memory\n", __func__);
+			return -ENOMEM;
+		}
+
+		st = (struct phy_pause_data *)cookie->priv;
+		build_get_phy_pause_req(&req, hw->nr_lane, cookie);
+		err = rnpgbe_mbx_fw_post_req(hw, &req, cookie);
+		if (err) {
+			rnpgbe_err("%s: error:%d\n", __func__, err);
+			goto quit;
+		}
+	} else {
+		memset(&reply, 0, sizeof(reply));
+
+		build_get_phy_pause_req(&req, hw->nr_lane, &req);
+		err = rnpgbe_fw_send_cmd_wait(hw, &req, &reply);
+		if (err) {
+			rnpgbe_err("%s: 1 error:%d\n", __func__, err);
+			goto quit;
+		}
+		st = (struct phy_pause_data *)&(reply.data);
+	}
+
+	*pause_mode = st->pause_mode;
+quit:
+	if (cookie)
+		kfree(cookie);
+	return err;
+}
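The cookie-based path above only completes if something drains the firmware mailbox and calls rnpgbe_fw_msg_handler(). As an orientation sketch (the interrupt routine below is hypothetical; the actual wiring lives elsewhere in the driver), the glue looks roughly like this:

static irqreturn_t example_fw_mbx_irq(int irq, void *data)
{
	struct rnpgbe_adapter *adapter = data;

	/* Replies (FLAGS_DD set) complete the waiting cookie through
	 * rnpgbe_mbx_fw_reply_handler(); firmware requests such as
	 * LINK_STATUS_EVENT update link state and schedule the
	 * service task via rnpgbe_mbx_fw_req_handler(). */
	rnpgbe_fw_msg_handler(adapter);

	return IRQ_HANDLED;
}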
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
new file mode 100644
index 0000000000000..1059d0c11954e
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
@@ -0,0 +1,1239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_MBX_FW_H
+#define _RNPGBE_MBX_FW_H
+
+#include 
+#include 
+#include 
+
+#ifndef _PACKED_ALIGN4
+#define _PACKED_ALIGN4 __attribute__((packed, aligned(4)))
+#endif
+
+struct mbx_fw_cmd_reply;
+typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv);
+
+struct mbx_req_cookie {
+	int magic;
+#define COOKIE_MAGIC 0xCE
+	cookie_cb cb;
+	int timeout_jiffes;
+	int errcode;
+
+	wait_queue_head_t wait;
+
+	int done;
+	int priv_len;
+#define MAX_PRIV_LEN 64
+	char priv[MAX_PRIV_LEN];
+};
+
+enum GENERIC_CMD {
+	/* general */
+	GET_VERSION = 0x0001,
+	READ_REG = 0xFF03,
+	WRITE_REG = 0xFF04,
+	MODIFY_REG = 0xFF07,
+
+	/* virtualization */
+	IFUP_DOWN = 0x0800,
+	SEND_TO_PF = 0x0801,
+	SEND_TO_VF = 0x0802,
+	DRIVER_INSMOD = 0x0803,
+	SYSTEM_SUSPUSE = 0x0804,
+	SYSTEM_FORCE = 0x0805,
+
+	/* link configuration admin commands */
+	GET_PHY_ABALITY = 0x0601,
+	GET_MAC_ADDRES = 0x0602,
+	RESET_PHY = 0x0603,
+	LED_SET = 0x0604,
+	GET_LINK_STATUS = 0x0607,
+	LINK_STATUS_EVENT = 0x0608,
+	SET_LANE_FUN = 0x0609,
+	GET_LANE_STATUS = 0x0610,
+	SFP_SPEED_CHANGED_EVENT = 0x0611,
+	SET_EVENT_MASK = 0x0613,
+	SET_LOOPBACK_MODE = 0x0618,
+	SET_PHY_REG = 0x0628,
+	GET_PHY_REG = 0x0629,
+	PHY_LINK_SET = 0x0630,
+	GET_PHY_STATISTICS = 0x0631,
+	PHY_PAUSE_SET = 0x0632,
+	PHY_PAUSE_GET = 0x0633,
+	PHY_EEE_SET = 0x0636,
+	PHY_EEE_GET = 0x0637,
+
+	/* sfp module */
+	SFP_MODULE_READ = 0x0900,
+	SFP_MODULE_WRITE = 0x0901,
+
+	/* fw update */
+	FW_UPDATE = 0x0700,
+	FW_MAINTAIN = 0x0701,
+	FW_UPDATE_N500 = 0x0702,
+	WOL_EN = 0x0910,
+	GET_DUMP = 0x0a00,
+	SET_DUMP = 0x0a10,
+	GET_TEMP = 0x0a11,
+	SET_WOL = 0x0a12,
+	SET_TEST_MODE = 0x0a13,
+	SHOW_TX_STAMP = 0x0a14,
+	LLDP_TX_CTRL = 0x0a15,
+};
+
+enum link_event_mask {
+	EVT_LINK_UP = 1,
+	EVT_NO_MEDIA = 2,
+	EVT_LINK_FAULT = 3,
+	EVT_PHY_TEMP_ALARM = 4,
+	EVT_EXCESSIVE_ERRORS = 5,
+	EVT_SIGNAL_DETECT = 6,
+	EVT_AUTO_NEGOTIATION_DONE = 7,
+	EVT_MODULE_QUALIFICATION_FAILD = 8,
+	EVT_PORT_TX_SUSPEND = 9,
+};
+
+enum pma_type {
+	PHY_TYPE_NONE = 0,
+	PHY_TYPE_1G_BASE_KX,
+	PHY_TYPE_SGMII,
+	PHY_TYPE_10G_BASE_KR,
+	PHY_TYPE_25G_BASE_KR,
+	PHY_TYPE_40G_BASE_KR4,
+	PHY_TYPE_10G_BASE_SR,
+	PHY_TYPE_40G_BASE_SR4,
+	PHY_TYPE_40G_BASE_CR4,
+	PHY_TYPE_40G_BASE_LR4,
+	PHY_TYPE_10G_BASE_LR,
+	PHY_TYPE_10G_BASE_ER,
+};
+
+struct phy_abilities {
+	unsigned char link_stat;
+	unsigned char lane_mask;
+
+	int speed;
+	short phy_type;
+	short nic_mode;
+	short pfnum;
+	unsigned int fw_version;
+	unsigned int axi_mhz;
+	union {
+		unsigned char port_id[4];
+		unsigned int port_ids;
+	};
+	unsigned int bd_uid;
+	int phy_id;
+	int wol_status;
+
+	union {
+		int ext_ablity;
+		struct {
+			unsigned int valid : 1; /* 0 */
+			unsigned int wol_en : 1; /* 1 */
+			unsigned int pci_preset_runtime_en : 1; /* 2 */
+			unsigned int smbus_en : 1; /* 3 */
+			unsigned int ncsi_en : 1; /* 4 */
+			unsigned int rpu_en : 1; /* 5 */
+			unsigned int v2 : 1; /* 6 */
+			unsigned int pxe_en : 1; /* 7 */
+			unsigned int mctp_en : 1; /* 8 */
+			unsigned int yt8614 : 1; /* 9 */
+			unsigned int pci_ext_reset : 1; /* 10 */
+			unsigned int rpu_availble : 1; /* 11 */
+			unsigned int fw_lldp_ablity : 1; /* 12 */
+			unsigned int lldp_enabled : 1; /* 13 */
+			unsigned int only_1g : 1; /* 14 */
+			unsigned int force_down_en: 1;
+		} e;
+	};
+
+} _PACKED_ALIGN4;
+
+enum LOOPBACK_LEVEL {
+	LOOPBACK_DISABLE = 0,
+	LOOPBACK_MAC = 1,
+	LOOPBACK_PCS = 5,
+	LOOPBACK_EXTERNAL = 6,
+};
+enum LOOPBACK_TYPE {
+	/* Tx->Rx */
+	LOOPBACK_TYPE_LOCAL = 0x0,
+};
+
+enum LOOPBACK_FORCE_SPEED {
+	LOOPBACK_FORCE_SPEED_NONE = 0x0,
+	LOOPBACK_FORCE_SPEED_1GBS = 0x1,
+	LOOPBACK_FORCE_SPEED_10GBS = 0x2,
+	LOOPBACK_FORCE_SPEED_40_25GBS = 0x3,
+};
+
+enum PHY_INTERFACE {
+	PHY_INTERNAL_PHY = 0,
+	PHY_EXTERNAL_PHY_MDIO = 1,
+};
+
+/* Table 3-54.  Get link status response (opcode: 0x0607) */
+struct link_stat_data {
+	char phy_type;
+	unsigned char speed;
+#define LNK_STAT_SPEED_UNKOWN 0
+#define LNK_STAT_SPEED_10 1
+#define LNK_STAT_SPEED_100 2
+#define LNK_STAT_SPEED_1000 3
+#define LNK_STAT_SPEED_10000 4
+#define LNK_STAT_SPEED_25000 5
+#define LNK_STAT_SPEED_40000 6
+
+	/* 2 */
+	char link_stat : 1;
+#define LINK_UP 1
+#define LINK_DOWN 0
+
+	char link_fault : 4;
+#define LINK_LINK_FAULT BIT(0)
+#define LINK_TX_FAULT BIT(1)
+#define LINK_RX_FAULT BIT(2)
+#define LINK_REMOTE_FAULT BIT(3)
+
+	char extern_link_stat : 1;
+	char media_availble : 1;
+
+	char rev1 : 1;
+
+	/* 3:ignore */
+	char an_completed : 1;
+	char lp_an_ablity : 1;
+	char parallel_detection_fault : 1;
+	char fec_enabled : 1;
+	char low_power_state : 1;
+	char link_pause_status : 2;
+	char qualified_odule : 1;
+
+	/* 4 */
+	char phy_temp_alarm : 1;
+	char excessive_link_errors : 1;
+	char port_tx_suspended : 2;
+	char force_40G_enabled : 1;
+	char external_25G_phy_err_code : 3;
+#define EXTERNAL_25G_PHY_NOT_PRESENT 1
+#define EXTERNAL_25G_PHY_NVM_CRC_ERR 2
+#define EXTERNAL_25G_PHY_MDIO_ACCESS_FAILD 6
+#define EXTERNAL_25G_PHY_INIT_SUCCED 7
+
+	/* 5 */
+	char loopback_enabled_status : 4;
+#define LOOPBACK_DISABLE 0x0
+#define LOOPBACK_MAC 0x1
+#define LOOPBACK_SERDES 0x2
+#define LOOPBACK_PHY_INTERNAL 0x3
+#define LOOPBACK_PHY_EXTERNAL 0x4
+	char loopback_type_status : 1;
+#define LOCAL_LOOPBACK 0 /* tx->rx */
+#define FAR_END_LOOPBACK 0 /* rx->Tx */
+	char rev3 : 1;
+	char external_dev_power_ability : 2;
+	/* 6-7 */
+	short max_frame_sz;
+	/* 8 */
+	char _25gb_kr_fec_enabled : 1;
+	char _25gb_rs_fec_enabled : 1;
+	char crc_enabled : 1;
+	char rev4 : 5;
+	/* 9 */
+	int link_type; /* same as Phy type */
+	char link_type_ext;
+} _PACKED_ALIGN4;
+
+struct port_stat {
+	u8 phyid;
+
+	u8 duplex : 1;
+	u8 autoneg : 1;
+	u8 fec : 1;
+	u16 speed;
+	u16 pause : 4;
+	u16 local_eee : 3;
+	u16 partner_eee : 3;
+	u16 tp_mdx : 2;
+	u16 lldp_status : 1;
+	u16 revs : 3;
+} __attribute__((packed));
+
+struct phy_pause_data {
+	u32 pause_mode;
+} __attribute__((packed));
+
+struct lane_stat_data {
+	u8 nr_lane;
+	u8 pci_gen : 4;
+	u8 pci_lanes : 4;
+	u8 pma_type;
+	u8 phy_type;
+
+	u16 linkup : 1;
+	u16 duplex : 1;
+	u16 autoneg : 1;
+	u16 fec : 1;
+	u16 an : 1;
+	u16 link_traing : 1;
+	u16 media_availble : 1;
+	u16 is_sgmii : 1;
+	u16 link_fault : 4;
+#define LINK_LINK_FAULT BIT(0)
+#define LINK_TX_FAULT BIT(1)
+#define LINK_RX_FAULT BIT(2)
+#define LINK_REMOTE_FAULT BIT(3)
+	u16 is_backplane : 1;
+	u16 tp_mdx : 2;
+
+	union {
+		u8 phy_addr;
+		struct {
+			u8 mod_abs : 1;
+			u8 fault : 1;
+			u8 tx_dis : 1;
+			u8 los : 1;
+		} sfp;
+	};
+	u8 sfp_connector;
+	u32 speed;
+
+	u32 si_main;
+	u32 si_pre;
+	u32 si_post;
+	u32 si_tx_boost;
+	u32 supported_link;
+	u32 phy_id;
+	u32 advertised_link;
+} __attribute__((packed));
+
+struct yt_phy_statistics {
+	u32 pkg_ib_valid; /* rx crc good and length 64-1518 */
+	u32 pkg_ib_os_good; /* rx crc good and length >1518 */
+	u32 pkg_ib_us_good; /* rx crc good and length <64 */
+	u16 pkg_ib_err; /* rx crc wrong and length 64-1518 */
+	u16 pkg_ib_os_bad; /* rx crc wrong and length >1518 */
+	u16 pkg_ib_frag; /* rx crc wrong and length <64 */
+	u16 pkg_ib_nosfd; /* rx sfd missed */
+	u32 pkg_ob_valid; /* tx crc good and length 64-1518 */
+	u32 pkg_ob_os_good; /* tx crc good and length >1518 */
+	u32 pkg_ob_us_good; /* tx crc good and length <64 */
+	u16 pkg_ob_err; /* tx crc wrong and length 64-1518 */
+	u16 pkg_ob_os_bad; /* tx crc wrong and length >1518 */
+	u16 pkg_ob_frag; /* tx crc wrong and length <64 */
+	u16 pkg_ob_nosfd; /* tx sfd missed */
+} __attribute__((packed));
+
+struct phy_statistics {
+	union {
+		struct yt_phy_statistics yt;
+	};
+} __attribute__((packed));
+/* == flags == */
+#define FLAGS_DD BIT(0) /* driver clear 0, FW must set 1 */
+#define FLAGS_CMP BIT(1) /* driver clear 0, FW must set */
+#define FLAGS_ERR                                                              \
+	BIT(2) /* driver clear 0, FW must set only if it reporting an error */
+#define FLAGS_LB BIT(9)
+#define FLAGS_RD BIT(10) /* set if additional buffer has command parameters */
+#define FLAGS_BUF BIT(12) /* set 1 on indirect command */
+#define FLAGS_SI BIT(13) /* not irq when command complete */
+#define FLAGS_EI BIT(14) /* interrupt on error */
+#define FLAGS_FE BIT(15) /* flush error */
+
+#ifndef SHM_DATA_MAX_BYTES
+#define SHM_DATA_MAX_BYTES (64 - 2 * 4)
+#endif
+
+#define MBX_REQ_HDR_LEN 24
+#define MBX_REPLYHDR_LEN 16
+#define MBX_REQ_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REQ_HDR_LEN)
+#define MBX_REPLY_MAX_DATA_LEN (SHM_DATA_MAX_BYTES - MBX_REPLYHDR_LEN)
+
+/* TODO: req is little-endian; big-endian should be considered */
+
+struct mbx_fw_cmd_req {
+	unsigned short flags; /* 0-1 */
+	unsigned short opcode; /* 2-3 enum LINK_ADM_CMD */
+	unsigned short datalen; /* 4-5 */
+	unsigned short ret_value; /* 6-7 */
+	union {
+		struct {
+			unsigned int cookie_lo; /* 8-11 */
+			unsigned int cookie_hi; /* 12-15 */
+		};
+		void *cookie;
+	};
+	unsigned int reply_lo; /* 16-19 5dw */
+	unsigned int reply_hi; /* 20-23 */
+	/*=== data === 7dw [24-64] */
+	union {
+		char data[0];
+
+		struct {
+			unsigned int addr;
+			unsigned int bytes;
+		} r_reg;
+
+		struct {
+			unsigned int addr;
+			unsigned int bytes;
+			unsigned int data[4];
+		} w_reg;
+
+		struct {
+			unsigned int lanes;
+		} ptp;
+
+		struct {
+			int lane;
+			int up;
+		} ifup;
+		struct {
+			u32 sec;
+			u32 nanosec;
+
+		} tstamps;
+
+		struct {
+			int lane;
+			int status;
+		} ifinsmod;
+		struct {
+			int lane;
+			int status;
+		} ifforce;
+
+		struct {
+			int lane;
+			int status;
+		} ifsuspuse;
+
+		struct {
+			int nr_lane;
+		} get_lane_st;
+
+		struct {
+			int nr_lane;
+			int func;
+#define LANE_FUN_AN 0
+#define LANE_FUN_LINK_TRAING 1
+#define LANE_FUN_FEC 2
+#define LANE_FUN_SI 3
+#define LANE_FUN_SFP_TX_DISABLE 4
+#define LANE_FUN_PCI_LANE 5
+#define LANE_FUN_PRBS 6
+#define LANE_FUN_SPEED_CHANGE 7
+
+			int value0;
+			int value1;
+			int value2;
+			int value3;
+		} set_lane_fun;
+
+		struct {
+			int flag;
+			int nr_lane;
+		} set_dump;
+
+		struct {
+			int lane;
+			int enable;
+		} wol;
+
+		struct {
+			int lane;
+			int mode;
+		} gephy_test;
+
+		struct {
+			int lane;
+			int op;
+			int enable;
+			int inteval;
+		} lldp_tx;
+
+		struct {
+			unsigned int bytes;
+			unsigned int nr_lane;
+			unsigned int bin_offset;
+			unsigned int no_use;
+		} get_dump;
+
+		struct {
+			unsigned int nr_lane;
+			int value;
+#define LED_IDENTIFY_INACTIVE 0
+#define LED_IDENTIFY_ACTIVE 1
+#define LED_IDENTIFY_ON 2
+#define LED_IDENTIFY_OFF 3
+		} led_set;
+
+		struct {
+			unsigned int addr;
+			unsigned int data;
+			unsigned int mask;
+		} modify_reg;
+
+		struct {
+			unsigned int adv_speed_mask;
+			unsigned int autoneg;
+			unsigned int speed;
+			unsigned int duplex;
+			int nr_lane;
+			unsigned int tp_mdix_ctrl;
+		} phy_link_set;
+
+		struct {
+			unsigned int pause_mode;
+			int nr_lane;
+		} phy_pause_set;
+		struct {
+			unsigned int pause_mode;
+			int nr_lane;
+		} phy_pause_get;
+		struct {
+			u32 local_eee;
+			u32 tx_lpi_timer;
+			int nr_lane;
+		} phy_eee_set;
+		struct {
+			unsigned int nr_lane;
+			unsigned int sfp_adr; /* 0xa0 or 0xa2 */
+			unsigned int reg;
+			unsigned int cnt;
+		} sfp_read;
+
+		struct {
+			unsigned int nr_lane;
+			unsigned int sfp_adr; /* 0xa0 or 0xa2 */
+			unsigned int reg;
+			unsigned int val;
+		} sfp_write;
+
+		struct {
+			unsigned int nr_lane; /* 0-3 */
+		} get_linkstat;
+		struct {
+			unsigned short changed_lanes;
+			unsigned short lane_status;
+			unsigned int port_st_magic;
+#define SPEED_VALID_MAGIC 0xa4a6a8a9
+			struct port_stat st[4];
+		} link_stat; /* FW->RC */
+
+		struct {
+			unsigned short enable_stat;
+			unsigned short event_mask;
+		} stat_event_mask;
+
+		struct { /* set loopback */
+			unsigned char loopback_level;
+			unsigned char loopback_type;
+			unsigned char loopback_force_speed;
+
+			char loopback_force_speed_enable : 1;
+		} loopback;
+
+		struct {
+			int cmd;
+			int arg0;
+			int req_bytes;
+			int reply_bytes;
+			int ddr_lo;
+			int ddr_hi;
+		} maintain;
+
+		struct { /* set phy register */
+			char phy_interface;
+			union {
+				char page_num;
+				char external_phy_addr;
+			};
+			int phy_reg_addr;
+			int phy_w_data;
+			int reg_addr;
+			int w_data;
+			/* recall_qsfp_page:
+			 * 1 = ignore page_num, use last QSFP page value
+			 * 0 = use page_num for QSFP
+			 */
+			char recall_qsfp_page : 1;
+			char nr_lane;
+		} set_phy_reg;
+		struct {
+		} get_phy_ablity;
+
+		struct {
+			int lane_mask;
+			int pfvf_num;
+		} get_mac_addr;
+
+		struct {
+			char phy_interface;
+			union {
+				char page_num;
+				char external_phy_addr;
+			};
+			int phy_reg_addr;
+			char nr_lane;
+		} get_phy_reg;
+
+		struct {
+			unsigned int nr_lane;
+		} phy_statistics;
+
+		struct {
+			char paration;
+			unsigned int bytes;
+			unsigned int bin_phy_lo;
+			unsigned int bin_phy_hi;
+		} fw_update;
+	};
+} _PACKED_ALIGN4;
+
+#define EEE_1000BT BIT(2)
+#define EEE_100BT BIT(1)
+
+struct rnpgbe_eee_cap {
+	unsigned int local_capability;
+	unsigned int local_eee;
+	unsigned int partner_eee;
+};
+
+/* firmware -> driver */
+struct mbx_fw_cmd_reply {
+	/* fw must set: DD, CMP, Error(if error), copy value */
+	unsigned short flags;
+	/* from command: LB,RD,VFC,BUF,SI,EI,FE */
+	unsigned short opcode; /* 2-3: copy from req */
+	unsigned short error_code; /* 4-5: 0 if no error */
+	unsigned short datalen; /* 6-7: */
+	union {
+		struct {
+			unsigned int cookie_lo; /* 8-11: */
+			unsigned int cookie_hi; /* 12-15: */
+		};
+		void *cookie;
+	};
+	/* ===== data ==== [16-64] */
+	union {
+		char data[0];
+
+		struct version {
+			unsigned int major;
+			unsigned int sub;
+			unsigned int modify;
+		} version;
+
+		struct {
+			unsigned int value[4];
+		} r_reg;
+
+		struct {
+			unsigned int new_value;
+		} modify_reg;
+
+		struct get_temp {
+			int temp;
+			int volatage;
+		} get_temp;
+
+		struct {
+#define MBX_SFP_READ_MAX_CNT 32
+			char value[MBX_SFP_READ_MAX_CNT];
+		} sfp_read;
+
+		struct mac_addr {
+			int lanes;
+			struct _addr {
+				/*
+				 * for macaddr:01:02:03:04:05:06
+				 * mac-hi=0x01020304 mac-lo=0x05060000
+				 */
+				unsigned char mac[8];
+			} addrs[4];
+		} mac_addr;
+
+		struct get_dump_reply {
+			int flags;
+			int version;
+			int bytes;
+			int data[4];
+		} get_dump;
+
+		struct get_lldp_reply {
+			int value;
+			int inteval;
+		} get_lldp;
+
+		struct rnpgbe_eee_cap phy_eee_abilities;
+		struct lane_stat_data lanestat;
+		struct link_stat_data linkstat;
+		struct phy_abilities phy_abilities;
+		struct phy_statistics phy_statistics;
+	};
+} _PACKED_ALIGN4;
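+
+/* Typical flow, as implied by the builders below: the driver fills a
+ * struct mbx_fw_cmd_req with an opcode, datalen and an optional cookie,
+ * may point reply_lo/reply_hi at a DMA buffer via mbx_fw_req_set_reply(),
+ * and the firmware answers with a struct mbx_fw_cmd_reply that echoes the
+ * opcode and cookie and carries the opcode-specific payload in the union
+ * above.
+ */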
+
+static inline void build_maintain_req(struct mbx_fw_cmd_req *req, void *cookie,
+				      int cmd, int arg0, int req_bytes,
+				      int reply_bytes, u32 dma_phy_lo,
+				      u32 dma_phy_hi)
+{
+	req->flags = 0;
+	req->opcode = FW_MAINTAIN;
+	req->datalen = sizeof(req->maintain);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->maintain.cmd = cmd;
+	req->maintain.arg0 = arg0;
+	req->maintain.req_bytes = req_bytes;
+	req->maintain.reply_bytes = reply_bytes;
+	req->maintain.ddr_lo = dma_phy_lo;
+	req->maintain.ddr_hi = dma_phy_hi;
+}
+
+static inline void build_fw_update_req(struct mbx_fw_cmd_req *req, void *cookie,
+				       int partition, u32 fw_bin_phy_lo,
+				       u32 fw_bin_phy_hi, int fw_bytes)
+{
+	req->flags = 0;
+	req->opcode = FW_UPDATE;
+	req->datalen = sizeof(req->fw_update);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->fw_update.paration = partition;
+	req->fw_update.bytes = fw_bytes;
+	req->fw_update.bin_phy_lo = fw_bin_phy_lo;
+	req->fw_update.bin_phy_hi = fw_bin_phy_hi;
+}
+
+static inline void build_fw_update_n500_req(struct mbx_fw_cmd_req *req,
+					    void *cookie, int partition,
+					    int fw_bytes)
+{
+	req->flags = 0;
+	req->opcode = FW_UPDATE_N500;
+	req->datalen = sizeof(req->fw_update);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->fw_update.paration = partition;
+	req->fw_update.bytes = fw_bytes;
+}
+
+static inline void build_reset_phy_req(struct mbx_fw_cmd_req *req, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = RESET_PHY;
+	req->datalen = 0;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->cookie = cookie;
+}
+
+static inline void build_phy_eee_abalities_req(struct mbx_fw_cmd_req *req,
+					       void *cookie)
+{
+	req->flags = 0;
+	req->opcode = PHY_EEE_GET;
+	req->datalen = 0;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->cookie = cookie;
+}
+
+static inline void build_phy_abalities_req(struct mbx_fw_cmd_req *req,
+					   void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_PHY_ABALITY;
+	req->datalen = 0;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->cookie = cookie;
+}
+
+static inline void build_get_macaddress_req(struct mbx_fw_cmd_req *req,
+					    int lane_mask, int pfvfnum,
+					    void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_MAC_ADDRES;
+	req->datalen = sizeof(req->get_mac_addr);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+
+	req->get_mac_addr.lane_mask = lane_mask;
+	req->get_mac_addr.pfvf_num = pfvfnum;
+}
+
+static inline void build_version_req(struct mbx_fw_cmd_req *req, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_VERSION;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->datalen = 0;
+	req->cookie = cookie;
+}
+
+/* 7.10.11.8 Read register admin command */
+static inline void build_readreg_req(struct mbx_fw_cmd_req *req, int reg_addr,
+				     void *cookie)
+{
+	req->flags = 0;
+	req->opcode = READ_REG;
+	req->datalen = sizeof(req->r_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->r_reg.addr = reg_addr & ~(3);
+	req->r_reg.bytes = 4;
+}
+
+static inline void mbx_fw_req_set_reply(struct mbx_fw_cmd_req *req,
+					dma_addr_t reply)
+{
+	u64 address = reply;
+
+	req->reply_hi = (address >> 32);
+	req->reply_lo = (address) & 0xffffffff;
+}
+
+/* 7.10.11.9 Write register admin command */
+static inline void build_writereg_req(struct mbx_fw_cmd_req *req, void *cookie,
+				      int reg_addr, int bytes, int value[4])
+{
+	int i;
+
+	req->flags = 0;
+	req->opcode = WRITE_REG;
+	req->datalen = sizeof(req->w_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->w_reg.addr = reg_addr & ~3;
+	req->w_reg.bytes = bytes;
+	for (i = 0; i < bytes / 4; i++)
+		req->w_reg.data[i] = value[i];
+}
+
+/* 7.10.11.10 Modify register admin command */
+static inline void build_modifyreg_req(struct mbx_fw_cmd_req *req, void *cookie,
+				       int reg_addr, int value,
+				       unsigned int mask)
+{
+	req->flags = 0;
+	req->opcode = MODIFY_REG;
+	req->datalen = sizeof(req->modify_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->modify_reg.addr = reg_addr;
+	req->modify_reg.data = value;
+	req->modify_reg.mask = mask;
+}
+
+static inline void build_get_lane_status_req(struct mbx_fw_cmd_req *req,
+					     int nr_lane, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_LANE_STATUS;
+	req->datalen = sizeof(req->get_lane_st);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->get_lane_st.nr_lane = nr_lane;
+}
+
+static inline void build_get_link_status_req(struct mbx_fw_cmd_req *req,
+					     int nr_lane, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_LINK_STATUS;
+	req->datalen = sizeof(req->get_linkstat);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->get_linkstat.nr_lane = nr_lane;
+}
+
+static inline void build_get_temp(struct mbx_fw_cmd_req *req, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = GET_TEMP;
+	req->datalen = 0;
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+}
+static inline void build_get_dump_req(struct mbx_fw_cmd_req *req, void *cookie,
+				      int nr_lane, u32 fw_bin_phy_lo,
+				      u32 fw_bin_phy_hi, int bytes)
+{
+	req->flags = 0;
+	req->opcode = GET_DUMP;
+	req->datalen = sizeof(req->get_dump);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->get_dump.bytes = bytes;
+	req->get_dump.nr_lane = nr_lane;
+	req->get_dump.bin_offset = fw_bin_phy_lo;
+	req->get_dump.no_use = fw_bin_phy_hi;
+}
+
+static inline void build_set_dump(struct mbx_fw_cmd_req *req, int nr_lane,
+				  int flag)
+{
+	req->flags = 0;
+	req->opcode = SET_DUMP;
+	req->datalen = sizeof(req->set_dump);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->set_dump.flag = flag;
+	req->set_dump.nr_lane = nr_lane;
+}
+
+static inline void build_led_set(struct mbx_fw_cmd_req *req,
+				 unsigned int nr_lane, int value, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = LED_SET;
+	req->datalen = sizeof(req->led_set);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->led_set.nr_lane = nr_lane;
+	req->led_set.value = value;
+}
+
+static inline void build_set_lane_fun(struct mbx_fw_cmd_req *req, int nr_lane,
+				      int fun, int value0, int value1,
+				      int value2, int value3)
+{
+	req->flags = 0;
+	req->opcode = SET_LANE_FUN;
+	req->datalen = sizeof(req->set_lane_fun);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->set_lane_fun.func = fun;
+	req->set_lane_fun.nr_lane = nr_lane;
+	req->set_lane_fun.value0 = value0;
+	req->set_lane_fun.value1 = value1;
+	req->set_lane_fun.value2 = value2;
+	req->set_lane_fun.value3 = value3;
+}
+
+static inline void build_set_phy_reg(struct mbx_fw_cmd_req *req, void *cookie,
+				     enum PHY_INTERFACE phy_inf, char nr_lane,
+				     int reg, int w_data, int recall_qsfp_page)
+{
+	req->flags = 0;
+	req->opcode = SET_PHY_REG;
+	req->datalen = sizeof(req->set_phy_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+
+	req->set_phy_reg.phy_interface = phy_inf;
+	req->set_phy_reg.nr_lane = nr_lane;
+	req->set_phy_reg.phy_reg_addr = reg;
+	req->set_phy_reg.phy_w_data = w_data;
+
+	if (recall_qsfp_page)
+		req->set_phy_reg.recall_qsfp_page = 1;
+	else
+		req->set_phy_reg.recall_qsfp_page = 0;
+}
+
+static inline void build_get_phy_reg(struct mbx_fw_cmd_req *req, void *cookie,
+				     enum PHY_INTERFACE phy_inf, char nr_lane,
+				     int reg)
+{
+	req->flags = 0;
+	req->opcode = GET_PHY_REG;
+	req->datalen = sizeof(req->get_phy_reg);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+
+	req->get_phy_reg.phy_interface = phy_inf;
+
+	req->get_phy_reg.nr_lane = nr_lane;
+	req->get_phy_reg.phy_reg_addr = reg;
+}
+
+static inline void build_phy_pause_set(struct mbx_fw_cmd_req *req,
+				       int pause_mode, int nr_lane)
+{
+	req->flags = 0;
+	req->opcode = PHY_PAUSE_SET;
+	req->datalen = sizeof(req->phy_pause_set);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->phy_pause_set.nr_lane = nr_lane;
+	req->phy_pause_set.pause_mode = pause_mode;
+}
+
+static inline void build_get_phy_pause_req(struct mbx_fw_cmd_req *req,
+					   int nr_lane, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = PHY_PAUSE_GET;
+	req->datalen = sizeof(req->phy_pause_get);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->phy_pause_get.nr_lane = nr_lane;
+	req->phy_pause_get.pause_mode = 0;
+}
+
+static inline void build_phy_eee_set(struct mbx_fw_cmd_req *req, u32 local_eee,
+				     u32 tx_lpi_timer, int nr_lane)
+{
+	req->flags = 0;
+	req->opcode = PHY_EEE_SET;
+	req->datalen = sizeof(req->phy_eee_set);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->phy_eee_set.nr_lane = nr_lane;
+	req->phy_eee_set.local_eee = local_eee;
+	req->phy_eee_set.tx_lpi_timer = tx_lpi_timer;
+}
+
+static inline void build_phy_link_set(struct mbx_fw_cmd_req *req,
+				      unsigned int adv, int nr_lane,
+				      unsigned int autoneg, unsigned int speed,
+				      unsigned int duplex,
+				      unsigned int tp_mdix_ctrl)
+{
+	req->flags = 0;
+	req->opcode = PHY_LINK_SET;
+	req->datalen = sizeof(req->phy_link_set);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->phy_link_set.nr_lane = nr_lane;
+	req->phy_link_set.adv_speed_mask = adv;
+	req->phy_link_set.autoneg = autoneg;
+	req->phy_link_set.speed = speed;
+	req->phy_link_set.duplex = duplex;
+	req->phy_link_set.tp_mdix_ctrl = tp_mdix_ctrl;
+}
+static inline void build_tstamp_show(struct mbx_fw_cmd_req *req, u32 sec,
+				     u32 nanosec)
+{
+	req->flags = 0;
+	req->opcode = SHOW_TX_STAMP;
+	req->datalen = sizeof(req->tstamps);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->tstamps.sec = sec;
+	req->tstamps.nanosec = nanosec;
+}
+
+static inline void build_ifup_down(struct mbx_fw_cmd_req *req,
+				   unsigned int nr_lane, int up)
+{
+	req->flags = 0;
+	req->opcode = IFUP_DOWN;
+	req->datalen = sizeof(req->ifup);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifup.lane = nr_lane;
+	req->ifup.up = up;
+}
+
+static inline void build_ifinsmod(struct mbx_fw_cmd_req *req,
+				  unsigned int nr_lane, int status)
+{
+	req->flags = 0;
+	req->opcode = DRIVER_INSMOD;
+	req->datalen = sizeof(req->ifinsmod);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifinsmod.lane = nr_lane;
+	req->ifinsmod.status = status;
+}
+
+static inline void build_ifsuspuse(struct mbx_fw_cmd_req *req,
+				   unsigned int nr_lane, int status)
+{
+	req->flags = 0;
+	req->opcode = SYSTEM_SUSPUSE;
+	req->datalen = sizeof(req->ifsuspuse);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifsuspuse.lane = nr_lane;
+	req->ifsuspuse.status = status;
+}
+
+static inline void build_ifforce(struct mbx_fw_cmd_req *req,
+				 unsigned int nr_lane, int status)
+{
+	req->flags = 0;
+	req->opcode = SYSTEM_FORCE;
+	req->datalen = sizeof(req->ifforce);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifforce.lane = nr_lane;
+	req->ifforce.status = status;
+}
+
+static inline void build_mbx_sfp_read(struct mbx_fw_cmd_req *req,
+				      unsigned int nr_lane, int sfp_addr,
+				      int reg, int cnt, void *cookie)
+{
+	req->flags = 0;
+	req->opcode = SFP_MODULE_READ;
+	req->datalen = sizeof(req->sfp_read);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->sfp_read.nr_lane = nr_lane;
+	req->sfp_read.sfp_adr = sfp_addr;
+	req->sfp_read.reg = reg;
+	req->sfp_read.cnt = cnt;
+}
+
+static inline void build_mbx_sfp_write(struct mbx_fw_cmd_req *req,
+				       unsigned int nr_lane, int sfp_addr,
+				       int reg, int v)
+{
+	req->flags = 0;
+	req->opcode = SFP_MODULE_WRITE;
+	req->datalen = sizeof(req->sfp_write);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->sfp_write.nr_lane = nr_lane;
+	req->sfp_write.sfp_adr = sfp_addr;
+	req->sfp_write.reg = reg;
+	req->sfp_write.val = v;
+}
+
+static inline void build_mbx_wol_set(struct mbx_fw_cmd_req *req,
+				     unsigned int nr_lane, u32 mode)
+{
+	req->flags = 0;
+	req->opcode = SET_WOL;
+	req->datalen = sizeof(req->sfp_write);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->wol.lane = nr_lane;
+	req->wol.enable = mode;
+}
+
+static inline void build_mbx_gephy_test_set(struct mbx_fw_cmd_req *req,
+					    unsigned int nr_lane, u32 mode)
+{
+	req->flags = 0;
+	req->opcode = SET_TEST_MODE;
+	req->datalen = sizeof(req->sfp_write);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->gephy_test.lane = nr_lane;
+	req->gephy_test.mode = mode;
+}
+static inline void build_get_lldp_req(struct mbx_fw_cmd_req *req, void *cookie,
+				      int nr_lane)
+{
+#define LLDP_TX_GET (1)
+
+	req->flags = 0;
+	req->opcode = LLDP_TX_CTRL;
+	req->datalen = sizeof(req->lldp_tx);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->lldp_tx.lane = nr_lane;
+	req->lldp_tx.op = LLDP_TX_GET;
+	req->lldp_tx.enable = 0;
+}
+
+static inline void build_mbx_lldp_set(struct mbx_fw_cmd_req *req,
+				      unsigned int nr_lane, u32 enable)
+{
+#define LLDP_TX_SET (0)
+	req->flags = 0;
+	req->opcode = LLDP_TX_CTRL;
+	req->datalen = sizeof(req->sfp_write);
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->lldp_tx.lane = nr_lane;
+	req->lldp_tx.op = LLDP_TX_SET;
+	req->lldp_tx.enable = enable;
+	req->lldp_tx.inteval = 30;
+}
+
+/* enum link_event_mask or */
+static inline void build_link_set_event_mask(struct mbx_fw_cmd_req *req,
+					     unsigned short event_mask,
+					     unsigned short enable,
+					     void *cookie)
+{
+	req->flags = 0;
+	req->opcode = SET_EVENT_MASK;
+	req->datalen = sizeof(req->stat_event_mask);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->stat_event_mask.event_mask = event_mask;
+	req->stat_event_mask.enable_stat = enable;
+}
+
+static inline void
+build_link_set_loopback_req(struct mbx_fw_cmd_req *req, void *cookie,
+			    enum LOOPBACK_LEVEL level,
+			    enum LOOPBACK_FORCE_SPEED force_speed)
+{
+	req->flags = 0;
+	req->opcode = SET_LOOPBACK_MODE;
+	req->datalen = sizeof(req->loopback);
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+
+	req->loopback.loopback_level = level;
+	req->loopback.loopback_type = LOOPBACK_TYPE_LOCAL;
+	if (force_speed != LOOPBACK_FORCE_SPEED_NONE) {
+		req->loopback.loopback_force_speed = force_speed;
+		req->loopback.loopback_force_speed_enable = 1;
+	}
+}
+
+/* =========== error codes =========== */
+enum MBX_ERR {
+	MBX_OK = 0,
+	MBX_ERR_NO_PERM,
+	MBX_ERR_INVAL_OPCODE,
+	MBX_ERR_INVALID_PARAM,
+	MBX_ERR_INVALID_ADDR,
+	MBX_ERR_INVALID_LEN,
+	MBX_ERR_NODEV,
+	MBX_ERR_IO,
+};
+int rnpgbe_fw_get_capablity(struct rnpgbe_hw *hw, struct phy_abilities *abil);
+
+#endif /* _RNPGBE_MBX_FW_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_param.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_param.c
new file mode 100644
index 0000000000000..4cc4c44b96dda
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_param.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+#include "rnpgbe.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define RNP_MAX_NIC 32
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+#define STRINGIFY(foo) #foo /* magic for getting defines into strings */
+#define XSTRINGIFY(bar) STRINGIFY(bar)
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define RNP_PARAM_INIT                                                         \
+	{                                                                      \
+		[0 ... RNP_MAX_NIC] = OPTION_UNSET                             \
+	}
+#define RNP_PARAM(X, desc)                                                     \
+	static int X[RNP_MAX_NIC + 1] = RNP_PARAM_INIT;          \
+	static unsigned int num_##X;                                           \
+	module_param_array_named(X, X, int, &num_##X, 0);                      \
+	MODULE_PARM_DESC(X, desc);
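+/* For illustration, RNP_PARAM(IntMode, "...") expands roughly to:
+ *
+ *	static int IntMode[RNP_MAX_NIC + 1] = { [0 ... RNP_MAX_NIC] = OPTION_UNSET };
+ *	static unsigned int num_IntMode;
+ *	module_param_array_named(IntMode, IntMode, int, &num_IntMode, 0);
+ *	MODULE_PARM_DESC(IntMode, "...");
+ *
+ * i.e. one per-board value array plus a count of how many entries were set.
+ */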
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0-2
+ *  - 0 - Legacy Interrupt
+ *  - 1 - MSI Interrupt
+ *  - 2 - MSI-X Interrupt(s)
+ *
+ * Default Value: 2
+ */
+RNP_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), "
+		   "default 2");
+#define RNP_INT_LEGACY 0
+#define RNP_INT_MSI 1
+#define RNP_INT_MSIX 2
+
+#ifdef CONFIG_PCI_IOV
+/* max_vfs - Single Root I/O Virtualization (SR-IOV)
+ *
+ * Valid Range: 0-63 for n10
+ * Valid Range: 0-7 for n400/n10
+ *  - 0 Disables SR-IOV
+ *  - 1-x - enables SR-IOV and sets the number of VFs enabled
+ *
+ * Default Value: 0
+ */
+
+RNP_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), "
+		   "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable "
+						  "this many VFs");
+
+/* SRIOV_Mode (SR-IOV Mode)
+ *
+ * Valid Range: 0-1
+ *  - 0 - MAC mode
+ *  - 1 - VLAN mode
+ *
+ * Default Value: 0
+ */
+RNP_PARAM(SRIOV_Mode, "Change SRIOV Mode (0=MAC_MODE, 1=VLAN_MODE), "
+		      "default 0");
+#define RNP_SRIOV_MAC_MODE 0
+#define RNP_SRIOV_VLAN_MODE 1
+#endif
+
+/* pf_msix_counts_set - Limit max msix counts
+ *
+ * Valid Range: 2-63 for n10
+ * Valid Range: 2-7 for n400/n10
+ *
+ * Default Value: 0 (un-limit)
+ */
+RNP_PARAM(pf_msix_counts_set, "Limit of MSI-X vectors per PF (default: unlimited)");
+#define RNP_INT_MIN 2
+#define RNP_INT_MAX 64
+
+/* eee_timer - LPI tx expiration time in msec
+ *
+ * Valid Range: 100-10000
+ *
+ * Default Value: 4000
+ */
+RNP_PARAM(eee_timer, "LPI tx expiration time in msec: (default 4000)");
+#define RNP_EEE_MIN (100)
+#define RNP_EEE_DEFAULT (4000)
+#define RNP_EEE_MAX (10000)
+
+/* rx_skip - private header length to skip, in DWORDs
+ *
+ * Valid Range: [0, 16]
+ *
+ * Default Value: 0
+ */
+RNP_PARAM(rx_skip, "rx_skip header in DW: (default 0)");
+#define RNP_RX_SKIP_MIN (0)
+#define RNP_RX_SKIP_DEFAULT (0)
+#define RNP_RX_SKIP_MAX (16)
+
+struct rnpgbe_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	const char *msg;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			const struct rnpgbe_opt_list {
+				int i;
+				char *str;
+			} *p;
+		} l;
+	} arg;
+};
+
+static int rnpgbe_validate_option(struct net_device *netdev,
+				  unsigned int *value,
+				  struct rnpgbe_option *opt)
+{
+	if (*value == OPTION_UNSET) {
+		netdev_info(netdev, "Invalid %s specified (%d),  %s\n",
+			    opt->name, *value, opt->err);
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			netdev_info(netdev, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			netdev_info(netdev, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) ||
+		    *value == opt->def) {
+			if (opt->msg)
+				netdev_info(netdev, "%s set to %d, %s\n",
+					    opt->name, *value, opt->msg);
+			else
+				netdev_info(netdev, "%s set to %d\n", opt->name,
+					    *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			const struct rnpgbe_opt_list *ent = &opt->arg.l.p[i];
+
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					netdev_info(netdev, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	} break;
+	default:
+		BUG();
+	}
+
+	netdev_info(netdev, "Invalid %s specified (%d),  %s\n", opt->name,
+		    *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+#define LIST_LEN(l) (sizeof(l) / sizeof(l[0]))
+#define PSTR_LEN 10
+
+/**
+ * rnpgbe_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+void rnpgbe_check_options(struct rnpgbe_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+	u32 *aflags = &adapter->flags;
+
+	if (bd >= RNP_MAX_NIC) {
+		netdev_notice(adapter->netdev,
+			      "Warning: no configuration for board #%d\n", bd);
+		netdev_notice(adapter->netdev,
+			      "Using defaults for all values\n");
+	}
+
+	{ /* Interrupt Mode */
+		unsigned int int_mode;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "Interrupt Mode",
+			.err = "using default of " __MODULE_STRING(
+				RNP_INT_MSIX),
+			.def = RNP_INT_MSIX,
+			.arg = { .r = { .min = RNP_INT_LEGACY,
+					.max = RNP_INT_MSIX } }
+		};
+
+		if (num_IntMode > bd) {
+			int_mode = IntMode[bd];
+			if (int_mode == OPTION_UNSET)
+				int_mode = RNP_INT_MSIX;
+			rnpgbe_validate_option(adapter->netdev, &int_mode,
+					       &opt);
+			switch (int_mode) {
+			case RNP_INT_MSIX:
+				if (!(*aflags & RNP_FLAG_MSIX_CAPABLE)) {
+					netdev_info(adapter->netdev,
+						    "Ignoring MSI-X setting; "
+						    "support unavailable\n");
+				} else
+					adapter->irq_mode = irq_mode_msix;
+				break;
+			case RNP_INT_MSI:
+				if (!(*aflags & RNP_FLAG_MSI_CAPABLE)) {
+					netdev_info(adapter->netdev,
+						    "Ignoring MSI setting; "
+						    "support unavailable\n");
+				} else
+					adapter->irq_mode = irq_mode_msi;
+
+				break;
+			case RNP_INT_LEGACY:
+				if (!(*aflags & RNP_FLAG_LEGACY_CAPABLE)) {
+					netdev_info(adapter->netdev,
+						    "Ignoring Legacy setting; "
+						    "support unavailable\n");
+				} else
+					adapter->irq_mode = irq_mode_legency;
+
+				break;
+			}
+		} else {
+			/* default settings */
+			if (*aflags & RNP_FLAG_MSIX_CAPABLE)
+				adapter->irq_mode = irq_mode_msix;
+			else if (*aflags & RNP_FLAG_MSI_CAPABLE)
+				adapter->irq_mode = irq_mode_msi;
+			else
+				adapter->irq_mode = irq_mode_legency;
+		}
+	}
+
+#ifdef CONFIG_PCI_IOV
+	{ /* Single Root I/O Virtualization (SR-IOV) */
+		struct rnpgbe_hw *hw = &adapter->hw;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "I/O Virtualization (IOV)",
+			.err = "defaulting to Disabled",
+			.def = OPTION_DISABLED,
+			.arg = { .r = { .min = OPTION_DISABLED,
+					.max = OPTION_DISABLED } }
+		};
+
+		opt.arg.r.max = hw->max_vfs;
+		if (num_max_vfs > bd) {
+			unsigned int vfs = max_vfs[bd];
+
+			if (rnpgbe_validate_option(adapter->netdev, &vfs,
+						   &opt)) {
+				vfs = 0;
+				DPRINTK(PROBE, INFO,
+					"max_vfs out of range, "
+					"disabling SR-IOV\n");
+			}
+
+			adapter->num_vfs = vfs;
+
+			if (vfs)
+				*aflags |= RNP_FLAG_SRIOV_ENABLED;
+			else
+				*aflags &= ~RNP_FLAG_SRIOV_ENABLED;
+		} else {
+			if (opt.def == OPTION_DISABLED) {
+				adapter->num_vfs = 0;
+				*aflags &= ~RNP_FLAG_SRIOV_ENABLED;
+			} else {
+				adapter->num_vfs = opt.def;
+				*aflags |= RNP_FLAG_SRIOV_ENABLED;
+			}
+		}
+	}
+
+	{ /* SR-IOV Mode */
+		unsigned int sriov_mode;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "SRIOV Mode",
+			.err = "using default of " __MODULE_STRING(
+				RNP_SRIOV_MAC_MODE),
+			.def = RNP_SRIOV_MAC_MODE,
+			.arg = { .r = { .min = RNP_SRIOV_MAC_MODE,
+					.max = RNP_SRIOV_VLAN_MODE } }
+		};
+		if (num_SRIOV_Mode > bd) {
+			sriov_mode = SRIOV_Mode[bd];
+			if (sriov_mode == OPTION_UNSET)
+				sriov_mode = RNP_SRIOV_MAC_MODE;
+			rnpgbe_validate_option(adapter->netdev, &sriov_mode,
+					       &opt);
+
+			if (sriov_mode == RNP_SRIOV_VLAN_MODE)
+				adapter->priv_flags |=
+					RNP_PRIV_FLAG_SRIOV_VLAN_MODE;
+		} else {
+			/* default: MAC mode */
+			adapter->priv_flags &= (~RNP_PRIV_FLAG_SRIOV_VLAN_MODE);
+		}
+	}
+#endif /* CONFIG_PCI_IOV */
+
+	{ /* max msix count setup */
+		unsigned int pf_msix_counts;
+		struct rnpgbe_hw *hw = &adapter->hw;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "Limit Msix Count",
+			.err = "using default of Un-limit",
+			.def = OPTION_DISABLED,
+			.arg = { .r = { .min = RNP_INT_MIN,
+					.max = RNP_INT_MAX } }
+		};
+
+		opt.arg.r.max = hw->max_msix_vectors;
+		if (num_pf_msix_counts_set > bd) {
+			pf_msix_counts = pf_msix_counts_set[bd];
+			if (pf_msix_counts == OPTION_DISABLED)
+				pf_msix_counts = 0;
+			rnpgbe_validate_option(adapter->netdev, &pf_msix_counts,
+					       &opt);
+
+			if (pf_msix_counts) {
+				if (hw->ops.update_msix_count)
+					hw->ops.update_msix_count(
+						hw, pf_msix_counts);
+			}
+
+		}
+	}
+
+	{ /* LPI tx expiration time in msec */
+		unsigned int eee_timer_delay;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "eee timer exp",
+			.err = "using default of 1000",
+			.def = OPTION_DISABLED,
+			.arg = { .r = { .min = RNP_EEE_MIN,
+					.max = RNP_EEE_MAX } }
+		};
+
+		if (num_eee_timer > bd) {
+			eee_timer_delay = eee_timer[bd];
+			if (eee_timer_delay == OPTION_DISABLED)
+				eee_timer_delay = RNP_EEE_DEFAULT;
+			rnpgbe_validate_option(adapter->netdev,
+					       &eee_timer_delay, &opt);
+			adapter->eee_timer = eee_timer_delay;
+		} else {
+			adapter->eee_timer = RNP_EEE_DEFAULT;
+		}
+	}
+
+	{ /* rx_skip in DW */
+		unsigned int rx_skip_priv;
+		static struct rnpgbe_option opt = {
+			.type = range_option,
+			.name = "rx_skip in DW",
+			.err = "using default of 0",
+			.def = OPTION_DISABLED,
+			.arg = { .r = { .min = RNP_RX_SKIP_MIN,
+					.max = RNP_RX_SKIP_MAX } }
+		};
+
+		if (num_rx_skip > bd) {
+			rx_skip_priv = rx_skip[bd];
+			if (rx_skip_priv == OPTION_DISABLED)
+				rx_skip_priv = RNP_RX_SKIP_DEFAULT;
+			rnpgbe_validate_option(adapter->netdev, &rx_skip_priv,
+					       &opt);
+			if (rx_skip_priv) {
+				adapter->priv_skip_count = rx_skip_priv - 1;
+				adapter->priv_flags |= RNP_PRIV_FLAG_RX_SKIP_EN;
+			} else
+				adapter->priv_flags &=
+					~RNP_PRIV_FLAG_RX_SKIP_EN;
+		} else {
+			adapter->priv_skip_count = RNP_RX_SKIP_DEFAULT;
+			adapter->priv_flags &= ~RNP_PRIV_FLAG_RX_SKIP_EN;
+		}
+	}
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_phy.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_phy.h
new file mode 100644
index 0000000000000..5fd912d35b771
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_phy.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_PHY_H_
+#define _RNPGBE_PHY_H_
+
+#include "rnpgbe_type.h"
+
+#define RNP_I2C_EEPROM_DEV_ADDR 0xA0
+#define RNP_I2C_EEPROM_DEV_ADDR2 0xA2
+#define RNP_YT8531_PHY_SPEC_CTRL 0x10
+#define RNP_YT8531_PHY_SPEC_CTRL_FORCE_MDIX 0x0020
+#define RNP_YT8531_PHY_SPEC_CTRL_AUTO_MDI_MDIX 0x0060
+#define RNP_YT8531_PHY_SPEC_CTRL_MDIX_CFG_MASK 0x0060
+/* EEPROM byte offsets */
+#define SFF_MODULE_ID_OFFSET 0x00
+#define SFF_DIAG_SUPPORT_OFFSET 0x5c
+#define SFF_MODULE_ID_SFP 0x3
+#define SFF_MODULE_ID_QSFP 0xc
+#define SFF_MODULE_ID_QSFP_PLUS 0xd
+#define SFF_MODULE_ID_QSFP28 0x11
+/* Bitmasks */
+#define RNP_SFF_DA_PASSIVE_CABLE 0x4
+#define RNP_SFF_DA_ACTIVE_CABLE 0x8
+#define RNP_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
+#define RNP_SFF_1GBASESX_CAPABLE 0x1
+#define RNP_SFF_1GBASELX_CAPABLE 0x2
+#define RNP_SFF_1GBASET_CAPABLE 0x8
+#define RNP_SFF_10GBASESR_CAPABLE 0x10
+#define RNP_SFF_10GBASELR_CAPABLE 0x20
+#define RNP_SFF_ADDRESSING_MODE 0x4
+#define RNP_I2C_EEPROM_READ_MASK 0x100
+#define RNP_I2C_EEPROM_STATUS_MASK 0x3
+#define RNP_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define RNP_I2C_EEPROM_STATUS_PASS 0x1
+#define RNP_I2C_EEPROM_STATUS_FAIL 0x2
+#define RNP_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
+/* Flow control defines */
+#define RNP_TAF_SYM_PAUSE 0x400
+#define RNP_TAF_ASM_PAUSE 0x800
+/* Bit-shift macros */
+#define RNP_SFF_VENDOR_OUI_BYTE0_SHIFT 24
+#define RNP_SFF_VENDOR_OUI_BYTE1_SHIFT 16
+#define RNP_SFF_VENDOR_OUI_BYTE2_SHIFT 8
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define RNP_SFF_VENDOR_OUI_TYCO 0x00407600
+#define RNP_SFF_VENDOR_OUI_FTL 0x00906500
+#define RNP_SFF_VENDOR_OUI_AVAGO 0x00176A00
+#define RNP_SFF_VENDOR_OUI_INTEL 0x001B2100
+/* I2C SDA and SCL timing parameters for standard mode */
+#define RNP_I2C_T_HD_STA 4
+#define RNP_I2C_T_LOW 5
+#define RNP_I2C_T_HIGH 4
+#define RNP_I2C_T_SU_STA 5
+#define RNP_I2C_T_HD_DATA 5
+#define RNP_I2C_T_SU_DATA 1
+#define RNP_I2C_T_RISE 1
+#define RNP_I2C_T_FALL 1
+#define RNP_I2C_T_SU_STO 4
+#define RNP_I2C_T_BUF 5
+#define RNP_TN_LASI_STATUS_REG 0x9005
+#define RNP_TN_LASI_STATUS_TEMP_ALARM 0x0008
+/* SFP+ SFF-8472 Compliance code */
+#define RNP_SFF_SFF_8472_UNSUP 0x00
+#endif /* _RNPGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c
new file mode 100644
index 0000000000000..e163cfd1720ba
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.c
@@ -0,0 +1,760 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/uaccess.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "rnpgbe.h"
+#include "rnpgbe_regs.h"
+#include "rnpgbe_ptp.h"
+#include "rnpgbe_mbx.h"
+
+/* #define DEBUG_PTP_TX_TIMESTAMP */
+/* #define DEBUG_PTP_RX_TIMESTAMP */
+
+/* PTP and HW Timer ops */
+static void config_hw_tstamping(void __iomem *ioaddr, u32 data)
+{
+	writel(data, ioaddr + PTP_TCR);
+}
+
+static void config_sub_second_increment(void __iomem *ioaddr, u32 ptp_clock,
+					int gmac4, u32 *ssinc)
+{
+	u32 value = readl(ioaddr + PTP_TCR);
+	unsigned long data;
+	u32 reg_value;
+
+	/* For GMAC3.x, 4.x versions, in "fine adjustment mode" set the sub-second
+	 * increment to twice the number of nanoseconds of a clock cycle.
+	 * The calculation of the default_addend value by the caller will set it
+	 * to mid-range = 2^31 when the remainder of this division is zero,
+	 * which will make the accumulator overflow once every 2 ptp_clock
+	 * cycles, adding twice the number of nanoseconds of a clock cycle :
+	 * 2000000000ULL / ptp_clock.
+	 */
+	if (value & RNP_PTP_TCR_TSCFUPDT)
+		data = (2000000000ULL / ptp_clock);
+	else
+		data = (1000000000ULL / ptp_clock);
+
+	/* 0.465ns accuracy */
+	if (!(value & RNP_PTP_TCR_TSCTRLSSR))
+		data = (data * 1000) / 465;
+
+	data &= RNP_PTP_SSIR_SSINC_MASK;
+
+	reg_value = data;
+	if (gmac4)
+		reg_value <<= RNP_PTP_SSIR_SSINC_SHIFT;
+
+	writel(reg_value, ioaddr + PTP_SSIR);
+
+	if (ssinc)
+		*ssinc = data;
+}
+
+static int config_addend(void __iomem *ioaddr, u32 addend)
+{
+	u32 value;
+	int limit;
+
+	writel(addend, ioaddr + PTP_TAR);
+	/* issue command to update the addend value */
+	value = readl(ioaddr + PTP_TCR);
+	value |= RNP_PTP_TCR_TSADDREG;
+	writel(value, ioaddr + PTP_TCR);
+
+	/* wait for present addend update to complete */
+	limit = 10;
+	while (limit--) {
+		if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSADDREG))
+			break;
+		mdelay(10);
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
+{
+	int limit;
+	u32 value;
+
+	writel(sec, ioaddr + PTP_STSUR);
+	writel(nsec, ioaddr + PTP_STNSUR);
+	/* issue command to initialize the system time value */
+	value = readl(ioaddr + PTP_TCR);
+	value |= RNP_PTP_TCR_TSINIT;
+	writel(value, ioaddr + PTP_TCR);
+
+	/* wait for present system time initialize to complete */
+	limit = 10;
+	while (limit--) {
+		if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSINIT))
+			break;
+		mdelay(10);
+	}
+	if (limit < 0)
+		return -EBUSY;
+#ifdef FW_UART_SHOW_TSTAMPS
+	/* setup pps control */
+	writel(0x1, ioaddr + PTP_PPS_CONTROL);
+#endif
+	return 0;
+}
+
+static void get_systime(void __iomem *ioaddr, u64 *systime)
+{
+	u64 ns;
+
+	/* Get the TSSS value */
+	ns = readl(ioaddr + PTP_STNSR);
+	/* Get the TSS and convert sec time value to nanosecond */
+	ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+
+	if (systime)
+		*systime = ns;
+}
+
+static void config_mac_interrupt_enable(void __iomem *ioaddr, bool on)
+{
+	rnpgbe_wr_reg(ioaddr + RNP_MAC_INTERRUPT_ENABLE, on);
+}
+
+static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, int add_sub,
+			  int gmac4)
+{
+	u32 value;
+	int limit;
+
+	if (add_sub) {
+		/* If the new sec value needs to be subtracted from
+		 * the system time, then the MAC_STSUR reg should be
+		 * programmed with (2^32 - new_sec_value)
+		 */
+		if (gmac4)
+			sec = -sec;
+
+		value = readl(ioaddr + PTP_TCR);
+		if (value & RNP_PTP_TCR_TSCTRLSSR)
+			nsec = (RNP_PTP_DIGITAL_ROLLOVER_MODE - nsec);
+		else
+			nsec = (RNP_PTP_BINARY_ROLLOVER_MODE - nsec);
+	}
+
+	writel(sec, ioaddr + PTP_STSUR);
+	value = (add_sub << RNP_PTP_STNSUR_ADDSUB_SHIFT) | nsec;
+	writel(value, ioaddr + PTP_STNSUR);
+
+	/* issue command to initialize the system time value */
+	value = readl(ioaddr + PTP_TCR);
+	value |= RNP_PTP_TCR_TSUPDT;
+	writel(value, ioaddr + PTP_TCR);
+
+	/* wait for present system time adjust/update to complete */
+	limit = 10;
+	while (limit--) {
+		if (!(readl(ioaddr + PTP_TCR) & RNP_PTP_TCR_TSUPDT))
+			break;
+		mdelay(10);
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+const struct rnpgbe_hwtimestamp mac_ptp = {
+	.config_hw_tstamping = config_hw_tstamping,
+	.config_mac_irq_enable = config_mac_interrupt_enable,
+	.init_systime = init_systime,
+	.config_sub_second_increment = config_sub_second_increment,
+	.config_addend = config_addend,
+	.adjust_systime = adjust_systime,
+	.get_systime = get_systime,
+};
+
+static int rnpgbe_ptp_adjfreq(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+	struct rnpgbe_adapter *pf =
+		container_of(ptp, struct rnpgbe_adapter, ptp_clock_ops);
+	unsigned long flags;
+	u32 addend;
+
+	if (pf == NULL) {
+		printk(KERN_DEBUG "adapter_of contail is null\n");
+		return 0;
+	}
+	addend = adjust_by_scaled_ppm(pf->default_addend, scaled_ppm);
+	spin_lock_irqsave(&pf->ptp_lock, flags);
+	pf->hwts_ops->config_addend(pf->ptp_addr, addend);
+	spin_unlock_irqrestore(&pf->ptp_lock, flags);
+
+	return 0;
+}
+
+static int rnpgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct rnpgbe_adapter *pf =
+		container_of(ptp, struct rnpgbe_adapter, ptp_clock_ops);
+	unsigned long flags;
+	u32 sec, nsec;
+	u32 quotient, remainder;
+	int neg_adj = 0;
+
+	if (delta < 0) {
+		neg_adj = 1;
+		delta = -delta;
+	}
+
+	if (delta == 0)
+		return 0;
+
+	quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+	sec = quotient;
+	nsec = remainder;
+
+	spin_lock_irqsave(&pf->ptp_lock, flags);
+	pf->hwts_ops->adjust_systime(pf->ptp_addr, sec, nsec, neg_adj,
+				     pf->gmac4);
+	spin_unlock_irqrestore(&pf->ptp_lock, flags);
+
+	return 0;
+}
+
+static int rnpgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+	struct rnpgbe_adapter *pf =
+		container_of(ptp, struct rnpgbe_adapter, ptp_clock_ops);
+	unsigned long flags;
+	u64 ns = 0;
+
+	spin_lock_irqsave(&pf->ptp_lock, flags);
+
+	pf->hwts_ops->get_systime(pf->ptp_addr, &ns);
+
+	spin_unlock_irqrestore(&pf->ptp_lock, flags);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+static int rnpgbe_ptp_settime(struct ptp_clock_info *ptp,
+			      const struct timespec64 *ts)
+{
+	struct rnpgbe_adapter *pf =
+		container_of(ptp, struct rnpgbe_adapter, ptp_clock_ops);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pf->ptp_lock, flags);
+	pf->hwts_ops->init_systime(pf->ptp_addr, ts->tv_sec, ts->tv_nsec);
+	spin_unlock_irqrestore(&pf->ptp_lock, flags);
+
+	return 0;
+}
+
+static int rnpgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
+				     struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+int rnpgbe_ptp_get_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr)
+{
+	struct hwtstamp_config *config = &pf->tstamp_config;
+
+	return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? -EFAULT :
+								      0;
+}
+
+int rnpgbe_ptp_setup_ptp(struct rnpgbe_adapter *pf, u32 value)
+{
+	u32 sec_inc = 0;
+	u64 temp = 0;
+	struct timespec64 now;
+
+	/* For now just use the external clock (the kernel system clock) */
+	/* 1. Mask the timestamp trigger interrupt */
+	/* 2. Enable time stamping: first clear the timestamp control reg */
+	pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, value);
+	/* 3. Program the PTP clock frequency: program the Sub Second
+	 * Increment register based on the kernel system clock rate
+	 */
+	pf->hwts_ops->config_sub_second_increment(
+		pf->ptp_addr, pf->clk_ptp_rate, pf->gmac4, &sec_inc);
+	/* 4. If using the fine correction approach,
+	 * program the MAC_Timestamp_Addend register
+	 */
+	if (sec_inc == 0) {
+		printk(KERN_DEBUG "%s:%d the sec_inc is zero this is a bug\n",
+		       __func__, __LINE__);
+		return -EFAULT;
+	}
+	temp = div_u64(1000000000ULL, sec_inc);
+	/* Store sub second increment and flags for later use */
+	pf->sub_second_inc = sec_inc;
+	pf->systime_flags = value;
+	/* calculate the default addend value:
+	 * addend = (2^32) / freq_div_ratio,
+	 * where freq_div_ratio = 1e9 ns / sec_inc
+	 */
+	temp = (u64)(temp << 32);
+
+	if (pf->clk_ptp_rate == 0) {
+		pf->clk_ptp_rate = 1000;
+		printk(KERN_DEBUG "%s:%d clk_ptp_rate is zero\n", __func__,
+		       __LINE__);
+	}
+
+	pf->default_addend = div_u64(temp, pf->clk_ptp_rate);
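+	/* Worked example (assuming the default clk_ptp_rate of 50 MHz and the
+	 * fine-update/digital-rollover bits set in value): sec_inc is
+	 * 2e9 / 50e6 = 40 ns, freq_div_ratio = 1e9 / 40 = 25e6, so
+	 * default_addend = (2^32 * 25e6) / 50e6 = 2^31 (0x80000000), the
+	 * mid-range value that overflows the accumulator every other
+	 * ptp_clock cycle.
+	 */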
+
+	pf->hwts_ops->config_addend(pf->ptp_addr, pf->default_addend);
+	/* 5. Poll and wait for the TCR addend register update */
+	/* 6. Enable the fine update method */
+	/* 7. Program the second and nanosecond registers */
+	/* TODO: enable one-step timestamping if needed */
+
+	/* initialize system time */
+	ktime_get_real_ts64(&now);
+
+	/* lower 32 bits of tv_sec are safe until y2106 */
+	pf->hwts_ops->init_systime(pf->ptp_addr, (u32)now.tv_sec, now.tv_nsec);
+
+	return 0;
+}
+
+int rnpgbe_ptp_set_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	u32 ptp_v2 = 0;
+	u32 tstamp_all = 0;
+	u32 ptp_over_ipv4_udp = 0;
+	u32 ptp_over_ipv6_udp = 0;
+	u32 ptp_over_ethernet = 0;
+	u32 snap_type_sel = 0;
+	u32 ts_master_en = 0;
+	u32 ts_event_en = 0;
+	u32 value = 0;
+	s32 ret = -1;
+
+	if (!(pf->flags2 & RNP_FLAG2_PTP_ENABLED)) {
+		pci_alert(pf->pdev, "No support for HW time stamping\n");
+		pf->ptp_tx_en = 0;
+		pf->ptp_rx_en = 0;
+
+		return -EOPNOTSUPP;
+	}
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	netdev_info(pf->netdev,
+		    "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+		    __func__, config.flags, config.tx_type, config.rx_filter);
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	if (config.tx_type != HWTSTAMP_TX_OFF &&
+	    config.tx_type != HWTSTAMP_TX_ON)
+		return -ERANGE;
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		/* time stamp no incoming packet at all */
+		config.rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+		/* PTP v1, UDP, any kind of event packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+		/* 'mac' hardware can support Sync, Pdelay_Req and
+		 * Pdelay_resp by setting bit14 and bits17/16 to 01
+		 * This leaves Delay_Req timestamps out.
+		 * Enable all events *and* general purpose message
+		 * timestamping
+		 */
+		snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+		/* PTP v1, UDP, Sync packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+		/* take time stamp for SYNC messages only */
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+		/* PTP v1, UDP, Delay_req packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+		/* take time stamp for Delay_Req messages only */
+		ts_master_en = RNP_PTP_TCR_TSMSTRENA;
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+		/* PTP v2, UDP, any kind of event packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+
+		/* take time stamp for all event messages */
+		snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1;
+
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+		/* PTP v2, UDP, Sync packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		/* take time stamp for SYNC messages only */
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+		/* PTP v2, UDP, Delay_req packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		/* take time stamp for Delay_Req messages only */
+		ts_master_en = RNP_PTP_TCR_TSMSTRENA;
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+		/* PTP v2/802.AS1 any layer, any kind of event packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		snap_type_sel = RNP_PTP_TCR_SNAPTYPSEL_1;
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		ptp_over_ethernet = RNP_PTP_TCR_TSIPENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+		/* PTP v2/802.AS1, any layer, Sync packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		/* take time stamp for SYNC messages only */
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		ptp_over_ethernet = RNP_PTP_TCR_TSIPENA;
+		break;
+
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		/* PTP v2/802.AS1, any layer, Delay_req packet */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+		ptp_v2 = RNP_PTP_TCR_TSVER2ENA;
+		/* take time stamp for Delay_Req messages only */
+		ts_master_en = RNP_PTP_TCR_TSMSTRENA;
+		ts_event_en = RNP_PTP_TCR_TSEVNTENA;
+
+		ptp_over_ipv4_udp = RNP_PTP_TCR_TSIPV4ENA;
+		ptp_over_ipv6_udp = RNP_PTP_TCR_TSIPV6ENA;
+		ptp_over_ethernet = RNP_PTP_TCR_TSIPENA;
+		break;
+
+	case HWTSTAMP_FILTER_NTP_ALL:
+	case HWTSTAMP_FILTER_ALL:
+		/* time stamp any incoming packet */
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		tstamp_all = RNP_PTP_TCR_TSENALL;
+		break;
+
+	default:
+		return -ERANGE;
+	}
+
+	pf->ptp_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+	pf->ptp_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+
+	netdev_info(
+		pf->netdev,
+		"ptp config rx filter 0x%.2x tx_type 0x%.2x rx_en[%d] tx_en[%d]\n",
+		config.rx_filter, config.tx_type, pf->ptp_rx_en, pf->ptp_tx_en);
+	if (!pf->ptp_rx_en && !pf->ptp_tx_en)
+		/* neither RX nor TX uses hardware timestamps, so clear the PTP regs */
+		pf->hwts_ops->config_hw_tstamping(pf->ptp_addr, 0);
+	else {
+		value = (RNP_PTP_TCR_TSENA | RNP_PTP_TCR_TSCFUPDT |
+			 RNP_PTP_TCR_TSCTRLSSR | tstamp_all | ptp_v2 |
+			 ptp_over_ethernet | ptp_over_ipv6_udp |
+			 ptp_over_ipv4_udp | ts_master_en | ts_event_en |
+			 snap_type_sel);
+
+		ret = rnpgbe_ptp_setup_ptp(pf, value);
+		if (ret < 0)
+			return ret;
+	}
+	pf->ptp_config_value = value;
+	memcpy(&pf->tstamp_config, &config, sizeof(config));
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT :
+								      0;
+}
+
+/* structure describing a PTP hardware clock */
+static struct ptp_clock_info rnpgbe_ptp_clock_ops = {
+	.owner = THIS_MODULE,
+	.name = "rnp ptp",
+	.max_adj = 50000000,
+	.n_alarm = 0,
+	.n_ext_ts = 0,
+	.n_per_out = 0,
+	.n_pins = 0, /* should be 0 if not set */
+	.adjfine = rnpgbe_ptp_adjfreq,
+	.adjtime = rnpgbe_ptp_adjtime,
+	.gettime64 = rnpgbe_ptp_gettime,
+	.settime64 = rnpgbe_ptp_settime,
+	.enable = rnpgbe_ptp_feature_enable,
+};
+
+int rnpgbe_ptp_register(struct rnpgbe_adapter *pf)
+{
+	pf->hwts_ops = &mac_ptp;
+
+	pf->ptp_tx_en = 0;
+	pf->ptp_rx_en = 0;
+
+	spin_lock_init(&pf->ptp_lock);
+	pf->flags2 |= RNP_FLAG2_PTP_ENABLED;
+	pf->ptp_clock_ops = rnpgbe_ptp_clock_ops;
+
+	/* default MAC clock rate is 50 MHz */
+	pf->clk_ptp_rate = 50000000;
+	if (pf->pdev == NULL)
+		printk(KERN_DEBUG "pdev dev is null\n");
+
+	pf->ptp_clock = ptp_clock_register(&pf->ptp_clock_ops, &pf->pdev->dev);
+	if (pf->ptp_clock == NULL)
+		pci_err(pf->pdev, "ptp clock register failed\n");
+
+	if (IS_ERR(pf->ptp_clock)) {
+		pci_err(pf->pdev, "ptp_clock_register failed\n");
+		pf->ptp_clock = NULL;
+	} else {
+		pci_info(pf->pdev, "registered PTP clock\n");
+	}
+
+	return 0;
+}
+
+void rnpgbe_ptp_unregister(struct rnpgbe_adapter *pf)
+{
+	/*1. stop the ptp module*/
+	if (pf->ptp_clock) {
+		ptp_clock_unregister(pf->ptp_clock);
+		pf->ptp_clock = NULL;
+		pr_debug("Removed PTP HW clock successfully on %s\n",
+			 "rnpgbe_ptp");
+	}
+}
+
+#if defined(DEBUG_PTP_HARD_SOFTWAY_RX) || defined(DEBUG_PTP_HARD_SOFTWAY_TX)
+static u64 rnpgbe_get_software_ts(void)
+{
+	struct timespec64 ts;
+
+	ktime_get_real_ts64(&ts);
+	return (ts.tv_nsec + ts.tv_sec * 1000000000ULL);
+}
+#endif
+
+#if defined(DEBUG_PTP_TX_TIMESTAMP) || defined(DEBUG_PTP_RX_TIMESTAMP)
+#define TIME_ZONE_CHINA (8)
+char *asctime(const struct tm *timeptr)
+{
+	static const char wday_name[][4] = { "Sun", "Mon", "Tue", "Wed",
+					     "Thu", "Fri", "Sat" };
+	static const char mon_name[][4] = { "Jan", "Feb", "Mar", "Apr",
+					    "May", "Jun", "Jul", "Aug",
+					    "Sep", "Oct", "Nov", "Dec" };
+	static char result[26];
+
+	sprintf(result, "%.3s %.3s%3d %.2d:%.2d:%.2d %ld\n",
+		wday_name[timeptr->tm_wday], mon_name[timeptr->tm_mon],
+		timeptr->tm_mday, timeptr->tm_hour + TIME_ZONE_CHINA,
+		timeptr->tm_min, timeptr->tm_sec, 1900 + timeptr->tm_year);
+	return result;
+}
+
+static void rnpgbe_print_human_timestamp(uint64_t ns, uint8_t *direct)
+{
+	struct timespec64 ts;
+	struct tm tms;
+	ktime_t ktm = ns_to_ktime(ns);
+
+	ts = ktime_to_timespec64(ktm);
+	time64_to_tm(ts.tv_sec, ts.tv_nsec / 1000000000ULL, &tms);
+	printk(KERN_DEBUG "[%s] %s ------\n", direct, asctime(&tms));
+}
+#endif
+
+void rnpgbe_tx_hwtstamp_work(struct work_struct *work)
+{
+	struct rnpgbe_adapter *adapter =
+		container_of(work, struct rnpgbe_adapter, tx_hwtstamp_work);
+#ifdef FW_UART_SHOW_TSTAMPS
+	struct rnpgbe_hw *hw = &adapter->hw;
+#endif
+	void __iomem *ioaddr = adapter->hw.hw_addr;
+
+	/* 1. Read the port's timestamp status reg */
+	/* 2. If the status bit is set, read the nsec and sec regs */
+	u64 nanosec = 0, sec = 0;
+
+	if (!adapter->ptp_tx_skb) {
+		clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state);
+		return;
+	}
+
+	if (rnpgbe_rd_reg(ioaddr + RNP_ETH_PTP_TX_TSVALUE_STATUS(0)) & 0x01) {
+		struct sk_buff *skb = adapter->ptp_tx_skb;
+		struct skb_shared_hwtstamps shhwtstamps;
+		u64 txstmp = 0;
+		/* read nsec and sec, convert sec to nsec and add them */
+
+		nanosec = rnpgbe_rd_reg(ioaddr + RNP_ETH_PTP_TX_LTIMES(0));
+		sec = rnpgbe_rd_reg(ioaddr + RNP_ETH_PTP_TX_HTIMES(0));
+		/* once the timestamp has been read, tell the hardware it may
+		 * capture a new one by toggling the tx_hwts clear reg from
+		 * high to low
+		 */
+		rnpgbe_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0),
+			      PTP_GET_TX_HWTS_FINISH);
+		rnpgbe_wr_reg(ioaddr + RNP_ETH_PTP_TX_CLEAR(0),
+			      PTP_GET_TX_HWTS_UPDATE);
+
+		txstmp = nanosec & PTP_HWTX_TIME_VALUE_MASK;
+		txstmp += (sec & PTP_HWTX_TIME_VALUE_MASK) * 1000000000ULL;
+
+		/* Clear the global tx_hwtstamp_skb pointer and force writes
+		 * prior to notifying the stack of a Tx timestamp.
+		 */
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+		shhwtstamps.hwtstamp = ns_to_ktime(txstmp);
+		adapter->ptp_tx_skb = NULL;
+#ifdef DEBUG_PTP_TX_TIMESTAMP
+		rnpgbe_print_human_timestamp(txstmp, "TX");
+#endif
+		/* force the write before skb_tstamp_tx, because the xmit
+		 * path will reuse the pointer to store the next PTP skb
+		 */
+		wmb();
+
+		skb_tstamp_tx(skb, &shhwtstamps);
+		dev_consume_skb_any(skb);
+		clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state);
+		/* send tstamps to hw */
+#ifdef FW_UART_SHOW_TSTAMPS
+		rnpgbe_mbx_tstamps_show(hw, sec, nanosec);
+#endif
+	} else if (time_after(jiffies,
+			      adapter->tx_hwtstamp_start +
+				      adapter->tx_timeout_factor * HZ)) {
+		/* timed out waiting for the hardware timestamp; drop the skb */
+		if (adapter->ptp_tx_skb)
+			dev_kfree_skb_any(adapter->ptp_tx_skb);
+		adapter->ptp_tx_skb = NULL;
+		adapter->tx_hwtstamp_timeouts++;
+		clear_bit_unlock(__RNP_PTP_TX_IN_PROGRESS, &adapter->state);
+		netdev_warn(adapter->netdev, "clearing Tx timestamp hang\n");
+	} else {
+		/* reschedule to check later */
+#ifdef DEBUG_PTP_HARD_SOFTWAY_TX
+		struct skb_shared_hwtstamps shhwtstamp;
+		u64 ns = 0;
+
+		ns = rnpgbe_get_software_ts();
+		shhwtstamp.hwtstamp = ns_to_ktime(ns);
+		if (adapter->ptp_tx_skb) {
+			skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamp);
+			dev_consume_skb_any(adapter->ptp_tx_skb);
+			adapter->ptp_tx_skb = NULL;
+		}
+#else
+		schedule_work(&adapter->tx_hwtstamp_work);
+#endif
+	}
+}
+
+void rnpgbe_ptp_get_rx_hwstamp(struct rnpgbe_adapter *adapter,
+			       union rnpgbe_rx_desc *desc, struct sk_buff *skb)
+{
+	u64 ns = 0;
+	u64 tsvalueh = 0, tsvaluel = 0;
+	struct skb_shared_hwtstamps *hwtstamps = NULL;
+
+	if (!skb || !adapter->ptp_rx_en) {
+		netdev_dbg(adapter->netdev,
+			   "hwstamp skb is null or rx_en is zero %u\n",
+			   adapter->ptp_rx_en);
+		return;
+	}
+
+#ifdef DEBUG_PTP_HARD_SOFTWAY_RX
+	ns = rnpgbe_get_software_ts();
+#else
+	if (likely(!((desc->wb.cmd) & RNP_RXD_STAT_PTP)))
+		return;
+	hwtstamps = skb_hwtstamps(skb);
+	/* The RX hardware timestamp is stored in front of the MAC header.
+	 * skb->head and skb->data point to the same location right after
+	 * alloc_skb, so pull skb->data forward past the timestamp to reach
+	 * the real MAC header; skb->head itself is left untouched.
+	 */
+	/* layout: the low 8 bytes are reserved, the high 8 bytes hold the
+	 * timestamp; its high 32 bits are seconds, the low 32 bits nanoseconds
+	 */
+	skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE, &tsvalueh,
+					 RNP_RX_SEC_SIZE);
+	skb_copy_from_linear_data_offset(skb,
+					 RNP_RX_TIME_RESERVE + RNP_RX_SEC_SIZE,
+					 &tsvaluel, RNP_RX_NANOSEC_SIZE);
+	skb_pull(skb, RNP_RX_HWTS_OFFSET);
+	tsvalueh = ntohl(tsvalueh);
+	tsvaluel = ntohl(tsvaluel);
+
+	ns = tsvaluel & RNP_RX_NSEC_MASK;
+	ns += ((tsvalueh & RNP_RX_SEC_MASK) * 1000000000ULL);
+
+	netdev_dbg(adapter->netdev,
+		   "ptp get hardware ts-sec %llu ts-nanosec %llu\n", tsvalueh,
+		   tsvaluel);
+#endif
+	hwtstamps->hwtstamp = ns_to_ktime(ns);
+#ifdef DEBUG_PTP_RX_TIMESTAMP
+	rnpgbe_print_human_timestamp(ns, "RX");
+#endif
+}
+
+void rnpgbe_ptp_reset(struct rnpgbe_adapter *adapter)
+{
+	rnpgbe_ptp_setup_ptp(adapter, adapter->ptp_config_value);
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h
new file mode 100644
index 0000000000000..b23d03bb9b30e
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_ptp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef __RNPGBE_PTP_H__
+#define __RNPGBE_PTP_H__
+
+struct rnpgbe_hwtimestamp {
+	void (*config_hw_tstamping)(void __iomem *ioaddr, u32 data);
+	void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+					    int gmac4, u32 *ssinc);
+	void (*config_mac_irq_enable)(void __iomem *ioaddr, bool on);
+	int (*init_systime)(void __iomem *ioaddr, u32 sec, u32 nsec);
+	int (*config_addend)(void __iomem *ioaddr, u32 addend);
+	int (*adjust_systime)(void __iomem *ioaddr, u32 sec, u32 nsec,
+			      int add_sub, int gmac4);
+	void (*get_systime)(void __iomem *ioaddr, u64 *systime);
+};
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x00 /* Timestamp Control Reg */
+#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */
+#define PTP_STSR 0x08 /* System Time – Seconds Reg */
+#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */
+#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */
+#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */
+#define PTP_TAR 0x18 /* Timestamp Addend Reg */
+#define PTP_PPS_CONTROL 0x2c
+#define RNP_PTP_STNSUR_ADDSUB_SHIFT 31
+#define RNP_PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 1e9 ns */
+#define RNP_PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */
+/* PTP Timestamp control register defines */
+#define RNP_PTP_TCR_TSENA BIT(0) /*Timestamp Enable*/
+#define RNP_PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */
+#define RNP_PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */
+#define RNP_PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */
+#define RNP_PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */
+#define RNP_PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */
+#define RNP_PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */
+#define RNP_PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */
+#define RNP_PTP_TCR_TSVER2ENA                                                  \
+	BIT(10) /* Enable PTP packet Processing for Version 2 Format */
+#define RNP_PTP_TCR_TSIPENA                                                    \
+	BIT(11) /* Enable Processing of PTP over Ethernet Frames */
+#define RNP_PTP_TCR_TSIPV6ENA                                                  \
+	BIT(12) /* Enable Processing of PTP Frames Sent over IPv6-UDP */
+#define RNP_PTP_TCR_TSIPV4ENA                                                  \
+	BIT(13) /* Enable Processing of PTP Frames Sent over IPv4-UDP */
+#define RNP_PTP_TCR_TSEVNTENA                                                  \
+	BIT(14) /* Enable Timestamp Snapshot for Event Messages */
+#define RNP_PTP_TCR_TSMSTRENA                                                  \
+	BIT(15) /* Enable Snapshot for Messages Relevant to Master */
+/* Note: 802.1AS works over Ethernet frames,
+ * while normal PTP works over UDP
+ */
+
+/* Select PTP packets for Taking Snapshots
+ * On mac specifically:
+ * Enable SYNC, Pdelay_Req, Pdelay_Resp when TSEVNTENA is enabled.
+ * or
+ * Enable  SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp,
+ * Pdelay_Resp_Follow_Up if TSEVNTENA is disabled
+ */
+#define RNP_PTP_TCR_SNAPTYPSEL_1 BIT(16)
+#define RNP_PTP_TCR_TSENMACADDR                                                \
+	BIT(18) /* Enable MAC address for PTP Frame Filtering */
+#define RNP_PTP_TCR_ESTI                                                       \
+	BIT(20) /* External System Time Input or MAC Internal Clock */
+#define RNP_PTP_TCR_AV8021ASMEN BIT(28) /* AV 802.1AS Mode Enable */
+/* Sub-second increment defines */
+#define RNP_PTP_SSIR_SSINC_MASK (0xff) /* Sub-second increment value */
+#define RNP_PTP_SSIR_SSINC_SHIFT (16) /* Sub-second increment offset */
+#define RNP_MAC_TXTSC BIT(15) /* TX timestamp register fill complete */
+#define RNP_MAC_TXTSSTSLO GENMASK(30, 0) /* nanosecond valid value */
+#define RNP_RX_SEC_MASK GENMASK(30, 0)
+#define RNP_RX_NSEC_MASK GENMASK(30, 0)
+#define RNP_RX_TIME_RESERVE (8)
+#define RNP_RX_SEC_SIZE (4)
+#define RNP_RX_NANOSEC_SIZE (4)
+#define RNP_RX_HWTS_OFFSET                                                     \
+	(RNP_RX_SEC_SIZE + RNP_RX_NANOSEC_SIZE + RNP_RX_TIME_RESERVE)
+#define PTP_HWTX_TIME_VALUE_MASK GENMASK(31, 0)
+#define PTP_GET_TX_HWTS_FINISH (1)
+#define PTP_GET_TX_HWTS_UPDATE (0)
+/* hardware timestamping unavailable, so fake timestamps from the software clock */
+#define DEBUG_PTP_HARD_SOFTWAY
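
As a worked example of the snapshot-selection note above, the sketch below composes a TCR value for timestamping only PTPv2 event messages carried over Ethernet, IPv4-UDP and IPv6-UDP. It is built solely from the defines in this header and is not the driver's actual hwtstamp configuration path.

/* Sketch: with TSEVNTENA set, only SYNC/Pdelay_Req/Pdelay_Resp are
 * snapshotted, per the comment above.  Illustrative only.
 */
static inline u32 example_tcr_v2_event_only(void)
{
	return RNP_PTP_TCR_TSENA | RNP_PTP_TCR_TSCFUPDT |
	       RNP_PTP_TCR_TSCTRLSSR | RNP_PTP_TCR_TSVER2ENA |
	       RNP_PTP_TCR_TSIPENA | RNP_PTP_TCR_TSIPV4ENA |
	       RNP_PTP_TCR_TSIPV6ENA | RNP_PTP_TCR_TSEVNTENA;
}
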
+
+int rnpgbe_ptp_get_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr);
+int rnpgbe_ptp_set_ts_config(struct rnpgbe_adapter *pf, struct ifreq *ifr);
+int rnpgbe_ptp_register(struct rnpgbe_adapter *pf);
+void rnpgbe_ptp_unregister(struct rnpgbe_adapter *pf);
+void rnpgbe_ptp_get_rx_hwstamp(struct rnpgbe_adapter *pf,
+			       union rnpgbe_rx_desc *desc, struct sk_buff *skb);
+void rnpgbe_tx_hwtstamp_work(struct work_struct *work);
+void rnpgbe_ptp_reset(struct rnpgbe_adapter *adapter);
+
+#endif /* __RNPGBE_PTP_H__ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h
new file mode 100644
index 0000000000000..b82919c4c0358
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_regs.h
@@ -0,0 +1,774 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef RNPGBE_REGS_H
+#define RNPGBE_REGS_H
+
+/*             BAR2 memory                   */
+/* ------------------------------------------*/
+/*	module  | size  |  start   |    end  */
+/*	DMA	| 32KB	| 0_0000H  | 0_7FFFH */
+/*	REG	| 32KB	| 0_8000H  | 0_FFFFH */
+/*	ETH	| 64KB	| 1_0000H  | 1_FFFFH */
+/*	GMAC	| 32KB	| 2_0000H  | 2_7FFFH */
+/*	MSIX    | 32KB  | 2_8000H  | 2_FFFFH */
+/* ------------------------------------------*/
+
+/* ==================== RNP-DMA Global Registers ==================== */
+/* n10 */
+#define RNP10_RING_BASE (0x8000)
+/* n20 */
+#define RNP20_RING_BASE (0x8000)
+/* n500 */
+#define RNP500_RING_BASE (0x1000)
+#define RING_OFFSET(queue_idx) (0x100 * (queue_idx))
+#define RNP_DMA_VERSION (0x0000)
+#define RNP_DMA_CONFIG (0x0004)
+#define DMA_MAC_LOOPBACK (1 << 0)
+#define DMA_SWITCH_LOOPBACK (1 << 1)
+#define DMA_VEB_BYPASS (1 << 4)
+#define DMA_AXI_ORDER (1 << 5)
+#define DMA_RX_PADDING (1 << 8)
+#define DMA_MAP_MODE(n) (n << 12)
+#define DMA_RX_FRAGMENT_BYTES(n) (((n) / 16) << 16)
+#define RNP_DMA_STATUS (0x0008)
+#define RNP_DMA_RX_DATA_PROG_FULL_THRESH (0x00a0)
+#define DMA_RING_NUM (0xff << 24)
+#define RC_CONTROL_HW (0x01)
+#define RC_CONTROL_PHY_DRIVER (0x02)
+#define RC_JUMP_STATUS (0x04)
+#define RC_PHY_LINK_DONE (0x08)
+#define RC_LINK_CHANGE (0x10)
+#define RNP_DMA_DUMY (0x000c)
+#define RNP_DMA_RX_START (0x10)
+#define RNP_DMA_RX_READY (0x14)
+#define RNP_DMA_TX_START (0x18)
+#define RNP_DMA_TX_READY (0x1c)
+#define RNP_DMA_INT_STAT (0x20)
+#define RNP_DMA_INT_MASK (0x24)
+#define TX_INT_MASK (1 << 1)
+#define RX_INT_MASK (1 << 0)
+#define RNP_DMA_INT_CLR (0x28)
+#define RNP_DMA_INT_TRIG (0x2c)
+#define RNP_DMA_AXI_EN (0x0010)
+#define RX_AXI_RW_EN (0x03 << 0)
+#define TX_AXI_RW_EN (0x03 << 2)
+#define RNP_DMA_AXI_STAT (0x0014)
+#define RNP_VEB_MAC_MASK_LO (0x0020)
+#define RNP_VEB_MAC_MASK_HI (0x0024)
+#define RNP_VEB_VLAN_MASK (0x0028)
+#define DEBUG_PROBE_NUM 16
+#define RNP_DMA_DEBUG_PROBE_LO_REG(n) (0x0100 + 0x08 * (n))
+#define RNP_DMA_DEBUG_PROBE_HI_REG(n) (0x0100 + 0x08 * (n))
+#define DEBUG_CNT_NUM 76
+#define RNP_DMA_DEBUG_CNT(n) (0x0200 + 0x04 * (n))
+#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_0 (RNP_DMA_DEBUG_CNT(17))
+#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_1 (RNP_DMA_DEBUG_CNT(18))
+#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_2 (RNP_DMA_DEBUG_CNT(19))
+#define RNP_DMA_STATS_DMA_TO_MAC_CHANNEL_3 (RNP_DMA_DEBUG_CNT(20))
+#define RNP_DMA_STATS_DMA_TO_SWITCH (RNP_DMA_DEBUG_CNT(21))
+#define RNP_DMA_STATS_MAC_TO_DMA (RNP_DMA_DEBUG_CNT(22))
+#define RNP_DMA_STATS_SWITCH_TO_DMA (RNP_DMA_DEBUG_CNT(23))
+#define RNP_PCI_WR_TO_HOST (RNP_DMA_DEBUG_CNT(34))
+/* RX-Queue Registers */
+#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI (0x30)
+#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO (0x34)
+#define RNP_DMA_REG_RX_DESC_BUF_LEN (0x38)
+#define RNP_DMA_REG_RX_DESC_BUF_HEAD (0x3c)
+#define RNP_DMA_REG_RX_DESC_BUF_TAIL (0x40)
+#define RNP_DMA_REG_RX_DESC_FETCH_CTRL (0x44)
+#define RNP_DMA_REG_RX_INT_DELAY_TIMER (0x48)
+#define RNP_DMA_REG_RX_INT_DELAY_PKTCNT (0x4c)
+#define RNP_DMA_REG_RX_ARB_DEF_LVL (0x50)
+#define PCI_DMA_REG_RX_DESC_TIMEOUT_TH (0x54)
+#define PCI_DMA_REG_RX_SCATTER_LENGTH (0x58)
+/* TX-Queue Registers */
+#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI (0x60)
+#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO (0x64)
+#define RNP_DMA_REG_TX_DESC_BUF_LEN (0x68)
+#define RNP_DMA_REG_TX_DESC_BUF_HEAD (0x6c)
+#define RNP_DMA_REG_TX_DESC_BUF_TAIL (0x70)
+#define RNP_DMA_REG_TX_DESC_FETCH_CTRL (0x74)
+#define RNP_DMA_REG_TX_INT_DELAY_TIMER (0x78)
+#define RNP_DMA_REG_TX_INT_DELAY_PKTCNT (0x7c)
+#define RNP_DMA_REG_TX_ARB_DEF_LVL (0x80)
+#define RNP_DMA_REG_TX_FLOW_CTRL_TH (0x84)
+#define RNP_DMA_REG_TX_FLOW_CTRL_TM (0x88)
+#define RNP_DMA_PKT_FIFO_DATA_PROG_FULL_THRESH (0x0098)
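
RING_OFFSET() above spaces each queue's register block 0x100 apart from the per-chip ring base (RNP10_RING_BASE / RNP500_RING_BASE). A hedged sketch of the implied addressing follows; whether the driver composes addresses exactly this way is an assumption, not something this header states.

/* Sketch: BAR2 offset of a queue's RX tail register, assuming
 * ring_base + RING_OFFSET(queue) + per-queue register offset.
 */
static inline u32 example_rx_tail_offset(u32 ring_base, u32 queue_idx)
{
	return ring_base + RING_OFFSET(queue_idx) +
	       RNP_DMA_REG_RX_DESC_BUF_TAIL;
}
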
+/* VEB Registers */
+#define VEB_TBL_CNTS 64
+#define RNP_DMA_PORT_VBE_MAC_LO_TBL(port, vf)                                  \
+	(0x80A0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_PORT_VBE_MAC_HI_TBL(port, vf)                                  \
+	(0x80B0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_PORT_VEB_VID_TBL(port, vf) (0x80C0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_PORT_VEB_VF_RING_TBL(port, vf)                                 \
+	(0x80D0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_STATS_MAC_TO_MAC (0x1b0)
+#define RNP_DMA_STATS_SWITCH_TO_SWITCH (0x1a4)
+
+/* ================================================================== */
+#define RNP500_NIC_BASE (0x8000)
+#define RNP500_TOP_NIC_REST_N (0x8010 - RNP500_NIC_BASE)
+#define RNP500_TOP_MAC_OUI (0xc004 - RNP500_NIC_BASE)
+#define RNP500_TOP_MAC_SN (0xc008 - RNP500_NIC_BASE)
+#define RNP500_TOP_NIC_CONFIG (0x0004)
+
+/* ==================== RNP-ETH Global Registers ==================== */
+#define RNP_ETH_BASE (0x10000)
+
+/*
+ * [3:0]:
+ * 4'b0000:RSS disable
+ * 4'b0001:RSS only
+ * 4'b0100:DCB and RSS--8*16
+ * 4'b1010:POOLS and RSS--32*4
+ * [3] :virtual enable
+ * [16]:ipv4_hash_tcp_enable
+ * [17]:ipv4_hash_enable
+ * [20]:ipv6_hash_enable
+ * [21]:ipv6_hash_tcp_enable
+ * [22]:ipv4_hash_udp_enable
+ * [23]:ipv6_hash_udp_enable
+ * [24]:ipv4_hash_sctp_enable
+ * [25]:ipv6_hash_sctp_enable
+ */
+
+#define INNER_L4_BIT BIT(6)
+#define PKT_LEN_ERR (2)
+#define HDR_LEN_ERR (1)
+#define DROP_ALL_THRESH (2046) /* drop all rx */
+#define RECEIVE_ALL_THRESH (0x270) /* receive all rx */
+#define RNP500_VEB_TBL_CNTS 8
+#define RNP500_DMA_RBUF_FIFO (0x00b0)
+#define RNP500_DMA_PORT_VBE_MAC_LO_TBL(port, vf)                               \
+	(0x10c0 + 4 * (port) + 0x100 * (vf))
+#define RNP500_DMA_PORT_VBE_MAC_HI_TBL(port, vf)                               \
+	(0x10c4 + 4 * (port) + 0x100 * (vf))
+#define RNP500_DMA_PORT_VEB_VID_TBL(port, vf)                                  \
+	(0x10C8 + 4 * (port) + 0x100 * (vf))
+#define RNP500_DMA_PORT_VEB_VF_RING_TBL(port, vf)                              \
+	(0x10cc + 4 * (port) + 0x100 * (vf))
+#define RNP500_ETH_BASE (0x10000)
+#define RNP500_ETH_TUPLE5_SAQF(n) (0xc000 + 0x04 * (n))
+#define RNP500_ETH_TUPLE5_DAQF(n) (0xc400 + 0x04 * (n))
+#define RNP500_ETH_TUPLE5_SDPQF(n) (0xc800 + 0x04 * (n))
+#define RNP500_ETH_TUPLE5_FTQF(n) (0xcc00 + 0x04 * (n))
+#define RNP500_ETH_TUPLE5_POLICY(n) (0xce00 + 0x04 * (n))
+#define RNP500_ETH_DEFAULT_RX_MIN_LEN (0x80f0)
+#define RNP500_ETH_DEFAULT_RX_MAX_LEN (0x80f4)
+#define RNP500_ETH_VLAN_VME_REG(n) (0x8040 + 0x04 * (n))
+#define RNP500_ETH_ERR_MASK_VECTOR (0x8060)
+#define RNP500_ETH_RSS_MASK (0x3ff0001)
+#define RNP500_ETH_ENABLE_RSS_ONLY (0x3f30001)
+#define RNP500_ETH_RSS_CONTROL (0x92a0)
+#define RNP500_MRQC_IOV_EN (0x92a0)
+#define RNP500_IOV_ENABLED (1 << 3)
+#define RNP500_ETH_DISABLE_RSS (0)
+#define RNP500_ETH_SYNQF (0x9290)
+#define RNP500_ETH_SYNQF_PRIORITY (0x9294)
+#define RNP500_ETH_FCS_EN (0x804c)
+#define RNP500_ETH_HIGH_WATER(n) (0x80c0 + n * (0x08))
+#define RNP500_ETH_LOW_WATER(n) (0x80c4 + n * (0x08))
+#define RNP500_ETH_WRAP_FIELD_TYPE (0x805c)
+#define RNP500_ETH_TX_VLAN_CONTROL_EANBLE (0x0070)
+#define RNP500_ETH_TX_VLAN_TYPE (0x0074)
+#define RNP500_ETH_RX_MAC_LEN_REG (0x80e0)
+#define RNP500_ETH_WHOLE_PKT_LEN_ERR_DROP (0x807c)
+#define RNP500_RAH_AV 0x80000000
+#define RNP500_ETH_RAR_RL(n) (0xa000 + 0x04 * n)
+#define RNP500_ETH_RAR_RH(n) (0xa400 + 0x04 * n)
+#define RNP500_FCTRL_BPE BIT(10)
+#define RNP500_FCTRL_UPE BIT(9)
+#define RNP500_FCTRL_MPE BIT(8)
+#define RNP500_ETH_DMAC_FCTRL (0x9110)
+#define RNP500_ETH_DMAC_MCSTCTRL (0x9114)
+#define RNP500_MCSTCTRL_MULTICASE_TBL_EN (1 << 4)
+#define RNP500_MCSTCTRL_UNICASE_TBL_EN (1 << 3)
+#define RNP500_VM_DMAC_MPSAR_RING(entry)                                       \
+	(0xb400 + (4 * (entry)))
+#define RNP500_ETH_MUTICAST_HASH_TABLE(n) (0xac00 + 0x04 * n)
+#define RNP500_ETH_RSS_KEY (0x92d0)
+#define RNP500_ETH_TC_IPH_OFFSET_TABLE(n) (0xe800 + 0x04 * (n))
+#define RNP500_ETH_RSS_INDIR_TBL(n) (0xe000 + 0x04 * (n))
+#define RNP500_ETH_VLAN_FILTER_TABLE(n) (0xb000 + 0x04 * (n))
+#define RNP500_VFTA RNP500_ETH_VLAN_FILTER_TABLE
+#define RNP500_VLVF(idx) (0xb600 + 4 * (idx))
+#define RNP500_VLVF_TABLE(idx) (0xb700 + 4 * (idx))
+#define RNP500_ETH_VLAN_FILTER_ENABLE (0x9118)
+#define RNP500_PRIORITY_1_MARK (0x8080)
+#define RNP500_PRIORITY_1 (400)
+#define RNP500_PRIORITY_0 (300)
+#define RNP500_PRIORITY_0_MARK (0x8084)
+#define RNP500_PRIORITY_EN (0x8088)
+#define RNP500_PRIORITY_EN_8023 (0x808c)
+#define RNP500_ETH_LAYER2_ETQF(n) (0x9200 + 0x04 * (n))
+#define RNP500_ETH_LAYER2_ETQS(n) (0x9240 + 0x04 * (n))
+#define RNP500_ETH_BYPASS (0x8000)
+#define RNP500_ETH_ERR_MASK_VECTOR (0x8060)
+#define RNP500_ETH_PRIV_DATA_CONTROL_REG (0x8068)
+#define RNP500_ETH_DEFAULT_RX_RING (0x806c)
+#define RNP500_ETH_DOUBLE_VLAN_DROP (0x8078)
+#define RNP500_HOST_FILTER_EN (0x800c)
+#define RNP500_BAD_PACKETS_RECEIVE_EN (0x8024)
+#define RNP500_REDIR_EN (0x8030)
+#define WATCHDOG_TIMER_ERROR BIT(0)
+#define RUN_FRAME_ERROR BIT(1)
+#define GAINT_FRAME_ERROR BIT(2)
+#define LATE_COLLISION_ERROR BIT(3)
+#define GMII_ERROR BIT(4)
+#define DRIBBLING_BIT_ERROR BIT(5)
+#define CRC_ERROR BIT(6)
+#define LENGTH_ERROR BIT(8)
+#define DA_FILTER_ERROR BIT(9)
+#define SA_FILTER_ERROR BIT(10)
+#define RNP500_MAC_ERR_MASK (0x8034)
+#define RNP500_ETH_SCTP_CHECKSUM_EN (0x8038)
+#define RNP500_ETH_VLAN_RM_TYPE (0x8054)
+#define RNP500_ETH_EXCEPT_DROP_PROC (0x0470)
+#define RNP500_ETH_EMAC_PARSE_PROGFULL_THRESH (0x8098)
+#define RNP500_ETH_TX_MUX_DROP (0x98)
+#define RNP500_VEB_VFMPRC(n) (0x4018 + 0x100 * n)
+#define RNP500_VEB_VFBPRC(n) (0x401c + 0x100 * n)
+#define RNP500_RX_TIMEOUT_DROP(n) (0x404c + 0x100 * n)
+#define RNP500_STATISTIC_CRL(n) (0x4048 + 0x100 * n)
+/* n500 statistics REG */
+#define RNP500_RX_MULTI_PKT_NUM (0x8224)
+#define RNP500_RX_BROAD_PKT_NUM (0x8228)
+#define RNP500_RX_MAC_CUT_NUM (0x8304)
+#define RNP500_RX_MAC_LCS_ERR_NUM (0x8308)
+#define RNP500_RX_MAC_LEN_ERR_NUM (0x830c)
+#define RNP500_RX_MAC_SLEN_ERR_NUM (0x8310)
+#define RNP500_RX_MAC_GLEN_ERR_NUM (0x8314)
+#define RNP500_RX_MAC_FCS_ERR_NUM (0x8318)
+#define RNP500_RX_MAC_SFCS_ERR_NUM (0x831c)
+#define RNP500_RX_MAC_GFCS_ERR_NUM (0x8320)
+#define RNP500_TX_MULTI_NUM (0x214)
+#define RNP500_TX_BROADCAST_NUM (0x218)
+#define RNP500_RX_DROP_PKT_NUM (0x8230)
+#define RNP500_RXTRANS_DROP (0x8908)
+#define RNP500_RXTRANS_CUT_ERR_PKTS (0x894c)
+#define RNP500_DECAP_PKT_DROP1_NUM (0x82ec)
+#define RNP500_MAC_COUNT_CONTROL (0x0100)
+#define RNP500_MAC_GLEN_ERR_NUM (0x01a8)
+#define RNP500_RX_DEBUG(n) (0x8400 + 0x04 * n)
+#define RNP500_ETH_HOST_L2_DROP_PKTS RNP500_RX_DEBUG(4)
+#define RNP500_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP500_RX_DEBUG(5)
+#define RNP500_ETH_ETYPE_DROP_PKTS RNP500_RX_DEBUG(6)
+#define RNP500_ETH_TCP_SYN_DROP_PKTS RNP500_RX_DEBUG(7)
+#define RNP500_ETH_REDIR_TUPLE5_DROP_PKTS RNP500_RX_DEBUG(8)
+
+/* ================================================================== */
+#define ETH_ERR_SCTP (1 << 4)
+#define ETH_ERR_L4 (1 << 3)
+#define ETH_ERR_L3 (1 << 2)
+#define ETH_ERR_PKT_LEN_ERR (1 << 1)
+#define ETH_ERR_HDR_LEN_ERR (1 << 0)
+#define ETH_IGNORE_ALL_ERR                                                     \
+	(ETH_ERR_SCTP | ETH_ERR_L4 | ETH_ERR_L3 | ETH_ERR_PKT_LEN_ERR |        \
+	 ETH_ERR_HDR_LEN_ERR)
+#define VM_DMAC_TBL_SZ 128
+#define RNP_ETH_ENABLE_RSS_ONLY (0x3f30001)
+#define RNP_ETH_DISABLE_RSS (0)
+#define RNP_ETH_TX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x0060 + 0x08 * (n))
+#define RNP_ETH_TX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x0064 + 0x08 * (n))
+#define RNP_ETH_EMAC_DMA_PROFULL_THRESH (RNP_ETH_BASE + 0x0080)
+#define RNP_ETH_EMAC_DMA_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0084)
+#define RNP_ETH_EMAC_SW_PROFULL_THRESH (RNP_ETH_BASE + 0x0088)
+#define RNP_ETH_EMAC_SW_PROEMPTY_THRESH (RNP_ETH_BASE + 0x008c)
+#define RNP_ETH_EMAC_BMC_TX_PROFULL_THRESH (RNP_ETH_BASE + 0x0090)
+#define RNP_ETH_EMAC_BMC_TX_PROEMPTY_THRESH (RNP_ETH_BASE + 0x0094)
+#define RNP_ETH_CNT_PKT_EMAC_TX(n) (RNP_ETH_BASE + 0x00a0 + 0x04 * (n))
+#define RNP_ETH_CNT_PKT_PECL_TX(n) (RNP_ETH_BASE + 0x00b0 + 0x04 * (n))
+#define RNP_ETH_STATUS_TX_FLOWCTRL(n) (RNP_ETH_BASE + 0x00c0 + 0x04 * (n))
+#define RNP_ETH_VERSION_FLOWWCTRL (RNP_ETH_BASE + 0x00d0)
+#define RNP_ETH_CFG_ETH_MAC (RNP_ETH_BASE + 0x00d4)
+#define RNP_ETH_SCA_TX_CS(port) (RNP_ETH_BASE + 0x0100 + 0x08 * (port))
+#define RNP_ETH_SCA_TX_NS(port) (RNP_ETH_BASE + 0x0104 + 0x08 * (port))
+#define RNP_ETH_TXTRANS_CS(port) (RNP_ETH_BASE + 0x0120 + 0x08 * (port))
+#define RNP_ETH_TXTRANS_NS(port) (RNP_ETH_BASE + 0x0124 + 0x08 * (port))
+#define RNP_ETH_1TO4_INST0_IN_PKTS (RNP_ETH_BASE + 0x0200)
+#define RNP_ETH_1TO4_INST1_IN_PKTS (RNP_ETH_BASE + 0x0204)
+#define RNP_ETH_1TO4_INST2_IN_PKTS (RNP_ETH_BASE + 0x0208)
+#define RNP_ETH_1TO4_INST3_IN_PKTS (RNP_ETH_BASE + 0x020c)
+#define RNP_ETH_IN_0_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0210 + 0x10 * (port))
+#define RNP_ETH_IN_1_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0214 + 0x10 * (port))
+#define RNP_ETH_IN_2_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x0218 + 0x10 * (port))
+#define RNP_ETH_IN_3_TX_PKT_NUM(port) (RNP_ETH_BASE + 0x021c + 0x10 * (port))
+#define RNP_ETH_EMAC_TX_TO_PHY_PKTS(port) (RNP_ETH_BASE + 0x0250 + 4 * (port))
+#define RNP_ETH_TXTRANS_PTP_PKT_NUM(port) (RNP_ETH_BASE + 0x0260 + 4 * (port))
+#define RNP_ETH_TX_DEBUG(n) (RNP_ETH_BASE + 0x0300 + 0x04 * (n))
+#define RNP_ETH_PTP_TX_STATUS(n) (RNP_ETH_BASE + 0x0400)
+#define RNP_ETH_PTP_TX_HTIMES(n) (RNP_ETH_BASE + 0x0404)
+#define RNP_ETH_PTP_TX_LTIMES(n) (RNP_ETH_BASE + 0x0408)
+#define RNP_ETH_PTP_TX_TSVALUE_STATUS(n) (RNP_ETH_BASE + 0x040c)
+#define RNP_ETH_PTP_TX_CLEAR(n) (RNP_ETH_BASE + 0x0410)
+#define RNP_ETH_MAC_SPEED_PORT(n) (RNP_ETH_BASE + 0x0450 + 0x04 * (n))
+#define RNP_ETH_MAC_LOOPBACK_MODE_PORT(n) (RNP_ETH_BASE + 0x0460 + 0x04 * (n))
+#define RNP_ETH_EXCEPT_DROP_PROC (RNP_ETH_BASE + 0x0470)
+#define RNP_ETH_IPP (RNP_ETH_BASE + 0x8000)
+#define RNP_ETH_BYPASS (RNP_ETH_BASE + 0x8000)
+#define RNP_ETH_TUNNEL_MOD (RNP_ETH_BASE + 0x8004)
+#define RNP_ETH_LOOPBACK_EN (RNP_ETH_BASE + 0x8008)
+#define RNP_FIFO_CTRL_MODE (RNP_ETH_BASE + 0x800c)
+#define RNP_ETH_VXLAN_PORT (RNP_ETH_BASE + 0x8010)
+#define RNP_ETH_NVGRE_PORT (RNP_ETH_BASE + 0x8014)
+#define RNP_ETH_RDMA_PORT (RNP_ETH_BASE + 0x8018)
+#define RNP_HOST_FILTER_EN (RNP_ETH_BASE + 0x801c)
+#define RNP_MNG_FILTER_EN (RNP_ETH_BASE + 0x8020)
+#define RNP_ETH_TCAM_EN (RNP_ETH_BASE + 0x8024)
+#define RNP_CONGEST_DROP_EN (RNP_ETH_BASE + 0x8028)
+#define RNP_REDIR_EN (RNP_ETH_BASE + 0x8030)
+#define RNP_ETH_SCTP_CHECKSUM_EN (RNP_ETH_BASE + 0x8038)
+#define RNP_ETH_ARP_FUNC_EN (RNP_ETH_BASE + 0x803c)
+#define RNP_ETH_VLAN_VME_REG(n) (RNP_ETH_BASE + 0x8040 + 0x04 * (n))
+#define RNP_ETH_CVLAN_RM_EN (RNP_ETH_BASE + 0x8050)
+#define RNP_ETH_VLAN_RM_TYPE (RNP_ETH_BASE + 0x8054)
+#define RNP_ETH_WRAP_FIELD_TYPE (RNP_ETH_BASE + 0x805c)
+#define RNP_ETH_ERR_MASK_VECTOR (RNP_ETH_BASE + 0x8060)
+#define RNP_ETH_DEFAULT_RX_RING (RNP_ETH_BASE + 0x806c)
+#define RNP_ETH_RX_PROGFULL_THRESH_PORT(n) (RNP_ETH_BASE + 0x8070 + 0x08 * (n))
+#define RNP_ETH_RX_PROGEMPTY_THRESH_PORT(n) (RNP_ETH_BASE + 0x8074 + 0x08 * (n))
+#define RNP_ETH_EMAC_GAT_PROGFULL_THRESH (RNP_ETH_BASE + 0x8090)
+#define RNP_ETH_EMAC_GAT_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x8094)
+#define RNP_ETH_EMAC_PARSE_PROGFULL_THRESH (RNP_ETH_BASE + 0x8098)
+#define RNP_ETH_EMAC_PARSE_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x809c)
+#define RNP_ETH_FC_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a0)
+#define RNP_ETH_FC_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80a4)
+#define RNP_ETH_DIS_PROGFULL_THRESH (RNP_ETH_BASE + 0x80a8)
+#define RNP_ETH_DIS_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80ac)
+#define RNP_ETH_COV_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b0)
+#define RNP_ETH_COV_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80b4)
+#define RNP_ETH_BMC_RX_PROGFULL_THRESH (RNP_ETH_BASE + 0x80b8)
+#define RNP_ETH_BMC_RX_PROGEMPTY_THRESH (RNP_ETH_BASE + 0x80bc)
+#define RNP_ETH_HIGH_WATER(n) (RNP_ETH_BASE + 0x80c0 + n * (0x08))
+#define RNP_ETH_LOW_WATER(n) (RNP_ETH_BASE + 0x80c4 + n * (0x08))
+#define RNP_ETH_DEFAULT_RX_MIN_LEN (RNP_ETH_BASE + 0x80f0)
+#define RNP_ETH_DEFAULT_RX_MAX_LEN (RNP_ETH_BASE + 0x80f4)
+#define RNP_ETH_PTP_EVENT_PORT (RNP_ETH_BASE + 0x80f8)
+#define RNP_ETH_PTP_GENER_PORT_REG (RNP_ETH_BASE + 0x80fc)
+#define RNP_ETH_RX_TRANS_CS_PORT(n) (RNP_ETH_BASE + 0x8100 + 0x08 * (n))
+#define RNP_ETH_RX_TRANS_NS_PORT(n) (RNP_ETH_BASE + 0x8104 + 0x08 * (n))
+#define RNP_ETH_GAT_RX_CS (RNP_ETH_BASE + 0x8120)
+#define RNP_ETH_GAT_RX_NS (RNP_ETH_BASE + 0x8124)
+#define RNP_ETH_EMAC_PIP_CS (RNP_ETH_BASE + 0x8128)
+#define RNP_ETH_EMAC_PIP_NS (RNP_ETH_BASE + 0x812c)
+#define RNP_ETH_EMAC_FC_CS (RNP_ETH_BASE + 0x8138)
+#define RNP_ETH_EMAC_FC_NS (RNP_ETH_BASE + 0x813c)
+#define RNP_ETH_EMAC_DIS_CS (RNP_ETH_BASE + 0x8140)
+#define RNP_ETH_EMAC_DIS_NS (RNP_ETH_BASE + 0x8144)
+#define RNP_ETH_HOST_L2_FILTER_CS (RNP_ETH_BASE + 0x8150)
+#define RNP_ETH_HOST_L2_FILTER_NS (RNP_ETH_BASE + 0x8154)
+#define RNP_ETH_EMAC_DECAP_CS (RNP_ETH_BASE + 0x8158)
+#define RNP_ETH_EMAC_DECAP_NS (RNP_ETH_BASE + 0x815c)
+#define RNP_ETH_PFC_CONFIG_PROT(n) (RNP_ETH_BASE + 0x8180 + n * (0x04))
+#define RNP_ETH_RX_PKT_NUM(port) (RNP_ETH_BASE + 0x8220 + 0x04 * (port))
+#define RNP_ETH_RX_DROP_PKT_NUM(port) (RNP_ETH_BASE + 0x8230 + 0x04 * (port))
+#define RNP_ETH_TOTAL_GAT_RX_PKT_NUM (RNP_ETH_BASE + 0x8240)
+#define RNP_ETH_PKT_ARP_REQ_NUM (RNP_ETH_BASE + 0x8250)
+#define RNP_ETH_PKT_ARP_RESPONSE_NUM (RNP_ETH_BASE + 0x8254)
+#define RNP_ETH_ICMP_NUM (RNP_ETH_BASE + 0x8258)
+#define RNP_ETH_PKT_UDP_NUM (RNP_ETH_BASE + 0x825c)
+#define RNP_ETH_PKT_TCP_NUM (RNP_ETH_BASE + 0x8260)
+#define RNP_ETH_PKT_ESP_NUM (RNP_ETH_BASE + 0x8264)
+#define RNP_ETH_PKT_GRE_NUM (RNP_ETH_BASE + 0x8268)
+#define RNP_ETH_PKT_SCTP_NUM (RNP_ETH_BASE + 0x826c)
+#define RNP_ETH_PKT_TCPSYN_NUM (RNP_ETH_BASE + 0x8270)
+#define RNP_ETH_PKT_VXLAN_NUM (RNP_ETH_BASE + 0x8274)
+#define RNP_ETH_PKT_NVGRE_NUM (RNP_ETH_BASE + 0x8278)
+#define RNP_ETH_PKT_FRAGMENT_NUM (RNP_ETH_BASE + 0x827c)
+#define RNP_ETH_PKT_LAYER1_VLAN_NUM (RNP_ETH_BASE + 0x8280)
+#define RNP_ETH_PKT_LAYER2_VLAN_NUM (RNP_ETH_BASE + 0x8284)
+#define RNP_ETH_PKT_IPV4_NUM (RNP_ETH_BASE + 0x8288)
+#define RNP_ETH_PKT_IPV6_NUM (RNP_ETH_BASE + 0x828c)
+#define RNP_ETH_PKT_INGRESS_NUM (RNP_ETH_BASE + 0x8290)
+#define RNP_ETH_PKT_EGRESS_NUM (RNP_ETH_BASE + 0x8294)
+#define RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM (RNP_ETH_BASE + 0x8298)
+#define RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM (RNP_ETH_BASE + 0x829c)
+#define RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a0)
+#define RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a4)
+#define RNP_ETH_PKT_SCTP_CHK_ERR_NUM (RNP_ETH_BASE + 0x82a8)
+#define RNP_ETH_PKT_VLAN_ERR_NUM (RNP_ETH_BASE + 0x82ac)
+#define RNP_ETH_PKT_RDMA_NUM (RNP_ETH_BASE + 0x82b0)
+#define RNP_ETH_PKT_ARP_AUTO_RESPONSE_NUM (RNP_ETH_BASE + 0x82b4)
+#define RNP_ETH_PKT_ICMPV6_NUM (RNP_ETH_BASE + 0x82b8)
+#define RNP_ETH_PKT_IPV6_EXTEND_NUM (RNP_ETH_BASE + 0x82bc)
+#define RNP_ETH_PKT_802_3_NUM (RNP_ETH_BASE + 0x82c0)
+#define RNP_ETH_PKT_EXCEPT_SHORT_NUM (RNP_ETH_BASE + 0x82c4)
+#define RNP_ETH_PKT_PTP_NUM (RNP_ETH_BASE + 0x82c8)
+#define RNP_ETH_DECAP_PKT_IN_NUM (RNP_ETH_BASE + 0x82d0)
+#define RNP_ETH_DECAP_PKT_OUT_NUM (RNP_ETH_BASE + 0x82d4)
+#define RNP_ETH_DECAP_DMAC_OUT_NUM (RNP_ETH_BASE + 0x82d8)
+#define RNP_ETH_DECAP_BMC_OUT_NUM (RNP_ETH_BASE + 0x82dc)
+#define RNP_ETH_DECAP_SW_OUT_NUM (RNP_ETH_BASE + 0x82e0)
+#define RNP_ETH_DECAP_MIRROR_OUT_NUM (RNP_ETH_BASE + 0x82e4)
+#define RNP_ETH_DECAP_PKT_DROP_NUM(port) (RNP_ETH_BASE + 0x82e8 + 0x04 * (port))
+#define RNP_ETH_INVALID_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(0)
+#define RNP_ETH_FILTER_DROP_PKTS RNP_ETH_DECAP_PKT_DROP_NUM(1)
+#define RNP_ETH_DECAP_DMAC_DROP_NUM (RNP_ETH_BASE + 0x82f0)
+#define RNP_ETH_DECAP_BMC_DROP_NUM (RNP_ETH_BASE + 0x82f4)
+#define RNP_ETH_DECAP_SWITCH_DROP_NUM (RNP_ETH_BASE + 0x82f8)
+#define RNP_ETH_DECAP_RM_VLAN_NUM (RNP_ETH_BASE + 0x82fc)
+#define RNP_ETH_RX_FC_PKT_IN_NUM (RNP_ETH_BASE + 0x8300)
+#define RNP_ETH_RX_FC_PKT_OUT_NUM (RNP_ETH_BASE + 0x8304)
+#define RNP_ETH_RX_FC_PKT_DROP0_NUM (RNP_ETH_BASE + 0x8308)
+#define RNP_ETH_RX_FC_PKT_DROP1_NUM (RNP_ETH_BASE + 0x830c)
+#define RNP_ETH_RING_FC_STATUS0 (RNP_ETH_BASE + 0x8310)
+#define RNP_ETH_RING_FC_STATUS1 (RNP_ETH_BASE + 0x8314)
+#define RNP_ETH_RING_FC_STATUS2 (RNP_ETH_BASE + 0x8318)
+#define RNP_ETH_RING_FC_STATUS3 (RNP_ETH_BASE + 0x831c)
+#define RNP_ETH_RX_DEBUG(n) (RNP_ETH_BASE + 0x8400 + 0x04 * (n))
+#define RNP_ETH_RX_FC_DEBUG0_NUM RNP_ETH_RX_DEBUG(0)
+#define RNP_ETH_RX_FC_DEBUG1_NUM RNP_ETH_RX_DEBUG(1)
+#define RNP_ETH_RX_DIS_DEBUG0_NUM RNP_ETH_RX_DEBUG(2)
+#define RNP_ETH_RX_DIS_DEBUG1_NUM RNP_ETH_RX_DEBUG(3)
+#define RNP_ETH_HOST_L2_DROP_PKTS RNP_ETH_RX_DEBUG(4)
+#define RNP_ETH_REDIR_INPUT_MATCH_DROP_PKTS RNP_ETH_RX_DEBUG(5)
+#define RNP_ETH_ETYPE_DROP_PKTS RNP_ETH_RX_DEBUG(6)
+#define RNP_ETH_TCP_SYN_DROP_PKTS RNP_ETH_RX_DEBUG(7)
+#define RNP_ETH_REDIR_TUPLE5_DROP_PKTS RNP_ETH_RX_DEBUG(8)
+#define RNP_ETH_REDIR_TCAM_DROP_PKTS RNP_ETH_RX_DEBUG(9)
+#define RNP_ETH_VMARK_TC(n) (RNP_ETH_BASE + 0x8500 + 0x04 * (n))
+#define RNP_RING_FC_ENABLE (RNP_ETH_BASE + 0x8520)
+#define RNP_SELECT_RING_EN(n) (RNP_ETH_BASE + 0x8524 + (0x4 * n))
+#define RNP_TC_FC_SW_EN (RNP_ETH_BASE + 0x8534)
+#define RNP_ETH_LOCAL_DIP(n) (RNP_ETH_BASE + 0x8600 + 0x04 * (n))
+#define RNP_ETH_LOCAL_DMAC_H(n) (RNP_ETH_BASE + 0x8700 + 0x04 * (n))
+#define RNP_ETH_LOCAL_DMAC_L(n) (RNP_ETH_BASE + 0x8800 + 0x04 * (n))
+#define RNP_RXTRANS_RX_PKTS(port) (RNP_ETH_BASE + 0x8900 + 0x40 * (port))
+#define RNP_RXTRANS_DROP_PKTS(port) (RNP_ETH_BASE + 0x8904 + 0x40 * (port))
+#define RNP_RXTRANS_WDT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8908 + 0x40 * (port))
+#define RNP_RXTRANS_CODE_ERR_PKTS(port) (RNP_ETH_BASE + 0x890c + 0x40 * (port))
+#define RNP_RXTRANS_CRC_ERR_PKTS(port) (RNP_ETH_BASE + 0x8910 + 0x40 * (port))
+#define RNP_RXTRANS_SLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8914 + 0x40 * (port))
+#define RNP_RXTRANS_GLEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8918 + 0x40 * (port))
+#define RNP_RXTRANS_IPH_ERR_PKTS(port) (RNP_ETH_BASE + 0x891c + 0x40 * (port))
+#define RNP_RXTRANS_CSUM_ERR_PKTS(port) (RNP_ETH_BASE + 0x8920 + 0x40 * (port))
+#define RNP_RXTRANS_LEN_ERR_PKTS(port) (RNP_ETH_BASE + 0x8924 + 0x40 * (port))
+#define RNP_RXTRANS_CUT_ERR_PKTS(port) (RNP_ETH_BASE + 0x8928 + 0x40 * (port))
+#define RNP_RXTRANS_EXCEPT_BYTES(port) (RNP_ETH_BASE + 0x892c + 0x40 * (port))
+#define RNP_RXTRANS_G1600_BYTES_PKTS(port)                                     \
+	(RNP_ETH_BASE + 0x8930 + 0x40 * (port))
+#define RNP_RX_RING_MAXRATE(n) (RNP_ETH_BASE + 0x8a00 + (0x4 * n))
+#define RNP_ETH_RX_PROGFULL_RTRN(n) (RNP_ETH_BASE + 0x8c00 + 0x04 * (n))
+#define RNP_ETH_CNT_PKT_EMAC_RX(n) (RNP_ETH_BASE + 0x8c10 + 0x04 * (n))
+#define RNP_ETH_CNT_PKT_PECL_RX(n) (RNP_ETH_BASE + 0x8c20 + 0x04 * (n))
+#define RNP_ETH_STATUS_RX_FLOWCTRL(n) (RNP_ETH_BASE + 0x8c30 + 0x04 * (n))
+#define RNP_ETH_DMAC_FCTRL (RNP_ETH_BASE + 0x9110)
+#define RNP_ETH_DMAC_MCSTCTRL (RNP_ETH_BASE + 0x9114)
+#define RNP_MCSTCTRL_MULTICASE_TBL_EN (1 << 2)
+#define RNP_MCSTCTRL_UNICASE_TBL_EN (1 << 3)
+#define RNP_MCSTCTRL_DMAC_47 0x00
+#define RNP_MCSTCTRL_DMAC_46 0x01
+#define RNP_MCSTCTRL_DMAC_45 0x02
+#define RNP_MCSTCTRL_DMAC_43 0x03
+#define RNP_ETH_VLAN_FILTER_ENABLE (RNP_ETH_BASE + 0x9118)
+#define RNP_ETH_INPORT_POLICY_VAL (RNP_ETH_BASE + 0x91d0)
+#define RNP_ETH_INPORT_POLICY_REG(n) (RNP_ETH_BASE + 0x91e0 + 0x04 * (n))
+#define ETH_LAYER2_NUM (16)
+#define RNP_ETH_LAYER2_ETQF(n) (RNP_ETH_BASE + 0x9200 + 0x04 * (n))
+#define RNP_ETH_LAYER2_ETQS(n) (RNP_ETH_BASE + 0x9240 + 0x04 * (n))
+#define RNP_ETH_LAYER2_ETQS_DEFAULT (RNP_ETH_BASE + 0x9280)
+#define RNP_ETH_ETQF_DEFAULT (RNP_ETH_BASE + 0x9284)
+#define RNP_ETH_SYNQF (RNP_ETH_BASE + 0x9290)
+#define RNP_ETH_SYNQF_PRIORITY (RNP_ETH_BASE + 0x9294)
+/*
+ * [3:0]:
+ * 4'b0000:RSS disable
+ * 4'b0001:RSS only
+ * 4'b0100:DCB and RSS--8*16
+ * 4'b1010:POOLS and RSS--32*4
+ * [3] :virtual enable
+ * [16]:ipv4_hash_tcp_enable
+ * [17]:ipv4_hash_enable
+ * [20]:ipv6_hash_enable
+ * [21]:ipv6_hash_tcp_enable
+ * [22]:ipv4_hash_udp_enable
+ * [23]:ipv6_hash_udp_enable
+ * [24]:ipv4_hash_sctp_enable
+ * [25]:ipv6_hash_sctp_enable
+ */
+#define RNP_ETH_RSS_CONTROL (RNP_ETH_BASE + 0x92a0)
+#define RNP_MRQC_IOV_EN (RNP_ETH_BASE + 0x92a0)
+#define RNP_IOV_ENABLED (1 << 3)
+#define RNP_ETH_RSS_KEY (RNP_ETH_BASE + 0x92d0)
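
The RSS-only enable value defined earlier (RNP_ETH_ENABLE_RSS_ONLY, 0x3f30001) is simply the OR of the hash-enable bits documented in the bit-layout comment above. The sketch below is an illustrative cross-check, not driver code; it evaluates to the same 0x3f30001 constant.

/* Sketch: decompose RNP_ETH_ENABLE_RSS_ONLY per the bit layout above. */
static inline u32 example_rss_only_all_hash(void)
{
	return BIT(0) |			/* 4'b0001: RSS only */
	       BIT(16) | BIT(17) |	/* ipv4_hash_tcp, ipv4_hash */
	       BIT(20) | BIT(21) |	/* ipv6_hash, ipv6_hash_tcp */
	       BIT(22) | BIT(23) |	/* ipv4_hash_udp, ipv6_hash_udp */
	       BIT(24) | BIT(25);	/* ipv4_hash_sctp, ipv6_hash_sctp */
}
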
+#define RNP_ETH_RAR_RL(n) (RNP_ETH_BASE + 0xa000 + 0x04 * n)
+#define RNP_ETH_RAR_RH(n) (RNP_ETH_BASE + 0xa400 + 0x04 * n)
+#define RNP_ETH_UTA(n) (RNP_ETH_BASE + 0xa800 + 0x04 * n)
+#define RNP_ETH_MUTICAST_HASH_TABLE(n) (RNP_ETH_BASE + 0xac00 + 0x04 * n)
+#define RNP_MTA(n) RNP_ETH_MUTICAST_HASH_TABLE(n)
+#define RNP_ETH_VLAN_FILTER_TABLE(n) (RNP_ETH_BASE + 0xb000 + 0x04 * (n))
+#define RNP_VFTA RNP_ETH_VLAN_FILTER_TABLE
+#define RNP_FCTRL_MULTICASE_BYPASS (1 << 8)
+#define RNP_FCTRL_UNICASE_BYPASS (1 << 9)
+#define RNP_FCTRL_BROADCASE_BYPASS (1 << 10)
+#define RNP_ETH_ETYPE_TABLE(n) (RNP_ETH_BASE + 0xb300 + 0x04 * (n))
+#define RNP_VM_DMAC_MPSAR_RING(entry)                                          \
+	(RNP_ETH_BASE + 0xb400 + (4 * (entry)))
+#define RNP_VLVF(idx) (RNP_ETH_BASE + 0xb600 + 4 * (idx))
+#define RNP_VLVFB(idx) (RNP_ETH_BASE + 0xb700 + 4 * (idx))
+#define RNP_VM_TUNNEL_PFVLVF_L(n) (RNP_ETH_BASE + 0xb800 + 0x04 * (n))
+#define RNP_VM_TUNNEL_PFVLVF_H(n) (RNP_ETH_BASE + 0xb900 + 0x04 * (n))
+/* 5 tuple */
+#define ETH_TUPLE5_NUM 128
+#define RNP_ETH_TUPLE5_SAQF(n) (RNP_ETH_BASE + 0xc000 + 0x04 * (n))
+#define RNP_ETH_TUPLE5_DAQF(n) (RNP_ETH_BASE + 0xc400 + 0x04 * (n))
+#define RNP_ETH_TUPLE5_SDPQF(n) (RNP_ETH_BASE + 0xc800 + 0x04 * (n))
+#define RNP_ETH_TUPLE5_FTQF(n) (RNP_ETH_BASE + 0xcc00 + 0x04 * (n))
+#define RNP_ETH_TUPLE5_POLICY(n) (RNP_ETH_BASE + 0xd000 + 0x04 * (n))
+#define RNP_ETH_RSS_INDIR_TBL(p, n)                                            \
+	(RNP_ETH_BASE + 0xe000 + 0x04 * (n) + 0x200 * (p))
+#define RNP_ETH_TC_IPH_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe800 + 0x04 * (n))
+#define RNP_ETH_TC_VLAN_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe820 + 0x04 * (n))
+#define RNP_ETH_TC_PORT_OFFSET_TABLE(n) (RNP_ETH_BASE + 0xe840 + 0x04 * (n))
+#define RNP_REDIR_RING_MASK (RNP_ETH_BASE + 0xe860)
+#define RNP_ETH_RSS_MODE (0x6fe00)
+#define RNP_ETH_RSS_INDIR_TBL_UV3P(n) (0x6ff00 + 0x04 * (n))
+
+/* ================================================================== */
+
+/* ==================== RNP-REG Global Registers ==================== */
+#define RNP_COMM_REG0 0x30000
+#define RNP_TOP_NIC_VERSION (RNP_COMM_REG0 + 0x0000)
+#define RNP500_PHY_RELEASE (0x30000)
+#define RNP500_TP_SFP (0x30200)
+#define RNP500_TOP_NIC_VERSION (0x8000 + 0x0000)
+#define RNP500_FPGA_VERSION (0x8020)
+#define RNP500_FPGA_TIME (0x8024)
+#define RNP500_LEGANCY_TIME (0xd000)
+#define RNP500_LEGANCY_ENABLE (0xd004)
+#define RNP_TOP_NIC_CONFIG (RNP_COMM_REG0 + 0x0004)
+#define RNP_TOP_NIC_STAT (RNP_COMM_REG0 + 0x0008)
+#define RNP_TOP_NIC_DUMMY (RNP_COMM_REG0 + 0x000c)
+#define RNP_TOP_NIC_REST_N (RNP_COMM_REG0 + 0x0010)
+#define NIC_RESET 0
+#define RNP_TOP_DMA_MEM_SLP (RNP_COMM_REG0 + 0x4004)
+#define RNP_TOP_DMA_MEM_SD (RNP_COMM_REG0 + 0x4008)
+#define RNP_TOP_ETH_TIMESTAMP_SEL (RNP_COMM_REG0 + 0x8010)
+#define RNP_TOP_ETH_MAC_CLK_SEL (RNP_COMM_REG0 + 0x8014)
+#define RNP_TOP_ETH_INF_ETH_STATUS (RNP_COMM_REG0 + 0x8018)
+#define RNP_TOP_ETH_BUG_40G_PATCH (RNP_COMM_REG0 + 0x801c)
+#define RNP_TOP_ETH_PWR_PORT_NUM (4)
+#define RNP_TOP_ETH_PWR_CLAMP_CTRL_PORT(n) (RNP_COMM_REG0 + 0x8020 + 0xc * (n))
+#define RNP_TOP_ETH_PWR_ISOLATE_PORT(n) (RNP_COMM_REG0 + 0x8024 + 0xc * (n))
+#define RNP_TOP_ETH_PWR_DOWN_PORT(n) (RNP_COMM_REG0 + 0x8028 + 0xc * (n))
+#define RNP_TOP_ETH_TCAM_CONFIG_ENABLE (RNP_COMM_REG0 + 0x8050)
+#define RNP_TOP_ETH_SLIP (RNP_COMM_REG0 + 0x8060)
+#define RNP_TOP_ETH_SHUT_DOWN (RNP_COMM_REG0 + 0x8064)
+#define RNP_TOP_ETH_OVS_SLIP (RNP_COMM_REG0 + 0x8068)
+#define RNP_TOP_ETH_OVS_SHUT_DOWN (RNP_COMM_REG0 + 0x806c)
+#define RNP_FC_PORT_ENABLE (RNP_COMM_REG0 + 0x9004)
+#define RNP_FC_PORT_PRIO_MAP(n) (RNP_COMM_REG0 + 0x9008 + (0x04 * n))
+#define RNP_FC_EN_CONF_AVAILBLE (RNP_COMM_REG0 + 0x9018)
+#define RNP_FC_UNCTAGS_MAP_OFFSET (16)
+#define RNP_TOP_MAC_OUI (RNP_COMM_REG0 + 0xc004)
+#define RNP_TOP_MAC_SN (RNP_COMM_REG0 + 0xc008)
+/* ================================================================== */
+
+/* ==================== RNP-SERDES Global Registers ================= */
+#define RNP_SERDES (0x40000)
+#define RNP_PCS_OFFSET (0x1000)
+#define RNP_PCS_BASE(i) (RNP_SERDES + RNP_PCS_OFFSET * i)
+#define RNP_PCS_1G_OR_10G BIT(13)
+#define RNP_PCS_SPPEED_MASK (0x1c)
+#define RNP_PCS_SPPEED_10G (0x0)
+#define RNP_PCS_SPPEED_40G (0xc)
+#define RNP_PCS_LINK_SPEED (0x30000)
+#define RNP_PCS_LINKUP BIT(2)
+#define RNP_PCS_LINK_STATUS (0x30001)
+/* ================================================================== */
+
+/* ==================== RNP-MAC Global Registers ==================== */
+#define RNP10_MAC_BASE (0x60000)
+#define RNP_XLMAC (0x60000)
+#define RNP10_MAC_TX_CFG (0x0000)
+#define RNP10_MAC_RX_CFG (0x0004)
+#define RNP_RX_ALL BIT(31)
+#define RNP_RX_ALL_MUL BIT(4)
+#define RNP10_MAC_PKT_FLT (0x0008)
+#define RNP10_MAC_LPI_CTRL (0x00d0)
+#define RNP10_MAC_Q0_TX_FLOW_CTRL(i) (0x0070 + 0x04 * (i))
+#define RNP10_MAC_RX_FLOW_CTRL (0x0090)
+#define RNP10_TX_FLOW_ENABLE_MASK (0x2)
+#define RNP10_RX_FLOW_ENABLE_MASK (0x1)
+#define RNP10_MAC_TX_VLAN_TAG (0x0050)
+#define RNP10_MAC_TX_VLAN_MODE (0x0060)
+#define RNP10_MAC_INNER_VLAN_INCL (0x0064)
+#define RNP10_MAC_UNICAST_LOW(i) (0x304 + i * 0x08)
+#define RNP10_MAC_UNICAST_HIGH(i) (0x300 + i * 0x08)
+#define RNP500_MAC_BASE (0x20000)
+#define RNP_MODE_NO_SA_INSER (0x0)
+#define RNP_SARC_OFFSET (28)
+#define RNP_TWOKPE_MASK BIT(27)
+#define RNP_SFTERR_MASK BIT(26)
+#define RNP_CST_MASK BIT(25)
+#define RNP_TC_MASK BIT(24)
+#define RNP_WD_MASK BIT(23)
+#define RNP_JD_MASK BIT(22)
+#define RNP_BE_MASK BIT(21)
+#define RNP_JE_MASK BIT(20)
+#define RNP_IFG_96 (0x00)
+#define RNP_IFG_OFFSET (17)
+#define RNP_DCRS_MASK BIT(16)
+#define RNP_PS_MASK BIT(15)
+#define RNP_FES_MASK BIT(14)
+#define RNP_DO_MASK BIT(13)
+#define RNP_LM_MASK BIT(12)
+#define RNP_DM_MASK BIT(11)
+#define RNP_IPC_MASK BIT(10)
+#define RNP_DR_MASK BIT(9)
+#define RNP_LUD_MASK BIT(8)
+#define RNP_ACS_MASK BIT(7)
+#define RNP_BL_MODE (0x00)
+#define RNP_BL_OFFSET (5)
+#define RNP_DC_MASK BIT(4)
+#define RNP_TE_MASK BIT(3)
+#define RNP_RE_MASK BIT(2)
+#define RNP_PRELEN_MODE (0)
+#define RNP500_MAC_UNICAST_LOW(i) (0x44 + i * 0x08)
+#define RNP500_MAC_UNICAST_HIGH(i) (0x40 + i * 0x08)
+#define GMAC_CONTROL 0x00000000 /* Configuration */
+#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
+#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+#define GMAC_MII_ADDR 0x00000010 /* MII Address */
+#define GMAC_MII_DATA 0x00000014 /* MII Data */
+#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
+#define GMAC_PMT 0x0000002c
+
+enum power_event {
+	pointer_reset = 0x80000000,
+	global_unicast = 0x00000200,
+	wake_up_rx_frame = 0x00000040,
+	magic_frame = 0x00000020,
+	wake_up_frame_en = 0x00000004,
+	magic_pkt_en = 0x00000002,
+	power_down = 0x00000001,
+};
+
+#define GMAC_VTHM_MASK BIT(19)
+#define GMAC_ESVL_MASK BIT(18)
+#define GMAC_VTIM_MASK BIT(17)
+#define GMAC_ETV_MASK BIT(16)
+#define GMAC_VLAN_TAG_CTRL 0x0000001c
+#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */
+#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMII 1:MII */
+#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */
+#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+/* GMAC Frame Filter defines */
+#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */
+#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define GMAC_FRAME_FILTER_VLAN 0x00010000 /* VLAN filter enable */
+#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+/* GMII ADDR  defines */
+#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+/* GMAC FLOW CTRL defines */
+#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */
+#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
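
A short sketch of how the flow-control defines above combine: the pause time occupies the upper 16 bits (GMAC_FLOW_CTRL_PT_SHIFT) and RFE/TFE enable the RX/TX directions. This is illustrative only and not the driver's flow-control routine; the helper name is hypothetical.

/* Sketch: build a GMAC_FLOW_CTRL value enabling pause in both directions. */
static inline u32 example_gmac_flow_ctrl(u16 pause_time)
{
	return ((u32)pause_time << GMAC_FLOW_CTRL_PT_SHIFT) |
	       GMAC_FLOW_CTRL_RFE | GMAC_FLOW_CTRL_TFE;
}
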
+/* Energy Efficient Ethernet (EEE)
+ *
+ * LPI status, timer and control register offset
+ */
+/* EEE and LPI defines */
+#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
+#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
+#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
+#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
+#define GMAC_LPI_CTRL_STATUS 0x0030
+#define GMAC_LPI_TIMER_CTRL 0x0034
+#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
+#define GMAC_INT_STATUS_PMT BIT(3)
+#define GMAC_INT_STATUS_MMCIS BIT(4)
+#define GMAC_INT_STATUS_MMCRIS BIT(5)
+#define GMAC_INT_STATUS_MMCTIS BIT(6)
+#define GMAC_INT_STATUS_MMCCSUM BIT(7)
+#define GMAC_INT_STATUS_TSTAMP BIT(9)
+#define GMAC_INT_STATUS_LPIIS BIT(10)
+/* LPI control and status defines */
+#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
+#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
+#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
+#define GMAC_MANAGEMENT_RX_UNDERSIZE (0x01a4)
+#define GMAC_MANAGEMENT_TX_PAUSE (0x170)
+#define GMAC_MANAGEMENT_RX_PAUSE (0x1D0)
+#define RNP_MAC_TX_CFG (RNP_XLMAC + 0x0000)
+#define RNP_MAC_RX_CFG (RNP_XLMAC + 0x0004)
+#define RNP_MAC_PKT_FLT (RNP_XLMAC + 0x0008)
+#define RNP_MAC_LPI_CTRL (RNP_XLMAC + 0x00d0)
+#define RNP_MAC_TX_VLAN_TAG (RNP_XLMAC + 0x0050)
+#define RNP_MAC_TX_VLAN_MODE (RNP_XLMAC + 0x0060)
+#define RNP_MAC_INNER_VLAN_INCL (RNP_XLMAC + 0x0064)
+#define RNP_MAC_Q0_TX_FLOW_CTRL(i) (RNP_XLMAC + 0x0070 + 0x04 * (i))
+#define RNP_MAC_RX_FLOW_CTRL (RNP_XLMAC + 0x0090)
+#define RNP_MAC_HW_FEATURE (RNP_XLMAC + 0x0120)
+/* 1588 */
+#define RNP_MAC_TS_CTRL (RNP_XLMAC + 0x0d00)
+#define RNP_MAC_SUB_SECOND_INCREMENT (RNP_XLMAC + 0x0d04)
+#define RNP_MAC_SYS_TIME_SEC_CFG (RNP_XLMAC + 0x0d08)
+#define RNP_MAC_SYS_TIME_NANOSEC_CFG (RNP_XLMAC + 0x0d0c)
+#define RNP_MAC_SYS_TIME_SEC_UPDATE (RNP_XLMAC + 0x0d10)
+#define RNP_MAC_SYS_TIME_NANOSEC_UPDATE (RNP_XLMAC + 0x0d14)
+#define RNP_MAC_TS_ADDEND (RNP_XLMAC + 0x0d18)
+#define RNP_MAC_TS_STATS (RNP_XLMAC + 0x0d20)
+#define RNP_MAC_INTERRUPT_ENABLE (RNP_XLMAC + 0x00b4)
+#define RNP_MAC_STATS_BROADCAST_LOW (RNP_XLMAC + 0x0918)
+#define RNP_MAC_STATS_BROADCAST_HIGH (RNP_XLMAC + 0x091c)
+#define RNP_MAC_STATS_MULTICAST_LOW (RNP_XLMAC + 0x0920)
+#define RNP_MAC_STATS_MULTICAST_HIGH (RNP_XLMAC + 0x0924)
+#define RNP_TX_FLOW_ENABLE_MASK (0x2)
+#define RNP_RX_FLOW_ENABLE_MASK (0x1)
+/* ================================================================== */
+
+/* ==================== RNP-MSIX Global Registers ==================== */
+#define RING_VECTOR(n) (0x04 * (n))
+/* ================================================================== */
+
+/* ==================== OTHER Global Registers ==================== */
+/* =====  PF-VF Functions ==== */
+#define VF_NUM_REG 0xa3000
+/* 8bit: 7:vf_active 6:fun0/fun1 [5:0]:vf_num */
+#define VF_NUM(vfnum, fun) ((1 << 7) | (((fun) & 0x1) << 6) | ((vfnum) & 0x3f))
+#define PF_NUM(fun) (((fun) & 0x1) << 6)
+#define IS_VF(vfnum) (((vfnum) & (1 << 7)) ? 1 : 0)
+/* 8bit: 7:vf_active [6:5]:fun0/fun1 [4:0]:vf_num */
+#define PF_NUM_N500(fun) (((fun) & 0x3) << 5)
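
To make the VF_NUM encoding above concrete, the sketch below decodes the 8-bit layout (bit 7 = vf_active, bit 6 = function, bits [5:0] = VF number). The decode helper is hypothetical; the patch itself only defines the encode macros and IS_VF().

/* Sketch: decode a VF_NUM byte written via VF_NUM(vfnum, fun). */
static inline void example_decode_vf_num(u8 val, bool *active, u8 *fun,
					 u8 *vfnum)
{
	*active = !!(val & BIT(7));	/* same bit IS_VF() tests */
	*fun = (val >> 6) & 0x1;
	*vfnum = val & 0x3f;
}
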
+/* PFC Flow Control */
+
+enum NIC_MODE {
+	MODE_NIC_MODE_2PORT_40G = 0,
+	MODE_NIC_MODE_2PORT_10G = 1,
+	MODE_NIC_MODE_4PORT_10G = 2,
+	MODE_NIC_MODE_8PORT_10G = 3,
+};
+/* ================================================================== */
+
+#endif /* RNPGBE_REGS_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c
new file mode 100644
index 0000000000000..c7a5a63fb3b87
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include "rnpgbe_sfc.h"
+#include "rnpgbe.h"
+
+static inline void rsp_hal_sfc_command(u8 __iomem *hw_addr, u32 cmd)
+{
+	iowrite32(cmd, (hw_addr + 0x8));
+	iowrite32(1, (hw_addr + 0x0));
+	while (ioread32(hw_addr) != 0)
+		;
+}
+
+static inline void rsp_hal_sfc_flash_write_disable(u8 __iomem *hw_addr)
+{
+	iowrite32(CMD_CYCLE(8), (hw_addr + 0x10));
+	iowrite32(WR_DATA_CYCLE(0), (hw_addr + 0x14));
+
+	rsp_hal_sfc_command(hw_addr, CMD_WRITE_DISABLE);
+}
+
+static int32_t rsp_hal_sfc_flash_wait_idle(u8 __iomem *hw_addr)
+{
+	iowrite32(CMD_CYCLE(8), (hw_addr + 0x10));
+	iowrite32(RD_DATA_CYCLE(8), (hw_addr + 0x14));
+
+	while (1) {
+		rsp_hal_sfc_command(hw_addr, CMD_READ_STATUS);
+		if ((ioread32(hw_addr + 0x4) & 0x1) == 0)
+			break;
+	}
+	return HAL_OK;
+}
+
+static inline void rsp_hal_sfc_flash_write_enable(u8 __iomem *hw_addr)
+{
+	iowrite32(CMD_CYCLE(8), (hw_addr + 0x10));
+	iowrite32(0x1f, (hw_addr + 0x18));
+	iowrite32(0x100000, (hw_addr + 0x14));
+
+	rsp_hal_sfc_command(hw_addr, CMD_WRITE_ENABLE);
+}
+
+static int rsp_hal_sfc_flash_erase_sector_internal(u8 __iomem *hw_addr,
+						   u32 address)
+{
+	if (address >= RSP_FLASH_HIGH_16M_OFFSET)
+		return HAL_EINVAL;
+
+	if (address % 4096)
+		return HAL_EINVAL;
+
+	rsp_hal_sfc_flash_write_enable(hw_addr);
+
+	iowrite32((CMD_CYCLE(8) | ADDR_CYCLE(24)), (hw_addr + 0x10));
+	iowrite32((RD_DATA_CYCLE(0) | WR_DATA_CYCLE(0)), (hw_addr + 0x14));
+	iowrite32(SFCADDR(address), (hw_addr + 0xc));
+	rsp_hal_sfc_command(hw_addr, CMD_SECTOR_ERASE);
+	rsp_hal_sfc_flash_wait_idle(hw_addr);
+	rsp_hal_sfc_flash_write_disable(hw_addr);
+
+	return HAL_OK;
+}
+
+int rsp_hal_sfc_flash_erase(struct rnpgbe_hw *hw, u32 size)
+{
+	u32 addr = SFC_MEM_BASE;
+	u32 i = 0;
+	u32 page_size = 0x1000;
+
+	size = ((size + (page_size - 1)) / page_size) * page_size;
+
+	addr = addr - SFC_MEM_BASE;
+
+	if (size == 0)
+		return HAL_EINVAL;
+
+	if ((addr + size) > RSP_FLASH_HIGH_16M_OFFSET)
+		return HAL_EINVAL;
+
+	if (addr % page_size)
+		return HAL_EINVAL;
+
+	if (size % page_size)
+		return HAL_EINVAL;
+
+	for (i = 0; i < size; i += page_size) {
+		if ((i >= 0x1f000) && (i < 0x20000))
+			continue;
+
+		rsp_hal_sfc_flash_erase_sector_internal(hw->hw_addr,
+							(addr + i));
+	}
+
+	return HAL_OK;
+}
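
For context, a caller erasing the first 1 MiB of flash through the helper above might look like the sketch below; the wrapper name is hypothetical and the size is an arbitrary example. rsp_hal_sfc_flash_erase() itself rounds the size up to 4 KiB sectors and skips the 0x1f000-0x20000 window.

/* Sketch: erase the first 1 MiB of SPI flash via the exported helper. */
static int example_erase_first_mib(struct rnpgbe_hw *hw)
{
	return rsp_hal_sfc_flash_erase(hw, 0x100000);
}
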
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h
new file mode 100644
index 0000000000000..88a60b1ef0eef
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_SFC_H
+#define _RNPGBE_SFC_H
+
+/* Return value */
+#define HAL_OK 0
+#define HAL_EINVAL (-3) /* Invalid argument */
+#define HAL_ETIME (-6) /* Timer expired */
+#define RSP_FLASH_HIGH_16M_OFFSET 0x1000000
+#define SFC_MEM_BASE 0x28000000
+#define RSP_FLASH_SIZE 0x1000000
+#define CMD_WRITE_DISABLE 0x04000000
+#define CMD_READ_STATUS 0x05000000
+#define CMD_WRITE_ENABLE 0x06000000
+#define CMD_SECTOR_ERASE 0x20000000
+#define CMD_BLOCK_ERASE_64K 0xd8000000
+#define SFCADDR(a) ((a) << 8)
+#define CMD_CYCLE(c) (((c) & 0xff) << 0)
+#define RD_DATA_CYCLE(c) (((c) & 0xff) << 8)
+#define WR_DATA_CYCLE(c) (((c) & 0xff) << 0)
+#define ADDR_CYCLE(c) (((c) & 0xff) << 16)
+
+#endif /* _RNPGBE_SFC_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c
new file mode 100644
index 0000000000000..760f40b901abb
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.c
@@ -0,0 +1,1652 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe.h"
+#include "rnpgbe_type.h"
+#include "rnpgbe_sriov.h"
+
+#ifdef CONFIG_PCI_IOV
+static int __rnpgbe_enable_sriov(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int num_vf_macvlans, i, num_vebvlans;
+	struct vf_macvlans *mv_list;
+	struct vf_vebvlans *vv_list = NULL;
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	/* SR-IOV and DCB cannot be enabled together, so reset numtc */
+	adapter->flags &= (~RNP_FLAG_DCB_ENABLED);
+	netdev_reset_tc(adapter->netdev);
+
+	e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+
+	/* Enable VMDq flag so device will be set in VM mode */
+	adapter->flags |= RNP_FLAG_VMDQ_ENABLED;
+	if (!adapter->ring_feature[RING_F_VMDQ].limit)
+		adapter->ring_feature[RING_F_VMDQ].limit = 1;
+	if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+		adapter->ring_feature[RING_F_VMDQ].offset = 0;
+	else
+		adapter->ring_feature[RING_F_VMDQ].offset = hw->max_vfs - 1;
+
+	num_vf_macvlans = hw->num_rar_entries -
+			  (hw->max_pf_macvlans + 1 + adapter->num_vfs);
+	num_vebvlans = hw->num_vebvlan_entries;
+
+	adapter->mv_list = mv_list = kcalloc(
+		num_vf_macvlans, sizeof(struct vf_macvlans), GFP_KERNEL);
+	if (num_vebvlans)
+		hw->vv_list = vv_list = kcalloc(
+			num_vebvlans, sizeof(struct vf_vebvlans), GFP_KERNEL);
+
+	if (mv_list) {
+		/* Initialize list of VF macvlans */
+		INIT_LIST_HEAD(&adapter->vf_mvs.l);
+		for (i = 0; i < num_vf_macvlans; i++) {
+			mv_list->vf = -1;
+			mv_list->free = true;
+			mv_list->rar_entry = hw->mac.num_rar_entries -
+					     (i + adapter->num_vfs + 1);
+			list_add(&mv_list->l, &adapter->vf_mvs.l);
+			mv_list++;
+		}
+	}
+
+	if (vv_list) {
+		/* Initialize the list of VEB VLAN entries */
+		INIT_LIST_HEAD(&hw->vf_vas.l);
+		for (i = 0; i < num_vebvlans; i++) {
+			vv_list->vid = 0;
+			vv_list->free = true;
+			vv_list->veb_entry = i;
+			list_add(&vv_list->l, &hw->vf_vas.l);
+			vv_list++;
+		}
+	}
+
+	adapter->flags2 |= RNP_FLAG2_BRIDGE_MODE_VEB;
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+
+	hw->ops.set_sriov_status(hw, true);
+
+	adapter->vfinfo = kcalloc(adapter->num_vfs,
+				  sizeof(struct vf_data_storage), GFP_KERNEL);
+	if (adapter->vfinfo) {
+		/* limit traffic classes based on VFs enabled */
+		/* TODO: check whether VFs need PFC or traffic class support */
+		/* We do not support RSS w/ SR-IOV */
+		adapter->ring_feature[RING_F_RSS].limit = hw->sriov_ring_limit;
+
+		/* Disable RSC when in SR-IOV mode */
+		adapter->flags2 &=
+			~(RNP_FLAG2_RSC_CAPABLE | RNP_FLAG2_RSC_ENABLED);
+
+		adapter->flags |= RNP_FLAG_SRIOV_ENABLED;
+		/* force EEE off when SR-IOV is enabled */
+		adapter->eee_enabled = 0;
+		return 0;
+	}
+
+	/* set flags last to avoid dereferencing a NULL adapter->vfinfo */
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	return -ENOMEM;
+}
+
+void rnpgbe_enable_sriov_true(struct rnpgbe_adapter *adapter)
+{
+	int err = 0;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return;
+
+	adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE;
+
+	err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+	if (err) {
+		printk(KERN_DEBUG "Failed to enable PCI sriov: %d num %d\n",
+		       err, adapter->num_vfs);
+		printk(KERN_DEBUG "We cannot handle this error\n");
+	}
+
+	adapter->flags |= RNP_FLAG_VF_INIT_DONE;
+}
+
+/* Note this function is called when the user wants to enable SR-IOV
+ * VFs using the now deprecated module parameter
+ * never used
+ */
+void rnpgbe_enable_sriov(struct rnpgbe_adapter *adapter)
+{
+	int pre_existing_vfs = 0;
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	pre_existing_vfs = pci_num_vf(adapter->pdev);
+	if (!pre_existing_vfs && !adapter->num_vfs)
+		return;
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	if (!pre_existing_vfs)
+		dev_warn(&adapter->pdev->dev,
+			 "Enabling SR-IOV VFs using the module parameter is deprecated "
+			 "- please use the pci sysfs interface.\n");
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	/* If there are pre-existing VFs then we have to force
+	 * use of that many - over ride any module parameter value.
+	 * This may result from the user unloading the PF driver
+	 * while VFs were assigned to guest VMs or because the VFs
+	 * have been created via the new PCI SR-IOV sysfs interface.
+	 */
+	if (pre_existing_vfs) {
+		adapter->num_vfs = pre_existing_vfs;
+		dev_warn(&adapter->pdev->dev,
+			 "Virtual Functions already enabled for this device - Please "
+			 "reload all VF drivers to avoid spoofed packet errors\n");
+	} else {
+		int i;
+		/*
+		 * The hardware supports up to hw->max_vfs VFs per physical
+		 * function, but this implementation reserves one so that
+		 * basic networking resources are still available to the
+		 * physical function.  Requests above hw->max_vfs - 1 are
+		 * clamped to that limit.
+		 */
+		adapter->num_vfs =
+			min_t(unsigned int, adapter->num_vfs, hw->max_vfs - 1);
+
+		if (__rnpgbe_enable_sriov(adapter)) {
+			e_err(probe, "Failed to alloc memory for sriov\n");
+			adapter->num_vfs = 0;
+		}
+
+		for (i = 0; i < adapter->num_vfs; i++)
+			rnpgbe_vf_configuration(adapter->pdev,
+						(i | 0x10000000));
+
+		dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	}
+}
+
+static bool rnpgbe_vfs_are_assigned(struct rnpgbe_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct pci_dev *vfdev;
+	unsigned int dev_id = RNP_DEV_ID_N10_PF0_VF_N;
+	unsigned int vendor_id = PCI_VENDOR_ID_MUCSE;
+
+	switch (adapter->pdev->device) {
+	case RNP_DEV_ID_N10_PF0:
+	case RNP_DEV_ID_N10_PF1:
+		vendor_id = 0x1dab;
+		if (rnpgbe_is_pf1(pdev))
+			dev_id = RNP_DEV_ID_N10_PF1_VF;
+		else
+			dev_id = RNP_DEV_ID_N10_PF0_VF;
+		break;
+	case PCI_DEVICE_ID_N10_PF0:
+	case PCI_DEVICE_ID_N10_PF1:
+		vendor_id = PCI_VENDOR_ID_MUCSE;
+		if (rnpgbe_is_pf1(pdev))
+			dev_id = RNP_DEV_ID_N10_PF1_VF_N;
+		else
+			dev_id = RNP_DEV_ID_N10_PF0_VF_N;
+	}
+
+	/* loop through all the VFs to see if we own any that are assigned */
+	vfdev = pci_get_device(vendor_id, dev_id, NULL);
+	while (vfdev) {
+		/* if we don't own it we don't care */
+		if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+			/* if it is assigned we cannot release it */
+			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+				return true;
+		}
+
+		vfdev = pci_get_device(vendor_id, dev_id, vfdev);
+	}
+
+	return false;
+}
+#endif /* #ifdef CONFIG_PCI_IOV */
+
+int rnpgbe_disable_sriov(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int rss;
+	int time = 0;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return 0;
+
+	adapter->num_vfs = 0;
+	adapter->flags &= ~RNP_FLAG_SRIOV_ENABLED;
+	adapter->flags &= ~RNP_FLAG_SRIOV_INIT_DONE;
+	adapter->flags &= ~RNP_FLAG_VF_INIT_DONE;
+	adapter->vlan_count = 0;
+	msleep(100);
+	hw->ops.set_mac_rx(hw, false);
+	hw->ops.set_sriov_status(hw, false);
+
+	/* wait until nothing else is accessing vfinfo */
+	while (test_and_set_bit(__RNP_USE_VFINFI, &adapter->state)) {
+		msleep(100);
+		time++;
+
+		if (time > 100) {
+			printk(KERN_DEBUG "wait flags timeout\n");
+			break;
+		}
+	}
+	if (time < 100)
+		clear_bit(__RNP_USE_VFINFI, &adapter->state);
+
+	/* free VF control structures */
+	kfree(adapter->vfinfo);
+	adapter->vfinfo = NULL;
+
+	/* free macvlan list */
+	if (hw->vv_list) {
+		kfree(hw->vv_list);
+		hw->vv_list = NULL;
+	}
+
+	if (adapter->mv_list) {
+		kfree(adapter->mv_list);
+		adapter->mv_list = NULL;
+	}
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+#ifdef CONFIG_PCI_IOV
+	/*
+	 * If our VFs are assigned we cannot shut down SR-IOV
+	 * without causing issues, so just leave the hardware
+	 * available but disabled
+	 */
+	if (rnpgbe_vfs_are_assigned(adapter)) {
+		e_dev_warn("Unloading driver while VFs are assigned "
+			   "- VFs will not be deallocated\n");
+		return -EPERM;
+	}
+	/* disable iov and allow time for transactions to clear */
+	pci_disable_sriov(adapter->pdev);
+#endif
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+
+	/* set default pool back to 0 */
+
+	/* Disable VMDq flag so the device is taken out of VM mode */
+	if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+		adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED;
+	adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
+	rss = min_t(int, adapter->max_ring_pair_counts, num_online_cpus());
+
+	rss = min_t(int, rss,
+		    hw->mac.max_msix_vectors - adapter->num_other_vectors);
+
+	adapter->ring_feature[RING_F_RSS].limit = rss;
+
+	/* take a breather then clean up driver data */
+	msleep(100);
+
+	dbg("%s:%d flags:0x%x\n", __func__, __LINE__, adapter->flags);
+	return 0;
+}
+
+#ifdef CONFIG_PCI_IOV
+bool check_ari_mode(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+
+	return bus->self && bus->self->ari_enabled;
+}
+#endif
+
+static int rnpgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(dev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err = 0;
+	int i;
+	int pre_existing_vfs = pci_num_vf(dev);
+
+	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+		err = rnpgbe_disable_sriov(adapter);
+	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+		goto out;
+
+	if (hw->feature_flags & RNP_VEB_VLAN_MASK_EN) {
+		if (adapter->vlan_count > hw->max_vfs - 1) {
+			dev_err(&adapter->pdev->dev,
+				"too many VLANs configured, reduce to fewer than %d VLANs\n",
+				hw->max_vfs - 1);
+
+			err = -EOPNOTSUPP;
+			goto err_out;
+		}
+
+	} else if (adapter->vlan_count > 1) {
+		dev_err(&adapter->pdev->dev,
+			"only 1 VLAN is supported in SR-IOV mode, delete the other VLANs\n");
+		dev_err(&adapter->pdev->dev, "please delete the extra VLANs first\n");
+
+		err = -EOPNOTSUPP;
+		goto err_out;
+	}
+
+	adapter->vlan_count = 0;
+	if (err)
+		goto err_out;
+
+	/* While the SR-IOV capability structure reports total VFs to be
+	 * 64 we limit the actual number that can be allocated to 63 so
+	 * that some transmit/receive resources can be reserved to the
+	 * PF.  The PCI bus driver already checks for other values out of
+	 * range.
+	 */
+
+	if (check_ari_mode(dev)) {
+		if (num_vfs > (hw->max_vfs - 1)) {
+			err = -EPERM;
+			goto err_out;
+		}
+	} else {
+		if (num_vfs > hw->max_vfs_noari) {
+			err = -EPERM;
+			goto err_out;
+		}
+	}
+
+	/* maybe too early */
+	adapter->num_vfs = num_vfs;
+
+	err = __rnpgbe_enable_sriov(adapter);
+	if (err)
+		goto err_out;
+
+	for (i = 0; i < adapter->num_vfs; i++)
+		rnpgbe_vf_configuration(dev, (i | 0x10000000));
+	dbg("flags:0x%x\n", adapter->flags);
+	if (hw->ops.clr_rar_all)
+		hw->ops.clr_rar_all(hw);
+
+	rnpgbe_sriov_reinit(adapter);
+
+	adapter->flags |= RNP_FLAG_SRIOV_INIT_DONE;
+	err = pci_enable_sriov(dev, num_vfs);
+	if (err) {
+		e_dev_warn("Failed to enable PCI sriov: %d num %d\n", err,
+			   num_vfs);
+		rnpgbe_disable_sriov(adapter);
+		rnpgbe_sriov_reinit(adapter);
+		goto err_out;
+	}
+	adapter->flags |= RNP_FLAG_VF_INIT_DONE;
+
+out:
+	return num_vfs;
+
+err_out:
+	return err;
+#endif
+	return 0;
+}
+
+static int rnpgbe_pci_sriov_disable(struct pci_dev *dev)
+{
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(dev);
+	int err;
+	u32 current_flags = adapter->flags;
+
+	err = rnpgbe_disable_sriov(adapter);
+
+	/* Only reinit if no error and state changed */
+	if (!err && current_flags != adapter->flags) {
+		/* rnpgbe_disable_sriov() doesn't clear VMDQ flag */
+		adapter->flags &= ~RNP_FLAG_VMDQ_ENABLED;
+#ifdef CONFIG_PCI_IOV
+		rnpgbe_sriov_reinit(adapter);
+#endif
+	}
+
+	return err;
+}
+
+static int rnpgbe_set_vf_multicasts(struct rnpgbe_adapter *adapter,
+				    u32 *msgbuf,
+				    u32 vf)
+{
+	int entries = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT;
+	u16 *hash_list = (u16 *)&msgbuf[1];
+	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int i;
+
+	/* only so many hash values supported */
+	entries = min(entries, RNP_MAX_VF_MC_ENTRIES);
+
+	/*
+	 * salt away the number of multicast addresses assigned
+	 * to this VF for later use, to restore when the PF multicast
+	 * list changes
+	 */
+	vfinfo->num_vf_mc_hashes = entries;
+
+	/*
+	 * VFs are limited to using the MTA hash table for their multicast
+	 * addresses
+	 */
+	for (i = 0; i < entries; i++)
+		vfinfo->vf_mc_hashes[i] = hash_list[i];
+
+	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++)
+		hw->ops.set_sriov_vf_mc(hw, vfinfo->vf_mc_hashes[i]);
+
+	return 0;
+}
+
+void rnpgbe_restore_vf_macs(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int vf;
+	u8 *mac_addr;
+	int rar_entry;
+
+	for (vf = 0; vf < adapter->num_vfs; vf++) {
+		mac_addr = adapter->vfinfo[vf].vf_mac_addresses;
+		rar_entry = hw->mac.num_rar_entries - (vf + 1);
+		/* setup to the hw */
+		if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+			hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1,
+						true);
+		else
+			hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf,
+						true);
+	}
+}
+
+void rnpgbe_restore_vf_macvlans(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct list_head *pos;
+	struct vf_macvlans *entry;
+
+	hw_dbg(hw, "%s Staring..\n", __func__);
+
+	list_for_each(pos, &adapter->vf_mvs.l) {
+		entry = list_entry(pos, struct vf_macvlans, l);
+		if (!entry->free) {
+			hw_dbg(hw, "  vf:%d MACVLAN: RAR[%d] <= %pM\n",
+			       entry->vf, entry->rar_entry, entry->vf_macvlan);
+
+			if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) {
+				hw->ops.set_rar_with_vf(hw, entry->vf_macvlan,
+							entry->rar_entry,
+							entry->vf + 1, true);
+			} else {
+				hw->ops.set_rar_with_vf(hw, entry->vf_macvlan,
+							entry->rar_entry,
+							entry->vf, true);
+			}
+		}
+	}
+	hw_dbg(hw, "%s Done\n", __func__);
+}
+
+void rnpgbe_restore_vf_multicasts(struct rnpgbe_adapter *adapter)
+{
+	/* Restore any VF macvlans */
+	rnpgbe_restore_vf_macvlans(adapter);
+}
+
+static int rnpgbe_set_vf_vlan(struct rnpgbe_adapter *adapter, int add, int vid,
+			      u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int true_handle = 1;
+	int i;
+	/* VLAN 0 is a special case, don't allow it to be removed */
+	if (!vid && !add)
+		return 0;
+
+	/* check the other vfs before removing */
+	if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) {
+		/* if another vf still uses this vlan, don't really remove it */
+		if (!add) {
+			/* also check against the pf_vlan */
+			if (vid == adapter->vf_vlan)
+				true_handle = 0;
+			if (!test_and_set_bit(__RNP_USE_VFINFI,
+					      &adapter->state)) {
+				for (i = 0; i < adapter->num_vfs; i++) {
+					/* check if other vf_vlan still valid */
+					if ((i != vf) &&
+					    (vid == adapter->vfinfo[i].vf_vlan))
+						true_handle = 0;
+					/* check if other pf_vlan still valid */
+					if ((i != vf) &&
+					    (vid == adapter->vfinfo[i].pf_vlan))
+						true_handle = 0;
+				}
+				clear_bit(__RNP_USE_VFINFI, &adapter->state);
+			}
+		}
+	}
+	if (true_handle)
+		hw->ops.set_vf_vlan_filter(hw, vid, vf, (bool)add, false);
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) {
+		if (hw->ops.set_vf_vlan_mode) {
+			if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+				hw->ops.set_vf_vlan_mode(hw, vid, vf + 1,
+							 (bool)add);
+			else
+				hw->ops.set_vf_vlan_mode(hw, vid, vf,
+							 (bool)add);
+		}
+	}
+
+	return 0;
+}
+
+static inline void rnpgbe_vf_reset_event(struct rnpgbe_adapter *adapter, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+	int i;
+
+	/* reset multicast table array for vf */
+	adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+	/* Flush and reset the mta with the new values */
+	rnpgbe_set_rx_mode(adapter->netdev);
+
+	/* clear this rar_entry */
+	hw->ops.clr_rar(hw, rar_entry);
+
+	/* reset VF api back to unknown */
+	adapter->vfinfo[vf].vf_api = 0;
+	/* clear vf multicast */
+	for (i = 0; i < RNP_MAX_VF_MC_ENTRIES; i++)
+		adapter->vfinfo[vf].vf_mc_hashes[i] = 0;
+	/* clear vf vlan setup */
+	adapter->vfinfo[vf].vf_vlan = 0;
+	adapter->vfinfo[vf].vlan_count = 0;
+}
+
+static int rnpgbe_set_vf_mac(struct rnpgbe_adapter *adapter, int vf,
+			     unsigned char *mac_addr)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	/* this rar_entry may conflict with the PF's mac-vlan entries */
+	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+	memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+
+	/* setup to the hw */
+	if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+		hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf + 1, true);
+	else
+		hw->ops.set_rar_with_vf(hw, mac_addr, rar_entry, vf, true);
+
+	return 0;
+}
+
+static int rnpgbe_set_vf_macvlan(struct rnpgbe_adapter *adapter, int vf,
+				 int index, unsigned char *mac_addr)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct list_head *pos;
+	struct vf_macvlans *entry;
+	/* index = 0: only erase */
+	/* index = 1: erase and then set */
+	if (index <= 1) {
+		list_for_each(pos, &adapter->vf_mvs.l) {
+			entry = list_entry(pos, struct vf_macvlans, l);
+			if (entry->vf == vf) {
+				entry->vf = -1;
+				entry->free = true;
+				entry->is_macvlan = false;
+				hw->ops.clr_rar(hw, entry->rar_entry);
+			}
+		}
+	}
+
+	/*
+	 * If index was zero then we were asked to clear the uc list
+	 * for the VF.  We're done.
+	 */
+	if (!index)
+		return 0;
+
+	entry = NULL;
+
+	list_for_each(pos, &adapter->vf_mvs.l) {
+		entry = list_entry(pos, struct vf_macvlans, l);
+		if (entry->free)
+			break;
+	}
+
+	/*
+	 * If we traversed the entire list and didn't find a free entry
+	 * then we're out of space on the RAR table.  Also entry may
+	 * be NULL because the original memory allocation for the list
+	 * failed, which is not fatal but does mean we can't support
+	 * VF requests for MACVLAN because we couldn't allocate
+	 * memory for the list management required.
+	 */
+	if (!entry || !entry->free)
+		return -ENOSPC;
+
+	entry->free = false;
+	entry->is_macvlan = true;
+	entry->vf = vf;
+	memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+
+	if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) {
+		hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry,
+					entry->vf + 1, true);
+	} else {
+		hw->ops.set_rar_with_vf(hw, entry->vf_macvlan, entry->rar_entry,
+					entry->vf, true);
+	}
+	return 0;
+}
+
+int rnpgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+	unsigned char vf_mac_addr[6];
+	struct rnpgbe_adapter *adapter = pci_get_drvdata(pdev);
+	unsigned int vfn = (event_mask & 0x3f);
+
+	bool enable = ((event_mask & 0x10000000U) != 0);
+
+	if (enable) {
+		eth_zero_addr(vf_mac_addr);
+		memcpy(vf_mac_addr, adapter->hw.mac.perm_addr, 6);
+		vf_mac_addr[5] = vf_mac_addr[5] + (0x80 | vfn);
+		vf_mac_addr[4] = vf_mac_addr[4] + (pdev->devfn);
+
+		memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+	}
+
+	return 0;
+}
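
To make the address math above easier to follow, here is a hedged standalone sketch of the per-VF MAC derivation: byte 5 gets 0x80|vfn added and byte 4 gets the PCI devfn added, exactly as in rnpgbe_vf_configuration(). The sample PF address and the helper name are illustrative only.

#include <stdio.h>

static void derive_vf_mac(const unsigned char *pf_mac, unsigned int vfn,
			  unsigned int devfn, unsigned char *vf_mac)
{
	int i;

	for (i = 0; i < 6; i++)
		vf_mac[i] = pf_mac[i];
	/* same arithmetic as rnpgbe_vf_configuration() */
	vf_mac[5] = vf_mac[5] + (0x80 | vfn);
	vf_mac[4] = vf_mac[4] + devfn;
}

int main(void)
{
	unsigned char pf_mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char vf_mac[6];

	derive_vf_mac(pf_mac, 1, 0, vf_mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n", vf_mac[0], vf_mac[1],
	       vf_mac[2], vf_mac[3], vf_mac[4], vf_mac[5]);
	return 0;
}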
+
+static int rnpgbe_vf_reset_msg(struct rnpgbe_adapter *adapter, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+	u32 msgbuf[RNP_VF_PERMADDR_MSG_LEN];
+	u8 *addr = (u8 *)(&msgbuf[1]);
+
+	/* reset the filters for the device */
+	rnpgbe_vf_reset_event(adapter, vf);
+
+	/* set vf mac address */
+	if (!is_zero_ether_addr(vf_mac))
+		rnpgbe_set_vf_mac(adapter, vf, vf_mac);
+
+	/* enable VF mailbox for further messages */
+	adapter->vfinfo[vf].clear_to_send = true;
+
+	/* Enable counting of spoofed packets in the SSVPC register */
+
+	/* reply to reset with ack and vf mac address */
+	msgbuf[0] = RNP_VF_RESET;
+	if (!is_zero_ether_addr(vf_mac)) {
+		msgbuf[0] |= RNP_VT_MSGTYPE_ACK;
+		memcpy(addr, vf_mac, ETH_ALEN);
+	} else {
+		msgbuf[0] |= RNP_VT_MSGTYPE_NACK;
+		dev_warn(&adapter->pdev->dev,
+			 "VF %d has no MAC address assigned; assign one manually\n",
+			 vf);
+	}
+
+	/*
+	 * Piggyback the multicast filter type so VF can compute the
+	 * correct vectors
+	 */
+	msgbuf[RNP_VF_MC_TYPE_WORD] = 0;
+	/* set up link status, pause mode and ft padding mode */
+
+	/* link status */
+	/* pause mode */
+	msgbuf[RNP_VF_MC_TYPE_WORD] |= (0xff & hw->fc.current_mode) << 16;
+	if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING)
+		msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x01 << 8);
+	else
+		msgbuf[RNP_VF_MC_TYPE_WORD] |= (0x00 << 8);
+	/* mc_type */
+	msgbuf[RNP_VF_MC_TYPE_WORD] |= rd32(hw, RNP_ETH_DMAC_MCSTCTRL) & 0x03;
+	msgbuf[RNP_VF_DMA_VERSION_WORD] = rd32(hw, RNP_DMA_VERSION);
+	msgbuf[RNP_VF_VLAN_WORD] = adapter->vfinfo[vf].pf_vlan;
+	msgbuf[RNP_VF_PHY_TYPE_WORD] = (hw->mac_type << 16) | hw->phy_type;
+	msgbuf[RNP_VF_FW_VERSION_WORD] = (hw->fw_version);
+	if (adapter->vfinfo[vf].link_state == rnpgbe_link_state_auto) {
+		msgbuf[RNP_VF_LINK_STATUS_WORD] =
+			(adapter->link_up ? RNP_PF_LINK_UP : 0) |
+			adapter->link_speed;
+	} else if (adapter->vfinfo[vf].link_state == rnpgbe_link_state_on) {
+		msgbuf[RNP_VF_LINK_STATUS_WORD] =
+			RNP_PF_LINK_UP | adapter->link_speed;
+
+	} else {
+		msgbuf[RNP_VF_LINK_STATUS_WORD] = 0;
+	}
+
+	msgbuf[RNP_VF_AXI_MHZ] = hw->usecstocount;
+	msgbuf[RNP_VF_FEATURE] = 0;
+	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+		msgbuf[RNP_VF_FEATURE] |= PF_FEATRURE_VLAN_FILTER;
+	if (hw->ncsi_en)
+		msgbuf[RNP_VF_FEATURE] |= PF_NCSI_EN;
+
+	/* the vf may have no irq handler yet if this is its first reset */
+	rnpgbe_write_mbx(hw, msgbuf, RNP_VF_PERMADDR_MSG_LEN, vf);
+
+	return 0;
+}
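
The word packed into msgbuf[RNP_VF_MC_TYPE_WORD] above carries several fields at once. As a reading aid, here is a sketch that unpacks it; the field positions come straight from the shifts used in rnpgbe_vf_reset_msg(), while the struct and helper names are invented for illustration and nothing is assumed about the real VF driver.

/* decodes the layout produced by rnpgbe_vf_reset_msg() */
struct vf_reset_info {
	unsigned char pause_mode;	/* bits 23:16, hw->fc.current_mode */
	unsigned char ft_padding;	/* bit 8, RNP_PRIV_FLAG_FT_PADDING */
	unsigned char mc_type;		/* bits 1:0, RNP_ETH_DMAC_MCSTCTRL & 0x03 */
};

static inline void unpack_mc_type_word(unsigned int word,
				       struct vf_reset_info *info)
{
	info->pause_mode = (word >> 16) & 0xff;
	info->ft_padding = (word >> 8) & 0x01;
	info->mc_type = word & 0x03;
}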
+
+static int rnpgbe_get_vf_mac_addr(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				  u32 vf)
+{
+	u8 *mac = ((u8 *)(&msgbuf[1]));
+
+	memcpy(mac, adapter->vfinfo[vf].vf_mac_addresses, 6);
+
+	return 0;
+}
+
+/* vf call setup a new mac */
+static int rnpgbe_set_vf_mac_addr(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				  u32 vf)
+{
+	u8 *new_mac = ((u8 *)(&msgbuf[1]));
+
+	if (!is_valid_ether_addr(new_mac)) {
+		e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+		return -1;
+	}
+
+	if (adapter->vfinfo[vf].pf_set_mac &&
+	    memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, ETH_ALEN)) {
+		e_warn(drv,
+		       "VF %d attempted to override administratively set MAC address\n"
+		       "Reload the VF driver to resume operations\n",
+		       vf);
+		return -1;
+	}
+	rnpgbe_set_vf_mac(adapter, vf, new_mac);
+
+	return 0;
+}
+
+static int rnpgbe_set_vf_vlan_msg(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				  u32 vf)
+{
+	int add = ((msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT);
+	int vid = (msgbuf[1] & RNP_VLVF_VLANID_MASK);
+	int err;
+
+	if (adapter->vfinfo[vf].pf_vlan) {
+		e_warn(drv,
+		       "VF %d attempted to override administratively set VLAN "
+		       "configuration\n"
+		       "Reload the VF driver to resume operations\n",
+		       vf);
+		return -1;
+	}
+
+	if ((add) && (adapter->vfinfo[vf].vlan_count)) {
+		e_warn(drv, "VF %d attempted to set more than one vlan\n", vf);
+		e_warn(drv, " current vlan %d, tried to set %d\n",
+		       adapter->vfinfo[vf].vf_vlan, vid);
+		return -1;
+	}
+
+	/* vlan 0 needs no work */
+	if (!vid)
+		return 0;
+	if (add) {
+		adapter->vfinfo[vf].vlan_count++;
+		/* store vf vlan setup */
+		adapter->vfinfo[vf].vf_vlan = vid;
+	} else if (adapter->vfinfo[vf].vlan_count) {
+		adapter->vfinfo[vf].vf_vlan = 0;
+		adapter->vfinfo[vf].vlan_count--;
+	}
+
+	err = rnpgbe_set_vf_vlan(adapter, add, vid, vf);
+
+	return err;
+}
+
+static int rnpgbe_set_vf_vlan_strip_msg(struct rnpgbe_adapter *adapter,
+					u32 *msgbuf, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int vlan_strip_on = !!(msgbuf[1] >> 31);
+	int queue_cnt = msgbuf[1] & 0xffff;
+	int err = 0, i;
+
+	vf_dbg("strip_on:%d queeu_cnt:%d, %d %d\n", vlan_strip_on, queue_cnt,
+	       msgbuf[2], msgbuf[3]);
+
+	for (i = 0; i < queue_cnt; i++) {
+		if (vlan_strip_on)
+			hw->ops.set_vlan_strip(hw, msgbuf[2 + i], true);
+		else
+			hw->ops.set_vlan_strip(hw, msgbuf[2 + i], false);
+	}
+
+	return err;
+}
+
+static int rnpgbe_set_vf_macvlan_msg(struct rnpgbe_adapter *adapter,
+				     u32 *msgbuf, u32 vf)
+{
+	u8 *new_mac = ((u8 *)(&msgbuf[1]));
+	int index = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT;
+	int err;
+
+	if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+		e_warn(drv,
+		       "VF %d requested MACVLAN filter but is administratively denied\n",
+		       vf);
+		return -1;
+	}
+
+	/* A non-zero index indicates the VF is setting a filter */
+	if (index) {
+		if (!is_valid_ether_addr(new_mac)) {
+			e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+			return -1;
+		}
+	}
+
+	err = rnpgbe_set_vf_macvlan(adapter, vf, index, new_mac);
+	if (err == -ENOSPC)
+		e_warn(drv, "VF %d has requested a MACVLAN filter but "
+			    "there is no space for it\n", vf);
+
+	return err < 0;
+}
+
+static int rnpgbe_negotiate_vf_api(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				   u32 vf)
+{
+	adapter->vfinfo[vf].vf_api = 0;
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_reg(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+			     u32 vf)
+{
+	u32 reg = msgbuf[1];
+
+	msgbuf[1] = rd32(&adapter->hw, reg);
+
+	return 0;
+}
+
+static int rnpgbe_set_vf_mtu(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+			     u32 vf)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (msgbuf[1] > netdev->mtu) {
+		e_dev_warn("vf %d try to change %d mtu to %d (too large)\n",
+			   vf, netdev->mtu, msgbuf[1]);
+		return -1;
+	} else {
+		return 0;
+	}
+}
+
+static int rnpgbe_get_vf_mtu(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+			     u32 vf)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	msgbuf[1] = netdev->mtu;
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_fw(struct rnpgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	msgbuf[1] = hw->fw_version;
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_link(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+			      u32 vf)
+{
+	if (adapter->vfinfo[vf].link_state == rnpgbe_link_state_auto) {
+		msgbuf[1] = (adapter->link_up ? RNP_PF_LINK_UP : 0) |
+			    adapter->link_speed;
+	} else if (adapter->vfinfo[vf].link_state == rnpgbe_link_state_on) {
+		msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed;
+
+	} else {
+		msgbuf[1] = 0;
+	}
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_dma_frag(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				  u32 vf)
+{
+	/* fixed at 1536 bytes */
+	msgbuf[1] = 1536;
+	return 0;
+}
+
+static int rnpgbe_vf_get_stats_clr(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				   u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	if (dma_rd32(dma, RNP500_STATISTIC_CRL(vf)))
+		msgbuf[1] = 1;
+	else
+		msgbuf[1] = 0;
+
+	return 0;
+}
+
+static int rnpgbe_vf_set_stats_clr(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				   u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+
+	if (msgbuf[1])
+		dma_wr32(dma, RNP500_STATISTIC_CRL(vf), 1);
+	else
+		dma_wr32(dma, RNP500_STATISTIC_CRL(vf), 0);
+
+	return 0;
+}
+
+static int rnpgbe_get_vf_queues(struct rnpgbe_adapter *adapter, u32 *msgbuf,
+				u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	msgbuf[RNP_VF_TX_QUEUES] = hw->sriov_ring_limit;
+	msgbuf[RNP_VF_RX_QUEUES] = hw->sriov_ring_limit;
+	msgbuf[RNP_VF_TRANS_VLAN] = adapter->vfinfo[vf].pf_vlan;
+	msgbuf[RNP_VF_DEF_QUEUE] = 0;
+	if (hw->hw_type == rnpgbe_hw_n400) {
+		/* on n400: vf0 uses ring 4, vf1 uses ring 8, ... */
+		msgbuf[RNP_VF_QUEUE_START] = vf * 4 + 4;
+
+	} else {
+		if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+			msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit +
+						     hw->sriov_ring_limit;
+		else
+			msgbuf[RNP_VF_QUEUE_START] = vf * hw->sriov_ring_limit;
+	}
+	msgbuf[RNP_VF_QUEUE_DEPTH] = (adapter->tx_ring_item_count << 16) |
+				     adapter->rx_ring_item_count;
+
+	return 0;
+}
+
+static int rnpgbe_rcv_msg_from_vf(struct rnpgbe_adapter *adapter, u32 vf)
+{
+	u32 mbx_size = RNP_VFMAILBOX_SIZE;
+	u32 msgbuf[RNP_VFMAILBOX_SIZE];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	s32 retval;
+
+	vf_dbg("msg from vf:%d\n", vf);
+
+	retval = rnpgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+	if (retval) {
+		pr_err("Error receiving message from VF\n");
+		return retval;
+	}
+	vf_dbg("msg[0]=0x%08x\n", msgbuf[0]);
+
+	/* this is a message we already processed, do nothing */
+	if (msgbuf[0] & (RNP_VT_MSGTYPE_ACK | RNP_VT_MSGTYPE_NACK))
+		return retval;
+
+	/* flush the ack before we write any messages back */
+
+	/* clear vf_num */
+	msgbuf[0] &= (~RNP_VF_MASK);
+
+	/* this is a vf reset irq */
+	if ((msgbuf[0] & RNP_MAIL_CMD_MASK) == RNP_VF_RESET) {
+		vf_dbg("vf %d up\n", vf);
+		return rnpgbe_vf_reset_msg(adapter, vf);
+	}
+
+	/*
+	 * until the vf completes a virtual function reset it should not be
+	 * allowed to start any configuration.
+	 */
+	if (!adapter->vfinfo[vf].clear_to_send) {
+		vf_dbg("wait vf clear to send\n");
+		msgbuf[0] |= RNP_VT_MSGTYPE_NACK;
+		rnpgbe_write_mbx(hw, msgbuf, 1, vf);
+		return retval;
+	}
+
+	switch ((msgbuf[0] & RNP_MAIL_CMD_MASK)) {
+	case RNP_VF_SET_MAC_ADDR:
+		retval = rnpgbe_set_vf_mac_addr(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_MULTICAST:
+		retval = rnpgbe_set_vf_multicasts(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_VLAN:
+		retval = rnpgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_VLAN_STRIP:
+		retval = rnpgbe_set_vf_vlan_strip_msg(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_MACADDR:
+		retval = rnpgbe_get_vf_mac_addr(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_MACVLAN:
+		retval = rnpgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_API_NEGOTIATE:
+		retval = rnpgbe_negotiate_vf_api(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_QUEUES:
+		retval = rnpgbe_get_vf_queues(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_REG_RD:
+		retval = rnpgbe_get_vf_reg(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_MTU:
+		retval = rnpgbe_get_vf_mtu(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_SET_MTU:
+		retval = rnpgbe_set_vf_mtu(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_FW:
+		retval = rnpgbe_get_vf_fw(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_LINK:
+		retval = rnpgbe_get_vf_link(adapter, msgbuf, vf);
+		break;
+	case RNP_PF_REMOVE:
+		vf_dbg("vf %d removed\n", vf);
+		adapter->vfinfo[vf].clear_to_send = false;
+		adapter->vfinfo[vf].vf_vlan = 0;
+		/* TODO: clean up vf info */
+		retval = 1;
+		break;
+	case RNP_VF_RESET_PF:
+		adapter->flags2 |= RNP_FLAG2_RESET_PF;
+		retval = 1;
+		break;
+	case RNP_VF_GET_DMA_FRAG:
+		retval = rnpgbe_get_vf_dma_frag(adapter, msgbuf, vf);
+
+		break;
+	case RNP_VF_SET_STATS_CLR:
+		retval = rnpgbe_vf_set_stats_clr(adapter, msgbuf, vf);
+		break;
+	case RNP_VF_GET_STATS_CLR:
+		retval = rnpgbe_vf_get_stats_clr(adapter, msgbuf, vf);
+		break;
+	default:
+		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
+		retval = RNP_ERR_MBX;
+		break;
+	}
+
+	/* notify the VF of the results of what it sent us */
+	if (retval)
+		msgbuf[0] |= RNP_VT_MSGTYPE_NACK;
+	else
+		msgbuf[0] |= RNP_VT_MSGTYPE_ACK;
+
+	/* write vf_num */
+	msgbuf[0] |= (vf << 21);
+
+	msgbuf[0] |= RNP_VT_MSGTYPE_CTS;
+
+	if ((msgbuf[0] & RNP_MAIL_CMD_MASK) != RNP_PF_REMOVE)
+		rnpgbe_write_mbx(hw, msgbuf, mbx_size, vf);
+
+	return retval;
+}
+
+static void rnpgbe_rcv_ack_from_vf(struct rnpgbe_adapter *adapter, u32 vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 msg = RNP_VT_MSGTYPE_NACK;
+
+	/* if device isn't clear to send it shouldn't be reading either */
+	if (!adapter->vfinfo[vf].clear_to_send)
+		rnpgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void rnpgbe_msg_task(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 vf;
+
+	rnpgbe_fw_msg_handler(adapter);
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_INIT_DONE))
+		return;
+	for (vf = 0; vf < adapter->num_vfs; vf++) {
+		if (test_and_set_bit(__VF_MBX_USED,
+				     &adapter->vfinfo[vf].status)) {
+			adapter->miss_time++;
+			e_info(drv, "we missed some irqs %d\n", vf);
+			continue;
+		}
+
+		/* process any messages pending */
+		if (!rnpgbe_check_for_msg(hw, vf))
+			rnpgbe_rcv_msg_from_vf(adapter, vf);
+
+		/* process any acks */
+		if (!rnpgbe_check_for_ack(hw, vf))
+			rnpgbe_rcv_ack_from_vf(adapter, vf);
+		clear_bit(__VF_MBX_USED, &adapter->vfinfo[vf].status);
+	}
+}
+
+int rnpgbe_msg_post_status_signle_link(struct rnpgbe_adapter *adapter, int vf,
+				       int link_state)
+{
+	u32 msgbuf[RNP_VFMAILBOX_SIZE];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	msgbuf[0] = RNP_PF_SET_LINK | (vf << RNP_VNUM_OFFSET);
+	switch (link_state) {
+	case rnpgbe_link_state_on:
+		msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed;
+		break;
+	case rnpgbe_link_state_off:
+		msgbuf[1] = 0;
+		break;
+	case rnpgbe_link_state_auto:
+		if (adapter->link_up)
+			msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed;
+		else
+			msgbuf[1] = 0;
+		break;
+	}
+	return mbx->ops.write(hw, msgbuf, 2, vf);
+}
+
+int rnpgbe_msg_post_status_signle(struct rnpgbe_adapter *adapter,
+				  enum PF_STATUS status, int vf)
+{
+	u32 msgbuf[RNP_VFMAILBOX_SIZE];
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	switch (status) {
+	case PF_FCS_STATUS:
+		msgbuf[0] = RNP_PF_SET_FCS | (vf << RNP_VNUM_OFFSET);
+		if (adapter->netdev->features & NETIF_F_RXFCS)
+			msgbuf[1] = 1;
+		else
+			msgbuf[1] = 0;
+		break;
+	case PF_PAUSE_STATUS:
+		msgbuf[0] = RNP_PF_SET_PAUSE | (vf << RNP_VNUM_OFFSET);
+		msgbuf[1] = hw->fc.requested_mode;
+		break;
+	case PF_FT_PADDING_STATUS:
+		msgbuf[0] = RNP_PF_SET_FT_PADDING | (vf << RNP_VNUM_OFFSET);
+		if (adapter->priv_flags & RNP_PRIV_FLAG_FT_PADDING)
+			msgbuf[1] = 1;
+		else
+			msgbuf[1] = 0;
+
+		break;
+	case PF_VLAN_FILTER_STATUS:
+		msgbuf[0] = RNP_PF_SET_VLAN_FILTER | (vf << RNP_VNUM_OFFSET);
+		if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			msgbuf[1] = 1;
+		else
+			msgbuf[1] = 0;
+		break;
+	case PF_SET_VLAN_STATUS:
+		msgbuf[0] = RNP_PF_SET_VLAN | (vf << RNP_VNUM_OFFSET);
+
+		msgbuf[1] = adapter->vfinfo[vf].pf_vlan;
+		break;
+	case PF_SET_LINK_STATUS:
+		if (adapter->vfinfo[vf].link_state != rnpgbe_link_state_auto)
+			return 0;
+		/* only update link state if in auto mode */
+		msgbuf[0] = RNP_PF_SET_LINK | (vf << RNP_VNUM_OFFSET);
+		if (adapter->link_up)
+			msgbuf[1] = RNP_PF_LINK_UP | adapter->link_speed;
+		else
+			msgbuf[1] = 0;
+		break;
+	case PF_SET_MTU:
+		msgbuf[0] = RNP_PF_SET_MTU | (vf << RNP_VNUM_OFFSET);
+		msgbuf[1] = adapter->netdev->mtu;
+		break;
+	case PF_SET_RESET:
+		msgbuf[0] = RNP_PF_SET_RESET | (vf << RNP_VNUM_OFFSET);
+		msgbuf[1] = 0;
+
+		break;
+	}
+	return mbx->ops.write_posted(hw, msgbuf, 2, vf);
+}
+
+/* try to send mailbox to all active vf */
+int rnpgbe_msg_post_status(struct rnpgbe_adapter *adapter,
+			   enum PF_STATUS status)
+{
+	u32 vf;
+	int err = 0;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return 0;
+	/* broadcast */
+	for (vf = 0; vf < adapter->num_vfs; vf++) {
+		if (!(adapter->vfinfo[vf].clear_to_send))
+			continue;
+
+		if (!test_bit(__RNP_IN_IRQ, &adapter->state)) {
+			if (test_and_set_bit(__VF_MBX_USED,
+					     &adapter->vfinfo[vf].status)) {
+				adapter->miss_time++;
+				printk(KERN_DEBUG "vf mailbox busy, send missed\n");
+				return -1;
+			}
+			err |= rnpgbe_msg_post_status_signle(adapter, status,
+							     vf);
+			clear_bit(__VF_MBX_USED, &adapter->vfinfo[vf].status);
+		}
+	}
+	return err;
+}
+
+void rnpgbe_ping_all_vfs(struct rnpgbe_adapter *adapter)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u32 ping;
+	int i;
+
+	for (i = 0; i < adapter->num_vfs; i++) {
+		ping = RNP_PF_CONTROL_PRING_MSG;
+		/* mark the ping as clear-to-send */
+		ping |= RNP_VT_MSGTYPE_CTS;
+		rnpgbe_write_mbx(hw, &ping, 1, i);
+	}
+}
+
+int rnpgbe_get_vf_ringnum(struct rnpgbe_hw *hw, int vf, int num)
+{
+	return vf;
+}
+
+int rnpgbe_setup_ring_maxrate(struct rnpgbe_adapter *adapter, int ring,
+			      u64 max_rate)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct rnpgbe_dma_info *dma = &hw->dma;
+	int samples_1sec = adapter->hw.usecstocount * 100000;
+
+	dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TM,
+		      samples_1sec);
+	dma_ring_wr32(dma, RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TH,
+		      max_rate);
+	return 0;
+}
+
+static int rnpgbe_disable_port_vlan(struct rnpgbe_adapter *adapter, int vf)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err;
+
+	err = rnpgbe_set_vf_vlan(adapter, false, adapter->vfinfo[vf].pf_vlan,
+				 vf);
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) {
+		if (hw->ops.set_vf_vlan_mode) {
+			if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+				hw->ops.set_vf_vlan_mode(
+					hw, adapter->vfinfo[vf].pf_vlan, vf + 1,
+					false);
+			else
+				hw->ops.set_vf_vlan_mode(
+					hw, adapter->vfinfo[vf].pf_vlan, vf,
+					false);
+		}
+	}
+	adapter->vfinfo[vf].pf_vlan = 0;
+	adapter->vfinfo[vf].pf_qos = 0;
+	/* clear veb */
+	hw->ops.set_vf_vlan_filter(hw, 0, vf, false, true);
+
+	return err;
+}
+
+static int rnpgbe_enable_port_vlan(struct rnpgbe_adapter *adapter, int vf,
+				   u16 vlan, u8 qos)
+{
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int err;
+
+	err = rnpgbe_set_vf_vlan(adapter, true, vlan, vf);
+	if (err)
+		goto out;
+
+	adapter->vfinfo[vf].pf_vlan = vlan;
+	adapter->vfinfo[vf].pf_qos = qos;
+	dev_info(pci_dev_to_dev(adapter->pdev),
+		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		dev_warn(
+			pci_dev_to_dev(adapter->pdev),
+			"The VF VLAN has been set, but the PF device is not up.\n");
+		dev_warn(
+			pci_dev_to_dev(adapter->pdev),
+			"Bring the PF device up before attempting to use the VF device.\n");
+	}
+	/* setup veb only */
+	hw->ops.set_vf_vlan_filter(hw, vlan, vf, true, true);
+
+	/* in sriov vlan mode we must also set up the pfvlvf table */
+	if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) {
+		if (hw->ops.set_vf_vlan_mode) {
+			if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED)
+				hw->ops.set_vf_vlan_mode(hw, vlan, vf + 1,
+							 true);
+			else
+				hw->ops.set_vf_vlan_mode(hw, vlan, vf, true);
+		}
+	}
+out:
+	return err;
+}
+
+int rnpgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+			   __be16 vlan_proto)
+{
+	int err = 0;
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	/* VLAN IDs accepted range 0-4094 */
+	if (vf < 0 || vf >= adapter->num_vfs || vlan > VLAN_VID_MASK - 1 ||
+	    qos > 7)
+		return -EINVAL;
+
+	if (vlan_proto != htons(ETH_P_8021Q))
+		return -EPROTONOSUPPORT;
+	if (vlan || qos) {
+		/*
+		 * Check if there is already a port VLAN set, if so
+		 * we have to delete the old one first before we
+		 * can set the new one.  The usage model had
+		 * previously assumed the user would delete the
+		 * old port VLAN before setting a new one but this
+		 * is not necessarily the case.
+		 */
+		if (adapter->vfinfo[vf].vf_vlan) {
+			dev_err(&adapter->pdev->dev,
+				"vf already set a vlan, delete it before adding a new one\n");
+			err = -EINVAL;
+			goto out;
+		}
+		if (adapter->vfinfo[vf].pf_vlan)
+			err = rnpgbe_disable_port_vlan(adapter, vf);
+		if (err)
+			goto out;
+		err = rnpgbe_enable_port_vlan(adapter, vf, vlan, qos);
+
+	} else {
+		/* if the vf set a vlan itself, return an error */
+		if ((!adapter->vfinfo[vf].pf_vlan) &&
+		    (adapter->vfinfo[vf].vf_vlan)) {
+			dev_err(&adapter->pdev->dev,
+				"pf can't delete a vlan set by the vf\n");
+
+			err = -EINVAL;
+			goto out;
+		} else if (adapter->vfinfo[vf].pf_vlan) {
+			err = rnpgbe_disable_port_vlan(adapter, vf);
+		}
+	}
+	/* send mbx to vf */
+	if (adapter->vfinfo[vf].clear_to_send)
+		rnpgbe_msg_post_status_signle(adapter, PF_SET_VLAN_STATUS, vf);
+out:
+	return err;
+}
+
+#if IS_ENABLED(CONFIG_PCI_IOV)
+int rnpgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (vf < 0 || vf >= adapter->num_vfs)
+		return -EINVAL;
+
+	/* the hardware may not support this */
+	adapter->vfinfo[vf].spoofchk_enabled = setting;
+
+	return 0;
+}
+#else
+inline int rnpgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
+{
+	return 0;
+}
+#endif /* IS_ENABLED(CONFIG_PCI_IOV) */
+
+int rnpgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (vf < 0 || vf >= adapter->num_vfs)
+		return -EINVAL;
+
+	/* nothing to do */
+	if (adapter->vfinfo[vf].trusted == setting)
+		return 0;
+
+	adapter->vfinfo[vf].trusted = setting;
+	e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
+
+	return 0;
+}
+
+int rnpgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (vf < 0 || vf >= adapter->num_vfs) {
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"NDO set VF link - invalid VF identifier %d\n", vf);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	switch (state) {
+	case IFLA_VF_LINK_STATE_ENABLE:
+		dev_info(pci_dev_to_dev(adapter->pdev),
+			 "NDO set VF %d link state enable\n", vf);
+		adapter->vfinfo[vf].link_state = rnpgbe_link_state_on;
+		rnpgbe_msg_post_status_signle_link(adapter, vf,
+						   rnpgbe_link_state_on);
+		break;
+	case IFLA_VF_LINK_STATE_DISABLE:
+		dev_info(pci_dev_to_dev(adapter->pdev),
+			 "NDO set VF %d link state disable\n", vf);
+		adapter->vfinfo[vf].link_state = rnpgbe_link_state_off;
+		rnpgbe_msg_post_status_signle_link(adapter, vf,
+						   rnpgbe_link_state_off);
+		break;
+	case IFLA_VF_LINK_STATE_AUTO:
+		dev_info(pci_dev_to_dev(adapter->pdev),
+			 "NDO set VF %d link state auto\n", vf);
+		adapter->vfinfo[vf].link_state = rnpgbe_link_state_auto;
+		rnpgbe_msg_post_status_signle_link(adapter, vf,
+						   rnpgbe_link_state_auto);
+		break;
+	default:
+		dev_err(pci_dev_to_dev(adapter->pdev),
+			"NDO set VF %d - invalid link state %d\n", vf, state);
+		ret = -EINVAL;
+	}
+out:
+	return ret;
+}
+
+int rnpgbe_ndo_set_vf_bw(struct net_device *netdev, int vf,
+			 int __always_unused min_tx_rate, int max_tx_rate)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	/* limit vf ring rate */
+	int ring_max_rate;
+	int vf_ring;
+	int link_speed = 0;
+	u64 real_rate = 0;
+
+	if (vf >= hw->max_vfs - 1)
+		return -EINVAL;
+
+	switch (adapter->link_speed) {
+	case RNP_LINK_SPEED_40GB_FULL:
+		link_speed = 40000;
+		break;
+	case RNP_LINK_SPEED_25GB_FULL:
+		link_speed = 25000;
+		break;
+	case RNP_LINK_SPEED_10GB_FULL:
+		link_speed = 10000;
+		break;
+	case RNP_LINK_SPEED_1GB_FULL:
+		link_speed = 1000;
+		break;
+	case RNP_LINK_SPEED_100_FULL:
+		link_speed = 100;
+		break;
+	}
+	/* rate limit cannot be greater than link speed */
+	if (max_tx_rate && (max_tx_rate > link_speed))
+		return -EINVAL;
+
+	adapter->vfinfo[vf].tx_rate = max_tx_rate;
+
+	ring_max_rate = max_tx_rate / hw->sriov_ring_limit;
+
+	if (max_tx_rate <= 10)
+		real_rate = (ring_max_rate * 1000 * 85) >> 3;
+	else if (max_tx_rate <= 50)
+		real_rate = (ring_max_rate * 1000 * 90) >> 3;
+	else if (max_tx_rate <= 100)
+		real_rate = (ring_max_rate * 1000 * 94) >> 3;
+	else
+		real_rate = (ring_max_rate * 1000 * 99) >> 3;
+
+	vf_ring = rnpgbe_get_vf_ringnum(hw, vf, 0);
+	rnpgbe_setup_ring_maxrate(adapter, vf_ring, real_rate);
+
+	return 0;
+}
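
To make the rate conversion above easier to check, here is a hedged sketch that reproduces the Mbit/s-to-ring-threshold scaling from rnpgbe_ndo_set_vf_bw(): the requested rate is divided across the VF's rings, multiplied by 1000 and a margin factor that grows with the rate, then shifted right by 3 (divide by 8), exactly as in the driver code. The helper name is illustrative; sriov_ring_limit is whatever value the hw struct carries.

static unsigned long long ring_rate_threshold(int max_tx_rate_mbps,
					      int sriov_ring_limit)
{
	int ring_max_rate = max_tx_rate_mbps / sriov_ring_limit;
	int factor;

	if (max_tx_rate_mbps <= 10)
		factor = 85;		/* smallest rates get the largest margin */
	else if (max_tx_rate_mbps <= 50)
		factor = 90;
	else if (max_tx_rate_mbps <= 100)
		factor = 94;
	else
		factor = 99;

	/* same scaling as the driver: x1000, margin factor, then >>3 */
	return ((unsigned long long)ring_max_rate * 1000 * factor) >> 3;
}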
+
+int rnpgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
+		return -EINVAL;
+	adapter->vfinfo[vf].pf_set_mac = true;
+	dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+				      " change effective.");
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		dev_warn(&adapter->pdev->dev,
+			 "The VF MAC address has been set, but the PF device is not up.\n");
+		dev_warn(&adapter->pdev->dev,
+			 "Bring the PF device up before attempting to use the VF device.\n");
+	}
+	rnpgbe_set_vf_mac(adapter, vf, mac);
+	/* send reset to vf only vf is up */
+	if (adapter->vfinfo[vf].clear_to_send)
+		rnpgbe_msg_post_status_signle(adapter, PF_SET_RESET, vf);
+
+	return 0;
+}
+
+int rnpgbe_ndo_get_vf_config(struct net_device *netdev, int vf,
+			     struct ifla_vf_info *ivi)
+{
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (vf >= adapter->num_vfs)
+		return -EINVAL;
+	ivi->vf = vf;
+	memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
+	ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
+	ivi->min_tx_rate = 0;
+
+	if (adapter->vfinfo[vf].pf_vlan)
+		ivi->vlan = adapter->vfinfo[vf].pf_vlan;
+	else
+		ivi->vlan = adapter->vfinfo[vf].vf_vlan;
+
+	ivi->qos = adapter->vfinfo[vf].pf_qos;
+	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
+	ivi->linkstate = adapter->vfinfo[vf].link_state;
+	ivi->trusted = adapter->vfinfo[vf].trusted;
+
+	return 0;
+}
+
+int rnpgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+	vf_dbg("\n\n !!!! %s:%d num_vfs:%d\n", __func__, __LINE__, num_vfs);
+	if (num_vfs == 0)
+		return rnpgbe_pci_sriov_disable(dev);
+	else
+		return rnpgbe_pci_sriov_enable(dev, num_vfs);
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.h
new file mode 100644
index 0000000000000..7e93e9a4d7e25
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sriov.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_SRIOV_H_
+#define _RNPGBE_SRIOV_H_
+
+int rnpgbe_setup_ring_maxrate(struct rnpgbe_adapter *adapter, int ring,
+			      u64 max_rate);
+int rnpgbe_get_vf_ringnum(struct rnpgbe_hw *hw, int vf, int num);
+void rnpgbe_restore_vf_macs(struct rnpgbe_adapter *adapter);
+void rnpgbe_restore_vf_multicasts(struct rnpgbe_adapter *adapter);
+void rnpgbe_restore_vf_macvlans(struct rnpgbe_adapter *adapter);
+void rnpgbe_msg_task(struct rnpgbe_adapter *adapter);
+int rnpgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void rnpgbe_ping_all_vfs(struct rnpgbe_adapter *adapter);
+int rnpgbe_ndo_set_vf_bw(struct net_device *netdev, int vf,
+			 int __always_unused min_tx_rate, int max_tx_rate);
+int rnpgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+int rnpgbe_msg_post_status(struct rnpgbe_adapter *adapter,
+			   enum PF_STATUS status);
+int rnpgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+int rnpgbe_ndo_get_vf_config(struct net_device *netdev, int vf,
+			     struct ifla_vf_info *ivi);
+void rnpgbe_check_vf_rate_limit(struct rnpgbe_adapter *adapter);
+int rnpgbe_disable_sriov(struct rnpgbe_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
+void rnpgbe_enable_sriov_true(struct rnpgbe_adapter *adapter);
+void rnpgbe_enable_sriov(struct rnpgbe_adapter *adapter);
+#endif /* CONFIG_PCI_IOV */
+int rnpgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int rnpgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+			   __be16 vlan_proto);
+int rnpgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
+int rnpgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
+
+#endif /* _RNPGBE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c
new file mode 100644
index 0000000000000..cf5afa5d2a7ba
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sysfs.c
@@ -0,0 +1,1247 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpgbe.h"
+#include "rnpgbe_common.h"
+#include "rnpgbe_type.h"
+#include "version.h"
+#include "rnpgbe_mbx.h"
+#include "rnpgbe_mbx_fw.h"
+
+struct maintain_req {
+	int magic;
+#define MAINTAIN_MAGIC 0xa6a7a8a9
+
+	int cmd;
+	int arg0;
+	int req_data_bytes;
+	int reply_bytes;
+	char data[];
+} __attribute__((packed));
+
+struct maintain_reply {
+	int magic;
+#define MAINTAIN_REPLY_MAGIC 0xB6B7B8B9
+	int cmd;
+	int arg0;
+	int data_bytes;
+	int rev;
+	int data[];
+} __attribute__((packed));
+
+struct ucfg_mac_sn {
+	unsigned char macaddr[64];
+	unsigned char sn[32];
+	int magic;
+#define MAC_SN_MAGIC 0x87654321
+	char rev[52];
+	unsigned char pn[32];
+} __attribute__((packed, aligned(4)));
+
+static int print_desc(char *buf, void *data, int len)
+{
+	u8 *ptr = (u8 *)data;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < len; i++)
+		ret += sprintf(buf + ret, "%02x ", *(ptr + i));
+
+	return ret;
+}
+
+#ifdef RNPGBE_HWMON
+static ssize_t rnpgbe_hwmon_show_location(struct device __always_unused *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct hwmon_attr *rnpgbe_attr =
+		container_of(attr, struct hwmon_attr, dev_attr);
+
+	return snprintf(buf, PAGE_SIZE, "loc%u\n",
+			rnpgbe_attr->sensor->location);
+}
+
+static ssize_t rnpgbe_hwmon_show_name(struct device __always_unused *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "rnpgbe\n");
+}
+
+static ssize_t rnpgbe_hwmon_show_temp(struct device __always_unused *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct hwmon_attr *rnpgbe_attr =
+		container_of(attr, struct hwmon_attr, dev_attr);
+	unsigned int value;
+
+	/* reset the temp field */
+	rnpgbe_attr->hw->ops.get_thermal_sensor_data(rnpgbe_attr->hw);
+
+	value = rnpgbe_attr->sensor->temp;
+	/* display millidegree */
+	value *= 1000;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+static ssize_t
+rnpgbe_hwmon_show_cautionthresh(struct device __always_unused *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct hwmon_attr *rnpgbe_attr =
+		container_of(attr, struct hwmon_attr, dev_attr);
+	unsigned int value = rnpgbe_attr->sensor->caution_thresh;
+	/* display millidegree */
+	value *= 1000;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+static ssize_t rnpgbe_hwmon_show_maxopthresh(struct device __always_unused *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct hwmon_attr *rnpgbe_attr =
+		container_of(attr, struct hwmon_attr, dev_attr);
+	unsigned int value = rnpgbe_attr->sensor->max_op_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", value);
+}
+
+/**
+ * rnpgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute
+ * This is included in our hwmon_attr struct that contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int rnpgbe_add_hwmon_attr(struct rnpgbe_adapter *adapter,
+				 unsigned int offset, int type)
+{
+	unsigned int n_attr;
+	struct hwmon_attr *rnpgbe_attr;
+
+	n_attr = adapter->rnpgbe_hwmon_buff->n_hwmon;
+	rnpgbe_attr = &adapter->rnpgbe_hwmon_buff->hwmon_list[n_attr];
+
+	switch (type) {
+	case RNPGBE_HWMON_TYPE_LOC:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_location;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name),
+			 "temp%u_label", offset + 1);
+		break;
+	case RNPGBE_HWMON_TYPE_NAME:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_name;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name), "name");
+		break;
+	case RNPGBE_HWMON_TYPE_TEMP:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_temp;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name),
+			 "temp%u_input", offset + 1);
+		break;
+	case RNPGBE_HWMON_TYPE_CAUTION:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_cautionthresh;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name),
+			 "temp%u_max", offset + 1);
+		break;
+	case RNPGBE_HWMON_TYPE_MAX:
+		rnpgbe_attr->dev_attr.show = rnpgbe_hwmon_show_maxopthresh;
+		snprintf(rnpgbe_attr->name, sizeof(rnpgbe_attr->name),
+			 "temp%u_crit", offset + 1);
+		break;
+	default:
+		return -EPERM;
+	}
+
+	/* These are always the same regardless of type */
+	rnpgbe_attr->sensor = &adapter->hw.thermal_sensor_data.sensor[offset];
+	rnpgbe_attr->hw = &adapter->hw;
+	rnpgbe_attr->dev_attr.store = NULL;
+	rnpgbe_attr->dev_attr.attr.mode = 0444;
+	rnpgbe_attr->dev_attr.attr.name = rnpgbe_attr->name;
+
+	sysfs_attr_init(&rnpgbe_attr->dev_attr.attr);
+
+	adapter->rnpgbe_hwmon_buff->attrs[n_attr] = &rnpgbe_attr->dev_attr.attr;
+
+	++adapter->rnpgbe_hwmon_buff->n_hwmon;
+
+	return 0;
+}
+#endif /* RNPGBE_HWMON */
+
+#define to_net_device(n) container_of(n, struct net_device, dev)
+
+static ssize_t maintain_read(struct file *filp, struct kobject *kobj,
+			     struct bin_attribute *attr, char *buf, loff_t off,
+			     size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int rbytes = count;
+
+	if (adapter->maintain_buf == NULL)
+		return 0;
+
+	if (off + count > adapter->maintain_buf_len)
+		rbytes = adapter->maintain_buf_len - off;
+
+	memcpy(buf, adapter->maintain_buf + off, rbytes);
+
+	if ((off + rbytes) >= adapter->maintain_buf_len) {
+		kfree(adapter->maintain_buf);
+		adapter->maintain_buf = NULL;
+		adapter->maintain_buf_len = 0;
+	}
+
+	return rbytes;
+}
+
+static void n500_exchange_share_ram(struct rnpgbe_hw *hw,
+				    u32 *buf, int flag,
+				    int len)
+{
+	int i;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+
+	if (len > mbx->share_size)
+		return;
+	/* write */
+	if (flag) {
+		for (i = 0; i < len; i = i + 4)
+			rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+				      *(buf + i / 4));
+	} else {
+		/* read */
+		for (i = 0; i < len; i = i + 4)
+			*(buf + i / 4) = rnpgbe_rd_reg(
+				hw->hw_addr + mbx->cpu_vf_share_ram + i);
+	}
+}
+
+static void n210_clean_share_ram(struct rnpgbe_hw *hw)
+{
+	int i;
+	struct rnpgbe_mbx_info *mbx = &hw->mbx;
+	int len = mbx->share_size;
+
+	for (i = 0; i < len; i = i + 4)
+		rnpgbe_wr_reg(hw->hw_addr + mbx->cpu_vf_share_ram + i,
+			      0xffffffff);
+}
+
+static int check_fw_type(struct rnpgbe_hw *hw, const u8 *data)
+{
+	u32 device_id;
+	int ret = 0;
+
+	device_id = *((u16 *)data + 30);
+
+	/* if no device_id no check */
+	if ((device_id == 0) || (device_id == 0xffff))
+		return 0;
+
+	switch (hw->hw_type) {
+	case rnpgbe_hw_n500:
+		if (device_id != 0x8308)
+			ret = 1;
+		break;
+	case rnpgbe_hw_n210:
+		if (device_id != 0x8208)
+			ret = 1;
+		break;
+	default:
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static ssize_t maintain_write(struct file *filp, struct kobject *kobj,
+			      struct bin_attribute *attr, char *buf,
+			      loff_t off,
+			      size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	int err = -EINVAL;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	struct maintain_req *req;
+	void *dma_buf = NULL;
+	dma_addr_t dma_phy;
+	int bytes;
+
+	if (off == 0) {
+		if (count < sizeof(*req))
+			return -EINVAL;
+		req = (struct maintain_req *)buf;
+		if (req->magic != MAINTAIN_MAGIC)
+			return -EINVAL;
+
+		bytes = max_t(int, req->req_data_bytes, req->reply_bytes);
+		bytes += sizeof(*req);
+
+		if (adapter->maintain_buf) {
+			kfree(adapter->maintain_buf);
+			adapter->maintain_buf = NULL;
+			adapter->maintain_buf_len = 0;
+		}
+
+		dma_buf = dma_alloc_coherent(&hw->pdev->dev, bytes, &dma_phy,
+					     GFP_ATOMIC);
+		if (!dma_buf) {
+			netdev_err(netdev, "%s: alloc dma_buf failed:%d!",
+				   __func__,
+				   bytes);
+			return -ENOMEM;
+		}
+
+		adapter->maintain_dma_buf = dma_buf;
+		adapter->maintain_dma_phy = dma_phy;
+		adapter->maintain_dma_size = bytes;
+		adapter->maintain_in_bytes = req->req_data_bytes + sizeof(*req);
+
+		memcpy(dma_buf + off, buf, count);
+
+		if (count < adapter->maintain_in_bytes)
+			return count;
+	}
+
+	dma_buf = adapter->maintain_dma_buf;
+	dma_phy = adapter->maintain_dma_phy;
+	req = (struct maintain_req *)dma_buf;
+	memcpy(dma_buf + off, buf, count);
+
+	/* all data got, send req */
+	if ((off + count) >= adapter->maintain_in_bytes) {
+		int reply_bytes = req->reply_bytes;
+		int offset;
+		struct rnpgbe_mbx_info *mbx = &hw->mbx;
+		/* check the firmware image type here for n210/n500 */
+		if (req->cmd == 1) {
+			if (check_fw_type(hw, (u8 *)(dma_buf + sizeof(*req)))) {
+				err = -EINVAL;
+				goto err_quit;
+			}
+		}
+
+		if (req->cmd) {
+			int data_len;
+			int ram_size = mbx->share_size;
+
+			offset = 0;
+			if ((req->req_data_bytes > ram_size) &&
+			    (req->cmd == 1)) {
+				offset += ram_size;
+				/* on n210, clear the header area first */
+				if (hw->hw_type == rnpgbe_hw_n210) {
+					n210_clean_share_ram(hw);
+					err = rnpgbe_maintain_req(hw, req->cmd,
+							req->arg0,
+							0, 0, 0);
+					if (err != 0)
+						goto err_quit;
+				}
+			}
+
+			while (offset < req->req_data_bytes) {
+				data_len =
+					(req->req_data_bytes - offset) >
+					ram_size ?
+					ram_size :
+					(req->req_data_bytes -
+					 offset);
+				/* copy to ram */
+				n500_exchange_share_ram(hw,
+						(u32 *)(dma_buf + offset +
+							sizeof(*req)),
+						1, data_len);
+				err = rnpgbe_maintain_req(hw, req->cmd,
+						req->arg0,
+						offset, 0, 0);
+				if (err != 0)
+					goto err_quit;
+
+				offset += data_len;
+			}
+			/* write the header last when updating the hw image */
+			if ((req->req_data_bytes > ram_size) && (req->cmd == 1)) {
+				offset = 0;
+				data_len = ram_size;
+				/* copy to ram */
+				n500_exchange_share_ram(hw,
+						(u32 *)(dma_buf + offset +
+							sizeof(*req)),
+						1, data_len);
+				err = rnpgbe_maintain_req(hw, req->cmd,
+						req->arg0,
+						offset, 0, 0);
+				if (err != 0)
+					goto err_quit;
+			}
+
+		} else {
+			/* this is a read request */
+			int data_len;
+			int ram_size = mbx->share_size;
+			struct maintain_reply reply;
+
+			adapter->maintain_buf_len = (reply_bytes + 3) & (~3);
+			adapter->maintain_buf = kmalloc(
+					adapter->maintain_buf_len, GFP_KERNEL);
+			if (!adapter->maintain_buf) {
+				netdev_err(netdev, "alloc failed for maintain buf:%d\n",
+					   adapter->maintain_buf_len);
+				err = -ENOMEM;
+
+				goto err_quit;
+			}
+			reply.magic = MAINTAIN_REPLY_MAGIC;
+			reply.cmd = req->cmd;
+			reply.arg0 = req->arg0;
+			reply.data_bytes = req->reply_bytes;
+			memcpy(adapter->maintain_buf, &reply,
+					sizeof(struct maintain_reply));
+
+			reply_bytes = reply_bytes - sizeof(*req);
+			/* the reply header is in place; now read back the data */
+			offset = 0;
+			while (offset < reply_bytes) {
+				data_len = (reply_bytes - offset) >
+					   ram_size ?
+					   ram_size :
+					   (reply_bytes - offset);
+				err = rnpgbe_maintain_req(hw, req->cmd,
+						req->arg0, 0,
+						offset, 0);
+				if (err != 0)
+					goto err_quit;
+				/* copy from the shared ram */
+				n500_exchange_share_ram(hw,
+						(u32 *)(adapter->maintain_buf +
+							offset + sizeof(*req)),
+						0, data_len);
+				offset += data_len;
+			}
+		}
+		if (dma_buf) {
+			dma_free_coherent(&hw->pdev->dev,
+					adapter->maintain_dma_size,
+					dma_buf, dma_phy);
+		}
+		adapter->maintain_dma_buf = NULL;
+
+	}
+
+	return count;
+err_quit:
+	if (dma_buf) {
+		dma_free_coherent(&hw->pdev->dev, adapter->maintain_dma_size,
+				dma_buf, dma_phy);
+		adapter->maintain_dma_buf = NULL;
+	}
+	return err;
+}
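
For context, a hedged userspace sketch of how a tool might drive the "maintain" bin attribute implemented above: it fills in the maintain_req header (magic, cmd, sizes) and writes it in one shot; the reply can then be read back from the same attribute via maintain_read(). The sysfs path, the cmd value and the reply size are assumptions for illustration, not values taken from the driver.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MAINTAIN_MAGIC 0xa6a7a8a9

/* mirrors struct maintain_req from rnpgbe_sysfs.c */
struct maintain_req {
	int magic;
	int cmd;
	int arg0;
	int req_data_bytes;
	int reply_bytes;
	char data[];
} __attribute__((packed));

int main(void)
{
	/* path and sizes are assumptions for illustration only */
	const char *path = "/sys/class/net/eth0/maintain";
	struct maintain_req req;
	int fd;

	memset(&req, 0, sizeof(req));
	req.magic = MAINTAIN_MAGIC;
	req.cmd = 0;		/* cmd 0 takes the read/reply path in maintain_write() */
	req.arg0 = 0;
	req.req_data_bytes = 0;
	req.reply_bytes = 128;	/* assumed reply size */

	fd = open(path, O_WRONLY);
	if (fd < 0 || write(fd, &req, sizeof(req)) != (ssize_t)sizeof(req))
		perror("maintain request");
	if (fd >= 0)
		close(fd);
	return 0;
}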
+
+static BIN_ATTR(maintain, 0644, maintain_read, maintain_write, 1 * 1024 * 1024);
+
+static ssize_t version_info_show(struct device *dev, struct device_attribute *attr,
+				 char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = 0;
+
+	ret += sprintf(buf + ret, "drver: %s %s\n",
+			rnpgbe_driver_version, GIT_COMMIT);
+
+	ret += sprintf(buf + ret, "fw   : %d.%d.%d.%d 0x%08x\n",
+		       ((char *)&(hw->fw_version))[3],
+		       ((char *)&(hw->fw_version))[2],
+		       ((char *)&(hw->fw_version))[1],
+		       ((char *)&(hw->fw_version))[0],
+		       hw->bd_uid | (hw->sfc_boot ? 0x80000000 : 0) |
+		       (hw->pxe_en ? 0x40000000 : 0) |
+		       (hw->ncsi_en ? 0x20000000 : 0) |
+		       (hw->trim_valid ? 0x10000000 : 0));
+
+	return ret;
+}
+
+static ssize_t rx_desc_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 rx_ring_num = adapter->sysfs_rx_ring_num;
+	u32 rx_desc_num = adapter->sysfs_rx_desc_num;
+	struct rnpgbe_ring *ring = adapter->rx_ring[rx_ring_num];
+	int ret = 0;
+	union rnpgbe_rx_desc *desc;
+
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		ret += sprintf(buf + ret, "port not up \n");
+
+		return ret;
+	}
+
+	desc = RNP_RX_DESC(ring, rx_desc_num);
+	ret += sprintf(buf + ret, "rx ring %d desc %d:\n", rx_ring_num,
+			rx_desc_num);
+	ret += print_desc(buf + ret, desc, sizeof(*desc));
+	ret += sprintf(buf + ret, "\n");
+
+	return ret;
+}
+
+static ssize_t rx_desc_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = count;
+
+	u32 rx_desc_num = adapter->sysfs_rx_desc_num;
+	u32 rx_ring_num = adapter->sysfs_rx_ring_num;
+
+	struct rnpgbe_ring *ring = adapter->rx_ring[rx_ring_num];
+
+	if (kstrtou32(buf, 0, &rx_desc_num) != 0)
+		return -EINVAL;
+	if (rx_desc_num < ring->count)
+		adapter->sysfs_rx_desc_num = rx_desc_num;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static ssize_t tcp_sync_info_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC)
+		ret += sprintf(buf + ret, "tcp syn to queue %d prio %s\n",
+			       adapter->tcp_sync_queue,
+			       (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO) ?
+			       "NO" :
+			       "OFF");
+	else
+		ret += sprintf(buf + ret, "tcp sync remap off\n");
+
+	return ret;
+}
+
+static ssize_t tcp_sync_info_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = count;
+	u32 tcp_sync_queue;
+
+	if (kstrtou32(buf, 0, &tcp_sync_queue) != 0)
+		return -EINVAL;
+
+	if (tcp_sync_queue < adapter->num_rx_queues) {
+		adapter->tcp_sync_queue = tcp_sync_queue;
+		adapter->priv_flags |= RNP_PRIV_FLAG_TCP_SYNC;
+
+		if (adapter->priv_flags & RNP_PRIV_FLAG_TCP_SYNC_PRIO)
+			hw->ops.set_tcp_sync_remapping(hw,
+						       adapter->tcp_sync_queue,
+						       true, true);
+		else
+			hw->ops.set_tcp_sync_remapping(hw,
+						       adapter->tcp_sync_queue,
+						       true, false);
+
+	} else {
+		adapter->priv_flags &= ~RNP_PRIV_FLAG_TCP_SYNC;
+
+		hw->ops.set_tcp_sync_remapping(hw, adapter->tcp_sync_queue,
+				false, false);
+	}
+
+	return ret;
+}
+
+static ssize_t rx_skip_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_RX_SKIP_EN)
+		ret += sprintf(buf + ret, "rx skip bytes: %d\n",
+			       16 * (adapter->priv_skip_count + 1));
+	else
+		ret += sprintf(buf + ret, "rx skip off\n");
+
+	return ret;
+}
+
+static ssize_t rx_drop_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	ret += sprintf(buf + ret, "rx_drop_status %llx\n",
+		       adapter->rx_drop_status);
+
+	return ret;
+}
+
+static ssize_t rx_drop_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = count;
+	u64 rx_drop_status;
+
+	if (kstrtou64(buf, 0, &rx_drop_status) != 0)
+		return -EINVAL;
+
+	adapter->rx_drop_status = rx_drop_status;
+
+	hw->ops.update_rx_drop(hw);
+
+	return ret;
+}
+
+static ssize_t outer_vlan_info_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (adapter->priv_flags & RNP_PRIV_FLAG_DOUBLE_VLAN)
+		ret += sprintf(buf + ret, "double vlan on\n");
+	else
+		ret += sprintf(buf + ret, "double vlan off\n");
+
+	switch (adapter->outer_vlan_type) {
+	case outer_vlan_type_88a8:
+		ret += sprintf(buf + ret, "outer vlan 0x88a8\n");
+
+		break;
+	case outer_vlan_type_9100:
+		ret += sprintf(buf + ret, "outer vlan 0x9100\n");
+
+		break;
+	case outer_vlan_type_9200:
+		ret += sprintf(buf + ret, "outer vlan 0x9200\n");
+
+		break;
+	default:
+		ret += sprintf(buf + ret, "outer vlan error\n");
+		break;
+	}
+	return ret;
+}
+
+static ssize_t outer_vlan_info_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = count;
+	u32 outer_vlan_type;
+
+	if (kstrtou32(buf, 0, &outer_vlan_type) != 0)
+		return -EINVAL;
+	if (outer_vlan_type < outer_vlan_type_max)
+		adapter->outer_vlan_type = outer_vlan_type;
+	else
+		ret = -EINVAL;
+	if (hw->ops.set_outer_vlan_type)
+		hw->ops.set_outer_vlan_type(hw, outer_vlan_type);
+
+	return ret;
+}
+
+static ssize_t tx_stags_info_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED)
+		ret += sprintf(buf + ret, "tx stags on\n");
+	else
+		ret += sprintf(buf + ret, "tx stags off\n");
+
+	ret += sprintf(buf + ret, "vid 0x%x\n", adapter->stags_vid);
+
+	return ret;
+}
+
+static ssize_t tx_stags_info_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	struct rnpgbe_eth_info *eth = &hw->eth;
+	int ret = count;
+	u16 tx_stags;
+
+	if (kstrtou16(buf, 0, &tx_stags) != 0)
+		return -EINVAL;
+	if (tx_stags < VLAN_N_VID)
+		adapter->stags_vid = tx_stags;
+	else
+		ret = -EINVAL;
+
+	eth->ops.set_vfta(eth, adapter->stags_vid, true);
+
+	return ret;
+}
+
+static ssize_t gephy_test_info_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	if (adapter->gephy_test_mode)
+		ret += sprintf(buf + ret, "gephy_test on: %d\n",
+			       adapter->gephy_test_mode);
+	else
+		ret += sprintf(buf + ret, "gephy_test off\n");
+
+	return ret;
+}
+
+static ssize_t gephy_test_info_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = count;
+	u32 test_mode;
+
+#define MAX_MODE (5)
+	if (kstrtou32(buf, 0, &test_mode) != 0)
+		return -EINVAL;
+	if (test_mode < MAX_MODE)
+		adapter->gephy_test_mode = test_mode;
+	else
+		ret = -EINVAL;
+
+	rnpgbe_mbx_gephy_test_set(hw, test_mode);
+
+	return ret;
+}
+
+static ssize_t tx_desc_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 tx_ring_num = adapter->sysfs_tx_ring_num;
+	u32 tx_desc_num = adapter->sysfs_tx_desc_num;
+	struct rnpgbe_ring *ring = adapter->tx_ring[tx_ring_num];
+	int ret = 0;
+	struct rnpgbe_tx_desc *desc;
+
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		ret += sprintf(buf + ret, "port not up\n");
+
+		return ret;
+	}
+
+	desc = RNP_TX_DESC(ring, tx_desc_num);
+	ret += sprintf(buf + ret, "tx ring %d desc %d:\n", tx_ring_num,
+		       tx_desc_num);
+	ret += print_desc(buf + ret, desc, sizeof(*desc));
+	ret += sprintf(buf + ret, "\n");
+
+	return ret;
+}
+
+static ssize_t tx_desc_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = count;
+	u32 tx_desc_num = adapter->sysfs_tx_desc_num;
+	u32 tx_ring_num = adapter->sysfs_tx_ring_num;
+	struct rnpgbe_ring *ring = adapter->tx_ring[tx_ring_num];
+
+	if (kstrtou32(buf, 0, &tx_desc_num) != 0)
+		return -EINVAL;
+	if (tx_desc_num < ring->count)
+		adapter->sysfs_tx_desc_num = tx_desc_num;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static ssize_t rx_ring_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 rx_ring_num = adapter->sysfs_rx_ring_num;
+	struct rnpgbe_ring *ring = adapter->rx_ring[rx_ring_num];
+	int ret = 0;
+	union rnpgbe_rx_desc *rx_desc;
+
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		ret += sprintf(buf + ret, "port not up\n");
+
+		return ret;
+	}
+
+	ret += sprintf(buf + ret, "queue %d info:\n", rx_ring_num);
+	ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use);
+	ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean);
+	rx_desc = RNP_RX_DESC(ring, ring->next_to_clean);
+	ret += sprintf(buf + ret, "next_to_clean desc: ");
+	ret += print_desc(buf + ret, rx_desc, sizeof(*rx_desc));
+	ret += sprintf(buf + ret, "\n");
+
+	return ret;
+}
+
+static ssize_t rx_ring_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = count;
+
+	u32 rx_ring_num = adapter->sysfs_rx_ring_num;
+
+	if (kstrtou32(buf, 0, &rx_ring_num) != 0)
+		return -EINVAL;
+	if (rx_ring_num < adapter->num_rx_queues)
+		adapter->sysfs_rx_ring_num = rx_ring_num;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static ssize_t tx_ring_info_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	u32 tx_ring_num = adapter->sysfs_tx_ring_num;
+	struct rnpgbe_ring *ring = adapter->tx_ring[tx_ring_num];
+	int ret = 0;
+	struct rnpgbe_tx_buffer *tx_buffer;
+	struct rnpgbe_tx_desc *eop_desc;
+
+	if (test_bit(__RNP_DOWN, &adapter->state)) {
+		ret += sprintf(buf + ret, "port not up \n");
+
+		return ret;
+	}
+
+	ret += sprintf(buf + ret, "queue %d info:\n", tx_ring_num);
+	ret += sprintf(buf + ret, "next_to_use %d\n", ring->next_to_use);
+	ret += sprintf(buf + ret, "next_to_clean %d\n", ring->next_to_clean);
+
+	tx_buffer = &ring->tx_buffer_info[ring->next_to_clean];
+	eop_desc = tx_buffer->next_to_watch;
+	/* print next_to_watch descriptor if one is pending */
+	if (eop_desc) {
+		ret += sprintf(buf + ret, "next_to_watch:\n");
+		ret += print_desc(buf + ret, eop_desc, sizeof(*eop_desc));
+		ret += sprintf(buf + ret, "\n");
+	} else {
+		ret += sprintf(buf + ret, "no next_to_watch data\n");
+	}
+
+	return ret;
+}
+
+static ssize_t tx_ring_info_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = count;
+
+	u32 tx_ring_num = adapter->sysfs_tx_ring_num;
+
+	if (kstrtou32(buf, 0, &tx_ring_num) != 0)
+		return -EINVAL;
+
+	if (tx_ring_num < adapter->num_tx_queues)
+		adapter->sysfs_tx_ring_num = tx_ring_num;
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static ssize_t active_vid_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	u16 vid;
+	u16 current_vid = 0;
+	int ret = 0;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u8 vfnum = hw->max_vfs - 1;
+	/* use the last VF's table entry */
+
+	if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) {
+		current_vid = rd32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port,
+								vfnum));
+	}
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) {
+		ret += sprintf(buf + ret, "%u%s ", vid,
+			       (current_vid == vid ? "*" : ""));
+	}
+	ret += sprintf(buf + ret, "\n");
+	return ret;
+}
+
+static ssize_t active_vid_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	u16 vid;
+	int err = -EINVAL;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	u8 vfnum = hw->max_vfs - 1;
+	/* use the last VF's table entry */
+	int port = 0;
+
+	if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED))
+		return -EIO;
+
+	if (kstrtou16(buf, 0, &vid) != 0)
+		return -EINVAL;
+
+	if ((vid < VLAN_N_VID) && test_bit(vid, adapter->active_vlans)) {
+		if (rd32(hw, RNP_DMA_VERSION) >= 0x20201231) {
+			for (port = 0; port < 4; port++)
+				wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(port, vfnum),
+				     vid);
+		} else {
+			wr32(hw, RNP_DMA_PORT_VEB_VID_TBL(adapter->port, vfnum),
+			     vid);
+		}
+		err = 0;
+	}
+
+	return err ? err : count;
+}
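+
+/*
+ * Usage sketch (interface name is only an example): writing an already
+ * active VID, e.g.
+ *   echo 10 > /sys/class/net/eth0/active_vid
+ * selects that VID in the last VF's VEB VID table entry; reading the file
+ * lists all active VIDs and marks the currently selected one with '*'.
+ */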
+
+static ssize_t port_idx_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	int ret = 0;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+
+	ret += sprintf(buf, "%d\n", adapter->portid_of_card);
+	return ret;
+}
+
+static DEVICE_ATTR(port_idx, 0444, port_idx_show, NULL);
+
+static ssize_t debug_link_stat_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	int ret = 0;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	ret += sprintf(buf, "%d %d dumy:0x%x up-flag:%d carry:%d\n",
+		       adapter->link_up, adapter->hw.link, rd32(hw, 0xc),
+		       adapter->flags & RNP_FLAG_NEED_LINK_UPDATE,
+		       netif_carrier_ok(netdev));
+	return ret;
+}
+
+static DEVICE_ATTR(debug_link_stat, 0444, debug_link_stat_show, NULL);
+
+static ssize_t pci_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	int err = -EINVAL;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int gen = 3, lanes = 8;
+
+	if (count > 30)
+		return -EINVAL;
+
+	if (sscanf(buf, "gen%dx%d", &gen, &lanes) != 2) {
+		printk(KERN_DEBUG "Error: invalid input. example: gen3x8\n");
+		return -EINVAL;
+	}
+	if (gen > 3 || lanes > 8)
+		return -EINVAL;
+
+	err = rnpgbe_set_lane_fun(hw, LANE_FUN_PCI_LANE, gen, lanes, 0, 0);
+
+	return err ? err : count;
+}
+
+static ssize_t pci_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	int ret = 0;
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+
+	if (rnpgbe_mbx_get_lane_stat(hw) != 0)
+		ret += sprintf(buf, " IO Error\n");
+	else
+		ret += sprintf(buf, "gen%dx%d\n", hw->pci_gen, hw->pci_lanes);
+
+	return ret;
+}
+
+static DEVICE_ATTR(pci, 0644, pci_show, pci_store);
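+
+/*
+ * Usage sketch for the "pci" attribute (interface name is only an example):
+ *   echo gen3x8 > /sys/class/net/eth0/pci    requests PCIe gen3, 8 lanes
+ *   cat /sys/class/net/eth0/pci              reports the current gen/lanes
+ */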
+
+static ssize_t temperature_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbe_hw *hw = &adapter->hw;
+	int ret = 0, temp = 0, voltage = 0;
+
+	temp = rnpgbe_mbx_get_temp(hw, &voltage);
+
+	ret += sprintf(buf, "temp:%d oC \n", temp);
+	return ret;
+}
+
+static struct pci_dev *pcie_find_root_port_old(struct pci_dev *dev)
+{
+	while (1) {
+		if (!pci_is_pcie(dev))
+			break;
+		if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+			return dev;
+		if (!dev->bus->self)
+			break;
+		dev = dev->bus->self;
+	}
+	return NULL;
+}
+
+static ssize_t root_slot_info_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_device(dev);
+	struct rnpgbe_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+	struct pci_dev *root_pdev = pcie_find_root_port_old(adapter->pdev);
+
+	if (root_pdev) {
+		ret += sprintf(buf + ret, "%02x:%02x.%x\n",
+			       root_pdev->bus->number,
+			       PCI_SLOT(root_pdev->devfn),
+			       PCI_FUNC(root_pdev->devfn));
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(root_slot_info, 0444, root_slot_info_show, NULL);
+static DEVICE_ATTR(temperature, 0444, temperature_show, NULL);
+static DEVICE_ATTR(active_vid, 0644, active_vid_show, active_vid_store);
+static DEVICE_ATTR(tx_ring_info, 0644, tx_ring_info_show, tx_ring_info_store);
+static DEVICE_ATTR(rx_ring_info, 0644, rx_ring_info_show, rx_ring_info_store);
+static DEVICE_ATTR(tx_desc_info, 0644, tx_desc_info_show, tx_desc_info_store);
+static DEVICE_ATTR(rx_desc_info, 0644, rx_desc_info_show, rx_desc_info_store);
+static DEVICE_ATTR(rx_drop_info, 0644, rx_drop_info_show, rx_drop_info_store);
+static DEVICE_ATTR(outer_vlan_info, 0644, outer_vlan_info_show,
+		   outer_vlan_info_store);
+static DEVICE_ATTR(tcp_sync_info, 0644, tcp_sync_info_show,
+		   tcp_sync_info_store);
+static DEVICE_ATTR(rx_skip_info, 0444, rx_skip_info_show, NULL);
+static DEVICE_ATTR(tx_stags_info, 0644, tx_stags_info_show,
+		   tx_stags_info_store);
+static DEVICE_ATTR(gephy_test_info, 0644, gephy_test_info_show,
+		   gephy_test_info_store);
+static DEVICE_ATTR(version_info, 0444, version_info_show, NULL);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_tx_stags_info.attr,
+	&dev_attr_gephy_test_info.attr,
+	&dev_attr_version_info.attr,
+	&dev_attr_root_slot_info.attr,
+	&dev_attr_active_vid.attr,
+	&dev_attr_rx_drop_info.attr,
+	&dev_attr_outer_vlan_info.attr,
+	&dev_attr_tcp_sync_info.attr,
+	&dev_attr_rx_skip_info.attr,
+	&dev_attr_tx_ring_info.attr,
+	&dev_attr_rx_ring_info.attr,
+	&dev_attr_tx_desc_info.attr,
+	&dev_attr_rx_desc_info.attr,
+	&dev_attr_port_idx.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_pci.attr,
+	&dev_attr_debug_link_stat.attr,
+	NULL,
+};
+
+static struct bin_attribute *dev_bin_attrs[] = {
+	&bin_attr_maintain,
+	NULL,
+};
+
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+	.bin_attrs = dev_bin_attrs,
+};
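+
+/*
+ * Note: dev_attr_grp is registered on the netdev kobject in
+ * rnpgbe_sysfs_init() below, so each attribute appears as a file under
+ * the interface's sysfs directory (e.g. /sys/class/net/<iface>/pci).
+ */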
+
+static void
+rnpgbe_sysfs_del_adapter(struct rnpgbe_adapter __maybe_unused *adapter)
+{
+}
+
+/* called from rnpgbe_main.c */
+void rnpgbe_sysfs_exit(struct rnpgbe_adapter *adapter)
+{
+	rnpgbe_sysfs_del_adapter(adapter);
+	sysfs_remove_group(&adapter->netdev->dev.kobj, &dev_attr_grp);
+	if (adapter->maintain_buf) {
+		kfree(adapter->maintain_buf);
+		adapter->maintain_buf = NULL;
+		adapter->maintain_buf_len = 0;
+	}
+}
+
+/* called from rnpgbe_main.c */
+int rnpgbe_sysfs_init(struct rnpgbe_adapter *adapter)
+{
+	int rc = 0;
+	int flag;
+#ifdef RNPGBE_HWMON
+	struct hwmon_buff *rnpgbe_hwmon;
+	struct device *hwmon_dev;
+	unsigned int i;
+#endif /* RNPGBE_HWMON */
+
+	flag = sysfs_create_group(&adapter->netdev->dev.kobj, &dev_attr_grp);
+	if (flag != 0) {
+		dev_err(&adapter->netdev->dev,
+			"sysfs_create_group faild:flag:%d\n", flag);
+		return flag;
+	}
+#ifdef RNPGBE_HWMON
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.ops.init_thermal_sensor_thresh == NULL)
+		goto no_thermal;
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	if (adapter->hw.ops.init_thermal_sensor_thresh(&adapter->hw))
+		goto no_thermal;
+
+	rnpgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*rnpgbe_hwmon),
+				    GFP_KERNEL);
+
+	if (!rnpgbe_hwmon) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	adapter->rnpgbe_hwmon_buff = rnpgbe_hwmon;
+
+	for (i = 0; i < RNPGBE_MAX_SENSORS; i++) {
+		/*
+		 * Only create hwmon sysfs entries for sensors that
+		 * report meaningful data.
+		 */
+		if (adapter->hw.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* Bail if any hwmon attr struct fails to initialize */
+		rc = rnpgbe_add_hwmon_attr(adapter, i,
+					   RNPGBE_HWMON_TYPE_CAUTION);
+		if (rc)
+			goto err;
+		rc = rnpgbe_add_hwmon_attr(adapter, i, RNPGBE_HWMON_TYPE_LOC);
+		if (rc)
+			goto err;
+		rc = rnpgbe_add_hwmon_attr(adapter, i, RNPGBE_HWMON_TYPE_TEMP);
+		if (rc)
+			goto err;
+		rc = rnpgbe_add_hwmon_attr(adapter, i, RNPGBE_HWMON_TYPE_MAX);
+		if (rc)
+			goto err;
+	}
+
+	rnpgbe_hwmon->groups[0] = &rnpgbe_hwmon->group;
+	rnpgbe_hwmon->group.attrs = rnpgbe_hwmon->attrs;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(
+		&adapter->pdev->dev, "rnpgbe", rnpgbe_hwmon, rnpgbe_hwmon->groups);
+
+	if (IS_ERR(hwmon_dev)) {
+		rc = PTR_ERR(hwmon_dev);
+		goto exit;
+	}
+no_thermal:
+#endif /* RNPGBE_HWMON */
+	goto exit;
+
+err:
+	rnpgbe_sysfs_exit(adapter);
+exit:
+	return rc;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h
new file mode 100644
index 0000000000000..2d81885bda4aa
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_type.h
@@ -0,0 +1,1362 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBE_TYPE_H_
+#define _RNPGBE_TYPE_H_
+
+#include 
+#include 
+#include 
+
+/* #define OPTM_WITH_LPAGE */
+
+#if defined(CONFIG_MGBE_OPTM_WITH_LPAGE) && !defined(OPTM_WITH_LPAGE)
+#define OPTM_WITH_LPAGE
+#endif
+
+#if defined(CONFIG_MXGBE_MSIX_COUNT)
+#define RNP_N10_MSIX_VECTORS CONFIG_MXGBE_MSIX_COUNT
+#endif
+
+#if IS_ENABLED(CONFIG_SYSFS)
+#ifndef RNPGBE_SYSFS
+#define RNPGBE_SYSFS
+#endif /* RNPGBE_SYSFS */
+#endif /* CONFIG_SYSFS */
+#if IS_ENABLED(CONFIG_HWMON)
+#ifndef RNPGBE_HWMON
+#define RNPGBE_HWMON
+#endif /* RNPGBE_HWMON */
+#endif /* CONFIG_HWMON */
+/* #define DISABLE_PACKET_SPLIT */
+#ifdef CONFIG_DEBUG_FS
+#define HAVE_RNP_DEBUG_FS
+#endif /* CONFIG_DEBUG_FS */
+
+#if (PAGE_SIZE < 8192)
+/* OPTM_WITH_LPAGE cannot be used when PAGE_SIZE is smaller than 8192 */
+#ifdef OPTM_WITH_LPAGE
+#undef OPTM_WITH_LPAGE
+#endif
+#endif
+
+#include "rnpgbe_regs.h"
+
+static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
+{
+	return &pdev->dev;
+}
+
+/* Device IDs */
+#define PCI_VENDOR_ID_MUCSE 0x8848
+#define PCI_DEVICE_ID_N10_PF0 0x1000
+#define PCI_DEVICE_ID_N10_PF1 0x1001
+
+#define RNP_DEV_ID_N10_PF0 0x7001
+#define RNP_DEV_ID_N10_PF1 0x7002
+
+#define PCI_DEVICE_ID_N10 0x1000
+#define PCI_DEVICE_ID_N10C 0x1C00
+#define PCI_DEVICE_ID_N400 0x1001
+#define PCI_DEVICE_ID_N500_QUAD_PORT 0x8308
+#define PCI_DEVICE_ID_N500_DUAL_PORT 0x8318
+#define PCI_DEVICE_ID_N500_VF 0x8309
+#define PCI_DEVICE_ID_N210 0x8208
+/* Wake Up Control */
+#define RNP_WUC_PME_EN 0x00000002 /* PME Enable */
+#define RNP_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define RNP_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion  */
+
+/* Wake Up Filter Control */
+#define RNP_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define RNP_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define RNP_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define RNP_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
+#define RNP_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define RNP_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
+#define RNP_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define RNP_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define RNP_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define RNP_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */
+#define RNP_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define RNP_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define RNP_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define RNP_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define RNP_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define RNP_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define RNP_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
+#define RNP_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */
+#define RNP_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */
+#define RNP_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset assertion */
+/* Mask for Ext. flex filters */
+#define RNP_WUFC_EXT_FLX_FILTERS 0x00300000
+#define RNP_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */
+#define RNP_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */
+#define RNP_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */
+#define RNP_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL 0x0020
+#define ADVERTISE_2500_HALF 0x0040 /* NOT used, just FYI */
+#define ADVERTISE_2500_FULL 0x0080
+
+#define RNPGBE_MAX_SENSORS 1
+struct rnpgbe_thermal_diode_data {
+	unsigned int location;
+	unsigned int temp;
+	unsigned int caution_thresh;
+	unsigned int max_op_thresh;
+};
+
+struct rnpgbe_thermal_sensor_data {
+	struct rnpgbe_thermal_diode_data sensor[RNPGBE_MAX_SENSORS];
+};
+
+/* Proxy Status */
+#define RNP_PROXYS_EX 0x00000004 /* Exact packet received */
+#define RNP_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter match received */
+#define RNP_PROXYS_NS 0x00000200 /* IPV6 NS received */
+#define RNP_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */
+#define RNP_PROXYS_ARP 0x00000800 /* ARP request packet received */
+#define RNP_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */
+
+/* Proxying Filter Control */
+#define RNP_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */
+#define RNP_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */
+#define RNP_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */
+#define RNP_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */
+#define RNP_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */
+#define RNP_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */
+#define RNP_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */
+
+#define RNP_WUPL_LENGTH_MASK 0xFFFF
+
+/* max 4 in n10 */
+#define RNP_MAX_TRAFFIC_CLASS 4
+#define TSRN10_TX_DEFAULT_BURST 8
+
+#ifndef TSRN10_RX_DEFAULT_BURST
+#define TSRN10_RX_DEFAULT_BURST 16
+#endif
+
+#ifndef TSRN10_RX_DEFAULT_LINE
+#define TSRN10_RX_DEFAULT_LINE 32
+#endif
+
+#ifndef RNP_PKT_TIMEOUT
+#define RNP_PKT_TIMEOUT 30
+#endif
+
+#ifndef RNP_RX_PKT_POLL_BUDGET
+#define RNP_RX_PKT_POLL_BUDGET 64
+#endif
+
+#ifndef RNP_TX_PKT_POLL_BUDGET
+#define RNP_TX_PKT_POLL_BUDGET 0x30
+#endif
+
+#ifndef RNP_PKT_TIMEOUT_TX
+#define RNP_PKT_TIMEOUT_TX 200
+#endif
+/* VF Device IDs */
+#define RNP_DEV_ID_N10_PF0_VF 0x8001
+#define RNP_DEV_ID_N10_PF1_VF 0x8002
+
+#define RNP_DEV_ID_N10_PF0_VF_N 0x1010
+#define RNP_DEV_ID_N10_PF1_VF_N 0x1011
+
+/* Transmit Descriptor - Advanced */
+struct rnpgbe_tx_desc {
+	union {
+		__le64 pkt_addr; // Packet buffer address
+		struct {
+			__le32 adr_lo;
+			__le32 adr_hi;
+		};
+	};
+	union {
+		__le64 vlan_cmd_bsz;
+		struct {
+			__le32 blen_mac_ip_len;
+			__le32 vlan_cmd;
+		};
+	};
+#define RNP_TXD_FLAGS_VLAN_PRIO_MASK 0xe000
+#define RNP_TX_FLAGS_VLAN_PRIO_SHIFT 13
+#define RNP_TX_FLAGS_VLAN_CFI_SHIFT 12
+
+#define RNP_TXD_VLAN_VALID (0x80000000)
+#define RNP_TXD_SVLAN_TYPE (0x02000000)
+#define RNP_TXD_VLAN_CTRL_NOP (0x00 << 13)
+#define RNP_TXD_VLAN_CTRL_RM_VLAN (0x20000000)
+#define RNP_TXD_VLAN_CTRL_INSERT_VLAN (0x40000000)
+
+#define RNP_TXD_L4_CSUM (0x10000000) //udp tcp sctp csum
+#define RNP_TXD_IP_CSUM (0x8000000)
+#define RNP_TXD_TUNNEL_MASK (0x3000000)
+#define RNP_TXD_TUNNEL_VXLAN (0x1000000)
+#define RNP_TXD_TUNNEL_NVGRE (0x2000000)
+#define RNP_TXD_L4_TYPE_UDP (0xc00000)
+#define RNP_TXD_L4_TYPE_TCP (0x400000)
+#define RNP_TXD_L4_TYPE_SCTP (0x800000)
+#define RNP_TXD_FLAG_IPv4 (0)
+#define RNP_TXD_FLAG_IPv6 (0x200000)
+#define RNP_TXD_FLAG_TSO (0x100000)
+#define RNP_TXD_FLAG_PTP (0x4000000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_CMD_INNER_VLAN (0x08000000)
+#define RNP_TXD_STAT_DD (0x020000)
+#define RNP_TXD_CMD_EOP (0x010000)
+#define RNP_TXD_PAD_CTRL (0x01000000)
+};
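+
+/*
+ * Illustrative only (not taken from the hardware datasheet): a
+ * checksum-offloaded TCP data descriptor would typically OR together, in
+ * vlan_cmd,
+ *   RNP_TXD_IP_CSUM | RNP_TXD_L4_CSUM | RNP_TXD_L4_TYPE_TCP |
+ *   RNP_TXD_CMD_RS | RNP_TXD_CMD_EOP
+ * while blen_mac_ip_len carries the buffer length together with the
+ * MAC/IP header lengths.
+ */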
+
+struct rnpgbe_tx_ctx_desc {
+	__le32 mss_len_vf_num;
+	__le32 inner_vlan_tunnel_len;
+#define VF_VEB_MARK (1 << 24) //bit 56
+#define VF_VEB_IGNORE_VLAN (1 << 25) //bit 57
+	__le32 resv;
+	__le32 resv_cmd;
+#define RNP_TXD_FLAG_TO_RPU (1 << 15)
+#define RNP_TXD_SMAC_CTRL_NOP (0x00 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR0 (0x02 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR1 (0x06 << 12)
+#define RNP_TXD_CTX_VLAN_CTRL_NOP (0x00 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_RM_VLAN (0x01 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_INSERT_VLAN (0x02 << 10)
+#define RNP_TXD_MTI_CRC_PAD_CTRL (0x01000000)
+#define RNP_TXD_CTX_CTRL_DESC (0x080000)
+#define RNP_TXD_CMD_RS (0x040000)
+#define RNP_TXD_STAT_DD (0x020000)
+};
+
+/* Receive Descriptor - Advanced */
+union rnpgbe_rx_desc {
+	struct {
+		union {
+			__le64 pkt_addr; /* Packet buffer address */
+			struct {
+				__le32 addr_lo;
+				__le32 addr_hi;
+			};
+		};
+		__le64 resv_cmd;
+#define RNP_RXD_FLAG_RS (0)
+	};
+
+	struct {
+		__le32 rss_hash;
+		__le16 mark;
+		__le16 rev1;
+#define RNP_RX_L3_TYPE_MASK (1 << 15) // 1 is ipv4
+#define VEB_VF_PKG (1 << 1) // bit 49
+#define VEB_VF_IGNORE_VLAN (1 << 0) //bit 48
+#define REV_OUTER_VLAN (1 << 5)
+		__le16 len;
+		__le16 padding_len;
+		__le16 vlan;
+		__le16 cmd;
+#define RNP_RXD_STAT_VLAN_VALID (1 << 15)
+#define RNP_RXD_STAT_STAG (0x01 << 14)
+#define RNP_RXD_STAT_TUNNEL_NVGRE (0x02 << 13)
+#define RNP_RXD_STAT_TUNNEL_VXLAN (0x01 << 13)
+#define RNP_RXD_STAT_TUNNEL_MASK (0x03 << 13)
+#define RNP_RXD_STAT_ERR_MASK (0x1f << 8)
+#define RNP_RXD_STAT_SCTP_MASK (0x04 << 8)
+#define RNP_RXD_STAT_L4_MASK (0x02 << 8)
+#define RNP_RXD_STAT_L4_SCTP (0x02 << 6)
+#define RNP_RXD_STAT_L4_TCP (0x01 << 6)
+#define RNP_RXD_STAT_L4_UDP (0x03 << 6)
+#define RNP_RXD_STAT_IPV6 (1 << 5)
+#define RNP_RXD_STAT_IPV4 (0 << 5)
+#define RNP_RXD_STAT_PTP (1 << 4)
+#define RNP_RXD_STAT_DD (1 << 1)
+#define RNP_RXD_STAT_EOP (1 << 0)
+	} wb;
+} __packed;
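+
+/*
+ * Minimal sketch of consuming a write-back descriptor (assumes the usual
+ * next_to_clean flow; not a complete implementation):
+ *
+ *	union rnpgbe_rx_desc *rx_desc = RNP_RX_DESC(ring, ring->next_to_clean);
+ *
+ *	if (le16_to_cpu(rx_desc->wb.cmd) & RNP_RXD_STAT_DD) {
+ *		u16 pkt_len = le16_to_cpu(rx_desc->wb.len);
+ *		...descriptor is complete, hand pkt_len bytes up the stack...
+ *	}
+ */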
+
+/* Host Interface Command Structures */
+struct rnpgbe_hic_hdr {
+	u8 cmd;
+	u8 buf_len;
+	union {
+		u8 cmd_resv;
+		u8 ret_status;
+	} cmd_or_resp;
+	u8 checksum;
+};
+
+struct rnpgbe_hic_drv_info {
+	struct rnpgbe_hic_hdr hdr;
+	u8 port_num;
+	u8 ver_sub;
+	u8 ver_build;
+	u8 ver_min;
+	u8 ver_maj;
+	u8 pad; /* end spacing to ensure length is mult. of dword */
+	u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* Context descriptors */
+struct rnpgbe_adv_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+/* RAH */
+#define RNP_RAH_VIND_MASK 0x003C0000
+#define RNP_RAH_VIND_SHIFT 18
+#define RNP_RAH_AV 0x80000000
+#define RNP_CLEAR_VMDQ_ALL 0xFFFFFFFF
+
+/* Autonegotiation advertised speeds */
+typedef u32 rnpgbe_autoneg_advertised;
+/* Link speed */
+typedef u32 rnpgbe_link_speed;
+#define RNP_LINK_SPEED_UNKNOWN 0
+#define RNP_LINK_SPEED_10_FULL BIT(2)
+#define RNP_LINK_SPEED_100_FULL BIT(3)
+#define RNP_LINK_SPEED_1GB_FULL BIT(4)
+#define RNP_LINK_SPEED_10GB_FULL BIT(5)
+#define RNP_LINK_SPEED_40GB_FULL BIT(6)
+#define RNP_LINK_SPEED_25GB_FULL BIT(7)
+#define RNP_LINK_SPEED_50GB_FULL BIT(8)
+#define RNP_LINK_SPEED_100GB_FULL BIT(9)
+#define RNP_LINK_SPEED_10_HALF BIT(10)
+#define RNP_LINK_SPEED_100_HALF BIT(11)
+#define RNP_LINK_SPEED_1GB_HALF BIT(12)
+#define RNP_SFP_MODE_10G_LR BIT(13)
+#define RNP_SFP_MODE_10G_SR BIT(14)
+#define RNP_SFP_MODE_10G_LRM BIT(15)
+#define RNP_SFP_MODE_1G_T BIT(16)
+#define RNP_SFP_MODE_1G_KX BIT(17)
+#define RNP_SFP_MODE_1G_SX BIT(18)
+#define RNP_SFP_MODE_1G_LX BIT(19)
+#define RNP_SFP_MODE_40G_SR4 BIT(20)
+#define RNP_SFP_MODE_40G_CR4 BIT(21)
+#define RNP_SFP_MODE_40G_LR4 BIT(22)
+#define RNP_SFP_MODE_1G_CX BIT(23)
+#define RNP_SFP_MODE_10G_BASE_T BIT(24)
+#define RNP_SFP_MODE_FIBER_CHANNEL_SPEED BIT(25) // sfp-a0-10 != 0
+#define RNP_SFP_CONNECTOR_DAC BIT(26)
+#define RNP_SFP_TO_SGMII BIT(27)
+#define RNP_SFP_25G_SR BIT(28)
+#define RNP_SFP_25G_KR BIT(29)
+#define RNP_SFP_25G_CR BIT(30)
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+enum rnpgbe_atr_flow_type {
+	RNP_ATR_FLOW_TYPE_IPV4 = 0x0,
+	RNP_ATR_FLOW_TYPE_UDPV4 = 0x1,
+	RNP_ATR_FLOW_TYPE_TCPV4 = 0x2,
+	RNP_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+	RNP_ATR_FLOW_TYPE_IPV6 = 0x4,
+	RNP_ATR_FLOW_TYPE_UDPV6 = 0x5,
+	RNP_ATR_FLOW_TYPE_TCPV6 = 0x6,
+	RNP_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+	RNP_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+	RNP_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+	RNP_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+	RNP_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+	RNP_ATR_FLOW_TYPE_ETHER = 0x18,
+	RNP_ATR_FLOW_TYPE_USERDEF = 0x19,
+};
+
+#define RNP_FDIR_DROP_QUEUE (200)
+
+enum {
+	fdir_mode_tcam = 0,
+	fdir_mode_tuple5,
+};
+/* Flow Director ATR input struct. */
+union rnpgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool      - 1 byte
+	 * flow_type    - 1 byte
+	 * vlan_id      - 2 bytes
+	 * src_ip       - 16 bytes
+	 * inner_mac    - 6 bytes
+	 * cloud_mode   - 2 bytes
+	 * tni_vni      - 4 bytes
+	 * dst_ip       - 16 bytes
+	 * src_port     - 2 bytes
+	 * dst_port     - 2 bytes
+	 * flex_bytes   - 2 bytes
+	 * bkt_hash     - 2 bytes
+	 */
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 dst_ip_mask[4];
+		__be32 src_ip[4];
+		__be32 src_ip_mask[4];
+		u8 inner_mac[6];
+		u8 inner_mac_mask[6];
+		__be16 tunnel_type;
+		__be32 tni_vni;
+		__be16 src_port;
+		__be16 src_port_mask;
+		__be16 dst_port;
+		__be16 dst_port_mask;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	struct {
+		u8 vm_poll;
+		u8 flow_type;
+		u16 vlan_id;
+		__be16 proto;
+		__be16 resv;
+		__be32 nouse[12];
+	} layer2_formate;
+	__be32 dword_stream[14];
+};
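+
+/*
+ * Minimal sketch of filling a TCPv4 5-tuple filter (addresses and ports
+ * are arbitrary example values):
+ *
+ *	union rnpgbe_atr_input input = { 0 };
+ *
+ *	input.formatted.flow_type = RNP_ATR_FLOW_TYPE_TCPV4;
+ *	input.formatted.src_ip[0] = cpu_to_be32(0xc0a80101);  (192.168.1.1)
+ *	input.formatted.dst_ip[0] = cpu_to_be32(0xc0a80102);  (192.168.1.2)
+ *	input.formatted.src_port = cpu_to_be16(1024);
+ *	input.formatted.dst_port = cpu_to_be16(80);
+ *
+ * The filled union can then be handed to the tuple5 remapping ops
+ * declared later in this header.
+ */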
+
+/* BitTimes (BT) conversion */
+#define RNP_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
+#define RNP_B2BT(BT) ((BT) * 8)
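+
+/*
+ * Worked example (illustrative): RNP_B2BT(512) = 4096 bit times, and
+ * RNP_BT2KB(4096) = (4096 + 8191) / 8192 = 1, i.e. B2BT converts bytes to
+ * bit times and BT2KB rounds bit times up to whole kilobytes.
+ */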
+
+/* Calculate Delay to respond to PFC */
+#define RNP_PFC_D 672
+
+/* Calculate Cable Delay */
+#define RNP_CABLE_DC 5556 /* Delay Copper */
+#define RNP_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define RNP_PHY_DC 25600 /* Delay 10G BASET */
+#define RNP_MAC_DC 8192 /* Delay Copper XAUI interface */
+#define RNP_XAUI_DC (2 * 2048) /* Delay Copper Phy */
+
+#define RNP_ID_X540 (RNP_MAC_DC + RNP_XAUI_DC + RNP_PHY_DC)
+
+/* Calculate Interface Delay 82598, n10 */
+#define RNP_PHY_D 12800
+#define RNP_MAC_D 4096
+#define RNP_XAUI_D (2 * 1024)
+
+/* PHY MDI STANDARD CONFIG */
+#define RNP_MDI_PHY_ID1_OFFSET 2
+#define RNP_MDI_PHY_ID2_OFFSET 3
+#define RNP_MDI_PHY_ID_MASK 0xFFFFFC00U
+#define RNP_MDI_PHY_SPEED_SELECT1 0x0040
+#define RNP_MDI_PHY_DUPLEX 0x0100
+#define RNP_MDI_PHY_RESTART_AN 0x0200
+#define RNP_MDI_PHY_ANE 0x1000
+#define RNP_MDI_PHY_SPEED_SELECT0 0x2000
+#define RNP_MDI_PHY_RESET
+
+#define NGBE_PHY_RST_WAIT_PERIOD 50
+
+#define RNP_ID (RNP_MAC_D + RNP_XAUI_D + RNP_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define RNP_HD 6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define RNP_PCI_DELAY 10000
+
+/* Flow Director compressed ATR hash input struct */
+union rnpgbe_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
+};
+
+enum rnpgbe_eeprom_type {
+	rnpgbe_eeprom_uninitialized = 0,
+	rnpgbe_eeprom_spi,
+	rnpgbe_flash,
+	rnpgbe_eeprom_none /* No NVM support */
+};
+
+enum mac_type {
+	mac_dwc_xlg,
+	mac_dwc_g,
+};
+
+enum rnpgbe_mac_type {
+	rnpgbe_mac_unknown = 0,
+	rnpgbe_mac_n10g_x8_40G,
+	rnpgbe_mac_n10g_x2_10G,
+	rnpgbe_mac_n10g_x4_10G,
+	rnpgbe_mac_n10g_x8_10G,
+	rnpgbe_mac_n10l_x8_1G,
+	rnpgbe_num_macs
+};
+
+enum rnpgbe_rss_type {
+	rnpgbe_rss_uv440 = 0,
+	rnpgbe_rss_uv3p,
+	rnpgbe_rss_n10,
+	rnpgbe_rss_n20,
+	rnpgbe_rss_n500
+};
+
+enum rnpgbe_hw_type {
+	rnpgbe_hw_uv440 = 0,
+	rnpgbe_hw_uv3p,
+	rnpgbe_hw_n10,
+	rnpgbe_hw_n20,
+	rnpgbe_hw_n400,
+	rnpgbe_hw_n500,
+	rnpgbe_hw_n210,
+};
+
+enum rnpgbe_eth_type { rnpgbe_eth_n10 = 0, rnpgbe_eth_n500 };
+
+enum rnpgbe_phy_type {
+	rnpgbe_phy_unknown = 0,
+	rnpgbe_phy_none,
+	rnpgbe_phy_sfp,
+	rnpgbe_phy_sfp_unsupported,
+	rnpgbe_phy_generic,
+	rnpgbe_phy_sfp_unknown,
+	rnpgbe_phy_sgmii,
+};
+
+enum rnpgbe_sfp_type {
+	rnpgbe_sfp_type_da_cu = 0,
+	rnpgbe_sfp_type_sr = 1,
+	rnpgbe_sfp_type_lr = 2,
+	rnpgbe_sfp_type_da_cu_core0 = 3,
+	rnpgbe_sfp_type_da_cu_core1 = 4,
+	rnpgbe_sfp_type_srlr_core0 = 5,
+	rnpgbe_sfp_type_srlr_core1 = 6,
+	rnpgbe_sfp_type_da_act_lmt_core0 = 7,
+	rnpgbe_sfp_type_da_act_lmt_core1 = 8,
+	rnpgbe_sfp_type_1g_cu_core0 = 9,
+	rnpgbe_sfp_type_1g_cu_core1 = 10,
+	rnpgbe_sfp_type_1g_sx_core0 = 11,
+	rnpgbe_sfp_type_1g_sx_core1 = 12,
+	rnpgbe_sfp_type_1g_lx_core0 = 13,
+	rnpgbe_sfp_type_1g_lx_core1 = 14,
+	rnpgbe_sfp_type_not_present = 0xFFFE,
+	rnpgbe_sfp_type_unknown = 0xFFFF
+};
+
+enum rnpgbe_media_type {
+	rnpgbe_media_type_unknown = 0,
+	rnpgbe_media_type_fiber,
+	rnpgbe_media_type_copper,
+	rnpgbe_media_type_backplane,
+	rnpgbe_media_type_cx4,
+	rnpgbe_media_type_da,
+	rnpgbe_media_type_virtual
+};
+
+/* Flow Control Settings */
+enum rnpgbe_fc_mode {
+	rnpgbe_fc_none = 0,
+	rnpgbe_fc_rx_pause,
+	rnpgbe_fc_tx_pause,
+	rnpgbe_fc_full,
+	rnpgbe_fc_default
+};
+
+#define PAUSE_TX (0x1)
+#define PAUSE_RX (0x2)
+#define PAUSE_AUTO (0x10)
+
+#define ASYM_PAUSE BIT(11)
+#define SYM_PAUSE BIT(10)
+
+struct rnpgbe_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool uc_set_promisc;
+	bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct rnpgbe_bus_info {
+	u16 func;
+	u16 lan_id;
+};
+
+/* Flow control parameters */
+struct rnpgbe_fc_info {
+	u32 high_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
+	u32 low_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+	enum rnpgbe_fc_mode current_mode; /* FC mode in effect */
+	u32 requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct rnpgbe_hw_stats {
+	u64 dma_to_dma;
+	u64 dma_to_switch;
+	u64 mac_to_mac;
+	u64 switch_to_switch;
+	u64 mac_to_dma;
+	u64 switch_to_dma;
+	u64 vlan_add_cnt;
+	u64 vlan_strip_cnt;
+	/* error counters */
+	u64 invalid_droped_packets;
+	u64 filter_dropped_packets;
+	/* drop counters */
+	u64 rx_capabity_lost;
+	u64 host_l2_match_drop;
+	u64 redir_input_match_drop;
+	u64 redir_etype_match_drop;
+	u64 redir_tcp_syn_match_drop;
+	u64 redir_tuple5_match_drop;
+	u64 redir_tcam_match_drop;
+
+	u64 bmc_dropped_packets;
+	u64 switch_dropped_packets;
+	/* rx counters */
+	u64 dma_to_host;
+	/* dma-tx counters */
+	u64 port0_tx_packets;
+	u64 port1_tx_packets;
+	u64 port2_tx_packets;
+	u64 port3_tx_packets;
+	/* emac in0-in3 tx counters */
+	u64 in0_tx_pkts;
+	u64 in1_tx_pkts;
+	u64 in2_tx_pkts;
+	u64 in3_tx_pkts;
+	/* phy tx counters */
+	u64 port0_to_phy_pkts;
+	u64 port1_to_phy_pkts;
+	u64 port2_to_phy_pkts;
+	u64 port3_to_phy_pkts;
+	/* mac rx counters */
+	u64 mac_rx_broadcast;
+	u64 mac_rx_multicast;
+	u64 tx_broadcast;
+	u64 tx_multicast;
+	/* n500 only */
+	u64 ultra_short_cnt;
+	u64 jumbo_cnt;
+
+	u64 dma_rx_drop_cnt_0;
+	u64 dma_rx_drop_cnt_1;
+	u64 dma_rx_drop_cnt_2;
+	u64 dma_rx_drop_cnt_3;
+	u64 dma_rx_drop_cnt_4;
+	u64 dma_rx_drop_cnt_5;
+	u64 dma_rx_drop_cnt_6;
+	u64 dma_rx_drop_cnt_7;
+	u64 tx_pause;
+	u64 rx_pause;
+};
+
+/* forward declaration */
+struct rnpgbe_hw;
+struct rnpgbe_eth_info;
+struct rnpgbe_dma_info;
+struct rnpgbe_mac_info;
+
+/* iterator type for walking multicast address lists */
+typedef u8 *(*rnpgbe_mc_addr_itr)(struct rnpgbe_hw *hw, u8 **mc_addr_ptr,
+				  u32 *vmdq);
+
+/* Function pointer table */
+struct rnpgbe_eeprom_operations {
+	s32 (*init_params)(struct rnpgbe_hw *hw);
+	//	s32 (*read)(struct rnpgbe_hw *hw, u16, u16 *);
+	//	s32 (*read_buffer)(struct rnpgbe_hw *hw, u16, u16, u16 *);
+	//	s32 (*write)(struct rnpgbe_hw *hw, u16, u16);
+	//	s32 (*write_buffer)(struct rnpgbe_hw *hw, u16, u16, u16 *);
+	//	s32 (*validate_checksum)(struct rnpgbe_hw *hw, u16 *);
+	s32 (*update_checksum)(struct rnpgbe_hw *hw);
+	u16 (*calc_checksum)(struct rnpgbe_hw *hw);
+};
+
+/* add nic operations */
+struct rnpgbe_eth_operations {
+	/* RAR, Multicast, VLAN */
+	s32 (*get_mac_addr)(struct rnpgbe_eth_info *eth, u8 *addr);
+	s32 (*set_rar)(struct rnpgbe_eth_info *eth, u32 index, u8 *addr,
+		       bool enable_addr);
+	s32 (*clear_rar)(struct rnpgbe_eth_info *eth, u32 index);
+	s32 (*set_vmdq)(struct rnpgbe_eth_info *eth, u32 rar, u32 vmdq);
+	s32 (*clear_vmdq)(struct rnpgbe_eth_info *eth, u32 rar, u32 vmdq);
+
+	s32 (*update_mc_addr_list)(struct rnpgbe_eth_info *eth,
+				   struct net_device *netdev, bool sriov_on);
+	void (*clr_mc_addr)(struct rnpgbe_eth_info *eth);
+
+	int (*set_rss_hfunc)(struct rnpgbe_eth_info *eth, int hfunc);
+	void (*set_rss_key)(struct rnpgbe_eth_info *eth, bool sriov_flag);
+	void (*set_rss_table)(struct rnpgbe_eth_info *eth);
+	void (*set_rx_hash)(struct rnpgbe_eth_info *eth, bool status,
+			    bool sriov_flag);
+
+	// ntuple function
+	void (*set_layer2_remapping)(struct rnpgbe_eth_info *eth,
+				     union rnpgbe_atr_input *input, u16 pri_id,
+				     u8 queue, bool prio_flag);
+	void (*clr_layer2_remapping)(struct rnpgbe_eth_info *eth, u16 pri_id);
+	void (*clr_all_layer2_remapping)(struct rnpgbe_eth_info *eth);
+	void (*set_tuple5_remapping)(struct rnpgbe_eth_info *eth,
+				     union rnpgbe_atr_input *input, u16 pri_id,
+				     u8 queue, bool prio_flag);
+	void (*clr_tuple5_remapping)(struct rnpgbe_eth_info *eth, u16 pri_id);
+	void (*clr_all_tuple5_remapping)(struct rnpgbe_eth_info *eth);
+	void (*set_tcp_sync_remapping)(struct rnpgbe_eth_info *eth, int queue,
+				       bool flag, bool prio);
+	void (*set_rx_skip)(struct rnpgbe_eth_info *eth, int count, bool flag);
+
+	void (*set_min_max_packet)(struct rnpgbe_eth_info *eth, int min,
+				   int max);
+	void (*set_vlan_strip)(struct rnpgbe_eth_info *eth, u16 queue,
+			       bool enable);
+	s32 (*set_vfta)(struct rnpgbe_eth_info *eth, u32 vlan, bool vlan_on);
+	void (*clr_vfta)(struct rnpgbe_eth_info *eth);
+	void (*set_vlan_filter)(struct rnpgbe_eth_info *eth, bool status);
+	void (*set_outer_vlan_type)(struct rnpgbe_eth_info *eth, int type);
+	void (*set_double_vlan)(struct rnpgbe_eth_info *eth, bool on);
+	void (*set_vxlan_port)(struct rnpgbe_eth_info *eth, u32 port);
+	void (*set_vxlan_mode)(struct rnpgbe_eth_info *eth, bool inner);
+	s32 (*set_fc_mode)(struct rnpgbe_eth_info *eth);
+
+	void (*set_rx)(struct rnpgbe_eth_info *eth, bool status);
+	void (*set_fcs)(struct rnpgbe_eth_info *eth, bool status);
+
+	void (*set_vf_vlan_mode)(struct rnpgbe_eth_info *eth, u16 vlan, int vf,
+				 bool enable);
+};
+
+enum {
+	rnpgbe_driver_insmod,
+	rnpgbe_driver_suspuse,
+	rnpgbe_driver_force_control_phy,
+};
+
+struct rnpgbe_hw_operations {
+	s32 (*init_hw)(struct rnpgbe_hw *hw);
+	s32 (*reset_hw)(struct rnpgbe_hw *hw);
+	s32 (*start_hw)(struct rnpgbe_hw *hw);
+
+	void (*set_mtu)(struct rnpgbe_hw *hw, int mtu);
+	void (*set_vlan_filter_en)(struct rnpgbe_hw *hw, bool enable);
+	void (*set_vlan_filter)(struct rnpgbe_hw *hw, u16 vid, bool enable,
+				bool sriov_flag);
+	int (*set_veb_vlan_mask)(struct rnpgbe_hw *hw, u16 vid, int vf,
+				 bool enable);
+	void (*set_vf_vlan_filter)(struct rnpgbe_hw *hw, u16 vid, int vf,
+				   bool enable, bool veb_only);
+	void (*clr_vfta)(struct rnpgbe_hw *hw);
+	void (*set_vlan_strip)(struct rnpgbe_hw *hw, u16 queue, bool strip);
+	void (*set_mac)(struct rnpgbe_hw *hw, u8 *mac, bool sriov_flag);
+	void (*set_rx_mode)(struct rnpgbe_hw *hw, struct net_device *netdev,
+			    bool sriov_flag);
+	void (*set_rar_with_vf)(struct rnpgbe_hw *hw, u8 *mac, int idx,
+				u32 vfnum, bool enable);
+	void (*clr_rar)(struct rnpgbe_hw *hw, int idx);
+	void (*clr_rar_all)(struct rnpgbe_hw *hw);
+	void (*clr_vlan_veb)(struct rnpgbe_hw *hw);
+	void (*set_txvlan_mode)(struct rnpgbe_hw *hw, bool vlan);
+	void (*set_tx_maxrate)(struct rnpgbe_hw *hw, bool flag);
+	void (*set_fcs_mode)(struct rnpgbe_hw *hw, bool status);
+	void (*set_vxlan_port)(struct rnpgbe_hw *hw, u32 port);
+	void (*set_vxlan_mode)(struct rnpgbe_hw *hw, bool inner);
+	void (*set_mac_speed)(struct rnpgbe_hw *hw, bool link, u32 speed,
+			      bool duplex);
+	void (*set_mac_rx)(struct rnpgbe_hw *hw, bool status);
+	void (*update_sriov_info)(struct rnpgbe_hw *hw);
+
+	void (*set_sriov_status)(struct rnpgbe_hw *hw, bool status);
+	//void (*set_sriov_vf_mac)(struct rnpgbe_hw *, u8 *, int, bool);
+	void (*set_sriov_vf_mc)(struct rnpgbe_hw *hw, u16 mc_addr);
+
+	void (*set_pause_mode)(struct rnpgbe_hw *hw);
+	void (*get_pause_mode)(struct rnpgbe_hw *hw);
+	void (*update_hw_info)(struct rnpgbe_hw *hw);
+	void (*set_rx_hash)(struct rnpgbe_hw *hw, bool status, bool sriov_flag);
+	int (*set_rss_hfunc)(struct rnpgbe_hw *hw, u8 hfunc);
+	void (*set_rss_key)(struct rnpgbe_hw *hw, bool sriov_flag);
+	void (*set_rss_table)(struct rnpgbe_hw *hw);
+
+	//MBX_ID
+	void (*set_mbx_link_event)(struct rnpgbe_hw *hw, int enable);
+	void (*set_mbx_ifup)(struct rnpgbe_hw *hw, int enable);
+
+	s32 (*get_thermal_sensor_data)(struct rnpgbe_hw *hw);
+	s32 (*init_thermal_sensor_thresh)(struct rnpgbe_hw *hw);
+
+	void (*disable_tx_laser)(struct rnpgbe_hw *hw);
+	void (*enable_tx_laser)(struct rnpgbe_hw *hw);
+	void (*flap_tx_laser)(struct rnpgbe_hw *hw);
+	s32 (*check_link)(struct rnpgbe_hw *hw, rnpgbe_link_speed *speed,
+			  bool *link_up, bool *duplex,
+			  bool link_up_wait_to_complete);
+	s32 (*setup_link)(struct rnpgbe_hw *hw, rnpgbe_link_speed adv,
+			  u32 autoneg, u32 speed, u32 duplex);
+	void (*clean_link)(struct rnpgbe_hw *hw);
+	s32 (*get_link_capabilities)(struct rnpgbe_hw *hw,
+				     rnpgbe_link_speed *speed, bool *autoneg);
+	s32 (*init_rx_addrs)(struct rnpgbe_hw *hw);
+
+	// ntuple function
+	void (*set_layer2_remapping)(struct rnpgbe_hw *hw,
+				     union rnpgbe_atr_input *input, u16 pri_id,
+				     u8 queue, bool prio_flag);
+	void (*clr_layer2_remapping)(struct rnpgbe_hw *hw, u16 pri_id);
+	void (*clr_all_layer2_remapping)(struct rnpgbe_hw *hw);
+	void (*set_tuple5_remapping)(struct rnpgbe_hw *hw,
+				     union rnpgbe_atr_input *input, u16 pri_id,
+				     u8 queue, bool prio_flag);
+	void (*clr_tuple5_remapping)(struct rnpgbe_hw *hw, u16 pri_id);
+	void (*clr_all_tuple5_remapping)(struct rnpgbe_hw *hw);
+	void (*set_tcp_sync_remapping)(struct rnpgbe_hw *hw, int queue,
+				       bool flag, bool prio);
+	void (*set_rx_skip)(struct rnpgbe_hw *hw, int count, bool flag);
+	void (*set_outer_vlan_type)(struct rnpgbe_hw *hw, int type);
+
+	void (*update_hw_status)(struct rnpgbe_hw *hw,
+				 struct rnpgbe_hw_stats *hw_stats,
+				 struct net_device_stats *net_stats);
+	void (*update_msix_count)(struct rnpgbe_hw *hw, int msix_count);
+
+	void (*update_rx_drop)(struct rnpgbe_hw *hw);
+
+	// ethtool
+	void (*setup_ethtool)(struct net_device *netdev);
+
+	s32 (*phy_read_reg)(struct rnpgbe_hw *hw, u32 reg_addr, u32 device_type,
+			    u16 *phy_data);
+	s32 (*phy_write_reg)(struct rnpgbe_hw *hw, u32 reg_addr,
+			     u32 device_type, u16 phy_data);
+	void (*setup_wol)(struct rnpgbe_hw *hw, u32 mode);
+	void (*set_vf_vlan_mode)(struct rnpgbe_hw *hw, u16 vlan, int vf,
+				 bool enable);
+	void (*driver_status)(struct rnpgbe_hw *hw, bool enable, int mode);
+
+	void (*setup_eee)(struct rnpgbe_hw *hw, int ls, int tw, u32 local_eee);
+
+	void (*set_eee_mode)(struct rnpgbe_hw *hw, bool en_tx_lpi_clockgating);
+	void (*reset_eee_mode)(struct rnpgbe_hw *hw);
+	void (*set_eee_timer)(struct rnpgbe_hw *hw, int ls, int tw);
+	void (*set_eee_pls)(struct rnpgbe_hw *hw, int link);
+
+	u32 (*get_lpi_status)(struct rnpgbe_hw *hw);
+
+	int (*get_ncsi_mac)(struct rnpgbe_hw *hw, u8 *addr, int idx);
+	int (*get_ncsi_vlan)(struct rnpgbe_hw *hw, u16 *vlan, int idx);
+
+	void (*set_lldp)(struct rnpgbe_hw *hw, bool enable);
+	void (*get_lldp)(struct rnpgbe_hw *hw);
+};
+
+struct rnpgbe_mac_operations {
+	void (*set_mac_rx)(struct rnpgbe_mac_info *mac, bool status);
+	void (*set_mac_speed)(struct rnpgbe_mac_info *mac, bool link, u32 speed,
+			      bool duplex);
+	void (*set_mac_fcs)(struct rnpgbe_mac_info *mac, bool status);
+	s32 (*set_fc_mode)(struct rnpgbe_mac_info *mac);
+	void (*check_link)(struct rnpgbe_mac_info *mac,
+			   rnpgbe_link_speed *speed, bool *link_up,
+			   bool link_up_wait_to_complete);
+	void (*set_mac)(struct rnpgbe_mac_info *mac, u8 *addr, int index);
+	int (*mdio_write)(struct rnpgbe_mac_info *mac, int phyreg, int phydata);
+	int (*mdio_read)(struct rnpgbe_mac_info *mac, u32 phyreg,
+			 u32 *regvalue);
+	void (*pmt)(struct rnpgbe_mac_info *mac, u32 mode, bool ncsi_en);
+	void (*set_eee_mode)(struct rnpgbe_mac_info *mac,
+			     bool en_tx_lpi_clockgating);
+	void (*reset_eee_mode)(struct rnpgbe_mac_info *mac);
+	void (*set_eee_timer)(struct rnpgbe_mac_info *mac, int ls, int tw);
+	void (*set_eee_pls)(struct rnpgbe_mac_info *mac, int link);
+	u32 (*get_lpi_status)(struct rnpgbe_mac_info *mac);
+};
+
+struct rnpgbe_phy_operations {
+	s32 (*identify)(struct rnpgbe_hw *hw);
+	s32 (*identify_sfp)(struct rnpgbe_hw *hw);
+	s32 (*init)(struct rnpgbe_hw *hw);
+	s32 (*reset)(struct rnpgbe_hw *hw);
+	s32 (*read_reg)(struct rnpgbe_hw *hw, u32 reg_addr, u32 device_type,
+			u16 *phy_data);
+	s32 (*write_reg)(struct rnpgbe_hw *hw, u32 reg_addr, u32 device_type,
+			 u16 phy_data);
+	s32 (*setup_link)(struct rnpgbe_hw *hw);
+	s32 (*setup_link_speed)(struct rnpgbe_hw *hw, rnpgbe_link_speed speed,
+				bool wait_to_complete);
+	s32 (*check_link)(struct rnpgbe_hw *hw, rnpgbe_link_speed *speed,
+			  bool *flag);
+	s32 (*get_firmware_version)(struct rnpgbe_hw *hw, u16 *version);
+	s32 (*read_i2c_byte)(struct rnpgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+			     u8 *data);
+	s32 (*write_i2c_byte)(struct rnpgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+			      u8 data);
+	s32 (*read_i2c_sff8472)(struct rnpgbe_hw *hw, u8 byte_offset, u8 *data);
+	s32 (*read_i2c_eeprom)(struct rnpgbe_hw *hw, u8 byte_offset, u8 *data);
+	s32 (*write_i2c_eeprom)(struct rnpgbe_hw *hw, u8 byte_offset, u8 data);
+	s32 (*check_overtemp)(struct rnpgbe_hw *hw);
+};
+
+struct rnpgbe_eeprom_info {
+	struct rnpgbe_eeprom_operations ops;
+	enum rnpgbe_eeprom_type type;
+	u32 semaphore_delay;
+	u16 word_size;
+	u16 address_bits;
+	u16 word_page_size;
+};
+
+struct rnpgbe_dma_operations {
+	void (*set_tx_maxrate)(struct rnpgbe_dma_info *dma, u16 queue,
+			       u32 max_rate);
+	void (*set_veb_mac)(struct rnpgbe_dma_info *dma, u8 *mac, u32 vfnum,
+			    u32 ring);
+	/* only set own vlan */
+	void (*set_veb_vlan)(struct rnpgbe_dma_info *dma, u16 vlan, u32 vfnum);
+	void (*set_veb_vlan_mask)(struct rnpgbe_dma_info *dma, u16 vlan,
+				  u16 mask, int entry);
+	void (*clr_veb_all)(struct rnpgbe_dma_info *dma);
+};
+
+struct rnpgbe_dma_info {
+	struct rnpgbe_dma_operations ops;
+	u8 __iomem *dma_base_addr;
+	u8 __iomem *dma_ring_addr;
+	void *back;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 dma_version;
+};
+
+#define RNP_MAX_MTA 128
+struct rnpgbe_eth_info {
+	struct rnpgbe_eth_operations ops;
+	u8 __iomem *eth_base_addr;
+	enum rnpgbe_eth_type eth_type;
+	void *back;
+
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+};
+
+struct rnpgbe_nic_info {
+	u8 __iomem *nic_base_addr;
+};
+
+struct mii_regs {
+	unsigned int addr; /* MII Address */
+	unsigned int data; /* MII Data */
+	unsigned int addr_shift; /* MII address shift */
+	unsigned int reg_shift; /* MII reg shift */
+	unsigned int addr_mask; /* MII address mask */
+	unsigned int reg_mask; /* MII reg mask */
+	unsigned int clk_csr_shift;
+	unsigned int clk_csr_mask;
+};
+
+#define RNP_FLAGS_DOUBLE_RESET_REQUIRED 0x01
+#define RNP_FLAGS_INIT_MAC_ADDRESS 0x02
+struct rnpgbe_mac_info {
+	struct rnpgbe_mac_operations ops;
+	u8 __iomem *mac_addr;
+	void *back;
+	struct mii_regs mii;
+	int phy_addr;
+	int clk_csr;
+	enum rnpgbe_mac_type type;
+	enum mac_type mac_type;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+	/* prefix for World Wide Node Name (WWNN) */
+	u16 wwnn_prefix;
+	/* prefix for World Wide Port Name (WWPN) */
+	u16 wwpn_prefix;
+	u16 max_msix_vectors;
+	u32 mta_shadow[RNP_MAX_MTA];
+	s32 mc_filter_type;
+	u32 mcft_size;
+	u32 vft_size;
+	u32 num_rar_entries;
+	u32 rar_highwater;
+	u32 rx_pb_size;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 reg_off;
+	u32 orig_autoc;
+	u32 cached_autoc;
+	u32 orig_autoc2;
+	bool orig_link_settings_stored;
+	bool autotry_restart;
+	u8 mac_flags;
+};
+
+struct rnpgbe_phy_info {
+	struct rnpgbe_phy_operations ops;
+	struct mdio_if_info mdio;
+	enum rnpgbe_phy_type type;
+	u32 id;
+	u32 phy_addr;
+	bool is_mdix;
+	u8 mdix;
+	enum rnpgbe_sfp_type sfp_type;
+	bool sfp_setup_needed;
+	u32 revision;
+	enum rnpgbe_media_type media_type;
+	bool reset_disable;
+	rnpgbe_autoneg_advertised autoneg_advertised;
+	bool smart_speed_active;
+	bool multispeed_fiber;
+	bool reset_if_overtemp;
+};
+
+#include "rnpgbe_mbx.h"
+
+struct rnpgbe_pcs_operations {
+	u32 (*read)(struct rnpgbe_hw *hw, int num, u32 addr);
+	void (*write)(struct rnpgbe_hw *hw, int num, u32 addr, u32 value);
+};
+
+struct rnpgbe_mbx_operations {
+	s32 (*init_params)(struct rnpgbe_hw *hw);
+	s32 (*read)(struct rnpgbe_hw *hw, u32 *msg, u16 size, enum MBX_ID);
+	s32 (*write)(struct rnpgbe_hw *hw, u32 *msg, u16 size, enum MBX_ID);
+	s32 (*read_posted)(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+			   enum MBX_ID);
+	s32 (*write_posted)(struct rnpgbe_hw *hw, u32 *msg, u16 size,
+			    enum MBX_ID);
+	s32 (*check_for_msg)(struct rnpgbe_hw *hw, enum MBX_ID);
+	s32 (*check_for_ack)(struct rnpgbe_hw *hw, enum MBX_ID);
+	//s32 (*check_for_rst)(struct rnpgbe_hw *, enum MBX_ID);
+	s32 (*configure)(struct rnpgbe_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnpgbe_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnpgbe_pcs_info {
+	struct rnpgbe_pcs_operations ops;
+	int pcs_count;
+};
+
+struct rnpgbe_mbx_info {
+	struct rnpgbe_mbx_operations ops;
+	struct rnpgbe_mbx_stats stats;
+	u32 timeout;
+	u32 usec_delay;
+	u32 v2p_mailbox;
+	u16 size;
+
+	u16 vf_req[64];
+	u16 vf_ack[64];
+	u16 cpu_req;
+	u16 cpu_ack;
+
+	struct mutex lock;
+
+	bool other_irq_enabled;
+	// add reg define
+	int mbx_size;
+
+	int mbx_mem_size;
+#define MBX_FEATURE_NO_ZERO BIT(0)
+#define MBX_FEATURE_WRITE_DELAY BIT(1)
+	u32 mbx_feature;
+	// cm3 <-> pf mbx
+	u32 cpu_pf_shm_base;
+	u32 pf2cpu_mbox_ctrl;
+	u32 pf2cpu_mbox_mask;
+	u32 cpu_pf_mbox_mask;
+	u32 cpu2pf_mbox_vec;
+
+	// pf <--> vf mbx
+	u32 pf_vf_shm_base;
+	u32 pf2vf_mbox_ctrl_base;
+	u32 pf_vf_mbox_mask_lo;
+	u32 pf_vf_mbox_mask_hi;
+	u32 pf2vf_mbox_vec_base;
+	u32 vf2pf_mbox_vec_base;
+
+	u32 cpu_vf_share_ram;
+	int share_size;
+};
+
+struct vf_vebvlans {
+	struct list_head l;
+	bool free;
+	int veb_entry;
+	u16 vid;
+	u16 mask;
+};
+struct lldp_status {
+	int enable;
+	int inteval;
+};
+
+struct rnpgbe_hw {
+	void *back;
+	u8 __iomem *hw_addr;
+	u8 __iomem *ring_msix_base;
+	u8 __iomem *rpu_addr; // 0x4000_0000
+	u8 pfvfnum; /* PF/VF function number */
+	u8 pfvfnum_system;
+	struct pci_dev *pdev;
+
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	char lane_mask;
+	u16 mac_type;
+	u16 phy_type;
+	int nr_lane;
+	int sfc_boot;
+	int pxe_en;
+	int ncsi_en;
+	int trim_valid;
+
+	u8 is_backplane : 1;
+	u8 is_sgmii : 1;
+	u8 force_10g_1g_speed_ablity : 1;
+	u8 force_speed_stat : 2;
+#define FORCE_SPEED_STAT_DISABLED 0
+#define FORCE_SPEED_STAT_1G 1
+#define FORCE_SPEED_STAT_10G 2
+
+	u32 supported_link;
+	u32 advertised_link;
+	u32 autoneg;
+	u32 fake_autoneg;
+	u32 tp_mdx;
+	u32 tp_mdix_ctrl;
+	u32 phy_id;
+
+	u32 eee_capability;
+
+	u8 link;
+	u8 pci_gen;
+	u8 pci_lanes;
+	u16 max_msix_vectors;
+
+	int speed;
+	int duplex;
+	u32 dma_version;
+	u32 wol;
+	u32 wol_en;
+	u16 min_length;
+	u16 max_length;
+	u16 min_length_current;
+	u16 max_length_current;
+	/* rss info */
+#define HW_MAX_RETA_ENTRIES 512
+	u8 rss_indir_tbl[HW_MAX_RETA_ENTRIES];
+#define HW_MAX_TC_ENTRIES 8
+	u8 rss_tc_tbl[HW_MAX_TC_ENTRIES];
+	int rss_indir_tbl_num;
+	int rss_tc_tbl_num;
+	u32 rss_tbl_setup_flag;
+#define HW_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+	u8 rss_key[HW_RSS_KEY_SIZE];
+	u32 rss_key_setup_flag;
+	u32 vfnum;
+	int num_rar_entries;
+	int max_vfs;
+	int max_vfs_noari;
+	int sriov_ring_limit;
+	int max_pf_macvlans;
+	int num_vebvlan_entries;
+
+	int fdir_mode;
+	int layer2_count;
+	int tuple5_count;
+	int veb_ring;
+
+	struct lldp_status lldp_status;
+
+	u32 fdir_pballoc; //total count
+	enum rnpgbe_rss_type rss_type;
+	enum rnpgbe_hw_type hw_type;
+	struct rnpgbe_hw_operations ops;
+	struct rnpgbe_nic_info nic;
+	struct rnpgbe_dma_info dma;
+	struct rnpgbe_eth_info eth;
+	struct rnpgbe_mac_info mac;
+	struct rnpgbe_addr_filter_info addr_ctrl;
+	struct rnpgbe_fc_info fc;
+	struct rnpgbe_phy_info phy;
+	struct rnpgbe_eeprom_info eeprom;
+	struct rnpgbe_bus_info bus;
+	struct rnpgbe_mbx_info mbx;
+	struct rnpgbe_pcs_info pcs;
+	bool adapter_stopped;
+	bool force_full_reset;
+	bool mng_fw_enabled;
+	bool wol_enabled;
+	unsigned long wol_supported;
+	int fw_version;
+	int force_en;
+	int force_cap;
+	u32 driver_version;
+	u8 sfp_connector;
+
+	struct vf_vebvlans vf_vas;
+	struct vf_vebvlans *vv_list;
+
+	u32 axi_mhz;
+	u32 bd_uid;
+	union {
+		u8 port_id[4];
+		u32 port_ids;
+	};
+
+	int mode;
+	int default_rx_queue;
+	u32 usecstocount;
+#define RNP_NET_FEATURE_SG ((u32)(1 << 0))
+#define RNP_NET_FEATURE_TX_CHECKSUM ((u32)(1 << 1))
+#define RNP_NET_FEATURE_RX_CHECKSUM ((u32)(1 << 2))
+#define RNP_NET_FEATURE_TSO ((u32)(1 << 3))
+#define RNP_NET_FEATURE_TX_UDP_TUNNEL (1 << 4)
+#define RNP_NET_FEATURE_VLAN_FILTER (1 << 5)
+#define RNP_NET_FEATURE_VLAN_OFFLOAD (1 << 6)
+#define RNP_NET_FEATURE_RX_NTUPLE_FILTER (1 << 7)
+#define RNP_NET_FEATURE_TCAM (1 << 8)
+#define RNP_NET_FEATURE_RX_HASH (1 << 9)
+#define RNP_NET_FEATURE_RX_FCS (1 << 10)
+#define RNP_NET_FEATURE_HW_TC (1 << 11)
+#define RNP_NET_FEATURE_USO (1 << 12)
+#define RNP_NET_FEATURE_STAG_FILTER (1 << 13)
+#define RNP_NET_FEATURE_STAG_OFFLOAD (1 << 14)
+#define RNP_NET_FEATURE_VF_FIXED (1 << 15)
+#define RNP_VEB_VLAN_MASK_EN (1 << 16)
+#define RNP_HW_FEATURE_EEE (1 << 17)
+#define RNP_HW_SOFT_MASK_OTHER_IRQ (1 << 18)
+
+	u32 feature_flags;
+	struct rnpgbe_thermal_sensor_data thermal_sensor_data;
+
+	struct {
+		int version;
+		int len;
+		int flag;
+	} dump;
+};
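+
+/*
+ * Illustrative only: feature_flags is an OR of the RNP_NET_FEATURE_* /
+ * RNP_HW_* bits defined inside struct rnpgbe_hw above, e.g.
+ *
+ *	hw->feature_flags = RNP_NET_FEATURE_SG |
+ *			    RNP_NET_FEATURE_TX_CHECKSUM |
+ *			    RNP_NET_FEATURE_RX_CHECKSUM |
+ *			    RNP_NET_FEATURE_RX_HASH;
+ *
+ * The actual set is expected to be filled in by the chip-specific setup
+ * code.
+ */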
+
+struct rnpgbe_info {
+	enum rnpgbe_mac_type mac;
+	enum rnpgbe_rss_type rss_type;
+	enum rnpgbe_hw_type hw_type;
+	s32 (*get_invariants)(struct rnpgbe_hw *hw);
+	struct rnpgbe_mac_operations *mac_ops;
+	struct rnpgbe_eeprom_operations *eeprom_ops;
+	struct rnpgbe_phy_operations *phy_ops;
+	struct rnpgbe_mbx_operations *mbx_ops;
+	struct rnpgbe_pcs_operations *pcs_ops;
+
+	bool one_pf_with_two_dma;
+	int reg_off;
+	int adapter_cnt;
+	char lane_mask;
+	int hi_dma;
+	int total_queue_pair_cnts;
+	int dma2_in_1pf;
+	char *hw_addr;
+};
+
+/* Error Codes */
+#define RNP_ERR_EEPROM -1
+#define RNP_ERR_EEPROM_CHECKSUM -2
+#define RNP_ERR_PHY -3
+#define RNP_ERR_CONFIG -4
+#define RNP_ERR_PARAM -5
+#define RNP_ERR_MAC_TYPE -6
+#define RNP_ERR_UNKNOWN_PHY -7
+#define RNP_ERR_LINK_SETUP -8
+#define RNP_ERR_ADAPTER_STOPPED -9
+#define RNP_ERR_INVALID_MAC_ADDR -10
+#define RNP_ERR_DEVICE_NOT_SUPPORTED -11
+#define RNP_ERR_MASTER_REQUESTS_PENDING -12
+#define RNP_ERR_INVALID_LINK_SETTINGS -13
+#define RNP_ERR_AUTONEG_NOT_COMPLETE -14
+#define RNP_ERR_RESET_FAILED -15
+#define RNP_ERR_SWFW_SYNC -16
+#define RNP_ERR_PHY_ADDR_INVALID -17
+#define RNP_ERR_I2C -18
+#define RNP_ERR_SFP_NOT_SUPPORTED -19
+#define RNP_ERR_SFP_NOT_PRESENT -20
+#define RNP_ERR_SFP_NO_INIT_SEQ_PRESENT -21
+#define RNP_ERR_FDIR_REINIT_FAILED -23
+#define RNP_ERR_EEPROM_VERSION -24
+#define RNP_ERR_NO_SPACE -25
+#define RNP_ERR_OVERTEMP -26
+#define RNP_ERR_FC_NOT_NEGOTIATED -27
+#define RNP_ERR_FC_NOT_SUPPORTED -28
+#define RNP_ERR_SFP_SETUP_NOT_COMPLETE -30
+#define RNP_ERR_PBA_SECTION -31
+#define RNP_ERR_INVALID_ARGUMENT -32
+#define RNP_ERR_HOST_INTERFACE_COMMAND -33
+#define RNP_NOT_IMPLEMENTED 0x7FFFFFFF
+
+#define RNP_RAH_AV 0x80000000
+/* eth fix code */
+#define RNP_FCTRL_BPE BIT(10)
+#define RNP_FCTRL_UPE BIT(9)
+#define RNP_FCTRL_MPE BIT(8)
+#define RNP_MCSTCTRL_MTA BIT(2)
+#define RNP_MCSTCTRL_UTA BIT(3)
+#define RNP_MAX_LAYER2_FILTERS (16)
+#define RNP_MAX_TUPLE5_FILTERS (128)
+#define RNP_MAX_TCAM_FILTERS (4096)
+#define RNP_SRC_IP_MASK BIT(0)
+#define RNP_DST_IP_MASK BIT(1)
+#define RNP_SRC_PORT_MASK BIT(2)
+#define RNP_DST_PORT_MASK BIT(3)
+#define RNP_L4_PROTO_MASK BIT(4)
+#endif /* _RNPGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/version.h b/drivers/net/ethernet/mucse/rnpgbe/version.h
new file mode 100644
index 0000000000000..e6a50ab873323
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/version.h
@@ -0,0 +1,4 @@
+#ifndef VERSION_H
+#define VERSION_H
+#define GIT_COMMIT " 5b646e9"
+#endif /* VERSION_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/Makefile b/drivers/net/ethernet/mucse/rnpgbevf/Makefile
new file mode 100644
index 0000000000000..45832fbd8f823
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2022 - 2024 Mucse Corporation
+#
+# Makefile for the Mucse(R) 1GbE PCI Express ethernet driver
+#
+#
+
+obj-$(CONFIG_MXGBEVF) += rnpgbevf.o
+rnpgbevf-objs := vf.o \
+		 rnpgbevf_mbx.o \
+		 rnpgbevf_ethtool.o \
+		 rnpgbevf_sysfs.o \
+		 rnpgbevf_main.o
+
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/defines.h b/drivers/net/ethernet/mucse/rnpgbevf/defines.h
new file mode 100644
index 0000000000000..500176b32a8d7
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/defines.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBEVF_DEFINES_H_
+#define _RNPGBEVF_DEFINES_H_
+#include 
+#include 
+/* Device IDs */
+#define RNPGBE_DEV_ID_N10_PF0_VF 0x8001
+#define RNPGBE_DEV_ID_N10_PF1_VF 0x8002
+
+#define RNPGBE_DEV_ID_N10_PF0_VF_N 0x1010
+#define RNPGBE_DEV_ID_N10_PF1_VF_N 0x1011
+
+#define RNPGBE_VF_IRQ_CLEAR_MASK 7
+#define RNPGBE_VF_MAX_TX_QUEUES 8
+#define RNPGBE_VF_MAX_RX_QUEUES 8
+
+/* DCB define */
+#define RNPGBE_VF_MAX_TRAFFIC_CLASS 8
+
+/* Link speed */
+typedef u32 rnp_link_speed;
+#define RNPGBE_LINK_SPEED_UNKNOWN 0
+#define RNPGBE_LINK_SPEED_10_FULL BIT(2)
+#define RNPGBE_LINK_SPEED_100_FULL BIT(3)
+#define RNPGBE_LINK_SPEED_1GB_FULL BIT(4)
+#define RNPGBE_LINK_SPEED_10GB_FULL BIT(5)
+#define RNPGBE_LINK_SPEED_40GB_FULL BIT(6)
+#define RNPGBE_LINK_SPEED_25GB_FULL BIT(7)
+#define RNPGBE_LINK_SPEED_50GB_FULL BIT(8)
+#define RNPGBE_LINK_SPEED_100GB_FULL BIT(9)
+#define RNPGBE_LINK_SPEED_10_HALF BIT(10)
+#define RNPGBE_LINK_SPEED_100_HALF BIT(11)
+#define RNPGBE_LINK_SPEED_1GB_HALF BIT(12)
+#define RNPGBE_SFP_MODE_10G_LR BIT(13)
+#define RNPGBE_SFP_MODE_10G_SR BIT(14)
+#define RNPGBE_SFP_MODE_10G_LRM BIT(15)
+#define RNPGBE_SFP_MODE_1G_T BIT(16)
+#define RNPGBE_SFP_MODE_1G_KX BIT(17)
+#define RNPGBE_SFP_MODE_1G_SX BIT(18)
+#define RNPGBE_SFP_MODE_1G_LX BIT(19)
+#define RNPGBE_SFP_MODE_40G_SR4 BIT(20)
+#define RNPGBE_SFP_MODE_40G_CR4 BIT(21)
+#define RNPGBE_SFP_MODE_40G_LR4 BIT(22)
+#define RNPGBE_SFP_MODE_1G_CX BIT(23)
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define RNPGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define RNPGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define RNPGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+#define RNP_START_ITR 648 /* ~6000 ints/sec */
+#define RNP_4K_ITR 980
+#define RNP_20K_ITR 196
+#define RNP_70K_ITR
+#define RNP_LOWEREST_ITR 5
+
+/* Interrupt Vector Allocation Registers */
+#define RNPGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+#define RNPGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Transmit Descriptor - Advanced */
+struct rnp_tx_desc {
+	union {
+		__le64 pkt_addr; /* Packet buffer address */
+		struct {
+			__le32 adr_lo;
+			__le32 adr_hi;
+		};
+	};
+	__le16 blen;
+	union {
+		struct {
+			__le16 ip_len : 9;
+			__le16 mac_len : 7;
+		};
+		__le16 mac_ip_len;
+	};
+	__le16 vlan;
+#define RNPGBE_TXD_FLAGS_VLAN_PRIO_MASK 0xe000
+#define RNPGBE_TX_FLAGS_VLAN_PRIO_SHIFT 13
+#define RNPGBE_TX_FLAGS_VLAN_CFI_SHIFT 12
+
+	__le16 cmd;
+#define RNPGBE_TXD_VLAN_VALID (1 << 15)
+#define RNPGBE_TXD_SVLAN_TYPE (1 << 9)
+#define RNPGBE_TXD_VLAN_CTRL_NOP (0x00 << 13)
+#define RNPGBE_TXD_VLAN_CTRL_RM_VLAN (0x01 << 13)
+#define RNPGBE_TXD_VLAN_CTRL_INSERT_VLAN (0x02 << 13)
+#define RNPGBE_TXD_L4_CSUM (1 << 12)
+#define RNPGBE_TXD_IP_CSUM (1 << 11)
+#define RNPGBE_TXD_TUNNEL_MASK (0x3000000)
+#define RNPGBE_TXD_TUNNEL_VXLAN (0x01 << 8)
+#define RNPGBE_TXD_TUNNEL_NVGRE (0x02 << 8)
+#define RNPGBE_TXD_L4_TYPE_UDP (0x03 << 6)
+#define RNPGBE_TXD_L4_TYPE_TCP (0x01 << 6)
+#define RNPGBE_TXD_L4_TYPE_SCTP (0x02 << 6)
+#define RNPGBE_TXD_FLAG_IPv4 (0 << 5)
+#define RNPGBE_TXD_FLAG_IPv6 (1 << 5)
+#define RNPGBE_TXD_FLAG_TSO (1 << 4)
+#define RNPGBE_TXD_CMD_RS (1 << 2)
+#define RNPGBE_TXD_STAT_DD (1 << 1)
+#define RNPGBE_TXD_CMD_EOP (1 << 0)
+} __packed;
+
+struct rnp_tx_ctx_desc {
+	__le16 mss_len;
+	u8 vfnum;
+	u8 l4_hdr_len;
+	u8 tunnel_hdr_len;
+	__le16 inner_vlan;
+	u8 vf_veb_flags;
+#define VF_IGNORE_VLAN (1 << 1) /* bit 57 */
+#define VF_VEB_MARK (1 << 0) /* bit 56 */
+	u32 rev;
+	__le16 rev1;
+	__le16 cmd;
+#define RNPGBE_TXD_FLAG_TO_RPU (1 << 15)
+#define RNPGBE_TXD_SMAC_CTRL_NOP (0x00 << 12)
+#define RNPGBE_TXD_SMAC_CTRL_REPLACE_MACADDR0 (0x02 << 12)
+#define RNPGBE_TXD_SMAC_CTRL_REPLACE_MACADDR1 (0x06 << 12)
+#define RNPGBE_TXD_CTX_VLAN_CTRL_NOP (0x00 << 10)
+#define RNPGBE_TXD_CTX_VLAN_CTRL_RM_VLAN (0x01 << 10)
+#define RNPGBE_TXD_CTX_VLAN_CTRL_INSERT_VLAN (0x02 << 10)
+#define RNPGBE_TXD_MTI_CRC_PAD_CTRL (0x01000000)
+#define RNPGBE_TXD_CTX_CTRL_DESC (1 << 3)
+#define RNPGBE_TXD_CTX_CMD_RS (1 << 2)
+#define RNPGBE_TXD_STAT_DD (1 << 1)
+} __packed;
+
+/* Receive Descriptor - Advanced */
+union rnp_rx_desc {
+	struct {
+		union {
+			__le64 pkt_addr;
+			/* Packet buffer address */
+			struct {
+				__le32 addr_lo;
+				__le32 addr_hi;
+			};
+		};
+		u8 dumy[6];
+		__le16 cmd;
+#define RNPGBE_RXD_FLAG_RS (1 << 2)
+	};
+
+	struct {
+		__le32 rss_hash;
+		__le16 mark;
+		__le16 rev1;
+#define RNPGBE_RX_L3_TYPE_MASK (1 << 15) /* 1 is ipv4 */
+#define VEB_VF_PKG (1 << 1) /* bit 49 */
+#define VEB_VF_IGNORE_VLAN (1 << 0) /* bit 48 */
+		__le16 len;
+		__le16 padding_len;
+		__le16 vlan;
+		__le16 cmd;
+#define RNPGBE_RXD_STAT_VLAN_VALID (1 << 15)
+#define RNPGBE_RXD_STAT_STAG (0x01 << 14)
+#define RNPGBE_RXD_STAT_TUNNEL_NVGRE (0x02 << 13)
+#define RNPGBE_RXD_STAT_TUNNEL_VXLAN (0x01 << 13)
+#define RNPGBE_RXD_STAT_ERR_MASK (0x1f << 8)
+#define RNPGBE_RXD_STAT_TUNNEL_MASK (0x03 << 13)
+#define RNPGBE_RXD_STAT_SCTP_MASK (0x04 << 8)
+#define RNPGBE_RXD_STAT_L4_MASK (0x02 << 8)
+#define RNPGBE_RXD_STAT_ERR_MASK_NOSCTP (0x1b << 8)
+#define RNPGBE_RXD_STAT_L4_SCTP (0x02 << 6)
+#define RNPGBE_RXD_STAT_L4_TCP (0x01 << 6)
+#define RNPGBE_RXD_STAT_L4_UDP (0x03 << 6)
+#define RNPGBE_RXD_STAT_IPV6 (1 << 5)
+#define RNPGBE_RXD_STAT_IPV4 (0 << 5)
+#define RNPGBE_RXD_STAT_PTP (1 << 4)
+#define RNPGBE_RXD_STAT_DD (1 << 1)
+#define RNPGBE_RXD_STAT_EOP (1 << 0)
+	} wb;
+} __packed;
+
+/* Interrupt register bitmasks */
+
+#define RNPGBE_EITR_CNT_WDIS 0x80000000
+#define RNPGBE_MAX_EITR 0x00000FF8
+#define RNPGBE_MIN_EITR 8
+
+/* Error Codes */
+#define RNPGBE_ERR_INVALID_MAC_ADDR -1
+#define RNPGBE_ERR_RESET_FAILED -2
+#define RNPGBE_ERR_INVALID_ARGUMENT -3
+
+#ifdef DEBUG
+#define dbg(fmt, args...)                                                      \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else
+#define dbg(fmt, args...)
+#endif
+
+#define rnpgbevf_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#define rnpgbevf_info(fmt, args...)                                            \
+	printk(KERN_DEBUG "rnpvf-info: " fmt, ##args)
+#define rnpgbevf_warn(fmt, args...)                                            \
+	printk(KERN_DEBUG "rnpvf-warn: " fmt, ##args)
+#define rnpgbevf_err(fmt, args...) printk(KERN_ERR "rnpvf-err : " fmt, ##args)
+
+#define DPRINTK(nlevel, klevel, fmt, args...)                                  \
+	((NETIF_MSG_##nlevel & adapter->msg_enable) ?                          \
+		 (void)(netdev_printk(KERN_##klevel, adapter->netdev, fmt,     \
+				      ##args)) :                               \
+		 NULL)
+
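+/* hex-dump helpers used by the TX/RX debug paths: print a buffer 16 bytes
+ * per line, flushing the local message buffer before it can fill up
+ */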
+#ifdef CONFIG_RNPGBE_TX_DEBUG
+static inline void buf_dump_line(const char *msg, int line, void *buf, int len)
+{
+	int i, offset = 0;
+	int msg_len = 1024;
+	u8 msg_buf[1024];
+	u8 *ptr = (u8 *)buf;
+
+	offset += snprintf(msg_buf + offset, msg_len - offset,
+			   "=== %s #%d line:%d buf:%p==\n000: ", msg, len, line,
+			   buf);
+
+	for (i = 0; i < len; ++i) {
+		if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) {
+			printk(KERN_DEBUG "%s\n", msg_buf);
+			offset = 0;
+		}
+
+		if ((i != 0) && (i % 16) == 0) {
+			offset += snprintf(msg_buf + offset, msg_len - offset,
+					   "\n%03x: ", i);
+		}
+		offset += snprintf(msg_buf + offset, msg_len - offset,
+				   "%02x ", ptr[i]);
+	}
+
+	offset += snprintf(msg_buf + offset, msg_len - offset, "\n");
+	printk(KERN_DEBUG "%s\n", msg_buf);
+}
+#else
+#define buf_dump_line(msg, line, buf, len)
+#endif
+
+static inline void buf_dump(const char *msg, void *buf, int len)
+{
+	int i, offset = 0;
+	int msg_len = 1024;
+	u8 msg_buf[1024];
+	u8 *ptr = (u8 *)buf;
+
+	offset += snprintf(msg_buf + offset, msg_len - offset,
+			   "=== %s #%d ==\n000: ", msg, len);
+
+	for (i = 0; i < len; ++i) {
+		if ((i != 0) && (i % 16) == 0 && (offset >= (1024 - 10 * 16))) {
+			printk(KERN_DEBUG "%s\n", msg_buf);
+			offset = 0;
+		}
+
+		if ((i != 0) && (i % 16) == 0) {
+			offset += snprintf(msg_buf + offset, msg_len - offset,
+					   "\n%03x: ", i);
+		}
+		offset += snprintf(msg_buf + offset, msg_len - offset,
+				   "%02x ", ptr[i]);
+	}
+
+	offset += snprintf(msg_buf + offset, msg_len - offset,
+			   "\n=== done ==\n");
+	printk(KERN_DEBUG "%s\n", msg_buf);
+}
+#ifndef NO_SKB_DUMP
+static inline void _rnp_skb_dump(const struct sk_buff *skb, bool full_pkt)
+{
+	static atomic_t can_dump_full = ATOMIC_INIT(5);
+#if defined(RHEL_RELEASE_CODE)
+#if RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 1)
+	struct skb_shared_info *sh = skb_shinfo(skb);
+#endif
+#else
+	struct skb_shared_info *sh = skb_shinfo(skb);
+#endif
+	struct net_device *dev = skb->dev;
+	struct sk_buff *list_skb;
+	bool has_mac, has_trans;
+	int headroom, tailroom;
+	int i, len, seg_len;
+	const char *level = KERN_WARNING;
+
+	if (full_pkt)
+		full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;
+
+	if (full_pkt)
+		len = skb->len;
+	else
+		len = min_t(int, skb->len, MAX_HEADER + 128);
+
+	headroom = skb_headroom(skb);
+	tailroom = skb_tailroom(skb);
+
+	has_mac = skb_mac_header_was_set(skb);
+	has_trans = skb_transport_header_was_set(skb);
+
+#if defined(RHEL_RELEASE_CODE)
+#if RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7, 1)
+	printk(KERN_DEBUG
+	       "%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
+	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
+	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
+	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
+	       "hash(rx:0x%x  l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
+	       level, skb->len, headroom, skb_headlen(skb), tailroom,
+	       has_mac ? skb->mac_header : -1,
+	       has_mac ? (skb->network_header - skb->mac_header) : -1,
+	       skb->network_header,
+	       has_trans ? skb_network_header_len(skb) : -1,
+	       has_trans ? skb->transport_header : -1, sh->tx_flags,
+	       sh->nr_frags, sh->gso_size, sh->gso_type, sh->gso_segs,
+	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
+	       skb->csum_valid, skb->csum_level, skb->rxhash, skb->l4_rxhash,
+	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
+#endif
+#else
+	printk(KERN_DEBUG
+	       "%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
+	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
+	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
+	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
+	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
+	       level, skb->len, headroom, skb_headlen(skb), tailroom,
+	       has_mac ? skb->mac_header : -1,
+	       has_mac ? (skb->network_header - skb->mac_header) : -1,
+	       skb->network_header,
+	       has_trans ? skb_network_header_len(skb) : -1,
+	       has_trans ? skb->transport_header : -1, sh->tx_flags,
+	       sh->nr_frags, sh->gso_size, sh->gso_type, sh->gso_segs,
+	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
+	       skb->csum_valid, skb->csum_level, skb->hash, skb->sw_hash,
+	       skb->l4_hash, ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
+#endif
+
+	if (dev)
+		printk(KERN_DEBUG "%sdev name=%s feat=0x%pNF\n", level,
+		       dev->name, &dev->features);
+
+	seg_len = min_t(int, skb_headlen(skb), len);
+	if (seg_len)
+		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET, 16,
+			       1, skb->data, seg_len, false);
+	len -= seg_len;
+
+	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		u32 p_len;
+		struct page *p;
+		u8 *vaddr;
+
+		p = skb_frag_address(frag);
+		p_len = skb_frag_size(frag);
+		seg_len = min_t(int, p_len, len);
+		vaddr = kmap_atomic(p);
+		print_hex_dump(level, "skb frag:     ", DUMP_PREFIX_OFFSET, 16,
+			       1, vaddr, seg_len, false);
+		kunmap_atomic(vaddr);
+		len -= seg_len;
+		if (!len)
+			break;
+	}
+
+	if (full_pkt && skb_has_frag_list(skb)) {
+		printk(KERN_DEBUG "skb fraglist:\n");
+		skb_walk_frags(skb, list_skb) _rnp_skb_dump(list_skb, true);
+	}
+}
+#endif
+
+#define TRACE() printk(KERN_DEBUG "=[%s] %d == \n", __func__, __LINE__)
+
+#ifdef CONFIG_RNPGBE_TX_DEBUG
+#define desc_hex_dump(msg, buf, len)                                           \
+	print_hex_dump(KERN_WARNING, msg, DUMP_PREFIX_OFFSET, 16, 1, (buf),    \
+		       (len), false)
+#define rnpgbevf_skb_dump _rnp_skb_dump
+#else
+#define desc_hex_dump(msg, buf, len)
+#define rnpgbevf_skb_dump(skb, full_pkt)
+#endif
+
+#ifdef CONFIG_RNPGBE_RX_DEBUG
+#define rx_debug_printk printk
+#define rx_buf_dump buf_dump
+#else
+#define rx_debug_printk(fmt, args...)
+#define rx_buf_dump(a, b, c)
+#endif /* CONFIG_RNPGBE_RX_DEBUG */
+
+#endif /* _RNPGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf.h b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf.h
new file mode 100644
index 0000000000000..489e1a9006f9a
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf.h
@@ -0,0 +1,715 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBEVF_H_
+#define _RNPGBEVF_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/dma-mapping.h>
+#ifdef CONFIG_NET_NCSI
+#include <net/ncsi.h>
+#endif
+
+#include "vf.h"
+
+#define RNPVF_ALLOC_PAGE_ORDER 0
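+/* number of 2 KiB (1 << 11) receive buffers that fit in one page allocation */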
+#define RNPVF_PAGE_BUFFER_NUMS(ring)                                           \
+	(((1 << RNPVF_ALLOC_PAGE_ORDER) * PAGE_SIZE) >> 11)
+
+#define RNPVF_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+#if defined(CONFIG_MGBEVF_OPTM_WITH_LARGE) && !defined(OPTM_WITH_LPAGE)
+#define OPTM_WITH_LPAGE
+#endif
+
+#if (PAGE_SIZE < 8192)
+/* if PAGE_SIZE is 4K, there is no need for this */
+#ifdef OPTM_WITH_LPAGE
+#undef OPTM_WITH_LPAGE
+#endif
+#endif
+
+extern const struct rnpgbevf_info rnp_n500_vf_info;
+extern const struct rnpgbevf_info rnp_n210_vf_info;
+
+struct rnpgbevf_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+struct rnpgbevf_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 tx_done_old;
+	u64 clean_desc;
+	u64 poll_count;
+	u64 irq_more_count;
+	u64 vlan_add;
+	u64 tx_irq_miss;
+	u64 tx_next_to_clean;
+	u64 tx_equal_count;
+};
+
+struct rnpgbevf_rx_queue_stats {
+	u64 driver_drop_packets;
+	u64 rsc_count;
+	u64 rsc_flush;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
+	u64 csum_err;
+	u64 csum_good;
+	u64 poll_again_count;
+	u64 poll_count;
+	u64 vlan_remove;
+	u64 rx_irq_miss;
+	u64 rx_next_to_clean;
+	u64 rx_equal_count;
+};
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct rnpgbevf_tx_buffer {
+	struct rnp_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct sk_buff *skb;
+	unsigned int bytecount;
+	unsigned short gso_segs;
+	bool gso_need_padding;
+
+	__be16 protocol;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	union {
+		u32 tx_flags;
+		struct {
+			u16 vlan;
+			u16 cmd_flags;
+		};
+	};
+	__le32 mac_ip_len;
+	/* for control desc */
+	union {
+		u32 mss_len_vf_num;
+		struct {
+			__le16 mss_len;
+			u8 vf_num;
+			u8 l4_hdr_len;
+		};
+	};
+	union {
+		u32 inner_vlan_tunnel_len;
+		struct {
+			u8 tunnel_hdr_len;
+			u8 inner_vlan_l;
+			u8 inner_vlan_h;
+			u8 resv;
+		};
+	};
+	bool ctx_flag;
+};
+
+struct rnpgbevf_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
+};
+
+enum rnpgbevf_ring_state_t {
+	__RNPVF_RX_3K_BUFFER,
+	__RNPVF_RX_BUILD_SKB_ENABLED,
+	__RNPVF_TX_FDIR_INIT_DONE,
+	__RNPVF_TX_XPS_INIT_DONE,
+	__RNPVF_TX_DETECT_HANG,
+	__RNPVF_HANG_CHECK_ARMED,
+	__RNPVF_RX_CSUM_UDP_ZERO_ERR,
+	__RNPVF_RX_FCOE,
+};
+
+#define ring_uses_build_skb(ring)                                              \
+	test_bit(__RNPVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
+/* now tx max 4k for one desc */
+#define RNPVF_MAX_TXD_PWR 12
+#define RNPVF_MAX_DATA_PER_TXD (1 << RNPVF_MAX_TXD_PWR)
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), RNPVF_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
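+/* e.g. a 9000-byte fragment needs TXD_USE_COUNT(9000) =
+ * DIV_ROUND_UP(9000, 4096) = 3 descriptors; DESC_NEEDED additionally
+ * reserves room for MAX_SKB_FRAGS fragments plus four spare descriptors
+ */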
+
+struct rnpgbevf_ring {
+	struct rnpgbevf_ring *next; /* pointer to next ring in q_vector */
+	struct rnpgbevf_q_vector *q_vector; /* backpointer to host q_vector */
+	struct net_device *netdev; /* netdev ring belongs to */
+	struct device *dev; /* device for DMA mapping */
+	void *desc; /* descriptor ring memory */
+	union {
+		struct rnpgbevf_tx_buffer *tx_buffer_info;
+		struct rnpgbevf_rx_buffer *rx_buffer_info;
+	};
+	unsigned long last_rx_timestamp;
+	unsigned long state;
+	u8 __iomem *ring_addr;
+	u8 __iomem *hw_addr;
+	u8 __iomem *tail;
+	u8 __iomem *dma_int_stat;
+	u8 __iomem *dma_int_mask;
+	u8 __iomem *dma_int_clr;
+	dma_addr_t dma; /* phys. address of descriptor ring */
+	unsigned int size; /* length in bytes */
+	u32 ring_flags;
+#define RNPVF_RING_FLAG_DELAY_SETUP_RX_LEN ((u32)(1 << 0))
+#define RNPVF_RING_FLAG_CHANGE_RX_LEN ((u32)(1 << 1))
+#define RNPVF_RING_FLAG_DO_RESET_RX_LEN ((u32)(1 << 2))
+#define RNPVF_RING_SKIP_TX_START ((u32)(1 << 3))
+#define RNPVF_RING_NO_TUNNEL_SUPPORT ((u32)(1 << 4))
+#define RNPVF_RING_SIZE_CHANGE_FIX ((u32)(1 << 5))
+#define RNPVF_RING_SCATER_SETUP ((u32)(1 << 6))
+#define RNPVF_RING_STAGS_SUPPORT ((u32)(1 << 7))
+#define RNPVF_RING_DOUBLE_VLAN_SUPPORT ((u32)(1 << 8))
+#define RNPVF_RING_VEB_MULTI_FIX ((u32)(1 << 9))
+#define RNPVF_RING_IRQ_MISS_FIX ((u32)(1 << 10))
+#define RNPVF_RING_CHKSM_FIX ((u32)(1 << 11))
+#define RNPVF_RING_LOWER_ITR ((u32)(1 << 12))
+	u8 vfnum;
+	u8 rnpgbevf_msix_off;
+	u16 count; /* amount of descriptors */
+	u8 queue_index; /* queue_index needed for multiqueue queue management */
+	u8 rnpgbevf_queue_idx;
+	u16 next_to_use;
+	u16 next_to_clean;
+	u16 device_id;
+#ifdef OPTM_WITH_LPAGE
+	u16 rx_page_buf_nums;
+	u32 rx_per_buf_mem;
+	struct sk_buff *skb;
+#endif
+	union {
+		u16 next_to_alloc;
+		struct {
+			u8 atr_sample_rate;
+			u8 atr_count;
+		};
+	};
+
+	u8 dcb_tc;
+	struct rnpgbevf_queue_stats stats;
+	struct u64_stats_sync syncp;
+	union {
+		struct rnpgbevf_tx_queue_stats tx_stats;
+		struct rnpgbevf_rx_queue_stats rx_stats;
+	};
+} ____cacheline_internodealigned_in_smp;
+
+#define RNPVF_ITR_ADAPTIVE_MIN_INC 2
+#define RNPVF_ITR_ADAPTIVE_MIN_USECS 5
+#define RNPVF_ITR_ADAPTIVE_MAX_USECS 800
+#define RNPVF_ITR_ADAPTIVE_LATENCY 0x400
+#define RNPVF_ITR_ADAPTIVE_BULK 0x00
+#define RNPVF_ITR_ADAPTIVE_MASK_USECS                                          \
+	(RNPVF_ITR_ADAPTIVE_LATENCY - RNPVF_ITR_ADAPTIVE_MIN_INC)
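+/* the mask above evaluates to 0x400 - 2 == 0x3fe */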
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define RNPVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define RNPVF_VF_MAX_TX_QUEUES 2
+#define RNPVF_VF_MAX_RX_QUEUES 2
+
+#define MAX_RX_QUEUES RNPVF_VF_MAX_RX_QUEUES
+#define MAX_TX_QUEUES RNPVF_VF_MAX_TX_QUEUES
+
+#ifndef RNPVF_PKT_TIMEOUT
+#define RNPVF_PKT_TIMEOUT 30
+#endif
+
+#ifndef RNPVF_RX_PKT_POLL_BUDGET
+#define RNPVF_RX_PKT_POLL_BUDGET 64
+#endif
+
+#ifndef RNPVF_TX_PKT_POLL_BUDGET
+#define RNPVF_TX_PKT_POLL_BUDGET 0x30
+#endif
+
+#ifndef RNPVF_PKT_TIMEOUT_TX
+#define RNPVF_PKT_TIMEOUT_TX 100
+#endif
+
+#define RNPVF_MIN_RX_WORK (32)
+#define RNPVF_DEFAULT_RX_WORK (64)
+#define RNPVF_MAX_RX_WORK (512)
+#define RNPVF_WORK_ALIGN (2)
+#define RNPVF_MIN_TX_FRAME (1)
+#define RNPVF_MAX_TX_FRAME (256)
+#define RNPVF_MIN_TX_USEC (30)
+#define RNPVF_MAX_TX_USEC (10000)
+
+#define RNPVF_MIN_RX_FRAME (1)
+#define RNPVF_MAX_RX_FRAME (256)
+#define RNPVF_MIN_RX_USEC (10)
+#define RNPVF_MAX_RX_USEC (10000)
+
+#define RNPVF_MIN_TX_WORK (32)
+#define RNPVF_MAX_TX_WORK (512)
+#define RNPVF_DEFAULT_TX_WORK 256
+#define RNPVF_DEFAULT_TXD 512
+#define RNPVF_DEFAULT_RXD 512
+#define RNPVF_MAX_TXD 4096
+#define RNPVF_MIN_TXD 256
+#define RNPVF_MAX_RXD 4096
+#define RNPVF_MIN_RXD 256
+
+#ifndef TSRN10_RX_DEFAULT_BURST
+#define TSRN10_RX_DEFAULT_BURST 16
+#endif
+
+#ifndef TSRN10_RX_DEFAULT_LINE
+#define TSRN10_RX_DEFAULT_LINE 32
+#endif
+
+#define TSRN10_TX_DEFAULT_BURST 8
+
+/* Supported Rx Buffer Sizes */
+#define RNPVF_RXBUFFER_256 256 /* Used for packet split */
+#define RNPVF_RXBUFFER_2K 2048
+#define RNPVF_RXBUFFER_1536 1536
+#define RNPVF_RXBUFFER_3K 3072
+#define RNPVF_RXBUFFER_4K 4096
+#define RNPVF_RXBUFFER_8K 8192
+#define RNPVF_RXBUFFER_10K 10240
+
+#define RNPVF_RX_HDR_SIZE RNPVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define RNPVF_TX_FLAGS_CSUM ((u32)(1))
+#define RNPVF_TX_FLAGS_VLAN ((u32)(1 << 1))
+#define RNPVF_TX_FLAGS_TSO ((u32)(1 << 2))
+#define RNPVF_TX_FLAGS_IPV4 ((u32)(1 << 3))
+#define RNPVF_TX_FLAGS_FCOE ((u32)(1 << 4))
+#define RNPVF_TX_FLAGS_FSO ((u32)(1 << 5))
+#define RNPVF_TX_FLAGS_VLAN_MASK 0xffff0000
+#define RNPVF_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
+#define RNPVF_TX_FLAGS_VLAN_SHIFT 16
+
+#define RNPVF_GSO_PARTIAL_FEATURES                                             \
+	(NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |     \
+	 NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+struct rnpgbevf_ring_container {
+	struct rnpgbevf_ring *ring; /* pointer to linked list of rings */
+	unsigned long next_update; /* jiffies value of last update */
+	unsigned int total_bytes; /* total bytes processed this int */
+	unsigned int total_packets; /* total packets processed this int */
+	unsigned int total_packets_old;
+	u16 count; /* total number of rings in vector */
+	u16 itr; /* current ITR setting for ring */
+	u16 add_itr;
+	int update_count;
+};
+
+/* iterator for handling rings in ring container */
+#define rnpgbevf_for_each_ring(pos, head)                                      \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct rnpgbevf_q_vector {
+	int old_rx_count;
+	int new_rx_count;
+	int large_times;
+	int small_times;
+	int too_small_times;
+	int middle_time;
+	struct rnpgbevf_adapter *adapter;
+	u16 v_idx;
+	/* index of this q_vector within the array; also used to find the
+	 * bit in EICR and friends that represents the vector for these
+	 * rings
+	 */
+	struct rnpgbevf_ring_container rx, tx;
+
+	struct napi_struct napi;
+	cpumask_t affinity_mask;
+	int numa_node;
+	u16 itr_rx;
+	u16 itr_tx;
+	struct rcu_head rcu; /* to avoid race with update stats on free */
+	u32 vector_flags;
+#define RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK ((u32)(1 << 0))
+#define RNPVF_QVECTOR_FLAG_ITR_FEATURE ((u32)(1 << 1))
+#define RNPVF_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS ((u32)(1 << 2))
+
+	int irq_check_usecs;
+	struct hrtimer irq_miss_check_timer;
+
+	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct rnpgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+/* rnp_test_staterr - tests bits in Rx descriptor status and error fields */
+static inline __le16 rnpgbevf_test_staterr(union rnp_rx_desc *rx_desc,
+					   const u16 stat_err_bits)
+{
+	return rx_desc->wb.cmd & cpu_to_le16(stat_err_bits);
+}
+
+static inline __le16 rnpgbevf_get_stat(union rnp_rx_desc *rx_desc,
+				       const u16 stat_mask)
+{
+	return rx_desc->wb.cmd & cpu_to_le16(stat_mask);
+}
+
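+/* number of free descriptors in the ring; one slot is always kept empty so
+ * next_to_use can never catch up with next_to_clean
+ */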
+static inline u16 rnpgbevf_desc_unused(struct rnpgbevf_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+/*
+ * Microsecond values for various ITR rates, shifted by 2 to fit the ITR
+ * register; the lowest 3 bits are reserved as 0.
+ */
+#define RNPVF_MIN_RSC_ITR 24
+#define RNPVF_100K_ITR 40
+#define RNPVF_20K_ITR 200
+#define RNPVF_10K_ITR 400
+#define RNPVF_8K_ITR 500
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways.  The lowest value
+ * supported by all of the rnp hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr)                                        \
+	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
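+/* e.g. EITR_INTS_PER_SEC_TO_REG(6000) = 1000000000 / (6000 * 256) = 651;
+ * a rate of 0 falls back to the hardware minimum of 8
+ */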
+
+#define RNPVF_DESC_UNUSED(R)                                                   \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) +          \
+	 (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define RNPVF_RX_DESC(R, i) (&(((union rnp_rx_desc *)((R)->desc))[i]))
+#define RNPVF_TX_DESC(R, i) (&(((struct rnp_tx_desc *)((R)->desc))[i]))
+#define RNPVF_TX_CTXTDESC(R, i) (&(((struct rnp_tx_ctx_desc *)((R)->desc))[i]))
+
+#define RNPVF_N10_MAX_JUMBO_FRAME_SIZE 9590 /* Maximum Supported Size 9.5KB */
+#define RNPVF_N500_MAX_JUMBO_FRAME_SIZE 9722 /* Maximum Supported Size 9.5KB */
+#define RNPVF_MIN_MTU 68
+
+#define MAX_MSIX_VECTORS 4
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+enum phy_type {
+	PHY_TYPE_NONE = 0,
+	PHY_TYPE_1G_BASE_KX,
+	PHY_TYPE_RGMII,
+	PHY_TYPE_10G_BASE_KR,
+	PHY_TYPE_25G_BASE_KR,
+	PHY_TYPE_40G_BASE_KR4,
+};
+
+struct rnpgbevf_hw {
+	void *back;
+	u8 __iomem *hw_addr;
+	u8 __iomem *hw_addr_bar0;
+	u8 __iomem *ring_msix_base;
+	u8 vfnum;
+#define VF_NUM_MASK 0x3f
+	struct pci_dev *pdev;
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	enum rnp_board_type board_type;
+	u32 dma_version;
+	u16 min_length;
+	u16 max_length;
+	u16 queue_ring_base;
+	u32 tx_items_count;
+	u32 rx_items_count;
+	u16 mac_type;
+	u16 phy_type;
+	int mtu;
+	u16 link;
+	u16 speed;
+	struct rnpgbevf_hw_operations ops;
+	struct rnp_mac_info mac;
+	struct rnp_fc_info fc;
+	struct rnp_mbx_info mbx;
+	bool adapter_stopped;
+	u32 api_version;
+	int fw_version;
+	int usecstocount;
+#define PF_FEATURE_VLAN_FILTER BIT(0)
+#define PF_NCSI_EN BIT(1)
+	u32 pf_feature;
+	int mode;
+#define RNPVF_NET_FEATURE_SG ((u32)(1 << 0))
+#define RNPVF_NET_FEATURE_TX_CHECKSUM ((u32)(1 << 1))
+#define RNPVF_NET_FEATURE_RX_CHECKSUM ((u32)(1 << 2))
+#define RNPVF_NET_FEATURE_TSO ((u32)(1 << 3))
+#define RNPVF_NET_FEATURE_TX_UDP_TUNNEL (1 << 4)
+#define RNPVF_NET_FEATURE_VLAN_FILTER (1 << 5)
+#define RNPVF_NET_FEATURE_VLAN_OFFLOAD (1 << 6)
+#define RNPVF_NET_FEATURE_RX_NTUPLE_FILTER (1 << 7)
+#define RNPVF_NET_FEATURE_TCAM (1 << 8)
+#define RNPVF_NET_FEATURE_RX_HASH (1 << 9)
+#define RNPVF_NET_FEATURE_RX_FCS (1 << 10)
+#define RNPVF_NET_FEATURE_HW_TC (1 << 11)
+#define RNPVF_NET_FEATURE_USO (1 << 12)
+#define RNPVF_NET_FEATURE_STAG_FILTER (1 << 13)
+#define RNPVF_NET_FEATURE_STAG_OFFLOAD (1 << 14)
+
+	u32 feature_flags;
+};
+
+#define VFNUM(mbx, num) ((num) & (mbx)->vf_num_mask)
+
+enum irq_mode_enum {
+	irq_mode_msix,
+	irq_mode_msi,
+	irq_mode_legency,
+};
+
+/* board specific private data structure */
+struct rnpgbevf_adapter {
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#define GET_VFNUM_FROM_BAR0 BIT(0)
+	u16 status;
+	u16 vf_vlan;
+	struct timer_list watchdog_timer;
+	u16 bd_number;
+	struct work_struct reset_task;
+	/* used for automatic ITR adjustment; hardware support is limited */
+	unsigned long last_moder_packets[MAX_RX_QUEUES];
+	unsigned long last_moder_tx_packets;
+	unsigned long last_moder_bytes[MAX_RX_QUEUES];
+	unsigned long last_moder_jiffies;
+	int last_moder_time[MAX_RX_QUEUES];
+
+	/* Interrupt Throttle Rate */
+	u16 rx_itr_setting;
+	u16 tx_itr_setting;
+
+	u16 rx_usecs;
+	u16 rx_frames;
+	u16 tx_usecs;
+	u16 tx_frames;
+	u32 pkt_rate_low;
+	u16 rx_usecs_low;
+	u32 pkt_rate_high;
+	u16 rx_usecs_high;
+	u32 sample_interval;
+	u32 adaptive_rx_coal;
+	u32 adaptive_tx_coal;
+	u32 auto_rx_coal;
+	u32 napi_budge;
+	u32 tx_work_limit;
+	/* TX */
+	struct rnpgbevf_ring
+		*tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
+	int tx_ring_item_count;
+	int num_q_vectors;
+	int num_tx_queues;
+	u64 restart_queue;
+	u64 hw_csum_tx_good;
+	u64 lsc_int;
+	u64 hw_tso_ctxt;
+	u64 hw_tso6_ctxt;
+	u32 tx_timeout_count;
+	/* RX */
+	struct rnpgbevf_ring *rx_ring[MAX_RX_QUEUES];
+	int rx_ring_item_count;
+	int num_rx_queues;
+	u64 hw_csum_rx_error;
+	u64 hw_rx_no_dma_resources;
+	u64 hw_csum_rx_good;
+	u64 non_eop_descs;
+	u32 alloc_rx_page_failed;
+	u32 alloc_rx_buff_failed;
+	int vector_off;
+	int num_other_vectors;
+	int irq_mode;
+	struct rnpgbevf_q_vector *q_vector[MAX_MSIX_VECTORS];
+	int num_msix_vectors;
+	struct msix_entry *msix_entries;
+	u32 dma_channels; /* the real used dma ring channels */
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
+	 */
+	u32 flags;
+#define RNPVF_FLAG_IN_WATCHDOG_TASK ((u32)(1))
+#define RNPVF_FLAG_IN_NETPOLL ((u32)(1 << 1))
+#define RNPVF_FLAG_PF_SET_VLAN ((u32)(1 << 2))
+#define RNPVF_FLAG_PF_UPDATE_MTU ((u32)(1 << 3))
+#define RNPVF_FLAG_PF_UPDATE_MAC ((u32)(1 << 4))
+#define RNPVF_FLAG_PF_UPDATE_VLAN ((u32)(1 << 5))
+#define RNPVF_FLAG_PF_RESET ((u32)(1 << 6))
+#define RNPVF_FLAG_PF_RESET_REQ ((u32)(1 << 7))
+#define RNPVF_FLAG_MSI_CAPABLE ((u32)(1 << 8))
+#define RNPVF_FLAG_MSI_ENABLED ((u32)(1 << 9))
+#define RNPVF_FLAG_MSIX_CAPABLE ((u32)(1 << 10))
+#define RNPVF_FLAG_MSIX_ENABLED ((u32)(1 << 11))
+#define RNPVF_FLAG_RX_CHKSUM_ENABLED ((u32)(1 << 12))
+#define RNPVF_FLAG_RX_CVLAN_OFFLOAD ((u32)(1 << 13))
+#define RNPVF_FLAG_TX_CVLAN_OFFLOAD ((u32)(1 << 14))
+#define RNPVF_FLAG_RX_SVLAN_OFFLOAD ((u32)(1 << 15))
+#define RNPVF_FLAG_TX_SVLAN_OFFLOAD ((u32)(1 << 16))
+	u32 priv_flags;
+#define RNPVF_PRIV_FLAG_FT_PADDING BIT(0)
+#define RNPVF_PRIV_FLAG_PADDING_DEBUG BIT(1)
+#define RNPVF_PRIV_FLAG_FCS_ON BIT(2)
+#define RNPVF_PRIV_FLAG_TX_PADDING BIT(3)
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	/* structs defined in rnp_vf.h */
+	struct rnpgbevf_hw hw;
+	u16 msg_enable;
+	struct rnpgbevf_hw_stats stats;
+	struct rnpgbevf_hw_stats_own hw_stats;
+	u64 zero_base;
+	/* Interrupt Throttle Rate */
+	u32 eitr_param;
+	unsigned long state;
+	u64 tx_busy;
+	u32 link_speed;
+	bool link_up;
+	struct work_struct watchdog_task;
+	u8 port;
+	spinlock_t mbx_lock;
+	char name[60];
+#ifdef CONFIG_NET_NCSI
+	struct ncsi_dev *ncsi_dev;
+#endif
+};
+
+enum rnpgbevf_state_t {
+	__RNPVF_TESTING,
+	__RNPVF_RESETTING,
+	__RNPVF_DOWN,
+	__RNPVF_REMOVE,
+	__RNPVF_MBX_POLLING,
+	__RNPVF_LINK_DOWN
+};
+
+struct rnpgbevf_cb {
+	union { /* Union defining head/tail partner */
+		struct sk_buff *head;
+		struct sk_buff *tail;
+	};
+	dma_addr_t dma;
+	u16 append_cnt;
+	bool page_released;
+};
+#define RNPVF_CB(skb) ((struct rnpgbevf_cb *)(skb)->cb)
+
+#define RING2ADAPT(ring) netdev_priv((ring)->netdev)
+
+enum rnpgbevf_boards {
+	board_n10,
+	board_n500,
+	board_n210,
+};
+
+extern const struct rnp_mbx_operations rnpgbevf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char rnpgbevf_driver_name[];
+extern const char rnpgbevf_driver_version[];
+extern void rnpgbevf_up(struct rnpgbevf_adapter *adapter);
+extern void rnpgbevf_down(struct rnpgbevf_adapter *adapter);
+extern void rnpgbevf_reinit_locked(struct rnpgbevf_adapter *adapter);
+extern void rnpgbevf_reset(struct rnpgbevf_adapter *adapter);
+extern void rnpgbevf_set_ethtool_ops(struct net_device *netdev);
+extern int rnpgbevf_setup_rx_resources(struct rnpgbevf_adapter *adapter,
+				       struct rnpgbevf_ring *ring);
+extern int rnpgbevf_setup_tx_resources(struct rnpgbevf_adapter *adapter,
+				       struct rnpgbevf_ring *ring);
+extern void rnpgbevf_free_rx_resources(struct rnpgbevf_adapter *adapter,
+				       struct rnpgbevf_ring *ring);
+extern void rnpgbevf_free_tx_resources(struct rnpgbevf_adapter *adapter,
+				       struct rnpgbevf_ring *ring);
+extern void rnpgbevf_update_stats(struct rnpgbevf_adapter *adapter);
+extern int ethtool_ioctl(struct ifreq *ifr);
+extern void remove_mbx_irq(struct rnpgbevf_adapter *adapter);
+extern void rnpgbevf_clear_interrupt_scheme(struct rnpgbevf_adapter *adapter);
+extern int register_mbx_irq(struct rnpgbevf_adapter *adapter);
+extern int rnpgbevf_init_interrupt_scheme(struct rnpgbevf_adapter *adapter);
+extern int rnpgbevf_close(struct net_device *netdev);
+extern int rnpgbevf_open(struct net_device *netdev);
+extern void rnp_napi_add_all(struct rnpgbevf_adapter *adapter);
+extern void rnp_napi_del_all(struct rnpgbevf_adapter *adapter);
+extern int rnpgbevf_sysfs_init(struct net_device *ndev);
+extern void rnpgbevf_sysfs_exit(struct net_device *ndev);
+
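+/* the driver treats a nonzero PCI devfn as a VF that belongs to PF1 */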
+static inline int rnpgbevf_is_pf1(struct pci_dev *pdev)
+{
+	return ((pdev->devfn) ? 1 : 0);
+}
+
+static inline struct netdev_queue *txring_txq(const struct rnpgbevf_ring *ring)
+{
+	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+
+/* the VF always uses a fixed 1536-byte Rx buffer (minus NET_IP_ALIGN
+ * headroom) and order-0 pages, regardless of ring configuration
+ */
+static inline unsigned int rnpgbevf_rx_bufsz(struct rnpgbevf_ring *ring)
+{
+	return (RNPVF_RXBUFFER_1536 - NET_IP_ALIGN);
+}
+
+static inline unsigned int rnpgbevf_rx_pg_order(struct rnpgbevf_ring *ring)
+{
+	return 0;
+}
+#define rnpgbevf_rx_pg_size(_ring) (PAGE_SIZE << rnpgbevf_rx_pg_order(_ring))
+
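+/* ring occupancy helpers derived from the hardware head/tail registers */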
+static inline u32 rnpgbevf_rx_desc_used_hw(struct rnpgbevf_hw *hw,
+					   struct rnpgbevf_ring *rx_ring)
+{
+	u32 head = ring_rd32(rx_ring, RNPGBE_DMA_REG_RX_DESC_BUF_HEAD);
+	u32 tail = ring_rd32(rx_ring, RNPGBE_DMA_REG_RX_DESC_BUF_TAIL);
+	u16 count = rx_ring->count;
+
+	return ((tail >= head) ? (count - tail + head) : (head - tail));
+}
+
+static inline u32 rnpgbevf_tx_desc_unused_hw(struct rnpgbevf_hw *hw,
+					     struct rnpgbevf_ring *tx_ring)
+{
+	u32 head = ring_rd32(tx_ring, RNPGBE_DMA_REG_TX_DESC_BUF_HEAD);
+	u32 tail = ring_rd32(tx_ring, RNPGBE_DMA_REG_TX_DESC_BUF_TAIL);
+	u16 count = tx_ring->count;
+
+	return ((tail > head) ? (count - tail + head) : (head - tail));
+}
+
+#define IS_VALID_VID(vid) ((vid) >= 0 && (vid) < 4096)
+
+#endif /* _RNPGBEVF_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_ethtool.c b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_ethtool.c
new file mode 100644
index 0000000000000..4848bbbb77359
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_ethtool.c
@@ -0,0 +1,895 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/if_vlan.h>
+
+#include "rnpgbevf.h"
+
+#define RNPGBE_ALL_RAR_ENTRIES 16
+
+struct rnpgbevf_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+	int base_stat_offset;
+	int saved_reset_offset;
+};
+
+#define RNPVF_NUM_RX_QUEUES netdev->real_num_rx_queues
+#define RNPVF_NUM_TX_QUEUES netdev->real_num_tx_queues
+
+#define RNPGBE_NETDEV_STAT(_net_stat)                                          \
+	{                                                                      \
+		.stat_string = #_net_stat,                                     \
+		.sizeof_stat =                                                 \
+			sizeof_field(struct net_device_stats, _net_stat),      \
+		.stat_offset = offsetof(struct net_device_stats, _net_stat)    \
+	}
+
+static const struct rnpgbevf_stats rnp_gstrings_net_stats[] = {
+	RNPGBE_NETDEV_STAT(rx_packets),
+	RNPGBE_NETDEV_STAT(tx_packets),
+	RNPGBE_NETDEV_STAT(rx_bytes),
+	RNPGBE_NETDEV_STAT(tx_bytes),
+	RNPGBE_NETDEV_STAT(rx_errors),
+	RNPGBE_NETDEV_STAT(tx_errors),
+	RNPGBE_NETDEV_STAT(rx_dropped),
+	RNPGBE_NETDEV_STAT(tx_dropped),
+	RNPGBE_NETDEV_STAT(multicast),
+	RNPGBE_NETDEV_STAT(collisions),
+	RNPGBE_NETDEV_STAT(rx_over_errors),
+	RNPGBE_NETDEV_STAT(rx_crc_errors),
+	RNPGBE_NETDEV_STAT(rx_frame_errors),
+	RNPGBE_NETDEV_STAT(rx_fifo_errors),
+	RNPGBE_NETDEV_STAT(rx_missed_errors),
+	RNPGBE_NETDEV_STAT(tx_aborted_errors),
+	RNPGBE_NETDEV_STAT(tx_carrier_errors),
+	RNPGBE_NETDEV_STAT(tx_fifo_errors),
+	RNPGBE_NETDEV_STAT(tx_heartbeat_errors),
+};
+
+#define RNPVF_GLOBAL_STATS_LEN ARRAY_SIZE(rnp_gstrings_net_stats)
+#define RNPVF_HW_STAT(_name, _stat)                                            \
+	{                                                                      \
+		.stat_string = _name,                                          \
+		.sizeof_stat = sizeof_field(struct rnpgbevf_adapter, _stat),   \
+		.stat_offset = offsetof(struct rnpgbevf_adapter, _stat)        \
+	}
+static struct rnpgbevf_stats rnpgbevf_hwstrings_stats[] = {
+	RNPVF_HW_STAT("vlan_add_cnt", hw_stats.vlan_add_cnt),
+	RNPVF_HW_STAT("vlan_strip_cnt", hw_stats.vlan_strip_cnt),
+	RNPVF_HW_STAT("rx_csum_offload_errors", hw_stats.csum_err),
+	RNPVF_HW_STAT("rx_csum_offload_good", hw_stats.csum_good),
+};
+
+#define RNPVF_HWSTRINGS_STATS_LEN ARRAY_SIZE(rnpgbevf_hwstrings_stats)
+
+struct rnpgbevf_tx_queue_ring_stat {
+	u64 hw_head;
+	u64 hw_tail;
+	u64 sw_to_clean;
+};
+
+struct rnpgbevf_rx_queue_ring_stat {
+	u64 hw_head;
+	u64 hw_tail;
+	u64 sw_to_use;
+};
+
+#define RNPGBE_QUEUE_STATS_LEN                                                 \
+	(RNPVF_NUM_TX_QUEUES *                                                 \
+		 (sizeof(struct rnpgbevf_tx_queue_stats) / sizeof(u64) +       \
+		  sizeof(struct rnpgbevf_queue_stats) / sizeof(u64) +          \
+		  sizeof(struct rnpgbevf_tx_queue_ring_stat) / sizeof(u64)) +  \
+	 RNPVF_NUM_RX_QUEUES *                                                 \
+		 (sizeof(struct rnpgbevf_rx_queue_stats) / sizeof(u64) +       \
+		  sizeof(struct rnpgbevf_queue_stats) / sizeof(u64) +          \
+		  sizeof(struct rnpgbevf_rx_queue_ring_stat) / sizeof(u64)))
+
+#define RNPVF_STATS_LEN                                                        \
+	(RNPVF_GLOBAL_STATS_LEN + RNPGBE_QUEUE_STATS_LEN +                     \
+	 RNPVF_HWSTRINGS_STATS_LEN)
+
+static const char rnp_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Link test   (on/offline)"
+};
+
+#define RNPVF_TEST_LEN (sizeof(rnp_gstrings_test) / ETH_GSTRING_LEN)
+
+enum priv_bits {
+	padding_enable = 0,
+};
+
+static const char rnpgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define RNPVF_FT_PADDING BIT(0)
+#define RNPVF_FCS_ON BIT(1)
+	"ft_padding", "fcs"
+};
+
+#define RNPVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnpgbevf_priv_flags_strings)
+
+#define ADVERTISED_MASK_10G                                                    \
+	(SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full |             \
+	 SUPPORTED_10000baseKR_Full)
+static int rnpgbevf_get_link_ksettings(struct net_device *netdev,
+				       struct ethtool_link_ksettings *cmd)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	bool autoneg = false;
+	bool link_up;
+	u32 supported;
+	u32 advertising = 0;
+	u32 link_speed = 0;
+
+	ethtool_convert_link_mode_to_legacy_u32(&supported,
+						cmd->link_modes.supported);
+
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+	switch (link_speed) {
+	case RNPGBE_LINK_SPEED_1GB_FULL:
+		supported |= SUPPORTED_1000baseT_Full;
+		supported |= SUPPORTED_FIBRE;
+		advertising |= ADVERTISED_FIBRE | ADVERTISED_1000baseKX_Full;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	case RNPGBE_LINK_SPEED_10GB_FULL:
+		supported |= SUPPORTED_10000baseT_Full;
+		supported |= SUPPORTED_FIBRE;
+		advertising |= ADVERTISED_FIBRE | ADVERTISED_10000baseT_Full;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	case RNPGBE_LINK_SPEED_25GB_FULL:
+		supported |= SUPPORTED_40000baseKR4_Full;
+		supported |= SUPPORTED_FIBRE;
+		advertising |= ADVERTISED_FIBRE | ADVERTISED_40000baseKR4_Full;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	case RNPGBE_LINK_SPEED_40GB_FULL:
+		supported |= SUPPORTED_40000baseCR4_Full |
+			     SUPPORTED_40000baseSR4_Full |
+			     SUPPORTED_40000baseLR4_Full;
+		supported |= SUPPORTED_FIBRE;
+		advertising |= ADVERTISED_FIBRE;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	}
+
+	if (autoneg) {
+		supported |= SUPPORTED_Autoneg;
+		advertising |= ADVERTISED_Autoneg;
+		cmd->base.autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->base.autoneg = AUTONEG_DISABLE;
+	}
+
+	/* set pause support */
+	supported |= SUPPORTED_Pause;
+
+	switch (hw->fc.current_mode) {
+	case rnp_fc_full:
+		advertising |= ADVERTISED_Pause;
+		break;
+	case rnp_fc_rx_pause:
+		advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+		break;
+	case rnp_fc_tx_pause:
+		advertising |= ADVERTISED_Asym_Pause;
+		break;
+	default:
+		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+	}
+
+	if (link_up) {
+		switch (link_speed) {
+		case RNPGBE_LINK_SPEED_40GB_FULL:
+			cmd->base.speed = SPEED_40000;
+			break;
+		case RNPGBE_LINK_SPEED_25GB_FULL:
+			cmd->base.speed = SPEED_25000;
+			break;
+		case RNPGBE_LINK_SPEED_10GB_FULL:
+			cmd->base.speed = SPEED_10000;
+			break;
+		case RNPGBE_LINK_SPEED_1GB_FULL:
+			cmd->base.speed = SPEED_1000;
+			break;
+		case RNPGBE_LINK_SPEED_100_FULL:
+			cmd->base.speed = SPEED_100;
+			break;
+		default:
+			break;
+		}
+		cmd->base.duplex = DUPLEX_FULL;
+	} else {
+		cmd->base.speed = SPEED_UNKNOWN;
+		cmd->base.duplex = DUPLEX_UNKNOWN;
+	}
+
+	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+						supported);
+	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+						advertising);
+	return 0;
+}
+
+static void rnpgbevf_get_drvinfo(struct net_device *netdev,
+				 struct ethtool_drvinfo *drvinfo)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	strlcpy(drvinfo->driver, rnpgbevf_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, rnpgbevf_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
+	if (hw->board_type == rnp_board_n10 ||
+	    hw->board_type == rnp_board_n500) {
+		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%d.%d", ((char *)&(hw->fw_version))[3],
+			 ((char *)&(hw->fw_version))[2],
+			 ((char *)&(hw->fw_version))[1],
+			 ((char *)&(hw->fw_version))[0]);
+	} else if (hw->board_type == rnp_board_n210) {
+		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "fw %x", hw->fw_version);
+	}
+	drvinfo->n_priv_flags = RNPVF_PRIV_FLAGS_STR_LEN;
+}
+
+static void rnpgbevf_get_ringparam(struct net_device *netdev,
+				   struct ethtool_ringparam *ring,
+				   struct kernel_ethtool_ringparam __always_unused *ker,
+				   struct netlink_ext_ack __always_unused *extack)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+
+	ring->rx_max_pending = RNPVF_MAX_RXD;
+	ring->tx_max_pending = RNPVF_MAX_TXD;
+	ring->rx_pending = adapter->rx_ring_item_count;
+	ring->tx_pending = adapter->tx_ring_item_count;
+}
+
+static int rnpgbevf_set_ringparam(struct net_device *netdev,
+				  struct ethtool_ringparam *ring,
+				  struct kernel_ethtool_ringparam __always_unused *ker,
+				  struct netlink_ext_ack __always_unused *extack)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_ring *temp_ring;
+	int i, err = 0;
+	u32 new_rx_count, new_tx_count;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_tx_count =
+		clamp_t(u32, ring->tx_pending, RNPVF_MIN_TXD, RNPVF_MAX_TXD);
+	new_tx_count = ALIGN(new_tx_count, RNPGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	new_rx_count =
+		clamp_t(u32, ring->rx_pending, RNPVF_MIN_RXD, RNPVF_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, RNPGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	if ((new_tx_count == adapter->tx_ring_item_count) &&
+	    (new_rx_count == adapter->rx_ring_item_count)) {
+		/* nothing to do */
+		return 0;
+	}
+
+	while (test_and_set_bit(__RNPVF_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+
+	if (!netif_running(adapter->netdev)) {
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			adapter->tx_ring[i]->count = new_tx_count;
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i]->count = new_rx_count;
+		adapter->tx_ring_item_count = new_tx_count;
+		adapter->rx_ring_item_count = new_rx_count;
+		goto clear_reset;
+	}
+
+	/* allocate temporary buffer to store rings in */
+	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	temp_ring = vmalloc(i * sizeof(struct rnpgbevf_ring));
+
+	if (!temp_ring) {
+		err = -ENOMEM;
+		goto clear_reset;
+	}
+	memset(temp_ring, 0x00, i * sizeof(struct rnpgbevf_ring));
+
+	rnpgbevf_down(adapter);
+
+	/*
+	 * Setup new Tx resources and free the old Tx resources in that order.
+	 * We can then assign the new resources to the rings via a memcpy.
+	 * The advantage to this approach is that we are guaranteed to still
+	 * have resources even in the case of an allocation failure.
+	 */
+	if (new_tx_count != adapter->tx_ring_item_count) {
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->tx_ring[i],
+			       sizeof(struct rnpgbevf_ring));
+
+			temp_ring[i].count = new_tx_count;
+			err = rnpgbevf_setup_tx_resources(adapter,
+							  &temp_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					rnpgbevf_free_tx_resources(
+						adapter, &temp_ring[i]);
+				}
+				goto err_setup;
+			}
+		}
+
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			rnpgbevf_free_tx_resources(adapter,
+						   adapter->tx_ring[i]);
+
+			memcpy(adapter->tx_ring[i], &temp_ring[i],
+			       sizeof(struct rnpgbevf_ring));
+		}
+
+		adapter->tx_ring_item_count = new_tx_count;
+	}
+
+	/* Repeat the process for the Rx rings if needed */
+	if (new_rx_count != adapter->rx_ring_item_count) {
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->rx_ring[i],
+			       sizeof(struct rnpgbevf_ring));
+
+			temp_ring[i].count = new_rx_count;
+			err = rnpgbevf_setup_rx_resources(adapter,
+							  &temp_ring[i]);
+			if (err) {
+				while (i) {
+					i--;
+					rnpgbevf_free_rx_resources(
+						adapter, &temp_ring[i]);
+				}
+				goto err_setup;
+			}
+		}
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			rnpgbevf_free_rx_resources(adapter,
+						   adapter->rx_ring[i]);
+
+			memcpy(adapter->rx_ring[i], &temp_ring[i],
+			       sizeof(struct rnpgbevf_ring));
+		}
+
+		adapter->rx_ring_item_count = new_rx_count;
+	}
+
+err_setup:
+	rnpgbevf_up(adapter);
+	vfree(temp_ring);
+clear_reset:
+	clear_bit(__RNPVF_RESETTING, &adapter->state);
+	return err;
+}
+
+static void rnpgbevf_get_strings(struct net_device *netdev, u32 stringset,
+				 u8 *data)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	char *p = (char *)data;
+	int i;
+	struct rnpgbevf_ring *ring;
+	u16 queue_idx;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < RNPVF_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, rnp_gstrings_net_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+
+		for (i = 0; i < RNPVF_HWSTRINGS_STATS_LEN; i++) {
+			memcpy(p, rnpgbevf_hwstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+
+		BUG_ON(RNPVF_NUM_TX_QUEUES != RNPVF_NUM_RX_QUEUES);
+
+		for (i = 0; i < RNPVF_NUM_TX_QUEUES; i++) {
+			/* ====  tx ======== */
+			ring = adapter->tx_ring[i];
+			queue_idx = ring->rnpgbevf_queue_idx;
+			sprintf(p, "\n     queue%u_tx_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_bytes", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_restart", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_busy", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_done_old", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_clean_desc", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_poll_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_irq_more", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_hw_head", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_hw_tail", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_sw_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_added_vlan_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_irq_miss", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_equal_count", i);
+			p += ETH_GSTRING_LEN;
+
+			/* ====  rx ======== */
+			ring = adapter->rx_ring[i];
+			queue_idx = ring->rnpgbevf_queue_idx;
+			sprintf(p, "\n     queue%u_rx_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_bytes", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_driver_drop_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rsc", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rsc_flush", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_non_eop_descs", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_page_failed", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_buff_failed", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_page", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_csum_err", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_csum_good", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_poll_again_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_poll_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rm_vlan_packets", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_hw_head", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_hw_tail", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_sw_next_to_use", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_irq_miss", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_equal_count", i);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(data, rnpgbevf_priv_flags_strings,
+		       RNPVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+		break;
+	}
+}
+
+static int rnpgbevf_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	/* ETH_SS_TEST is not supported yet */
+	case ETH_SS_STATS:
+		return RNPVF_STATS_LEN;
+	case ETH_SS_PRIV_FLAGS:
+		return RNPVF_PRIV_FLAGS_STR_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static u32 rnpgbevf_get_priv_flags(struct net_device *netdev)
+{
+	struct rnpgbevf_adapter *adapter =
+		(struct rnpgbevf_adapter *)netdev_priv(netdev);
+	u32 priv_flags = 0;
+
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_FT_PADDING)
+		priv_flags |= RNPVF_FT_PADDING;
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_FCS_ON)
+		priv_flags |= RNPVF_FCS_ON;
+
+	return priv_flags;
+}
+
+static int rnpgbevf_get_coalesce(struct net_device *netdev,
+				 struct ethtool_coalesce *coal,
+				 struct kernel_ethtool_coalesce *kernel_coal,
+				 struct netlink_ext_ack *extack)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+
+	coal->use_adaptive_tx_coalesce = adapter->adaptive_tx_coal;
+	coal->tx_coalesce_usecs = adapter->tx_usecs;
+	coal->tx_coalesce_usecs_irq = 0;
+	coal->tx_max_coalesced_frames = adapter->tx_frames;
+	coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+
+	coal->use_adaptive_rx_coalesce = adapter->adaptive_rx_coal;
+	coal->rx_coalesce_usecs_irq = 0;
+	coal->rx_coalesce_usecs = adapter->rx_usecs;
+	coal->rx_max_coalesced_frames = adapter->rx_frames;
+	coal->rx_max_coalesced_frames_irq = adapter->napi_budge;
+
+	/* these settings are not supported */
+	coal->pkt_rate_low = 0;
+	coal->pkt_rate_high = 0;
+	coal->rx_coalesce_usecs_low = 0;
+	coal->rx_max_coalesced_frames_low = 0;
+	coal->tx_coalesce_usecs_low = 0;
+	coal->tx_max_coalesced_frames_low = 0;
+	coal->rx_coalesce_usecs_high = 0;
+	coal->rx_max_coalesced_frames_high = 0;
+	coal->tx_coalesce_usecs_high = 0;
+	coal->tx_max_coalesced_frames_high = 0;
+	coal->rate_sample_interval = 0;
+	return 0;
+}
+
+static int rnpgbevf_set_coalesce(struct net_device *netdev,
+				 struct ethtool_coalesce *ec,
+				 struct kernel_ethtool_coalesce *kernel_coal,
+				 struct netlink_ext_ack *extack)
+{
+	int reset = 0;
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	u32 value;
+	/* disabling adaptive tx/rx coalescing is not supported */
+	if (!(ec->use_adaptive_tx_coalesce) ||
+	    !(ec->use_adaptive_rx_coalesce)) {
+		return -EINVAL;
+	}
+
+	if ((ec->tx_max_coalesced_frames_irq < RNPVF_MIN_TX_WORK) ||
+	    (ec->tx_max_coalesced_frames_irq > RNPVF_MAX_TX_WORK))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->tx_max_coalesced_frames_irq, RNPVF_MIN_TX_WORK,
+			RNPVF_MAX_TX_WORK);
+	value = ALIGN(value, RNPVF_WORK_ALIGN);
+
+	if (adapter->tx_work_limit != value) {
+		reset = 1;
+		adapter->tx_work_limit = value;
+	}
+
+	if ((ec->tx_max_coalesced_frames < RNPVF_MIN_TX_FRAME) ||
+	    (ec->tx_max_coalesced_frames > RNPVF_MAX_TX_FRAME))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->tx_max_coalesced_frames, RNPVF_MIN_TX_FRAME,
+			RNPVF_MAX_TX_FRAME);
+	if (adapter->tx_frames != value) {
+		reset = 1;
+		adapter->tx_frames = value;
+	}
+
+	if ((ec->tx_coalesce_usecs < RNPVF_MIN_TX_USEC) ||
+	    (ec->tx_coalesce_usecs > RNPVF_MAX_TX_USEC))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->tx_coalesce_usecs, RNPVF_MIN_TX_USEC,
+			RNPVF_MAX_TX_USEC);
+	if (adapter->tx_usecs != value) {
+		reset = 1;
+		adapter->tx_usecs = value;
+	}
+
+	if ((ec->rx_max_coalesced_frames_irq < RNPVF_MIN_RX_WORK) ||
+	    (ec->rx_max_coalesced_frames_irq > RNPVF_MAX_RX_WORK))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->rx_max_coalesced_frames_irq, RNPVF_MIN_RX_WORK,
+			RNPVF_MAX_RX_WORK);
+	value = ALIGN(value, RNPVF_WORK_ALIGN);
+
+	if (adapter->napi_budge != value) {
+		reset = 1;
+		adapter->napi_budge = value;
+	}
+
+	if ((ec->rx_max_coalesced_frames < RNPVF_MIN_RX_FRAME) ||
+	    (ec->rx_max_coalesced_frames > RNPVF_MAX_RX_FRAME))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->rx_max_coalesced_frames, RNPVF_MIN_RX_FRAME,
+			RNPVF_MAX_RX_FRAME);
+	if (adapter->rx_frames != value) {
+		reset = 1;
+		adapter->rx_frames = value;
+	}
+
+	if ((ec->rx_coalesce_usecs < RNPVF_MIN_RX_USEC) ||
+	    (ec->rx_coalesce_usecs > RNPVF_MAX_RX_USEC))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->rx_coalesce_usecs, RNPVF_MIN_RX_USEC,
+			RNPVF_MAX_RX_USEC);
+
+	if (adapter->rx_usecs != value) {
+		reset = 1;
+		adapter->rx_usecs = value;
+	}
+	/* other setup is not supported */
+	if ((ec->pkt_rate_low) || (ec->pkt_rate_high) ||
+	    (ec->rx_coalesce_usecs_low) || (ec->rx_max_coalesced_frames_low) ||
+	    (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) ||
+	    (ec->rx_coalesce_usecs_high) ||
+	    (ec->rx_max_coalesced_frames_high) ||
+	    (ec->tx_coalesce_usecs_high) ||
+	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval) ||
+	    (ec->tx_coalesce_usecs_irq) || (ec->rx_coalesce_usecs_irq))
+		return -EINVAL;
+
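+	/* applying new coalesce settings requires rebuilding the interrupt
+	 * scheme, so bounce the interface if it is currently running
+	 */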
+	if (reset) {
+		if (netif_running(netdev))
+			rnpgbevf_close(netdev);
+		remove_mbx_irq(adapter);
+		rnpgbevf_clear_interrupt_scheme(adapter);
+		rnpgbevf_init_interrupt_scheme(adapter);
+		register_mbx_irq(adapter);
+		if (netif_running(netdev))
+			return rnpgbevf_open(netdev);
+	}
+	return 0;
+}
+
+static void rnpgbevf_get_ethtool_stats(struct net_device *netdev,
+				       struct ethtool_stats *stats, u64 *data)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &netdev->stats;
+	struct rnpgbevf_ring *ring;
+	int i = 0, j;
+	char *p = NULL;
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	rnpgbevf_update_stats(adapter);
+	net_stats->multicast += rd32(hw, RNPVF500_VEB_VFMPRC(0));
+
+	for (i = 0; i < RNPVF_GLOBAL_STATS_LEN; i++) {
+		p = (char *)net_stats + rnp_gstrings_net_stats[i].stat_offset;
+		data[i] =
+			(rnp_gstrings_net_stats[i].sizeof_stat == sizeof(u64)) ?
+				*(u64 *)p :
+				*(u32 *)p;
+	}
+	for (j = 0; j < RNPVF_HWSTRINGS_STATS_LEN; j++, i++) {
+		p = (char *)adapter + rnpgbevf_hwstrings_stats[j].stat_offset;
+		data[i] = (rnpgbevf_hwstrings_stats[j].sizeof_stat ==
+			   sizeof(u64)) ?
+				  *(u64 *)p :
+				  *(u32 *)p;
+	}
+
+	BUG_ON(RNPVF_NUM_TX_QUEUES != RNPVF_NUM_RX_QUEUES);
+
+	for (j = 0; j < RNPVF_NUM_TX_QUEUES; j++) {
+		/* ===== tx-ring == */
+		ring = adapter->tx_ring[j];
+
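+		/* a missing ring still contributes zeroed entries so data[]
+		 * stays aligned with the string table from get_strings()
+		 */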
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			/* rnpgbevf_tx_queue_ring_stat */
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			/* ===== rx-ring == */
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			continue;
+		}
+
+		data[i++] = ring->stats.packets;
+		data[i++] = ring->stats.bytes;
+
+		data[i++] = ring->tx_stats.restart_queue;
+		data[i++] = ring->tx_stats.tx_busy;
+		data[i++] = ring->tx_stats.tx_done_old;
+		data[i++] = ring->tx_stats.clean_desc;
+		data[i++] = ring->tx_stats.poll_count;
+		data[i++] = ring->tx_stats.irq_more_count;
+
+		/* rnpgbevf_tx_queue_ring_stat */
+		data[i++] = ring_rd32(ring, RNPGBE_DMA_REG_TX_DESC_BUF_HEAD);
+		data[i++] = ring_rd32(ring, RNPGBE_DMA_REG_TX_DESC_BUF_TAIL);
+		data[i++] = ring->next_to_clean;
+		data[i++] = ring->tx_stats.vlan_add;
+		data[i++] = ring->tx_stats.tx_irq_miss;
+		if (ring->tx_stats.tx_next_to_clean == -1)
+			data[i++] = ring->count;
+		else
+			data[i++] = ring->tx_stats.tx_next_to_clean;
+		data[i++] = ring->tx_stats.tx_equal_count;
+
+		/* ===== rx-ring == */
+		ring = adapter->rx_ring[j];
+
+		if (!ring) {
+			/* ===== rx-ring == */
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			continue;
+		}
+
+		data[i++] = ring->stats.packets;
+		data[i++] = ring->stats.bytes;
+		data[i++] = ring->rx_stats.driver_drop_packets;
+		data[i++] = ring->rx_stats.rsc_count;
+		data[i++] = ring->rx_stats.rsc_flush;
+		data[i++] = ring->rx_stats.non_eop_descs;
+		data[i++] = ring->rx_stats.alloc_rx_page_failed;
+		data[i++] = ring->rx_stats.alloc_rx_buff_failed;
+		data[i++] = ring->rx_stats.alloc_rx_page;
+		data[i++] = ring->rx_stats.csum_err;
+		data[i++] = ring->rx_stats.csum_good;
+		data[i++] = ring->rx_stats.poll_again_count;
+		data[i++] = ring->rx_stats.poll_count;
+		data[i++] = ring->rx_stats.vlan_remove;
+		data[i++] = ring_rd32(ring, RNPGBE_DMA_REG_RX_DESC_BUF_HEAD);
+		data[i++] = ring_rd32(ring, RNPGBE_DMA_REG_RX_DESC_BUF_TAIL);
+		data[i++] = ring->next_to_clean;
+		data[i++] = ring->rx_stats.rx_irq_miss;
+		if (ring->rx_stats.rx_next_to_clean == -1)
+			data[i++] = ring->count;
+		else
+			data[i++] = ring->rx_stats.rx_next_to_clean;
+		data[i++] = ring->rx_stats.rx_equal_count;
+	}
+}
+
+static void rnpgbevf_get_channels(struct net_device *dev,
+				  struct ethtool_channels *ch)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(dev);
+
+	/* report maximum channels */
+	ch->max_combined = min_t(int, adapter->hw.mac.max_tx_queues,
+				 adapter->hw.mac.max_rx_queues);
+
+	/* report info for other vector */
+	ch->max_other = NON_Q_VECTORS;
+	ch->other_count = NON_Q_VECTORS;
+
+	/* record RSS queues */
+	ch->combined_count = adapter->dma_channels;
+}
+
+static u32 rnpgbevf_get_msglevel(struct net_device *netdev)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->msg_enable;
+}
+
+static void rnpgbevf_get_pauseparam(struct net_device *netdev,
+				    struct ethtool_pauseparam *pause)
+{
+	pause->autoneg = 0;
+	/* pause is fixed off for the VF */
+	pause->rx_pause = 0;
+	pause->tx_pause = 0;
+}
+
+static void rnpgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+
+	adapter->msg_enable = data;
+}
+
+static const struct ethtool_ops rnpgbevf_ethtool_ops = {
+	.get_link_ksettings = rnpgbevf_get_link_ksettings,
+	.get_drvinfo = rnpgbevf_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_ringparam = rnpgbevf_get_ringparam,
+	.set_ringparam = rnpgbevf_set_ringparam,
+	.get_strings = rnpgbevf_get_strings,
+	/* the vf can only read pause status */
+	.get_pauseparam = rnpgbevf_get_pauseparam,
+	.get_msglevel = rnpgbevf_get_msglevel,
+	.set_msglevel = rnpgbevf_set_msglevel,
+	.get_sset_count = rnpgbevf_get_sset_count,
+	.get_priv_flags = rnpgbevf_get_priv_flags,
+	.get_ethtool_stats = rnpgbevf_get_ethtool_stats,
+	.get_coalesce = rnpgbevf_get_coalesce,
+	.set_coalesce = rnpgbevf_set_coalesce,
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+				     ETHTOOL_COALESCE_MAX_FRAMES,
+	.get_channels = rnpgbevf_get_channels,
+};
+
+void rnpgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &rnpgbevf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_main.c b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_main.c
new file mode 100644
index 0000000000000..4a3c8f384b893
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_main.c
@@ -0,0 +1,6287 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/hrtimer.h>
+#include <linux/skbuff.h>
+#ifdef CONFIG_NET_NCSI
+#include <net/ncsi.h>
+#endif
+
+#include "rnpgbevf.h"
+
+char rnpgbevf_driver_name[] = "rnpgbevf";
+static const char rnpgbevf_driver_string[] =
+	"Mucse(R) 1 Gigabit PCI Express Virtual Function Network Driver";
+
+#define DRV_VERSION "0.2.3"
+const char rnpgbevf_driver_version[] = DRV_VERSION;
+static const char rnpgbevf_copyright[] =
+	"Copyright (c) 2020 - 2024 Mucse Corporation.";
+
+static const struct rnpgbevf_info *rnpgbevf_info_tbl[] = {
+	[board_n500] = &rnp_n500_vf_info,
+	[board_n210] = &rnp_n210_vf_info,
+};
+
+#define N500_BOARD board_n500
+#define N210_BOARD board_n210
+
+#ifdef CONFIG_NET_NCSI
+static unsigned int be_ncsi_mc;
+module_param(be_ncsi_mc, uint, 0000);
+#endif
+
+static unsigned int fix_eth_name;
+module_param(fix_eth_name, uint, 0000);
+MODULE_PARM_DESC(fix_eth_name, "set eth adapter name to rnpvfXX");
+static const struct pci_device_id rnpgbevf_pci_tbl[] = {
+	{ PCI_DEVICE(0x8848, 0x8309), .driver_data = N500_BOARD },
+	{ PCI_DEVICE(0x8848, 0x8209), .driver_data = N210_BOARD },
+	/* required last entry */
+	{
+		0,
+	},
+};
+
+MODULE_DEVICE_TABLE(pci, rnpgbevf_pci_tbl);
+MODULE_AUTHOR("Mucse Corporation, ");
+MODULE_DESCRIPTION("Mucse(R) N500 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0000);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int pci_using_hi_dma;
+
+/* forward decls */
+static void rnpgbevf_free_all_rx_resources(struct rnpgbevf_adapter *adapter);
+
+#define RNPVF_XDP_PASS 0
+#define RNPVF_XDP_CONSUMED 1
+#define RNPVF_XDP_TX 2
+
+static void rnpgbevf_pull_tail(struct sk_buff *skb);
+#ifdef OPTM_WITH_LPAGE
+static bool rnpgbevf_alloc_mapped_page(struct rnpgbevf_ring *rx_ring,
+				       struct rnpgbevf_rx_buffer *bi,
+				       union rnp_rx_desc *rx_desc, u16 bufsz,
+				       u64 fun_id);
+static void rnpgbevf_put_rx_buffer(struct rnpgbevf_ring *rx_ring,
+				   struct rnpgbevf_rx_buffer *rx_buffer);
+#else /* OPTM_WITH_LPAGE */
+static bool rnpgbevf_alloc_mapped_page(struct rnpgbevf_ring *rx_ring,
+				       struct rnpgbevf_rx_buffer *bi);
+static void rnpgbevf_put_rx_buffer(struct rnpgbevf_ring *rx_ring,
+				   struct rnpgbevf_rx_buffer *rx_buffer,
+				   struct sk_buff *skb);
+#endif /* OPTM_WITH_LPAGE */
+
+/**
+ * rnpgbevf_set_ring_vector - map a ring to an MSI-X vector
+ * @adapter: pointer to adapter struct
+ * @rnpgbevf_queue: ring index to map the interrupt to
+ * @rnpgbevf_msix_vector: the MSI-X vector to map to the ring
+ */
+static void rnpgbevf_set_ring_vector(struct rnpgbevf_adapter *adapter,
+				     u8 rnpgbevf_queue, u8 rnpgbevf_msix_vector)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	u32 data = 0;
+
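+	/* Pack the VF number into the top byte (31:24) and the MSI-X vector
+	 * into what appear to be the Tx (15:8) and Rx (7:0) vector fields of
+	 * the ring-vector register; the exact field layout is an assumption
+	 * based on the shifts used below.
+	 */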
+	data = hw->vfnum << 24;
+	data |= (rnpgbevf_msix_vector << 8);
+	data |= (rnpgbevf_msix_vector << 0);
+	DPRINTK(IFUP, INFO,
+		"Set Ring-Vector queue:%d (reg:0x%x) <-- Rx-MSIX:%d, Tx-MSIX:%d\n",
+		rnpgbevf_queue, RING_VECTOR(rnpgbevf_queue),
+		rnpgbevf_msix_vector, rnpgbevf_msix_vector);
+
+	rnpgbevf_wr_reg(hw->ring_msix_base + RING_VECTOR(rnpgbevf_queue), data);
+}
+
+void rnpgbevf_unmap_and_free_tx_resource(struct rnpgbevf_ring *ring,
+					 struct rnpgbevf_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_single(ring->dev,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
+					 DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+	}
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
+}
+
+static void rnpgbevf_tx_timeout(struct net_device *netdev);
+
+/**
+ * rnpgbevf_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ **/
+static bool rnpgbevf_clean_tx_irq(struct rnpgbevf_q_vector *q_vector,
+				  struct rnpgbevf_ring *tx_ring)
+{
+	struct rnpgbevf_adapter *adapter = q_vector->adapter;
+	struct rnpgbevf_tx_buffer *tx_buffer;
+	struct rnp_tx_desc *tx_desc;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = adapter->tx_work_limit;
+	unsigned int i = tx_ring->next_to_clean;
+
+	if (test_bit(__RNPVF_DOWN, &adapter->state))
+		return true;
+	tx_ring->tx_stats.poll_count++;
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = RNPVF_TX_DESC(tx_ring, i);
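+	/* Bias the index downward by the ring size so the wrap point shows up
+	 * as i becoming zero, avoiding a compare against tx_ring->count in the
+	 * hot loop; the bias is removed again once the loop finishes.
+	 */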
+	i -= tx_ring->count;
+
+	do {
+		struct rnp_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		rmb();
+
+		/* if eop DD is not set pending work has not been completed */
+		if (!(eop_desc->cmd & cpu_to_le16(RNPGBE_TXD_STAT_DD)))
+			break;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
+
+		/* free the skb */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE);
+
+		/* clear tx_buffer data */
+		tx_buffer->skb = NULL;
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = RNPVF_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buffer, len, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = RNPVF_TX_DESC(tx_ring, 0);
+		}
+
+		/* issue prefetch for next Tx descriptor */
+		prefetch(tx_desc);
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->syncp);
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
+
+	netdev_tx_completed_queue(txring_txq(tx_ring), total_packets,
+				  total_bytes);
+
+	if (!(q_vector->vector_flags & RNPVF_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS)) {
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
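+		/* Only wake the queue once at least two worst-case packets'
+		 * worth of descriptors are free (DESC_NEEDED is assumed to be
+		 * the per-skb worst case), so it is not restarted just to be
+		 * stopped again immediately.
+		 */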
+		if (unlikely(total_packets &&
+			     netif_carrier_ok(tx_ring->netdev) &&
+			     (rnpgbevf_desc_unused(tx_ring) >=
+			      TX_WAKE_THRESHOLD))) {
+			/* Make sure that anybody stopping the queue after this
+			 * sees the new next_to_clean.
+			 */
+			smp_mb();
+			if (__netif_subqueue_stopped(tx_ring->netdev,
+						     tx_ring->queue_index) &&
+			    !test_bit(__RNPVF_DOWN, &adapter->state)) {
+				netif_wake_subqueue(tx_ring->netdev,
+						    tx_ring->queue_index);
+				++tx_ring->tx_stats.restart_queue;
+			}
+		}
+	}
+
+	return !!budget;
+}
+
+static inline void rnpgbevf_rx_hash(struct rnpgbevf_ring *ring,
+				    union rnp_rx_desc *rx_desc,
+				    struct sk_buff *skb)
+{
+	int rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+#define RNPVF_RSS_TYPE_MASK 0xc0
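+	/* A non-zero RSS type field means the reported hash also covered the
+	 * L4 ports, so mark it as an L4 hash; otherwise it is L3 only.
+	 */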
+	rss_type = rx_desc->wb.cmd & RNPVF_RSS_TYPE_MASK;
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash),
+		     rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
+/**
+ * rnpgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
+ * @skb: skb currently being received and modified
+ **/
+static inline void rnpgbevf_rx_checksum(struct rnpgbevf_ring *ring,
+					union rnp_rx_desc *rx_desc,
+					struct sk_buff *skb)
+{
+	bool encap_pkt = false;
+
+	skb_checksum_none_assert(skb);
+
+	/* Rx csum disabled */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* handle vxlan (tunneled) packets if the ring supports it */
+	if (!(ring->ring_flags & RNPVF_RING_NO_TUNNEL_SUPPORT)) {
+		if (rnpgbevf_get_stat(rx_desc, RNPGBE_RXD_STAT_TUNNEL_MASK) ==
+		    RNPGBE_RXD_STAT_TUNNEL_VXLAN) {
+			encap_pkt = true;
+			skb->encapsulation = 1;
+			skb->ip_summed = CHECKSUM_NONE;
+		}
+	}
+
+	/* on L3/L4 checksum error: ignore errors coming from the VEB (another VF) */
+	if (unlikely(
+		    rnpgbevf_test_staterr(rx_desc, RNPGBE_RXD_STAT_ERR_MASK))) {
+		ring->rx_stats.csum_err++;
+		return;
+	}
+	ring->rx_stats.csum_good++;
+	/* It must be a TCP or UDP packet with a valid checksum */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (encap_pkt) {
+		/* If we checked the outer header let the stack know */
+		skb->csum_level = 1;
+	}
+}
+
+static inline void rnpgbevf_update_rx_tail(struct rnpgbevf_ring *rx_ring,
+					   u32 val)
+{
+	rx_ring->next_to_use = val;
+
+	/* update next to alloc since we have filled the ring */
+	rx_ring->next_to_alloc = val;
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	rnpgbevf_wr_reg(rx_ring->tail, val);
+}
+
+#ifndef OPTM_WITH_LPAGE
+/**
+ * rnpgbevf_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void rnpgbevf_alloc_rx_buffers(struct rnpgbevf_ring *rx_ring, u16 cleaned_count)
+{
+	union rnp_rx_desc *rx_desc;
+	struct rnpgbevf_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
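+	/* The VF number is encoded in the top byte (bits 63:56) of the 64-bit
+	 * packet address written to each descriptor, presumably so the DMA
+	 * engine can attribute the buffer to this VF; only the low bits carry
+	 * the real DMA address.
+	 */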
+	u64 fun_id = ((u64)(rx_ring->vfnum) << (32 + 24));
+	u16 bufsz;
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = RNPVF_RX_DESC(rx_ring, i);
+	BUG_ON(rx_desc == NULL);
+	bi = &rx_ring->rx_buffer_info[i];
+	BUG_ON(bi == NULL);
+
+	i -= rx_ring->count;
+	bufsz = rnpgbevf_rx_bufsz(rx_ring);
+
+	do {
+		if (!rnpgbevf_alloc_mapped_page(rx_ring, bi))
+			break;
+
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE);
+
+		/*
+		 * Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->pkt_addr =
+			cpu_to_le64(bi->dma + bi->page_offset + fun_id);
+		/* clean dd */
+		rx_desc->cmd = 0;
+
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = RNPVF_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the hdr_addr for the next_to_use descriptor */
+		cleaned_count--;
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i)
+		rnpgbevf_update_rx_tail(rx_ring, i);
+}
+#endif
+
+/**
+ * rnpgbevf_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void rnpgbevf_reuse_rx_page(struct rnpgbevf_ring *rx_ring,
+				   struct rnpgbevf_rx_buffer *old_buff)
+{
+	struct rnpgbevf_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_buffer_info[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/*
+	 * Transfer page from old buffer to new buffer.
+	 * Move each member individually to avoid possible store
+	 * forwarding stalls and unnecessary copy of skb.
+	 */
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static inline bool rnpgbevf_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+static bool rnpgbevf_can_reuse_rx_page(struct rnpgbevf_rx_buffer *rx_buffer)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+	struct page *page = rx_buffer->page;
+
+#ifdef OPTM_WITH_LPAGE
+	return false;
+#endif
+	/* avoid re-using remote pages */
+	if (unlikely(rnpgbevf_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+		return false;
+#else
+
+	/*
+	 * The last offset is a bit aggressive in that we assume the
+	 * worst case of FCoE being enabled and using a 3K buffer.
+	 * However this should have minimal impact as the 1K extra is
+	 * still less than one buffer in size.
+	 */
+#define RNPVF_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE) - RNPVF_RXBUFFER_2K)
+	if (rx_buffer->page_offset > RNPVF_LAST_OFFSET)
+		return false;
+#endif
+
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
+}
+
+#if (PAGE_SIZE < 8192)
+#define RNPVF_MAX_2K_FRAME_BUILD_SKB (RNPVF_RXBUFFER_1536 - NET_IP_ALIGN)
+#define RNPVF_2K_TOO_SMALL_WITH_PADDING                                        \
+	((NET_SKB_PAD + RNPVF_RXBUFFER_1536) >                                 \
+	 SKB_WITH_OVERHEAD(RNPVF_RXBUFFER_2K))
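+/* True when NET_SKB_PAD headroom plus a 1536-byte frame no longer fits in a
+ * 2K buffer once the skb_shared_info overhead is subtracted, i.e. when the
+ * padding below has to be sized for a 3K buffer instead.
+ */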
+
+static inline int rnpgbevf_compute_pad(int rx_buf_len)
+{
+	int page_size, pad_size;
+
+	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+	return pad_size;
+}
+
+static inline int rnpgbevf_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (RNPVF_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len = RNPVF_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = RNPVF_RXBUFFER_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+	return rnpgbevf_compute_pad(rx_buf_len);
+}
+
+#define RNPVF_SKB_PAD rnpgbevf_skb_pad()
+#else /* PAGE_SIZE < 8192 */
+#define RNPVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
+/**
+ * rnpgbevf_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void rnpgbevf_clean_rx_ring(struct rnpgbevf_ring *rx_ring)
+{
+	u16 i = rx_ring->next_to_clean;
+	struct rnpgbevf_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+
+	/* Free all the Rx ring sk_buffs */
+	while (i != rx_ring->next_to_alloc) {
+		if (rx_buffer->skb) {
+			struct sk_buff *skb = rx_buffer->skb;
+
+			dev_kfree_skb(skb);
+			rx_buffer->skb = NULL;
+		}
+
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      rnpgbevf_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbevf_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     RNPVF_RX_DMA_ATTR);
+
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+		/* now this page is not used */
+		rx_buffer->page = NULL;
+		i++;
+		rx_buffer++;
+		if (i == rx_ring->count) {
+			i = 0;
+			rx_buffer = rx_ring->rx_buffer_info;
+		}
+	}
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+static inline unsigned int rnpgbevf_rx_offset(struct rnpgbevf_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? RNPVF_SKB_PAD : 0;
+}
+
+#ifdef OPTM_WITH_LPAGE
+static bool rnpgbevf_alloc_mapped_page(struct rnpgbevf_ring *rx_ring,
+				       struct rnpgbevf_rx_buffer *bi,
+				       union rnp_rx_desc *rx_desc, u16 bufsz,
+				       u64 fun_id)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	page = dev_alloc_pages(RNPVF_ALLOC_PAGE_ORDER);
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	bi->page_offset = rnpgbevf_rx_offset(rx_ring);
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, bi->page_offset, bufsz,
+				 DMA_FROM_DEVICE,
+				 RNPVF_RX_DMA_ATTR);
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, RNPVF_ALLOC_PAGE_ORDER);
+		printk(KERN_DEBUG "map failed\n");
+
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = rnpgbevf_rx_offset(rx_ring);
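+	/* Take a large batch of page references up front and track how many
+	 * the driver still owns in pagecnt_bias; handing a buffer to the stack
+	 * then only decrements the bias, and the leftover references are
+	 * dropped in one go when the page is finally released.
+	 */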
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
+	rx_ring->rx_stats.alloc_rx_page++;
+
+	/* sync the buffer for use by the device */
+	dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, bufsz,
+					 DMA_FROM_DEVICE);
+
+	/*
+	 * Refresh the desc even if buffer_addrs didn't change
+	 * because each write-back erases this info.
+	 */
+	rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id);
+
+	return true;
+}
+
+static void rnpgbevf_put_rx_buffer(struct rnpgbevf_ring *rx_ring,
+				   struct rnpgbevf_rx_buffer *rx_buffer)
+{
+	if (rnpgbevf_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		rnpgbevf_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbevf_rx_bufsz(rx_ring),
+				     DMA_FROM_DEVICE,
+				     RNPVF_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+}
+
+/**
+ * rnpgbevf_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void rnpgbevf_alloc_rx_buffers(struct rnpgbevf_ring *rx_ring, u16 cleaned_count)
+{
+	union rnp_rx_desc *rx_desc;
+	struct rnpgbevf_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+	u64 fun_id = ((u64)(rx_ring->vfnum) << (32 + 24));
+	u16 bufsz;
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = RNPVF_RX_DESC(rx_ring, i);
+
+	BUG_ON(rx_desc == NULL);
+
+	bi = &rx_ring->rx_buffer_info[i];
+
+	BUG_ON(bi == NULL);
+
+	i -= rx_ring->count;
+	bufsz = rnpgbevf_rx_bufsz(rx_ring);
+
+	do {
+		int count = 1;
+		struct page *page;
+
+		if (!rnpgbevf_alloc_mapped_page(rx_ring, bi, rx_desc, bufsz,
+						fun_id))
+			break;
+		page = bi->page;
+
+		rx_desc->cmd = 0;
+
+		rx_desc++;
+		i++;
+		bi++;
+
+		if (unlikely(!i)) {
+			rx_desc = RNPVF_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		rx_desc->cmd = 0;
+
+		cleaned_count--;
+
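+		/* With OPTM_WITH_LPAGE one high-order page is carved into
+		 * rx_page_buf_nums buffers; rx_per_buf_mem is assumed to be
+		 * the stride between buffers inside that page, and every
+		 * slice is mapped and posted as its own descriptor.
+		 */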
+		while (count < rx_ring->rx_page_buf_nums && cleaned_count) {
+			dma_addr_t dma;
+
+			bi->page_offset = rx_ring->rx_per_buf_mem * count +
+					  rnpgbevf_rx_offset(rx_ring);
+			/* map page for use */
+			dma = dma_map_page_attrs(rx_ring->dev, page,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE,
+						 RNPVF_RX_DMA_ATTR);
+
+			if (dma_mapping_error(rx_ring->dev, dma)) {
+				printk(KERN_DEBUG "map second error\n");
+				rx_ring->rx_stats.alloc_rx_page_failed++;
+				break;
+			}
+
+			bi->dma = dma;
+			bi->page = page;
+			page_ref_add(page, USHRT_MAX);
+			bi->pagecnt_bias = USHRT_MAX;
+
+			/* sync the buffer for use by the device */
+			dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+							 0, bufsz,
+							 DMA_FROM_DEVICE);
+
+			/*
+			 * Refresh the desc even if buffer_addrs didn't change
+			 * because each write-back erases this info.
+			 */
+			rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id);
+			/* clean dd */
+			rx_desc->cmd = 0;
+
+			rx_desc++;
+			bi++;
+			i++;
+			if (unlikely(!i)) {
+				rx_desc = RNPVF_RX_DESC(rx_ring, 0);
+				bi = rx_ring->rx_buffer_info;
+				i -= rx_ring->count;
+			}
+			count++;
+			/* clear the hdr_addr for the next_to_use descriptor */
+			cleaned_count--;
+		}
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i)
+		rnpgbevf_update_rx_tail(rx_ring, i);
+}
+
+#else
+static bool rnpgbevf_alloc_mapped_page(struct rnpgbevf_ring *rx_ring,
+				       struct rnpgbevf_rx_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	page = dev_alloc_pages(rnpgbevf_rx_pg_order(rx_ring));
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 rnpgbevf_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
+				 RNPVF_RX_DMA_ATTR);
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, rnpgbevf_rx_pg_order(rx_ring));
+		printk(KERN_DEBUG "map failed\n");
+
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = rnpgbevf_rx_offset(rx_ring);
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
+	rx_ring->rx_stats.alloc_rx_page++;
+
+	return true;
+}
+
+static void rnpgbevf_put_rx_buffer(struct rnpgbevf_ring *rx_ring,
+				   struct rnpgbevf_rx_buffer *rx_buffer,
+				   struct sk_buff *skb)
+{
+	if (rnpgbevf_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		rnpgbevf_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpgbevf_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     RNPVF_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+	rx_buffer->skb = NULL;
+}
+
+#endif /* OPTM_WITH_LPAGE */
+
+/* drop this packet if a checksum error is detected */
+static bool rnpgbevf_check_csum_error(struct rnpgbevf_ring *rx_ring,
+				      union rnp_rx_desc *rx_desc,
+				      unsigned int size,
+				      unsigned int *driver_drop_packets)
+{
+	bool err = false;
+
+	struct net_device *netdev = rx_ring->netdev;
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+
+	if ((netdev->features & NETIF_F_RXCSUM) &&
+	    (!(adapter->priv_flags & RNPVF_PRIV_FLAG_FCS_ON))) {
+		if (unlikely(rnpgbevf_test_staterr(rx_desc,
+						   RNPGBE_RXD_STAT_ERR_MASK))) {
+			/* push this packet to stack if in promisc mode */
+			rx_ring->rx_stats.csum_err++;
+
+			if ((!(netdev->flags & IFF_PROMISC) &&
+			     (!(netdev->features & NETIF_F_RXALL)))) {
+				if (rx_ring->ring_flags &
+				    RNPVF_RING_CHKSM_FIX) {
+					err = true;
+
+					goto skip_fix;
+				}
+				if (unlikely(rnpgbevf_test_staterr(
+						     rx_desc,
+						     RNPGBE_RXD_STAT_L4_MASK) &&
+					     (!(rx_desc->wb.rev1 &
+						RNPGBE_RX_L3_TYPE_MASK)))) {
+					rx_ring->rx_stats.csum_err--;
+					goto skip_fix;
+				}
+
+				if (unlikely(rnpgbevf_test_staterr(
+					    rx_desc,
+					    RNPGBE_RXD_STAT_SCTP_MASK))) {
+					if (size > 60) {
+						err = true;
+					} else {
+						/* hw wrongly flags sctp packets shorter than 60 bytes */
+						rx_ring->rx_stats.csum_err--;
+					}
+				} else {
+					err = true;
+				}
+			}
+		}
+	}
+
+skip_fix:
+	if (err) {
+		u32 ntc = rx_ring->next_to_clean + 1;
+		struct rnpgbevf_rx_buffer *rx_buffer;
+#if (PAGE_SIZE < 8192)
+		unsigned int truesize = rnpgbevf_rx_pg_size(rx_ring) / 2;
+#else
+		unsigned int truesize =
+			ring_uses_build_skb(rx_ring) ?
+				SKB_DATA_ALIGN(RNPVF_SKB_PAD + size) :
+				SKB_DATA_ALIGN(size);
+#endif
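+		/* Drop the errored frame: recycle or unmap its buffer, advance
+		 * next_to_clean past it, and count it as a driver drop once the
+		 * EOP descriptor has been seen.
+		 */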
+
+		if (likely(rnpgbevf_test_staterr(rx_desc, RNPGBE_RXD_STAT_EOP)))
+			*driver_drop_packets = *driver_drop_packets + 1;
+
+		/* we are reusing so sync this buffer for CPU use */
+		rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset, size,
+					      DMA_FROM_DEVICE);
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+#ifdef OPTM_WITH_LPAGE
+		rnpgbevf_put_rx_buffer(rx_ring, rx_buffer);
+#else
+		rnpgbevf_put_rx_buffer(rx_ring, rx_buffer, NULL);
+#endif
+		ntc = (ntc < rx_ring->count) ? ntc : 0;
+		rx_ring->next_to_clean = ntc;
+	}
+	return err;
+}
+
+/**
+ * rnpgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void rnpgbevf_process_skb_fields(struct rnpgbevf_ring *rx_ring,
+					union rnp_rx_desc *rx_desc,
+					struct sk_buff *skb)
+{
+	struct net_device *dev = rx_ring->netdev;
+	struct rnpgbevf_adapter *adapter = netdev_priv(dev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	rnpgbevf_rx_hash(rx_ring, rx_desc, skb);
+	rnpgbevf_rx_checksum(rx_ring, rx_desc, skb);
+
+	if (hw->pf_feature & PF_NCSI_EN) {
+		/* if pf setup vf vlan, check it */
+		if (adapter->flags & RNPVF_FLAG_PF_SET_VLAN) {
+			u16 vid_pf;
+			u8 header[ETH_ALEN + ETH_ALEN];
+			u8 *data = skb->data;
+
+			if (__vlan_get_tag(skb, &vid_pf))
+				goto skip_vf_vlan;
+
+			if (vid_pf == adapter->vf_vlan) {
+				memcpy(header, data, ETH_ALEN + ETH_ALEN);
+				memcpy(skb->data + 4, header, ETH_ALEN + ETH_ALEN);
+				skb->len -= 4;
+				skb->data += 4;
+				goto skip_vf_vlan;
+			}
+		}
+	}
+	/* remove vlan if pf set a vlan */
+	if (((dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+	     || (dev->features & NETIF_F_HW_VLAN_STAG_RX)) &&
+	    rnpgbevf_test_staterr(rx_desc, RNPGBE_RXD_STAT_VLAN_VALID) &&
+	    !(cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_IGNORE_VLAN)) {
+		u16 vid = le16_to_cpu(rx_desc->wb.vlan);
+		if (rx_ring->ring_flags & RNPVF_RING_STAGS_SUPPORT) {
+			if (rnpgbevf_test_staterr(rx_desc,
+						  RNPGBE_RXD_STAT_STAG)) {
+				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+						       vid);
+
+			} else {
+				if ((adapter->vf_vlan) &&
+				    (adapter->vf_vlan == vid))
+					goto skip_vf_vlan;
+				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+						       vid);
+			}
+		} else {
+			if ((adapter->vf_vlan) && (adapter->vf_vlan == vid))
+				goto skip_vf_vlan;
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+		}
+		rx_ring->rx_stats.vlan_remove++;
+	}
+skip_vf_vlan:
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	skb->protocol = eth_type_trans(skb, dev);
+}
+
+static void rnpgbevf_rx_skb(struct rnpgbevf_q_vector *q_vector,
+			    struct sk_buff *skb)
+{
+	struct rnpgbevf_adapter *adapter = q_vector->adapter;
+
+	if (!(adapter->flags & RNPVF_FLAG_IN_NETPOLL))
+		napi_gro_receive(&q_vector->napi, skb);
+	else
+		netif_rx(skb);
+}
+
+/**
+ * rnpgbevf_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int rnpgbevf_get_headlen(unsigned char *data,
+					 unsigned int max_len)
+{
+	union {
+		unsigned char *network;
+		/* l2 headers */
+		struct ethhdr *eth;
+		struct vlan_hdr *vlan;
+		/* l3 headers */
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	__be16 protocol;
+	u8 nexthdr = 0; /* default to not TCP */
+	u8 hlen;
+
+	/* this should never happen, but better safe than sorry */
+	if (max_len < ETH_HLEN)
+		return max_len;
+
+	/* initialize network frame pointer */
+	hdr.network = data;
+
+	/* set first protocol and move network header forward */
+	protocol = hdr.eth->h_proto;
+	hdr.network += ETH_HLEN;
+
+	/* handle any vlan tag if present */
+	if (protocol == htons(ETH_P_8021Q)) {
+		if ((hdr.network - data) > (max_len - VLAN_HLEN))
+			return max_len;
+
+		protocol = hdr.vlan->h_vlan_encapsulated_proto;
+		hdr.network += VLAN_HLEN;
+	}
+
+	/* handle L3 protocols */
+	if (protocol == htons(ETH_P_IP)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+			return max_len;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct iphdr))
+			return hdr.network - data;
+
+		/* record next protocol if header is present */
+		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+			nexthdr = hdr.ipv4->protocol;
+	} else if (protocol == htons(ETH_P_IPV6)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hlen = sizeof(struct ipv6hdr);
+	} else {
+		return hdr.network - data;
+	}
+
+	/* relocate pointer to start of L4 header */
+	hdr.network += hlen;
+
+	/* finally sort out TCP/UDP */
+	if (nexthdr == IPPROTO_TCP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+			return max_len;
+
+		/* access doff as a u8 to avoid unaligned access on ia64 */
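+		/* doff sits in the upper 4 bits and counts 32-bit words, so
+		 * masking the high nibble and shifting right by 2 yields the
+		 * TCP header length in bytes.
+		 */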
+		hlen = (hdr.network[12] & 0xF0) >> 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct tcphdr))
+			return hdr.network - data;
+
+		hdr.network += hlen;
+	} else if (nexthdr == IPPROTO_UDP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+			return max_len;
+
+		hdr.network += sizeof(struct udphdr);
+	}
+
+	/*
+	 * If everything has gone correctly hdr.network should be the
+	 * data section of the packet and will be the end of the header.
+	 * If not then it probably represents the end of the last recognized
+	 * header.
+	 */
+	if ((hdr.network - data) < max_len)
+		return hdr.network - data;
+	else
+		return max_len;
+}
+
+/**
+ * rnpgbevf_pull_tail - rnp specific version of skb_pull_tail
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an rnp specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void rnpgbevf_pull_tail(struct sk_buff *skb)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+	unsigned char *va;
+	unsigned int pull_len;
+
+	/*
+	 * it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lowmem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	/*
+	 * we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = rnpgbevf_get_headlen(va, RNPVF_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	skb_frag_off_add(frag, pull_len);
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * rnpgbevf_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right.  These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool rnpgbevf_cleanup_headers(struct rnpgbevf_ring *rx_ring,
+				     union rnp_rx_desc *rx_desc,
+				     struct sk_buff *skb)
+{
+#ifndef OPTM_WITH_LPAGE
+	/* XDP packets use error pointer so abort at this point */
+	if (IS_ERR(skb))
+		return true;
+#endif
+
+	/* place header in linear portion of buffer */
+	if (!skb_headlen(skb))
+		rnpgbevf_pull_tail(skb);
+
+	if (eth_skb_pad(skb))
+		return true;
+	return false;
+}
+
+/**
+ * rnpgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: size of data
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static void rnpgbevf_add_rx_frag(struct rnpgbevf_ring *rx_ring,
+				 struct rnpgbevf_rx_buffer *rx_buffer,
+				 struct sk_buff *skb, unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbevf_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+					SKB_DATA_ALIGN(RNPVF_SKB_PAD + size) :
+					SKB_DATA_ALIGN(size);
+#endif
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+			rx_buffer->page_offset, size, truesize);
+
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
+#ifdef OPTM_WITH_LPAGE
+static struct sk_buff *rnpgbevf_build_skb(struct rnpgbevf_ring *rx_ring,
+					  struct rnpgbevf_rx_buffer *rx_buffer,
+					  union rnp_rx_desc *rx_desc,
+					  unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(size + RNPVF_SKB_PAD);
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(va - RNPVF_SKB_PAD, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, RNPVF_SKB_PAD);
+	__skb_put(skb, size);
+
+	return skb;
+}
+
+static struct rnpgbevf_rx_buffer *
+rnpgbevf_get_rx_buffer(struct rnpgbevf_ring *rx_ring,
+		       union rnp_rx_desc *rx_desc, const unsigned int size)
+{
+	struct rnpgbevf_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+
+	rx_buf_dump("rx buf",
+		    page_address(rx_buffer->page) + rx_buffer->page_offset,
+		    rx_desc->wb.len);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, 0, size,
+				      DMA_FROM_DEVICE);
+	/* skip_sync: */
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+/**
+ * rnpgbevf_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it clears the descriptor
+ * status and returns true indicating that this is in fact a non-EOP buffer.
+ **/
+static bool rnpgbevf_is_non_eop(struct rnpgbevf_ring *rx_ring,
+				union rnp_rx_desc *rx_desc)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(RNPVF_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpgbevf_test_staterr(rx_desc, RNPGBE_RXD_STAT_EOP)))
+		return false;
+	/* place skb in next buffer to be received */
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+
+	return true;
+}
+
+static struct sk_buff *
+rnpgbevf_construct_skb(struct rnpgbevf_ring *rx_ring,
+		       struct rnpgbevf_rx_buffer *rx_buffer,
+		       union rnp_rx_desc *rx_desc, unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int headlen;
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNPVF_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	prefetchw(skb->data);
+
+	/* Determine available headroom for copy */
+	headlen = size;
+	if (headlen > RNPVF_RX_HDR_SIZE)
+		headlen = rnpgbevf_get_headlen(va, RNPVF_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+	/* update all of the pointers */
+	size -= headlen;
+
+	if (size) {
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				(va + headlen) - page_address(rx_buffer->page),
+				size, truesize);
+		rx_buffer->page_offset += truesize;
+	} else {
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+/**
+ * rnpgbevf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int rnpgbevf_clean_rx_irq(struct rnpgbevf_q_vector *q_vector,
+				 struct rnpgbevf_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int err_packets = 0;
+	unsigned int driver_drop_packets = 0;
+	struct sk_buff *skb = rx_ring->skb;
+	struct rnpgbevf_adapter *adapter = q_vector->adapter;
+	u16 cleaned_count = rnpgbevf_desc_unused(rx_ring);
+
+	while (likely(total_rx_packets < budget)) {
+		union rnp_rx_desc *rx_desc;
+		struct rnpgbevf_rx_buffer *rx_buffer;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNPVF_RX_BUFFER_WRITE) {
+			rnpgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+		rx_desc = RNPVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
+		rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc));
+		rx_debug_printk("  dd set: %s\n",
+				(rx_desc->wb.cmd & RNPGBE_RXD_STAT_DD) ? "Yes" :
+									 "No");
+
+		if (!rnpgbevf_test_staterr(rx_desc, RNPGBE_RXD_STAT_DD))
+			break;
+
+		rx_debug_printk(
+			"queue:%d  rx-desc:%d has-data len:%d next_to_clean %d\n",
+			rx_ring->rnp_queue_idx, rx_ring->next_to_clean,
+			rx_desc->wb.len, rx_ring->next_to_clean);
+
+		/* handle padding */
+		if ((adapter->priv_flags & RNPVF_PRIV_FLAG_FT_PADDING) &&
+		    (!(adapter->priv_flags & RNPVF_PRIV_FLAG_PADDING_DEBUG))) {
+			if (likely(rnpgbevf_test_staterr(
+				    rx_desc, RNPGBE_RXD_STAT_EOP))) {
+				size = le16_to_cpu(rx_desc->wb.len) -
+				       le16_to_cpu(rx_desc->wb.padding_len);
+			} else {
+				size = le16_to_cpu(rx_desc->wb.len);
+			}
+		} else {
+			/* size should not be zero */
+			size = le16_to_cpu(rx_desc->wb.len);
+		}
+
+		if (!size)
+			break;
+
+		/*
+		 * Check for checksum errors.  One packet may span multiple
+		 * descriptors; that is fine because hw sets csum_err in all
+		 * of them, but a last sctp descriptor shorter than 60 bytes
+		 * may still be flagged by mistake.
+		 */
+		if (rnpgbevf_check_csum_error(rx_ring, rx_desc, size,
+					      &driver_drop_packets)) {
+			cleaned_count++;
+			err_packets++;
+			if (err_packets + total_rx_packets > budget)
+				break;
+			continue;
+		}
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		rx_buffer = rnpgbevf_get_rx_buffer(rx_ring, rx_desc, size);
+
+		if (skb) {
+			rnpgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		} else if (ring_uses_build_skb(rx_ring)) {
+			skb = rnpgbevf_build_skb(rx_ring, rx_buffer, rx_desc,
+						 size);
+		} else {
+			skb = rnpgbevf_construct_skb(rx_ring, rx_buffer,
+						     rx_desc, size);
+		}
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
+			break;
+		}
+		rnpgbevf_put_rx_buffer(rx_ring, rx_buffer);
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (rnpgbevf_is_non_eop(rx_ring, rx_desc))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (rnpgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+			/* we should clean it since we used all info in it */
+			rx_desc->wb.cmd = 0;
+			skb = NULL;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpgbevf_process_skb_fields(rx_ring, rx_desc, skb);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+		rnpgbevf_rx_skb(q_vector, skb);
+		skb = NULL;
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	rx_ring->skb = skb;
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+
+	return total_rx_packets;
+}
+
+#else
+
+/**
+ * rnpgbevf_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool rnpgbevf_is_non_eop(struct rnpgbevf_ring *rx_ring,
+				union rnp_rx_desc *rx_desc, struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+	prefetch(RNPVF_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpgbevf_test_staterr(rx_desc, RNPGBE_RXD_STAT_EOP)))
+		return false;
+	/* place skb in next buffer to be received */
+	rx_ring->rx_buffer_info[ntc].skb = skb;
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+	rx_ring->rx_stats.non_eop_descs++;
+
+	return true;
+}
+
+static struct sk_buff *rnpgbevf_build_skb(struct rnpgbevf_ring *rx_ring,
+					  struct rnpgbevf_rx_buffer *rx_buffer,
+					  struct xdp_buff *xdp,
+					  union rnp_rx_desc *rx_desc)
+{
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	void *va = xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbevf_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize =
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+		SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(xdp->data_hard_start, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	__skb_put(skb, xdp->data_end - xdp->data);
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+	/* update buffer offset */
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+
+	return skb;
+}
+
+static void rnpgbevf_rx_buffer_flip(struct rnpgbevf_ring *rx_ring,
+				    struct rnpgbevf_rx_buffer *rx_buffer,
+				    unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbevf_rx_pg_size(rx_ring) / 2;
+
+	rx_buffer->page_offset ^= truesize;
+#else
+	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+					SKB_DATA_ALIGN(RNPVF_SKB_PAD + size) :
+					SKB_DATA_ALIGN(size);
+
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
+static struct rnpgbevf_rx_buffer *
+rnpgbevf_get_rx_buffer(struct rnpgbevf_ring *rx_ring,
+		       union rnp_rx_desc *rx_desc, struct sk_buff **skb,
+		       const unsigned int size)
+{
+	struct rnpgbevf_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+	*skb = rx_buffer->skb;
+
+	rx_buf_dump("rx buf",
+		    page_address(rx_buffer->page) + rx_buffer->page_offset,
+		    rx_desc->wb.len);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+				      rx_buffer->page_offset, size,
+				      DMA_FROM_DEVICE);
+	/* skip_sync: */
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+static struct sk_buff *
+rnpgbevf_construct_skb(struct rnpgbevf_ring *rx_ring,
+		       struct rnpgbevf_rx_buffer *rx_buffer,
+		       struct xdp_buff *xdp, union rnp_rx_desc *rx_desc)
+{
+	unsigned int size = xdp->data_end - xdp->data;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpgbevf_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize =
+		SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(xdp->data);
+#if L1_CACHE_BYTES < 128
+	prefetch(xdp->data + L1_CACHE_BYTES);
+#endif
+
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNPVF_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	prefetchw(skb->data);
+
+	if (size > RNPVF_RX_HDR_SIZE) {
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				xdp->data - page_address(rx_buffer->page), size,
+				truesize);
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+	} else {
+		memcpy(__skb_put(skb, size), xdp->data,
+		       ALIGN(size, sizeof(long)));
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+/**
+ * rnpgbevf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int rnpgbevf_clean_rx_irq(struct rnpgbevf_q_vector *q_vector,
+				 struct rnpgbevf_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int err_packets = 0;
+	unsigned int driver_drop_packets = 0;
+	struct rnpgbevf_adapter *adapter = q_vector->adapter;
+	u16 cleaned_count = rnpgbevf_desc_unused(rx_ring);
+	bool xdp_xmit = false;
+	struct xdp_buff xdp;
+
+	xdp.data = NULL;
+	xdp.data_end = NULL;
+
+	while (likely(total_rx_packets < budget)) {
+		union rnp_rx_desc *rx_desc;
+		struct rnpgbevf_rx_buffer *rx_buffer;
+		struct sk_buff *skb;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNPVF_RX_BUFFER_WRITE) {
+			rnpgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+		rx_desc = RNPVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc));
+		rx_debug_printk("  dd set: %s\n",
+				(rx_desc->wb.cmd & RNPGBE_RXD_STAT_DD) ? "Yes" :
+									 "No");
+
+		if (!rnpgbevf_test_staterr(rx_desc, RNPGBE_RXD_STAT_DD))
+			break;
+
+		rx_debug_printk(
+			"queue:%d  rx-desc:%d has-data len:%d next_to_clean %d\n",
+			rx_ring->rnpgbevf_queue_idx, rx_ring->next_to_clean,
+			rx_desc->wb.len, rx_ring->next_to_clean);
+
+		/* handle padding */
+		if ((adapter->priv_flags & RNPVF_PRIV_FLAG_FT_PADDING) &&
+		    (!(adapter->priv_flags & RNPVF_PRIV_FLAG_PADDING_DEBUG))) {
+			if (likely(rnpgbevf_test_staterr(
+				    rx_desc, RNPGBE_RXD_STAT_EOP))) {
+				size = le16_to_cpu(rx_desc->wb.len) -
+				       le16_to_cpu(rx_desc->wb.padding_len);
+			} else {
+				size = le16_to_cpu(rx_desc->wb.len);
+			}
+		} else {
+			/* size should not be zero */
+			size = le16_to_cpu(rx_desc->wb.len);
+		}
+
+		if (!size)
+			break;
+
+		/*
+		 * Check for checksum errors.  One packet may span multiple
+		 * descriptors; that is fine because hw sets csum_err in all
+		 * of them, but a last sctp descriptor shorter than 60 bytes
+		 * may still be flagged by mistake.
+		 */
+		if (rnpgbevf_check_csum_error(rx_ring, rx_desc, size,
+					      &driver_drop_packets)) {
+			cleaned_count++;
+			err_packets++;
+			if (err_packets + total_rx_packets > budget)
+				break;
+			continue;
+		}
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		rx_buffer =
+			rnpgbevf_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
+		if (!skb) {
+			xdp.data = page_address(rx_buffer->page) +
+				   rx_buffer->page_offset;
+			xdp.data_meta = xdp.data;
+			xdp.data_hard_start =
+				xdp.data - rnpgbevf_rx_offset(rx_ring);
+			xdp.data_end = xdp.data + size;
+		}
+
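+		/* An ERR_PTR-encoded skb is assumed to carry an XDP verdict
+		 * from the XDP processing path: -RNPVF_XDP_TX means the frame
+		 * was handed to XDP transmit, so flip the buffer half for
+		 * reuse instead of recycling it normally.
+		 */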
+		if (IS_ERR(skb)) {
+			if (PTR_ERR(skb) == -RNPVF_XDP_TX) {
+				xdp_xmit = true;
+				rnpgbevf_rx_buffer_flip(rx_ring, rx_buffer,
+							size);
+			} else {
+				rx_buffer->pagecnt_bias++;
+			}
+			total_rx_packets++;
+			total_rx_bytes += size;
+		} else if (skb) {
+			rnpgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		} else if (ring_uses_build_skb(rx_ring)) {
+			skb = rnpgbevf_build_skb(rx_ring, rx_buffer, &xdp,
+						 rx_desc);
+		} else {
+			skb = rnpgbevf_construct_skb(rx_ring, rx_buffer, &xdp,
+						     rx_desc);
+		}
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
+			break;
+		}
+
+		rnpgbevf_put_rx_buffer(rx_ring, rx_buffer, skb);
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (rnpgbevf_is_non_eop(rx_ring, rx_desc, skb))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (rnpgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
+			/* we should clean it since we used all info in it */
+			rx_desc->wb.cmd = 0;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpgbevf_process_skb_fields(rx_ring, rx_desc, skb);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
+		rnpgbevf_rx_skb(q_vector, skb);
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+	return total_rx_packets;
+}
+#endif
+
+/**
+ * rnpgbevf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * rnpgbevf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void rnpgbevf_configure_msix(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_q_vector *q_vector;
+	int i;
+
+	/* configure the ring-to-MSI-X vector mapping table */
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbevf_ring *ring;
+
+		q_vector = adapter->q_vector[i];
+
+		rnpgbevf_for_each_ring(ring, q_vector->rx) {
+			rnpgbevf_set_ring_vector(adapter,
+						 ring->rnpgbevf_msix_off,
+						 q_vector->v_idx);
+		}
+	}
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+static inline void
+rnpgbevf_irq_enable_queues(struct rnpgbevf_q_vector *q_vector)
+{
+	struct rnpgbevf_ring *ring;
+
+	rnpgbevf_for_each_ring(ring, q_vector->rx) {
+		rnpgbevf_wr_reg(ring->dma_int_clr, RX_INT_MASK | TX_INT_MASK);
+		/* we need this */
+		wmb();
+		ring_wr32(ring, RNPGBE_DMA_INT_TRIG,
+			  (0x3 << 16) | TX_INT_MASK | RX_INT_MASK);
+		rnpgbevf_wr_reg(ring->dma_int_mask,
+				~(RX_INT_MASK | TX_INT_MASK));
+	}
+}
+
+static inline void
+rnpgbevf_irq_disable_queues(struct rnpgbevf_q_vector *q_vector)
+{
+	struct rnpgbevf_ring *ring;
+
+	rnpgbevf_for_each_ring(ring, q_vector->tx) {
+		ring_wr32(ring, RNPGBE_DMA_INT_TRIG,
+			  (0x3 << 16) | (~TX_INT_MASK | RX_INT_MASK));
+		rnpgbevf_wr_reg(ring->dma_int_mask,
+				(RX_INT_MASK | TX_INT_MASK));
+	}
+}
+
+/**
+ * rnpgbevf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void rnpgbevf_irq_enable(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		rnpgbevf_irq_enable_queues(adapter->q_vector[i]);
+}
+
+static irqreturn_t rnpgbevf_msix_other(int irq, void *data)
+{
+	struct rnpgbevf_adapter *adapter = data;
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	dbg("\n\n !!! %s irq coming !!!\n", __func__);
+
+	/* skip the reset check while a mailbox transaction is being polled */
+	if (test_bit(__RNPVF_MBX_POLLING, &adapter->state))
+		goto NO_WORK_DONE;
+	if (!hw->mbx.ops.check_for_rst(hw, false)) {
+		if (test_bit(__RNPVF_REMOVE, &adapter->state))
+			printk(KERN_DEBUG "rnpvf is removed\n");
+	}
+NO_WORK_DONE:
+
+	return IRQ_HANDLED;
+}
+
+static void rnpgbevf_htimer_start(struct rnpgbevf_q_vector *q_vector)
+{
+	unsigned long ns = q_vector->irq_check_usecs * NSEC_PER_USEC / 2;
+
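+	/* arm the timer at half the check interval, using the same
+	 * value as the slack range
+	 */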
+	hrtimer_start_range_ns(&q_vector->irq_miss_check_timer, ns_to_ktime(ns),
+			       ns, HRTIMER_MODE_REL);
+}
+
+static void rnpgbevf_htimer_stop(struct rnpgbevf_q_vector *q_vector)
+{
+	hrtimer_cancel(&q_vector->irq_miss_check_timer);
+}
+
+static irqreturn_t rnpgbevf_intr(int irq, void *data)
+{
+	struct rnpgbevf_adapter *adapter = data;
+	struct rnpgbevf_q_vector *q_vector = adapter->q_vector[0];
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	/* handle data */
+	if (q_vector->vector_flags & RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK)
+		rnpgbevf_htimer_stop(q_vector);
+
+	/* disable interrupts on this vector while NAPI runs */
+	rnpgbevf_irq_disable_queues(q_vector);
+
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
+	dbg("\n\n !!! %s irq coming !!!\n", __func__);
+
+	/* skip the reset check while a mailbox transaction is being polled */
+	if (test_bit(__RNPVF_MBX_POLLING, &adapter->state))
+		goto WORK_DONE;
+	if (!hw->mbx.ops.check_for_rst(hw, false)) {
+		if (test_bit(__RNPVF_REMOVE, &adapter->state))
+			printk(KERN_DEBUG "rnpvf is removed\n");
+	}
+WORK_DONE:
+	return IRQ_HANDLED;
+}
+
+void rnpgbevf_write_eitr_rx(struct rnpgbevf_q_vector *q_vector)
+{
+	struct rnpgbevf_adapter *adapter = q_vector->adapter;
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	u32 new_itr_rx = q_vector->rx.itr;
+	u32 old_itr_rx = q_vector->rx.itr;
+	struct rnpgbevf_ring *ring;
+
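+	/* convert the ITR from microseconds to hardware timer units */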
+	new_itr_rx = new_itr_rx * hw->usecstocount;
+	rnpgbevf_for_each_ring(ring, q_vector->rx) {
+		ring_wr32(ring, RNPGBE_DMA_REG_RX_INT_DELAY_TIMER, new_itr_rx);
+		if (ring->ring_flags & RNPVF_RING_LOWER_ITR) {
+			if (q_vector->itr_rx == RNP_LOWEREST_ITR)
+				continue;
+			ring_wr32(ring, RNPGBE_DMA_REG_RX_INT_DELAY_PKTCNT, 1);
+			ring_wr32(ring, RNPGBE_DMA_REG_RX_INT_DELAY_TIMER,
+				  RNP_LOWEREST_ITR);
+			q_vector->itr_rx = RNP_LOWEREST_ITR;
+		} else {
+			if (new_itr_rx == q_vector->itr_rx)
+				continue;
+			ring_wr32(ring, RNPGBE_DMA_REG_RX_INT_DELAY_TIMER,
+				  new_itr_rx);
+			ring_wr32(ring, RNPGBE_DMA_REG_RX_INT_DELAY_PKTCNT,
+				  adapter->rx_frames);
+			q_vector->itr_rx = old_itr_rx;
+		}
+	}
+}
+
+static irqreturn_t rnpgbevf_msix_clean_rings(int irq, void *data)
+{
+	struct rnpgbevf_q_vector *q_vector = data;
+
+	if (q_vector->vector_flags & RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK)
+		rnpgbevf_htimer_stop(q_vector);
+	/* disable interrupts on this vector while NAPI runs */
+	rnpgbevf_irq_disable_queues(q_vector);
+
+	rnpgbevf_write_eitr_rx(q_vector);
+
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
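+/* Adapt the RX interrupt coalescing packet count to the per-poll cleanup
+ * rate: drop it to 1 when very few packets are cleaned, track the cleaned
+ * count in power-of-two steps for moderate loads, and fall back to the
+ * default for heavy loads.
+ */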
+void update_rx_count(int cleaned, struct rnpgbevf_q_vector *q_vector)
+{
+	struct rnpgbevf_adapter *adapter = q_vector->adapter;
+
+	if ((cleaned) && (cleaned != q_vector->new_rx_count)) {
+		if (cleaned < 5) {
+			q_vector->small_times = 0;
+			q_vector->large_times = 0;
+			q_vector->too_small_times++;
+			if (q_vector->too_small_times >= 2)
+				q_vector->new_rx_count = 1;
+		} else if (cleaned < 30) {
+			q_vector->too_small_times = 0;
+			q_vector->middle_time++;
+			if (cleaned < q_vector->new_rx_count) {
+				q_vector->small_times = 0;
+				q_vector->new_rx_count -=
+					(1 << (q_vector->large_times++));
+				if (q_vector->new_rx_count < 0)
+					q_vector->new_rx_count = 1;
+			} else {
+				q_vector->large_times = 0;
+
+				if (cleaned > 30) {
+					if (q_vector->new_rx_count !=
+					    (cleaned - 4)) {
+						q_vector->new_rx_count +=
+							(1
+							 << (q_vector->small_times++));
+					}
+					if (q_vector->new_rx_count >= cleaned) {
+						q_vector->new_rx_count =
+							cleaned - 4;
+						q_vector->small_times = 0;
+					}
+				} else {
+					if (q_vector->new_rx_count !=
+					    (cleaned - 1)) {
+						q_vector->new_rx_count +=
+							(1
+							 << (q_vector->small_times++));
+					}
+					if (q_vector->new_rx_count >= cleaned) {
+						q_vector->new_rx_count =
+							cleaned - 1;
+						q_vector->small_times = 0;
+					}
+				}
+			}
+		} else {
+			q_vector->too_small_times = 0;
+			q_vector->new_rx_count =
+				max_t(int, 64, adapter->rx_frames);
+			q_vector->small_times = 0;
+			q_vector->large_times = 0;
+		}
+	}
+}
+
+static void rnpgbevf_check_restart_tx(struct rnpgbevf_q_vector *q_vector,
+				      struct rnpgbevf_ring *tx_ring)
+{
+	struct rnpgbevf_adapter *adapter = q_vector->adapter;
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+	if (likely(netif_carrier_ok(tx_ring->netdev) &&
+		   (rnpgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index) &&
+		    !test_bit(__RNPVF_DOWN, &adapter->state)) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+			++tx_ring->tx_stats.restart_queue;
+		}
+	}
+}
+
+__maybe_unused static void
+rnpgbevf_update_ring_itr_rx(struct rnpgbevf_q_vector *q_vector)
+{
+	int new_val = q_vector->itr_rx;
+	int avg_wire_size = 0;
+	struct rnpgbevf_adapter *adapter = q_vector->adapter;
+	unsigned int packets;
+
+	/* For non-gigabit speeds, just fix the interrupt rate at 4000
+	 * ints/sec - ITR timer value of 120 ticks.
+	 */
+	switch (adapter->link_speed) {
+	case RNPGBE_LINK_SPEED_10_FULL:
+	case RNPGBE_LINK_SPEED_100_FULL:
+		new_val = RNP_4K_ITR;
+		goto set_itr_val;
+	default:
+		break;
+	}
+
+	packets = q_vector->rx.total_packets;
+	if (packets)
+		avg_wire_size = max_t(u32, avg_wire_size,
+				      q_vector->rx.total_bytes / packets);
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
+
+	/* Add 24 bytes to size to account for CRC, preamble, and gap */
+	avg_wire_size += 24;
+
+	/* Don't starve jumbo frames */
+	avg_wire_size = min(avg_wire_size, 3000);
+
+	/* Give a little boost to mid-size frames */
+	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
+		new_val = avg_wire_size / 3;
+	else
+		new_val = avg_wire_size / 2;
+
+	new_val = new_val / 2;
+
+	if (new_val < RNP_LOWEREST_ITR)
+		new_val = RNP_LOWEREST_ITR;
+
+set_itr_val:
+	if (q_vector->rx.itr != new_val) {
+		q_vector->rx.update_count++;
+		if (q_vector->rx.update_count >= 2) {
+			q_vector->rx.itr = new_val;
+			q_vector->rx.update_count = 0;
+		}
+	} else {
+		q_vector->rx.update_count = 0;
+	}
+
+clear_counts:
+	q_vector->rx.total_bytes = 0;
+	q_vector->rx.total_packets = 0;
+}
+
+/**
+ * rnpgbevf_poll - NAPI polling callback
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function cleans one or more rings associated with a q_vector.
+ **/
+static int rnpgbevf_poll(struct napi_struct *napi, int budget)
+{
+	struct rnpgbevf_q_vector *q_vector =
+		container_of(napi, struct rnpgbevf_q_vector, napi);
+	struct rnpgbevf_adapter *adapter = q_vector->adapter;
+	struct rnpgbevf_ring *ring;
+	int per_ring_budget, work_done = 0;
+	bool clean_complete = true;
+	int cleaned_total = 0;
+
+	rnpgbevf_for_each_ring(ring, q_vector->tx)
+		clean_complete &= !!rnpgbevf_clean_tx_irq(q_vector, ring);
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling
+	 */
+	if (q_vector->rx.count > 1)
+		per_ring_budget = max(budget / q_vector->rx.count, 1);
+	else
+		per_ring_budget = budget;
+
+	rnpgbevf_for_each_ring(ring, q_vector->rx) {
+		int cleaned = 0;
+
+		cleaned =
+			rnpgbevf_clean_rx_irq(q_vector, ring, per_ring_budget);
+
+		work_done += cleaned;
+		cleaned_total += cleaned;
+
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
+	}
+
+	/* if the adapter is going down, stop polling immediately */
+	if (test_bit(__RNPVF_DOWN, &adapter->state))
+		clean_complete = true;
+
+	if (!(q_vector->vector_flags & RNPVF_QVECTOR_FLAG_ITR_FEATURE))
+		update_rx_count(cleaned_total, q_vector);
+
+	/* If all work not completed, return budget and keep polling */
+	if (!clean_complete)
+		return budget;
+
+	/* all work done, exit the polling mode */
+	if (likely(napi_complete_done(napi, work_done))) {
+		/* try to do itr handle */
+		if (q_vector->vector_flags & RNPVF_QVECTOR_FLAG_ITR_FEATURE)
+			rnpgbevf_update_ring_itr_rx(q_vector);
+
+		if (!test_bit(__RNPVF_DOWN, &adapter->state)) {
+			rnpgbevf_irq_enable_queues(q_vector);
+			/* ensure the interrupt enable is visible before the
+			 * TX queues are restarted below
+			 */
+			smp_mb();
+			if (q_vector->vector_flags &
+			    RNPVF_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) {
+				rnpgbevf_for_each_ring(ring, q_vector->tx) {
+					rnpgbevf_check_restart_tx(q_vector,
+								  ring);
+					if (q_vector->new_rx_count !=
+					    q_vector->old_rx_count) {
+						ring_wr32(
+							ring,
+							RNPGBE_DMA_REG_RX_INT_DELAY_PKTCNT,
+							q_vector->new_rx_count);
+						q_vector->old_rx_count =
+							q_vector->new_rx_count;
+					}
+				}
+			}
+		}
+	}
+
+	if (!test_bit(__RNPVF_DOWN, &adapter->state)) {
+		if (q_vector->vector_flags & RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK)
+			rnpgbevf_htimer_start(q_vector);
+	}
+	return 0;
+}
+
+/**
+ * rnpgbevf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * rnpgbevf_request_msix_irqs requests MSI-X interrupts from the kernel for
+ * all of the adapter's queue vectors.
+ **/
+static int rnpgbevf_request_msix_irqs(struct rnpgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+	int i = 0;
+
+	DPRINTK(IFUP, INFO, "num_q_vectors:%d\n", adapter->num_q_vectors);
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbevf_q_vector *q_vector = adapter->q_vector[i];
+		struct msix_entry *entry =
+			&adapter->msix_entries[i + adapter->vector_off];
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+				 "%s-%s-%d-%d", netdev->name, "TxRx", i,
+				 q_vector->v_idx);
+		} else {
+			WARN(!(q_vector->tx.ring && q_vector->rx.ring),
+			     "%s vector%d tx rx is null, v_idx:%d\n",
+			     netdev->name, i, q_vector->v_idx);
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(entry->vector, &rnpgbevf_msix_clean_rings, 0,
+				  q_vector->name, q_vector);
+		if (err) {
+			rnpgbevf_err("%s: request_irq failed for MSI-X interrupt %d, error: %d\n",
+				     netdev->name, entry->vector, err);
+			goto free_queue_irqs;
+		}
+		irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (i) {
+		i--;
+		/* skip vectors whose IRQ was never requested */
+		if (!adapter->q_vector[i]->rx.ring &&
+		    !adapter->q_vector[i]->tx.ring)
+			continue;
+		irq_set_affinity_hint(
+			adapter->msix_entries[i + adapter->vector_off].vector,
+			NULL);
+		free_irq(adapter->msix_entries[i + adapter->vector_off].vector,
+			 adapter->q_vector[i]);
+	}
+	return err;
+}
+
+static int rnpgbevf_free_msix_irqs(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbevf_q_vector *q_vector = adapter->q_vector[i];
+		struct msix_entry *entry =
+			&adapter->msix_entries[i + adapter->vector_off];
+
+		/* free only the irqs that were actually requested */
+		if (!q_vector->rx.ring && !q_vector->tx.ring)
+			continue;
+
+		/* clear the affinity_mask in the IRQ descriptor */
+		irq_set_affinity_hint(entry->vector, NULL);
+		DPRINTK(IFDOWN, INFO, "free irq %s\n", q_vector->name);
+		free_irq(entry->vector, q_vector);
+	}
+
+	return 0;
+}
+
+/**
+ * rnpgbevf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int rnpgbevf_request_irq(struct rnpgbevf_adapter *adapter)
+{
+	int err;
+
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		err = rnpgbevf_request_msix_irqs(adapter);
+	} else if (adapter->flags & RNPVF_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		err = request_irq(adapter->pdev->irq, rnpgbevf_intr, 0,
+				  adapter->netdev->name, adapter);
+	} else {
+		err = request_irq(adapter->pdev->irq, rnpgbevf_intr,
+				  IRQF_SHARED, adapter->netdev->name, adapter);
+	}
+	if (err)
+		rnpgbevf_err("request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+static void rnpgbevf_free_irq(struct rnpgbevf_adapter *adapter)
+{
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		rnpgbevf_free_msix_irqs(adapter);
+	} else if (adapter->flags & RNPVF_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		free_irq(adapter->pdev->irq, adapter);
+	} else {
+		free_irq(adapter->pdev->irq, adapter);
+	}
+}
+
+/**
+ * rnpgbevf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void rnpgbevf_irq_disable(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		rnpgbevf_irq_disable_queues(adapter->q_vector[i]);
+		if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+			synchronize_irq(
+				adapter->msix_entries[i + adapter->vector_off]
+					.vector);
+		} else {
+			synchronize_irq(adapter->pdev->irq);
+		}
+	}
+}
+
+/**
+ * rnpgbevf_configure_tx_ring - Configure a Tx descriptor ring after reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+void rnpgbevf_configure_tx_ring(struct rnpgbevf_adapter *adapter,
+				struct rnpgbevf_ring *ring)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	/* disable queue to avoid issues while updating state */
+	if (!(ring->ring_flags & RNPVF_RING_SKIP_TX_START))
+		ring_wr32(ring, RNPGBE_DMA_TX_START, 0);
+
+	ring_wr32(ring, RNPGBE_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO,
+		  (u32)ring->dma);
+	/* dma high address is used for vfnum */
+	ring_wr32(ring, RNPGBE_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI,
+		  (u32)(((u64)ring->dma) >> 32) | (hw->vfnum << 24));
+	ring_wr32(ring, RNPGBE_DMA_REG_TX_DESC_BUF_LEN, ring->count);
+
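+	/* resync the software ring pointers with the hardware head pointer */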
+	ring->next_to_clean = ring_rd32(ring, RNPGBE_DMA_REG_TX_DESC_BUF_HEAD);
+	ring->next_to_use = ring->next_to_clean;
+	ring->tail = ring->ring_addr + RNPGBE_DMA_REG_TX_DESC_BUF_TAIL;
+	rnpgbevf_wr_reg(ring->tail, ring->next_to_use);
+
+	ring_wr32(ring, RNPGBE_DMA_REG_TX_DESC_FETCH_CTRL,
+		  (8 << 0) /* max_water_flow */
+			  | (TSRN10_TX_DEFAULT_BURST
+			     << 16)); /* max number of descriptors per read */
+
+	ring_wr32(ring, RNPGBE_DMA_REG_TX_INT_DELAY_TIMER,
+		  adapter->tx_usecs * hw->usecstocount);
+	ring_wr32(ring, RNPGBE_DMA_REG_TX_INT_DELAY_PKTCNT,
+		  adapter->tx_frames);
+
+	if (!(ring->ring_flags & RNPVF_RING_SKIP_TX_START)) {
+		/* the n500 must wait for tx_ready before enabling TX start */
+		int timeout = 0;
+		u32 status = 0;
+
+		do {
+			status = ring_rd32(ring, RNPGBE_DMA_TX_READY);
+			usleep_range(100, 200);
+			timeout++;
+			rnpgbevf_dbg("wait %d tx ready to 1\n",
+				     ring->rnpgbevf_queue_idx);
+		} while ((status != 1) && (timeout < 100));
+
+		if (timeout >= 100)
+			printk(KERN_WARNING "wait tx ready timeout\n");
+		ring_wr32(ring, RNPGBE_DMA_TX_START, 1);
+	}
+}
+
+/**
+ * rnpgbevf_configure_tx - Configure the VF Transmit Unit after reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void rnpgbevf_configure_tx(struct rnpgbevf_adapter *adapter)
+{
+	u32 i;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < (adapter->num_tx_queues); i++)
+		rnpgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+#define RNPGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+void rnpgbevf_disable_rx_queue(struct rnpgbevf_adapter *adapter,
+			       struct rnpgbevf_ring *ring)
+{
+	ring_wr32(ring, RNPGBE_DMA_RX_START, 0);
+}
+
+void rnpgbevf_enable_rx_queue(struct rnpgbevf_adapter *adapter,
+			      struct rnpgbevf_ring *ring)
+{
+	ring_wr32(ring, RNPGBE_DMA_RX_START, 1);
+}
+
+void rnpgbevf_configure_rx_ring(struct rnpgbevf_adapter *adapter,
+				struct rnpgbevf_ring *ring)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	u64 desc_phy = ring->dma;
+
+	/* disable queue to avoid issues while updating state */
+	rnpgbevf_disable_rx_queue(adapter, ring);
+
+	/* set descriptor ring base registers */
+	ring_wr32(ring, RNPGBE_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO, (u32)desc_phy);
+	/* the high DMA address bits also carry the VF number */
+	ring_wr32(ring, RNPGBE_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI,
+		  ((u32)(desc_phy >> 32)) | (hw->vfnum << 24));
+	ring_wr32(ring, RNPGBE_DMA_REG_RX_DESC_BUF_LEN, ring->count);
+
+	ring->tail = ring->ring_addr + RNPGBE_DMA_REG_RX_DESC_BUF_TAIL;
+	ring->next_to_clean = ring_rd32(ring, RNPGBE_DMA_REG_RX_DESC_BUF_HEAD);
+	ring->next_to_use = ring->next_to_clean;
+
+#define SCATER_SIZE (96)
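+	/* program the RX scatter length for boards that need it */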
+	if (ring->ring_flags & RNPVF_RING_SCATER_SETUP)
+		ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGH, SCATER_SIZE);
+
+	ring_wr32(ring, RNPGBE_DMA_REG_RX_DESC_FETCH_CTRL,
+		  (TSRN10_RX_DEFAULT_LINE << 0) /* rx desc flow */ |
+		  (TSRN10_RX_DEFAULT_BURST << 16) /* max read desc cnt */);
+
+	ring_wr32(ring, RNPGBE_DMA_REG_RX_INT_DELAY_TIMER,
+		  adapter->rx_usecs * hw->usecstocount);
+	ring_wr32(ring, RNPGBE_DMA_REG_RX_INT_DELAY_PKTCNT, adapter->rx_frames);
+	rnpgbevf_alloc_rx_buffers(ring, rnpgbevf_desc_unused(ring));
+}
+
+static void rnpgbevf_set_rx_buffer_len(struct rnpgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN * 3;
+	struct rnpgbevf_ring *rx_ring;
+	int i;
+
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		clear_bit(__RNPVF_RX_3K_BUFFER, &rx_ring->state);
+		clear_bit(__RNPVF_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+		set_bit(__RNPVF_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#ifdef OPTM_WITH_LPAGE
+		rx_ring->rx_page_buf_nums = RNPVF_PAGE_BUFFER_NUMS(rx_ring);
+		rx_ring->rx_per_buf_mem = RNPVF_RXBUFFER_2K;
+#endif
+	}
+}
+
+/**
+ * rnpgbevf_configure_rx - Configure the VF Receive Unit after reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void rnpgbevf_configure_rx(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+
+	/* set_rx_buffer_len must be called before ring initialization */
+	rnpgbevf_set_rx_buffer_len(adapter);
+
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+static int rnpgbevf_vlan_rx_add_vid(struct net_device *netdev,
+				    __always_unused __be16 proto, u16 vid)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int err = 0;
+
+	if ((vid) && (adapter->vf_vlan) && (vid != adapter->vf_vlan)) {
+		dev_err(&adapter->pdev->dev,
+			"only one VLAN per VF, or the PF has already set a VLAN\n");
+		return 0;
+	}
+
+	if ((vid) && (!adapter->vf_vlan)) {
+		spin_lock_bh(&adapter->mbx_lock);
+		set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		/* add VID to filter table */
+		err = hw->mac.ops.set_vfta(hw, vid, 0, true);
+		clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		spin_unlock_bh(&adapter->mbx_lock);
+	}
+
+	/* translate error return types so error makes sense */
+	if (err == RNPGBE_ERR_MBX)
+		return -EIO;
+
+	if (err == RNPGBE_ERR_INVALID_ARGUMENT)
+		return -EACCES;
+
+	set_bit(vid, adapter->active_vlans);
+
+	if (vid)
+		hw->ops.set_veb_vlan(hw, vid, VFNUM(mbx, hw->vfnum));
+
+#ifdef CONFIG_NET_NCSI
+	if (adapter->ncsi_dev)
+		ncsi_vlan_rx_add_vid(netdev, proto, vid);
+#endif
+
+	return err;
+}
+
+static int rnpgbevf_vlan_rx_kill_vid(struct net_device *netdev,
+				     __always_unused __be16 proto, u16 vid)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int err = -EOPNOTSUPP;
+
+	if (!test_bit(__RNPVF_DOWN, &adapter->state))
+		rnpgbevf_irq_disable(adapter);
+
+	if (vid) {
+		spin_lock_bh(&adapter->mbx_lock);
+		set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		/* remove VID from filter table */
+		err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+		clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		spin_unlock_bh(&adapter->mbx_lock);
+		hw->ops.set_veb_vlan(hw, 0, VFNUM(mbx, hw->vfnum));
+	}
+
+	clear_bit(vid, adapter->active_vlans);
+
+#ifdef CONFIG_NET_NCSI
+	if (adapter->ncsi_dev)
+		ncsi_vlan_rx_kill_vid(netdev, proto, vid);
+#endif /* CONFIG_NET_NCSI */
+
+	return 0;
+}
+
+/**
+ * rnpgbevf_vlan_strip_disable - helper to disable hw vlan stripping
+ * @adapter: driver data
+ */
+__maybe_unused static void
+rnpgbevf_vlan_strip_disable(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	hw->mac.ops.set_vlan_strip(hw, false);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+}
+
+/**
+ * rnpgbevf_vlan_strip_enable - helper to enable hw vlan stripping
+ * @adapter: driver data
+ */
+__maybe_unused static s32
+rnpgbevf_vlan_strip_enable(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	int err;
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	err = hw->mac.ops.set_vlan_strip(hw, true);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	return err;
+}
+
+static void rnpgbevf_restore_vlan(struct rnpgbevf_adapter *adapter)
+{
+	u16 vid;
+
+	rnpgbevf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) {
+		rnpgbevf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q),
+					 vid);
+	}
+}
+
+static int rnpgbevf_write_uc_addr_list(struct net_device *netdev)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	int count = 0;
+
+	if ((netdev_uc_count(netdev)) > 10) {
+		pr_err("Too many unicast filters - No Space\n");
+		return -ENOSPC;
+	}
+
+	if (!netdev_uc_empty(netdev)) {
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, netdev) {
+			spin_lock_bh(&adapter->mbx_lock);
+			set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
+			clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+			spin_unlock_bh(&adapter->mbx_lock);
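+			/* short delay to give the PF time to handle the
+			 * mailbox request
+			 */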
+			udelay(200);
+		}
+	} else {
+		/*
+		 * If the list is empty then send message to PF driver to
+		 * clear all macvlans on this VF.
+		 */
+		spin_lock_bh(&adapter->mbx_lock);
+		set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		hw->mac.ops.set_uc_addr(hw, 0, NULL);
+		clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		spin_unlock_bh(&adapter->mbx_lock);
+		udelay(200);
+	}
+
+	return count;
+}
+
+/**
+ * rnpgbevf_set_rx_mode - Multicast and unicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_mode entry point is called whenever the multicast address
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and configuring requested unicast filters.
+ **/
+static void rnpgbevf_set_rx_mode(struct net_device *netdev)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	netdev_features_t features = netdev->features;
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	/* reprogram multicast list */
+	hw->mac.ops.update_mc_addr_list(hw, netdev);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	rnpgbevf_write_uc_addr_list(netdev);
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		rnpgbevf_vlan_strip_enable(adapter);
+	else
+		rnpgbevf_vlan_strip_disable(adapter);
+
+	/* stags */
+	if ((features & NETIF_F_HW_VLAN_STAG_RX) ||
+	    (adapter->flags & RNPVF_FLAG_PF_SET_VLAN))
+		rnpgbevf_vlan_strip_enable(adapter);
+	else
+		rnpgbevf_vlan_strip_disable(adapter);
+}
+
+static void rnpgbevf_napi_enable_all(struct rnpgbevf_adapter *adapter)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_enable(&adapter->q_vector[q_idx]->napi);
+}
+
+static void rnpgbevf_napi_disable_all(struct rnpgbevf_adapter *adapter)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_disable(&adapter->q_vector[q_idx]->napi);
+}
+
+static void rnpgbevf_configure_veb(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	u32 ring;
+	u8 *mac;
+
+	if (is_valid_ether_addr(hw->mac.addr))
+		mac = hw->mac.addr;
+	else
+		mac = hw->mac.perm_addr;
+
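+	/* encode the VF number (with the 0x80 flag) into bits 15:8 of
+	 * the ring field passed to set_veb_mac
+	 */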
+	ring = adapter->rx_ring[0]->rnpgbevf_queue_idx;
+	ring |= ((0x80 | vfnum) << 8);
+
+	hw->ops.set_veb_mac(hw, mac, vfnum, ring);
+}
+
+static void rnpgbevf_configure(struct rnpgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	rnpgbevf_set_rx_mode(netdev);
+	rnpgbevf_restore_vlan(adapter);
+	rnpgbevf_configure_tx(adapter);
+	rnpgbevf_configure_rx(adapter);
+	rnpgbevf_configure_veb(adapter);
+}
+
+#define RNPGBE_MAX_RX_DESC_POLL 10
+
+static void rnpgbevf_save_reset_stats(struct rnpgbevf_adapter *adapter)
+{
+	/* Only save pre-reset stats if there are some */
+	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
+		adapter->stats.saved_reset_vfgprc +=
+			adapter->stats.vfgprc - adapter->stats.base_vfgprc;
+		adapter->stats.saved_reset_vfgptc +=
+			adapter->stats.vfgptc - adapter->stats.base_vfgptc;
+		adapter->stats.saved_reset_vfgorc +=
+			adapter->stats.vfgorc - adapter->stats.base_vfgorc;
+		adapter->stats.saved_reset_vfgotc +=
+			adapter->stats.vfgotc - adapter->stats.base_vfgotc;
+		adapter->stats.saved_reset_vfmprc +=
+			adapter->stats.vfmprc - adapter->stats.base_vfmprc;
+	}
+}
+
+static void rnpgbevf_up_complete(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	int i;
+
+	rnpgbevf_configure_msix(adapter);
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+
+	if (is_valid_ether_addr(hw->mac.addr))
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+	else
+		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	rnpgbevf_napi_enable_all(adapter);
+
+	/* clear any pending interrupts */
+	rnpgbevf_irq_enable(adapter);
+
+	/* enable transmits */
+	netif_tx_start_all_queues(adapter->netdev);
+
+	rnpgbevf_save_reset_stats(adapter);
+
+	hw->mac.get_link_status = 1;
+	mod_timer(&adapter->watchdog_timer, jiffies);
+
+	clear_bit(__RNPVF_DOWN, &adapter->state);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpgbevf_enable_rx_queue(adapter, adapter->rx_ring[i]);
+}
+
+void rnpgbevf_reinit_locked(struct rnpgbevf_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+
+	while (test_and_set_bit(__RNPVF_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+
+	rnpgbevf_down(adapter);
+
+	rnpgbevf_reset(adapter);
+
+	rnpgbevf_up(adapter);
+
+	clear_bit(__RNPVF_RESETTING, &adapter->state);
+}
+
+void rnpgbevf_up(struct rnpgbevf_adapter *adapter)
+{
+	rnpgbevf_configure(adapter);
+
+	rnpgbevf_up_complete(adapter);
+}
+
+void rnpgbevf_reset(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	if (hw->mac.ops.reset_hw(hw))
+		hw_dbg(hw, "PF still resetting\n");
+	else
+		hw->mac.ops.init_hw(hw);
+
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+		       netdev->addr_len);
+	}
+}
+
+/**
+ * rnpgbevf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void rnpgbevf_clean_tx_ring(struct rnpgbevf_adapter *adapter,
+				   struct rnpgbevf_ring *tx_ring)
+{
+	struct rnpgbevf_tx_buffer *tx_buffer_info;
+	unsigned long size;
+	u16 i;
+
+	BUG_ON(tx_ring == NULL);
+
+	/* ring already cleared, nothing to do */
+	if (!tx_ring->tx_buffer_info)
+		return;
+
+	/* Free all the Tx ring sk_buffs */
+	for (i = 0; i < tx_ring->count; i++) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		rnpgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+	}
+
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
+	size = sizeof(struct rnpgbevf_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+}
+
+/**
+ * rnpgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void rnpgbevf_clean_all_rx_rings(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpgbevf_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * rnpgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void rnpgbevf_clean_all_tx_rings(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		rnpgbevf_clean_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+void rnpgbevf_down(struct rnpgbevf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	/* signal that we are down to the interrupt handler */
+	set_bit(__RNPVF_DOWN, &adapter->state);
+	set_bit(__RNPVF_LINK_DOWN, &adapter->state);
+
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
+	usleep_range(1000, 2000);
+
+	netif_tx_stop_all_queues(netdev);
+
+	/* call carrier off first to avoid false dev_watchdog timeouts */
+	netif_carrier_off(netdev);
+
+	netif_tx_disable(netdev);
+
+	rnpgbevf_irq_disable(adapter);
+
+	rnpgbevf_napi_disable_all(adapter);
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbevf_ring *tx_ring = adapter->tx_ring[i];
+
+		if (!(tx_ring->ring_flags & RNPVF_RING_SKIP_TX_START)) {
+			int head, tail;
+			int timeout = 0;
+
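+			/* wait for the DMA engine to drain any in-flight
+			 * TX descriptors
+			 */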
+			head = ring_rd32(tx_ring,
+					 RNPGBE_DMA_REG_TX_DESC_BUF_HEAD);
+			tail = ring_rd32(tx_ring,
+					 RNPGBE_DMA_REG_TX_DESC_BUF_TAIL);
+
+			while (head != tail) {
+				usleep_range(10000, 20000);
+
+				head = ring_rd32(
+					tx_ring,
+					RNPGBE_DMA_REG_TX_DESC_BUF_HEAD);
+				tail = ring_rd32(
+					tx_ring,
+					RNPGBE_DMA_REG_TX_DESC_BUF_TAIL);
+				timeout++;
+				if (timeout >= 100) {
+					printk(KERN_DEBUG
+					       "vf wait tx done timeout\n");
+					break;
+				}
+			}
+		}
+	}
+
+	/* disable transmits in the hardware now that interrupts are off */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbevf_ring *tx_ring = adapter->tx_ring[i];
+
+		if (!(tx_ring->ring_flags & RNPVF_RING_SKIP_TX_START))
+			ring_wr32(tx_ring, RNPGBE_DMA_TX_START, 0);
+	}
+
+	netif_carrier_off(netdev);
+	rnpgbevf_clean_all_tx_rings(adapter);
+	rnpgbevf_clean_all_rx_rings(adapter);
+}
+
+static netdev_features_t rnpgbevf_fix_features(struct net_device *netdev,
+					       netdev_features_t features)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(features & NETIF_F_RXCSUM)) {
+		features &= ~NETIF_F_LRO;
+		adapter->flags &= (~RNPVF_FLAG_RX_CHKSUM_ENABLED);
+	} else {
+		adapter->flags |= RNPVF_FLAG_RX_CHKSUM_ENABLED;
+	}
+
+	/* the VF cannot change the VLAN filter setting */
+	if ((netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) !=
+	    (features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+		if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+		else
+			features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+	}
+
+	if ((netdev->features & NETIF_F_HW_VLAN_STAG_FILTER) !=
+	    (features & NETIF_F_HW_VLAN_STAG_FILTER)) {
+		if (netdev->features & NETIF_F_HW_VLAN_STAG_FILTER)
+			features |= NETIF_F_HW_VLAN_STAG_FILTER;
+		else
+			features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
+	}
+
+	if (adapter->flags & RNPVF_FLAG_PF_SET_VLAN) {
+		/* in this mode, disable TX/RX VLAN offload */
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
+			adapter->priv_flags |= RNPVF_FLAG_RX_CVLAN_OFFLOAD;
+		else
+			adapter->priv_flags &= ~RNPVF_FLAG_RX_CVLAN_OFFLOAD;
+
+		if (!(hw->pf_feature & PF_NCSI_EN))
+			features |= NETIF_F_HW_VLAN_CTAG_RX;
+		if (features & NETIF_F_HW_VLAN_CTAG_TX)
+			adapter->priv_flags |= RNPVF_FLAG_TX_CVLAN_OFFLOAD;
+		else
+			adapter->priv_flags &= ~RNPVF_FLAG_TX_CVLAN_OFFLOAD;
+
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+		if (features & NETIF_F_HW_VLAN_STAG_RX)
+			adapter->priv_flags |= RNPVF_FLAG_RX_SVLAN_OFFLOAD;
+		else
+			adapter->priv_flags &= ~RNPVF_FLAG_RX_SVLAN_OFFLOAD;
+
+		if (!(hw->pf_feature & PF_NCSI_EN))
+			features |= NETIF_F_HW_VLAN_STAG_RX;
+		if (features & NETIF_F_HW_VLAN_STAG_TX)
+			adapter->priv_flags |= RNPVF_FLAG_TX_SVLAN_OFFLOAD;
+		else
+			adapter->priv_flags &= ~RNPVF_FLAG_TX_SVLAN_OFFLOAD;
+
+		features &= ~NETIF_F_HW_VLAN_STAG_TX;
+
+	} else {
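+		/* CTAG and STAG stripping must be toggled together */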
+		if (!(features & NETIF_F_HW_VLAN_CTAG_RX)) {
+			if (hw->feature_flags & RNPVF_NET_FEATURE_STAG_OFFLOAD)
+				features &= ~NETIF_F_HW_VLAN_STAG_RX;
+		}
+
+		if (hw->feature_flags & RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+			if (!(features & NETIF_F_HW_VLAN_STAG_RX)) {
+				features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+			}
+		}
+
+		if (!(features & NETIF_F_HW_VLAN_CTAG_TX)) {
+			if (hw->feature_flags & RNPVF_NET_FEATURE_STAG_OFFLOAD)
+				features &= ~NETIF_F_HW_VLAN_STAG_TX;
+		}
+
+		if (hw->feature_flags & RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+			if (!(features & NETIF_F_HW_VLAN_STAG_TX)) {
+				features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+			}
+		}
+	}
+	return features;
+}
+
+static int rnpgbevf_set_features(struct net_device *netdev,
+				 netdev_features_t features)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	netdev_features_t changed = netdev->features ^ features;
+	bool need_reset = false;
+	int err = 0;
+
+	netdev->features = features;
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+		if (features & NETIF_F_HW_VLAN_CTAG_RX) {
+			if (!rnpgbevf_vlan_strip_enable(adapter))
+				features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+		} else {
+			rnpgbevf_vlan_strip_disable(adapter);
+		}
+	}
+
+	netdev->features = features;
+
+	if (need_reset)
+		rnpgbevf_reset(adapter);
+
+	return err;
+}
+
+/**
+ * rnpgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+__maybe_unused static void rnpgbevf_tx_timeout(struct net_device *netdev)
+{
+	/* Do the reset outside of interrupt context */
+}
+
+/**
+ * rnpgbevf_sw_init - Initialize general software structures
+ * (struct rnpgbevf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * rnpgbevf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int rnpgbevf_sw_init(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	/* PCI config space info */
+	hw->pdev = pdev;
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	hw->mbx.ops.init_params(hw);
+
+	/* initialize default flow control (pause) settings */
+	hw->fc.requested_mode = rnp_fc_none;
+	hw->fc.current_mode = rnp_fc_none;
+
+	/* the VF's other interrupt handler is not registered yet */
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		dev_info(&pdev->dev,
+			 "PF still in reset state.  Is the PF interface up?\n");
+		hw->adapter_stopped = false;
+		hw->link = false;
+		hw->speed = 0;
+		hw->usecstocount = 500;
+		return err;
+	}
+	err = hw->mac.ops.init_hw(hw);
+	if (err) {
+		pr_err("init_hw failed: %d\n", err);
+		goto out;
+	}
+	err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+	if (err)
+		dev_info(&pdev->dev, "Error reading MAC address\n");
+	else if (is_zero_ether_addr(adapter->hw.mac.addr))
+		dev_info(&pdev->dev,
+			 "MAC address not assigned by administrator.\n");
+	eth_hw_addr_set(netdev, hw->mac.addr);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		dev_info(&pdev->dev, "Assigning random MAC address\n");
+		eth_hw_addr_random(netdev);
+		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
+	}
+	/* get info from pf */
+	err = hw->mac.ops.get_queues(hw);
+	if (err) {
+		dev_info(&pdev->dev, "Failed to get queue info, using defaults\n");
+		hw->mac.max_tx_queues = MAX_TX_QUEUES;
+		hw->mac.max_rx_queues = MAX_RX_QUEUES;
+		hw->queue_ring_base = (hw->vfnum & VF_NUM_MASK) * MAX_RX_QUEUES;
+	}
+
+	dev_info(&pdev->dev, "queue_ring_base %d num %d\n", hw->queue_ring_base,
+		 hw->mac.max_tx_queues);
+	err = hw->mac.ops.get_mtu(hw);
+	if (err) {
+		dev_info(&pdev->dev, "Failed to get MTU, using default\n");
+		hw->mtu = 1500;
+	}
+	/* lock to protect mailbox accesses */
+	spin_lock_init(&adapter->mbx_lock);
+
+	/* set default ring sizes and interrupt moderation defaults */
+	adapter->tx_ring_item_count = hw->tx_items_count;
+	adapter->rx_ring_item_count = hw->rx_items_count;
+	adapter->dma_channels =
+		min_t(int, hw->mac.max_tx_queues, hw->mac.max_rx_queues);
+	DPRINTK(PROBE, INFO, "tx parameters %d, rx parameters %d\n",
+		adapter->tx_ring_item_count, adapter->rx_ring_item_count);
+
+	/* set default tx/rx soft count */
+	adapter->adaptive_rx_coal = 1;
+	adapter->adaptive_tx_coal = 1;
+	adapter->napi_budge = RNPVF_DEFAULT_RX_WORK;
+	adapter->tx_work_limit = RNPVF_DEFAULT_TX_WORK;
+	adapter->rx_usecs = RNPVF_PKT_TIMEOUT;
+	adapter->rx_frames = RNPVF_RX_PKT_POLL_BUDGET;
+	adapter->tx_usecs = RNPVF_PKT_TIMEOUT_TX;
+	adapter->tx_frames = RNPVF_TX_PKT_POLL_BUDGET;
+
+	set_bit(__RNPVF_DOWN, &adapter->state);
+	return 0;
+
+out:
+	return err;
+}
+
+static int rnpgbevf_acquire_msix_vectors(struct rnpgbevf_adapter *adapter,
+					 int vectors)
+{
+	int err = 0;
+
+	/* The more we get, the more we will assign to Tx/Rx Cleanup
+	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+	 * Right now, we simply care about how many we'll get; we'll
+	 * set them up later while requesting irq's.
+	 */
+	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+				    vectors, vectors);
+	if (err > 0) { /* success: err is the number of vectors granted */
+		vectors = err;
+		err = 0;
+	}
+	DPRINTK(PROBE, INFO, "err:%d, vectors:%d\n", err, vectors);
+	if (err < 0) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to allocate MSI-X interrupts\n");
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else {
+		/*
+		 * Adjust for only the vectors we'll use, which is minimum
+		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+		 * vectors we were allocated.
+		 */
+		adapter->num_msix_vectors = vectors;
+	}
+
+	return err;
+}
+
+/**
+ * rnpgbevf_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine.  The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features.  This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void rnpgbevf_set_num_queues(struct rnpgbevf_adapter *adapter)
+{
+	/* Start with base case */
+	adapter->num_rx_queues = adapter->dma_channels;
+	adapter->num_tx_queues = adapter->dma_channels;
+}
+
+/**
+ * rnpgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int rnpgbevf_set_interrupt_capability(struct rnpgbevf_adapter *adapter)
+{
+	int err = 0;
+	int vector, v_budget;
+	int irq_mode_back = adapter->irq_mode;
+	/*
+	 * It's easy to be greedy for MSI-X vectors, but it really
+	 * doesn't do us much good if we have a lot more vectors
+	 * than CPUs.  So let's be conservative and only ask for
+	 * (roughly) the same number of vectors as there are CPUs.
+	 * The default is to use pairs of vectors.
+	 */
+	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+	v_budget = min_t(int, v_budget, num_online_cpus());
+	v_budget += NON_Q_VECTORS;
+	v_budget = min_t(int, v_budget, MAX_MSIX_VECTORS);
+
+	if (adapter->irq_mode == irq_mode_msix) {
+		/* A failure in MSI-X entry allocation isn't fatal, but it does
+		 * mean we disable MSI-X capabilities of the adapter.
+		 */
+		adapter->msix_entries = kcalloc(
+			v_budget, sizeof(struct msix_entry), GFP_KERNEL);
+		if (!adapter->msix_entries) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		for (vector = 0; vector < v_budget; vector++)
+			adapter->msix_entries[vector].entry = vector;
+
+		err = rnpgbevf_acquire_msix_vectors(adapter, v_budget);
+		if (!err) {
+			adapter->vector_off = NON_Q_VECTORS;
+			adapter->num_q_vectors =
+				adapter->num_msix_vectors - NON_Q_VECTORS;
+			DPRINTK(PROBE, INFO,
+				"adapter%d alloc vectors: cnt:%d [%d~%d] num_msix_vectors:%d\n",
+				adapter->bd_number, v_budget,
+				adapter->vector_off,
+				adapter->vector_off + v_budget - 1,
+				adapter->num_msix_vectors);
+			adapter->flags |= RNPVF_FLAG_MSIX_ENABLED;
+			goto out;
+		}
+		kfree(adapter->msix_entries);
+
+		if (adapter->flags & RNPVF_FLAG_MSI_CAPABLE) {
+			adapter->irq_mode = irq_mode_msi;
+			pr_info("failed to acquire MSI-X vectors, falling back to MSI\n");
+		}
+
+	} else {
+		pr_info("adapter not in msix mode\n");
+	}
+
+	if (adapter->irq_mode == irq_mode_msi) {
+		err = pci_enable_msi(adapter->pdev);
+		if (err) {
+			pr_info("Failed to allocate MSI interrupt, falling back to legacy, error: %d\n",
+				err);
+		} else {
+			/* msi mode use only 1 irq */
+			adapter->flags |= RNPVF_FLAG_MSI_ENABLED;
+		}
+	}
+	/* restore the original irq_mode */
+	adapter->irq_mode = irq_mode_back;
+	/* legacy and MSI modes use only one queue vector */
+	adapter->num_q_vectors = 1;
+
+out:
+	return err;
+}
+
+static void rnpgbevf_add_ring(struct rnpgbevf_ring *ring,
+			      struct rnpgbevf_ring_container *head)
+{
+	ring->next = head->ring;
+	head->ring = ring;
+	head->count++;
+}
+
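+/* hrtimer callback that checks for missed interrupts: if completed TX
+ * descriptors or ready RX descriptors are found while the vector is idle,
+ * schedule NAPI directly instead of waiting for a hardware interrupt.
+ */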
+static enum hrtimer_restart irq_miss_check(struct hrtimer *hrtimer)
+{
+	struct rnpgbevf_q_vector *q_vector;
+	struct rnpgbevf_ring *ring;
+	struct rnp_tx_desc *eop_desc;
+	struct rnpgbevf_adapter *adapter;
+	struct rnpgbevf_hw *hw;
+
+	int tx_next_to_clean;
+	int tx_next_to_use;
+
+	struct rnpgbevf_tx_buffer *tx_buffer;
+	union rnp_rx_desc *rx_desc;
+
+	q_vector = container_of(hrtimer, struct rnpgbevf_q_vector,
+				irq_miss_check_timer);
+	adapter = q_vector->adapter;
+	hw = &adapter->hw;
+
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__RNPVF_DOWN, &adapter->state) ||
+	    test_bit(__RNPVF_RESETTING, &adapter->state))
+		goto do_self_napi;
+
+	rnpgbevf_irq_disable_queues(q_vector);
+	rnpgbevf_for_each_ring(ring, q_vector->tx) {
+		tx_next_to_clean = ring->next_to_clean;
+		tx_next_to_use = ring->next_to_use;
+		if (tx_next_to_use == tx_next_to_clean)
+			continue;
+
+		tx_buffer = &ring->tx_buffer_info[tx_next_to_clean];
+		eop_desc = tx_buffer->next_to_watch;
+		if (eop_desc) {
+			if ((eop_desc->cmd & cpu_to_le16(RNPGBE_TXD_STAT_DD))) {
+				if (q_vector->new_rx_count !=
+				    q_vector->old_rx_count) {
+					ring_wr32(
+						ring,
+						RNPGBE_DMA_REG_RX_INT_DELAY_PKTCNT,
+						q_vector->new_rx_count);
+					q_vector->old_rx_count =
+						q_vector->new_rx_count;
+				}
+				napi_schedule_irqoff(&q_vector->napi);
+				goto do_self_napi;
+			}
+		}
+	}
+
+	/* check rx irq */
+	rnpgbevf_for_each_ring(ring, q_vector->rx) {
+		rx_desc = RNPVF_RX_DESC(ring, ring->next_to_clean);
+
+		if (!rx_desc)
+			continue;
+		if (rnpgbevf_test_staterr(rx_desc, RNPGBE_RXD_STAT_DD)) {
+			unsigned int size;
+
+			size = le16_to_cpu(rx_desc->wb.len) -
+			       le16_to_cpu(rx_desc->wb.padding_len);
+
+			if (size) {
+				if (q_vector->new_rx_count !=
+				    q_vector->old_rx_count) {
+					ring_wr32(
+						ring,
+						RNPGBE_DMA_REG_RX_INT_DELAY_PKTCNT,
+						q_vector->new_rx_count);
+					q_vector->old_rx_count =
+						q_vector->new_rx_count;
+				}
+				napi_schedule_irqoff(&q_vector->napi);
+			} else
+				adapter->flags |= RNPVF_FLAG_PF_RESET_REQ;
+			goto do_self_napi;
+		}
+	}
+	rnpgbevf_irq_enable_queues(q_vector);
+do_self_napi:
+	return HRTIMER_NORESTART;
+}
+
+static int rnpgbevf_alloc_q_vector(struct rnpgbevf_adapter *adapter,
+				   int eth_queue_idx, int rnpgbevf_vector,
+				   int rnpgbevf_queue, int r_count, int step)
+{
+	struct rnpgbevf_q_vector *q_vector;
+	struct rnpgbevf_ring *ring;
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	int node = NUMA_NO_NODE;
+	int cpu = -1;
+	int ring_count, size;
+	int txr_count, rxr_count, idx;
+	int rxr_idx = rnpgbevf_queue, txr_idx = rnpgbevf_queue;
+
+	DPRINTK(PROBE, INFO,
+		"eth_queue_idx:%d rnpgbevf_vector:%d(off:%d) ring:%d "
+		"ring_cnt:%d, step:%d\n",
+		eth_queue_idx, rnpgbevf_vector, adapter->vector_off,
+		rnpgbevf_queue, r_count, step);
+
+	txr_count = rxr_count = r_count;
+
+	ring_count = txr_count + rxr_count;
+	size = sizeof(struct rnpgbevf_q_vector) +
+	       (sizeof(struct rnpgbevf_ring) * ring_count);
+
+	if (cpu_online(rnpgbevf_vector)) {
+		cpu = rnpgbevf_vector;
+		node = cpu_to_node(cpu);
+	}
+
+	/* allocate q_vector and rings */
+	q_vector = kzalloc_node(size, GFP_KERNEL, node);
+	if (!q_vector)
+		q_vector = kzalloc(size, GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	/* setup affinity mask and node */
+	if (cpu != -1)
+		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+	q_vector->numa_node = node;
+
+	netif_napi_add_weight(adapter->netdev, &q_vector->napi, rnpgbevf_poll,
+			      adapter->napi_budge);
+
+	/* tie q_vector and adapter together */
+	adapter->q_vector[rnpgbevf_vector - adapter->vector_off] = q_vector;
+	q_vector->adapter = adapter;
+	q_vector->v_idx = rnpgbevf_vector;
+
+	/* initialize pointer to rings */
+	ring = q_vector->ring;
+
+	for (idx = 0; idx < txr_count; idx++) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		rnpgbevf_add_ring(ring, &q_vector->tx);
+
+		/* apply Tx specific ring traits */
+		ring->count = adapter->tx_ring_item_count;
+		ring->queue_index = eth_queue_idx + idx;
+		ring->rnpgbevf_queue_idx = txr_idx;
+
+		if (hw->board_type == rnp_board_n10) {
+			ring->ring_flags |= RNPVF_RING_SKIP_TX_START;
+			ring->ring_addr = hw->hw_addr + RNPGBE_RING_BASE_N10 +
+					  RNPGBE_RING_OFFSET(txr_idx);
+			ring->rnpgbevf_msix_off = txr_idx;
+		} else if (hw->board_type == rnp_board_n500) {
+			/* n500 not support tunnel */
+			ring->ring_flags |= RNPVF_RING_NO_TUNNEL_SUPPORT;
+			/* n500 fixed ring size change from large to small */
+			ring->ring_flags |= RNPVF_RING_SIZE_CHANGE_FIX;
+			/* n500 vf use this */
+			ring->ring_addr = hw->hw_addr + RNPGBE_RING_BASE_N500;
+			ring->ring_flags |= RNPVF_RING_VEB_MULTI_FIX;
+			ring->rnpgbevf_msix_off = 0;
+		} else if (hw->board_type == rnp_board_n210) {
+			/* n210 not support tunnel */
+			ring->ring_flags |= RNPVF_RING_NO_TUNNEL_SUPPORT;
+			/* n210 fixed ring size change from large to small */
+			ring->ring_flags |= RNPVF_RING_SIZE_CHANGE_FIX;
+			/* n210 vf use this */
+			ring->ring_addr = hw->hw_addr + RNPGBE_RING_BASE_N500;
+			ring->ring_flags |= RNPVF_RING_VEB_MULTI_FIX;
+			ring->rnpgbevf_msix_off = 0;
+		}
+		ring->dma_int_stat = ring->ring_addr + RNPGBE_DMA_INT_STAT;
+		ring->dma_int_mask = ring->dma_int_stat + 4;
+		ring->dma_int_clr = ring->dma_int_stat + 8;
+		ring->device_id = adapter->pdev->device;
+		ring->vfnum = hw->vfnum;
+		/* assign ring to adapter */
+		adapter->tx_ring[ring->queue_index] = ring;
+		dbg("adapter->tx_ringp[%d] <= %p\n", ring->queue_index, ring);
+
+		/* update count and index */
+		txr_idx += step;
+
+		DPRINTK(PROBE, INFO,
+			"vector[%d] <--RNP TxRing:%d, eth_queue:%d\n",
+			rnpgbevf_vector, ring->rnpgbevf_queue_idx,
+			ring->queue_index);
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	for (idx = 0; idx < rxr_count; idx++) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Rx values */
+		rnpgbevf_add_ring(ring, &q_vector->rx);
+
+		/* apply Rx specific ring traits */
+		ring->count = adapter->rx_ring_item_count;
+		ring->queue_index = eth_queue_idx + idx;
+		ring->rnpgbevf_queue_idx = rxr_idx;
+
+		if (hw->board_type == rnp_board_n10) {
+			ring->ring_addr = hw->hw_addr + RNPGBE_RING_BASE_N10 +
+					  RNPGBE_RING_OFFSET(rxr_idx);
+			ring->rnpgbevf_msix_off = rxr_idx;
+
+		} else if (hw->board_type == rnp_board_n500) {
+			/* n500 fixed ring size change from large to small */
+			ring->ring_flags |= RNPVF_RING_SIZE_CHANGE_FIX;
+			ring->ring_flags |= RNPVF_RING_SCATER_SETUP;
+			ring->ring_flags |= RNPVF_RING_NO_TUNNEL_SUPPORT;
+			ring->ring_flags |= RNPVF_RING_STAGS_SUPPORT;
+			ring->ring_flags |= RNPVF_RING_VEB_MULTI_FIX;
+			ring->ring_flags |= RNPVF_RING_IRQ_MISS_FIX;
+			ring->ring_flags |= RNPVF_RING_CHKSM_FIX;
+			ring->ring_addr = hw->hw_addr + RNPGBE_RING_BASE_N500;
+			ring->rnpgbevf_msix_off = 0;
+		} else if (hw->board_type == rnp_board_n210) {
+			/* n210 fixed ring size change from large to small */
+			ring->ring_flags |= RNPVF_RING_SIZE_CHANGE_FIX;
+			ring->ring_flags |= RNPVF_RING_SCATER_SETUP;
+			ring->ring_flags |= RNPVF_RING_NO_TUNNEL_SUPPORT;
+			ring->ring_flags |= RNPVF_RING_STAGS_SUPPORT;
+			ring->ring_flags |= RNPVF_RING_VEB_MULTI_FIX;
+			ring->ring_flags |= RNPVF_RING_IRQ_MISS_FIX;
+			ring->ring_flags |= RNPVF_RING_CHKSM_FIX;
+			ring->ring_addr = hw->hw_addr + RNPGBE_RING_BASE_N500;
+			ring->rnpgbevf_msix_off = 0;
+		}
+		ring->dma_int_stat = ring->ring_addr + RNPGBE_DMA_INT_STAT;
+		ring->dma_int_mask = ring->dma_int_stat + 4;
+		ring->dma_int_clr = ring->dma_int_stat + 8;
+		ring->device_id = adapter->pdev->device;
+		ring->vfnum = hw->vfnum;
+
+		/* assign ring to adapter */
+		adapter->rx_ring[ring->queue_index] = ring;
+		DPRINTK(PROBE, INFO,
+			"vector[%d] <--RNP RxRing:%d, eth_queue:%d\n",
+			rnpgbevf_vector, ring->rnpgbevf_queue_idx,
+			ring->queue_index);
+
+		/* update count and index */
+		rxr_idx += step;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	if (hw->board_type == rnp_board_n10) {
+		q_vector->vector_flags |= RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK;
+		q_vector->vector_flags |= RNPVF_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS;
+		/* initialize the irq-miss check timer */
+		q_vector->irq_check_usecs = 1000;
+		hrtimer_init(&q_vector->irq_miss_check_timer, CLOCK_MONOTONIC,
+			     HRTIMER_MODE_REL);
+		q_vector->irq_miss_check_timer.function = irq_miss_check;
+	} else if (hw->board_type == rnp_board_n500 ||
+		   hw->board_type == rnp_board_n210) {
+		q_vector->vector_flags |= RNPVF_QVECTOR_FLAG_ITR_FEATURE;
+	}
+
+	return 0;
+}
+
+static void rnpgbevf_free_q_vector(struct rnpgbevf_adapter *adapter, int v_idx)
+{
+	struct rnpgbevf_q_vector *q_vector;
+	struct rnpgbevf_ring *ring;
+
+	dbg("v_idx:%d\n", v_idx);
+
+	q_vector = adapter->q_vector[v_idx];
+
+	rnpgbevf_for_each_ring(ring, q_vector->tx)
+		adapter->tx_ring[ring->queue_index] = NULL;
+
+	rnpgbevf_for_each_ring(ring, q_vector->rx)
+		adapter->rx_ring[ring->queue_index] = NULL;
+
+	adapter->q_vector[v_idx] = NULL;
+	netif_napi_del(&q_vector->napi);
+
+	if (q_vector->vector_flags & RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK)
+		rnpgbevf_htimer_stop(q_vector);
+
+	/*
+	 * rnpgbevf_get_stats64() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
+	 */
+	kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * rnpgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int rnpgbevf_alloc_q_vectors(struct rnpgbevf_adapter *adapter)
+{
+	int vector_idx = adapter->vector_off;
+	int ring_idx = adapter->hw.queue_ring_base;
+	int ring_remaing =
+		min_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	int ring_step = 1;
+	int err, ring_cnt,
+		vector_remaing = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int eth_queue_idx = 0;
+
+	BUG_ON(ring_remaing == 0);
+	BUG_ON(vector_remaing == 0);
+
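+	/* spread the remaining rings evenly over the remaining vectors */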
+	for (; ring_remaing > 0 && vector_remaing > 0; vector_remaing--) {
+		ring_cnt = DIV_ROUND_UP(ring_remaing, vector_remaing);
+
+		err = rnpgbevf_alloc_q_vector(adapter, eth_queue_idx,
+					      vector_idx, ring_idx, ring_cnt,
+					      ring_step);
+		if (err)
+			goto err_out;
+
+		ring_idx += ring_step * ring_cnt;
+		ring_remaing -= ring_cnt;
+		vector_idx++;
+		eth_queue_idx += ring_cnt;
+	}
+
+	return 0;
+
+err_out:
+	vector_idx -= adapter->vector_off;
+	while (vector_idx--)
+		rnpgbevf_free_q_vector(adapter, vector_idx);
+	return -ENOMEM;
+}
+
+/**
+ * rnpgbevf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void rnpgbevf_free_q_vectors(struct rnpgbevf_adapter *adapter)
+{
+	int i, v_idx = adapter->num_q_vectors;
+
+	adapter->num_rx_queues = 0;
+	adapter->num_tx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	for (i = 0; i < v_idx; i++)
+		rnpgbevf_free_q_vector(adapter, i);
+}
+
+/**
+ * rnpgbevf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void
+rnpgbevf_reset_interrupt_capability(struct rnpgbevf_adapter *adapter)
+{
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & RNPVF_FLAG_MSI_ENABLED) {
+		pci_disable_msi(adapter->pdev);
+	}
+}
+
+/**
+ * rnpgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+int rnpgbevf_init_interrupt_scheme(struct rnpgbevf_adapter *adapter)
+{
+	int err;
+
+	/* Number of supported queues */
+	rnpgbevf_set_num_queues(adapter);
+
+	err = rnpgbevf_set_interrupt_capability(adapter);
+	if (err) {
+		hw_dbg(&adapter->hw,
+		       "Unable to setup interrupt capabilities\n");
+		goto err_set_interrupt;
+	}
+
+	err = rnpgbevf_alloc_q_vectors(adapter);
+	if (err) {
+		hw_dbg(&adapter->hw,
+		       "Unable to allocate memory for queue vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	hw_dbg(&adapter->hw,
+	       "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
+	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+	       adapter->num_rx_queues, adapter->num_tx_queues);
+
+	set_bit(__RNPVF_DOWN, &adapter->state);
+
+	return 0;
+err_alloc_q_vectors:
+	rnpgbevf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+	return err;
+}
+
+/**
+ * rnpgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void rnpgbevf_clear_interrupt_scheme(struct rnpgbevf_adapter *adapter)
+{
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+
+	rnpgbevf_free_q_vectors(adapter);
+	rnpgbevf_reset_interrupt_capability(adapter);
+}
+
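+/* fold a hardware counter that may have wrapped since the last read into a
+ * monotonically increasing 64-bit software counter
+ */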
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)                    \
+	{                                                                      \
+		u32 current_counter = RNPGBE_READ_REG(hw, reg);                \
+		if (current_counter < last_counter)                            \
+			counter += 0x100000000LL;                              \
+		last_counter = current_counter;                                \
+		counter &= 0xFFFFFFFF00000000LL;                               \
+		counter |= current_counter;                                    \
+	}
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter)       \
+	{                                                                      \
+		u64 current_counter_lsb = RNPGBE_READ_REG(hw, reg_lsb);        \
+		u64 current_counter_msb = RNPGBE_READ_REG(hw, reg_msb);        \
+		u64 current_counter =                                          \
+			(current_counter_msb << 32) | current_counter_lsb;     \
+		if (current_counter < last_counter)                            \
+			counter += 0x1000000000LL;                             \
+		last_counter = current_counter;                                \
+		counter &= 0xFFFFFFF000000000LL;                               \
+		counter |= current_counter;                                    \
+	}
+
+/**
+ * rnpgbevf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void rnpgbevf_update_stats(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw_stats_own *hw_stats = &adapter->hw_stats;
+	int i;
+	struct net_device_stats *net_stats = &adapter->netdev->stats;
+
+	net_stats->tx_packets = 0;
+	net_stats->tx_bytes = 0;
+	net_stats->rx_packets = 0;
+	net_stats->rx_bytes = 0;
+	net_stats->rx_dropped = 0;
+	net_stats->rx_errors = 0;
+	hw_stats->vlan_add_cnt = 0;
+	hw_stats->vlan_strip_cnt = 0;
+	hw_stats->csum_err = 0;
+	hw_stats->csum_good = 0;
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpgbevf_ring *ring;
+		struct rnpgbevf_q_vector *q_vector = adapter->q_vector[i];
+
+		rnpgbevf_for_each_ring(ring, q_vector->tx) {
+			hw_stats->vlan_add_cnt += ring->tx_stats.vlan_add;
+			net_stats->tx_packets += ring->stats.packets;
+			net_stats->tx_bytes += ring->stats.bytes;
+		}
+
+		rnpgbevf_for_each_ring(ring, q_vector->rx) {
+			hw_stats->csum_err += ring->rx_stats.csum_err;
+			hw_stats->csum_good += ring->rx_stats.csum_good;
+			hw_stats->vlan_strip_cnt += ring->rx_stats.vlan_remove;
+			net_stats->rx_packets += ring->stats.packets;
+			net_stats->rx_bytes += ring->stats.bytes;
+			net_stats->rx_errors += ring->rx_stats.csum_err;
+		}
+	}
+}
+
+static void rnpgbevf_reset_pf_request(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	if (!(adapter->flags & RNPVF_FLAG_PF_RESET_REQ))
+		return;
+
+	adapter->flags &= (~RNPVF_FLAG_PF_RESET_REQ);
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	hw->mac.ops.req_reset_pf(hw);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+}
+
+static int rnpgbevf_reset_subtask(struct rnpgbevf_adapter *adapter)
+{
+	if (!(adapter->flags & RNPVF_FLAG_PF_RESET))
+		return 0;
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__RNPVF_DOWN, &adapter->state) ||
+	    test_bit(__RNPVF_RESETTING, &adapter->state))
+		return 0;
+
+	adapter->tx_timeout_count++;
+
+	rtnl_lock();
+	rnpgbevf_reinit_locked(adapter);
+	rtnl_unlock();
+
+	adapter->flags &= (~RNPVF_FLAG_PF_RESET);
+
+	return 1;
+}
+
+/**
+ * rnpgbevf_watchdog - Timer Call-back
+ * @t: pointer to the timer_list embedded in the adapter
+ **/
+static void rnpgbevf_watchdog(struct timer_list *t)
+{
+	struct rnpgbevf_adapter *adapter =
+		from_timer(adapter, t, watchdog_timer);
+
+	/*
+	 * Do the watchdog outside of interrupt context due to the lovely
+	 * delays that some of the newer hardware requires
+	 */
+
+	if (!test_bit(__RNPVF_REMOVE, &adapter->state))
+		schedule_work(&adapter->watchdog_task);
+}
+
+__maybe_unused static void rnpgbevf_reset_task(struct work_struct *work)
+{
+	struct rnpgbevf_adapter *adapter;
+
+	adapter = container_of(work, struct rnpgbevf_adapter, reset_task);
+
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__RNPVF_DOWN, &adapter->state) ||
+	    test_bit(__RNPVF_RESETTING, &adapter->state))
+		return;
+
+	adapter->tx_timeout_count++;
+
+	rnpgbevf_reinit_locked(adapter);
+}
+
+static void rnpgbevf_check_hang_subtask(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+	struct rnpgbevf_ring *tx_ring;
+	u64 tx_next_to_clean_old;
+	u64 tx_next_to_clean;
+	u64 tx_next_to_use;
+	struct rnpgbevf_ring *rx_ring;
+	u64 rx_next_to_clean_old;
+	u64 rx_next_to_clean;
+	union rnp_rx_desc *rx_desc;
+
+	/* If we're down or resetting, just bail */
+	if (test_bit(__RNPVF_DOWN, &adapter->state) ||
+	    test_bit(__RNPVF_RESETTING, &adapter->state))
+		return;
+
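+	/*
+	 * Detect missed tx/rx interrupts: if next_to_clean has not moved
+	 * across several watchdog passes while work is still outstanding,
+	 * assume an irq was lost and kick NAPI by hand.
+	 */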
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		tx_ring = adapter->tx_ring[i];
+		/* get the last next_to_clean */
+		tx_next_to_clean_old = tx_ring->tx_stats.tx_next_to_clean;
+		tx_next_to_clean = tx_ring->next_to_clean;
+		tx_next_to_use = tx_ring->next_to_use;
+
+		/* if we have tx desc to clean */
+		if (tx_next_to_use != tx_next_to_clean) {
+			if (tx_next_to_clean == tx_next_to_clean_old) {
+				tx_ring->tx_stats.tx_equal_count++;
+				if (tx_ring->tx_stats.tx_equal_count > 2) {
+					/* maybe not so good */
+					struct rnpgbevf_q_vector *q_vector =
+						tx_ring->q_vector;
+
+					/* stats */
+					if (q_vector->rx.ring ||
+					    q_vector->tx.ring)
+						napi_schedule_irqoff(
+							&q_vector->napi);
+
+					tx_ring->tx_stats.tx_irq_miss++;
+					tx_ring->tx_stats.tx_equal_count = 0;
+				}
+			} else {
+				tx_ring->tx_stats.tx_equal_count = 0;
+			}
+			/* update */
+			/* record this next_to_clean */
+			tx_ring->tx_stats.tx_next_to_clean = tx_next_to_clean;
+		} else {
+			/* clean record to -1 */
+			tx_ring->tx_stats.tx_next_to_clean = -1;
+		}
+	}
+	/* check if we lost rx irq */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		/* get the last next_to_clean */
+		rx_next_to_clean_old = rx_ring->rx_stats.rx_next_to_clean;
+		/* get the now clean */
+		rx_next_to_clean = rx_ring->next_to_clean;
+
+		if (rx_next_to_clean != rx_next_to_clean_old) {
+			rx_ring->rx_stats.rx_equal_count = 0;
+			rx_ring->rx_stats.rx_next_to_clean = rx_next_to_clean;
+			continue;
+		}
+		rx_ring->rx_stats.rx_equal_count++;
+
+		if ((rx_ring->rx_stats.rx_equal_count > 2) &&
+		    (rx_ring->rx_stats.rx_equal_count < 5)) {
+			rx_desc =
+				RNPVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
+			if (rnpgbevf_test_staterr(rx_desc,
+						  RNPGBE_RXD_STAT_DD)) {
+				struct rnpgbevf_q_vector *q_vector =
+					rx_ring->q_vector;
+				unsigned int size;
+
+				size = le16_to_cpu(rx_desc->wb.len) -
+				       le16_to_cpu(rx_desc->wb.padding_len);
+				if (size) {
+					rx_ring->rx_stats.rx_irq_miss++;
+					if (q_vector->rx.ring ||
+					    q_vector->tx.ring)
+						napi_schedule_irqoff(
+							&q_vector->napi);
+				}
+			}
+		}
+		if (rx_ring->rx_stats.rx_equal_count > 1000)
+			rx_ring->rx_stats.rx_equal_count = 0;
+		rx_ring->rx_stats.rx_next_to_clean = rx_next_to_clean;
+	}
+}
+
+/* just modify rx itr */
+__maybe_unused static void
+rnpgbevf_auto_itr_moderation(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+	struct rnpgbevf_ring *rx_ring;
+	u64 period = (u64)(jiffies - adapter->last_moder_jiffies);
+
+	/* update jiffies */
+	adapter->last_moder_jiffies = jiffies;
+
+	if (!adapter->adaptive_rx_coal ||
+	    period < adapter->sample_interval * HZ) {
+		return;
+	}
+
+	/* it is time to check moderation */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		u64 x, y, rate;
+		u64 rx_packets, packets, rx_pkt_diff;
+
+		rx_ring = adapter->rx_ring[i];
+		rx_packets = READ_ONCE(rx_ring->stats.packets);
+		rx_pkt_diff = rx_packets -
+			      adapter->last_moder_packets[rx_ring->queue_index];
+		packets = rx_pkt_diff;
+
+		/* packets per second over the sample period */
+		x = packets * HZ;
+		y = do_div(x, period);
+		rate = x;
+
+		if (rate > 0 && rate < 20000) {
+			/* under 20k pkt/s: request the lower itr setting */
+			rx_ring->ring_flags |= RNPVF_RING_LOWER_ITR;
+		} else if (rate >= 20000) {
+			rx_ring->ring_flags &= (~RNPVF_RING_LOWER_ITR);
+		}
+
+		/* write back new count */
+		adapter->last_moder_packets[rx_ring->queue_index] = rx_packets;
+	}
+}
+
+/**
+ * rnpgbevf_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void rnpgbevf_watchdog_task(struct work_struct *work)
+{
+	struct rnpgbevf_adapter *adapter =
+		container_of(work, struct rnpgbevf_adapter, watchdog_task);
+	struct net_device *netdev = adapter->netdev;
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	u32 link_speed = adapter->link_speed;
+	bool link_up = adapter->link_up;
+	s32 need_reset;
+
+	adapter->flags |= RNPVF_FLAG_IN_WATCHDOG_TASK;
+
+	rnpgbevf_reset_pf_request(adapter);
+
+	if (rnpgbevf_reset_subtask(adapter)) {
+		adapter->flags &= ~RNPVF_FLAG_PF_UPDATE_MTU;
+		adapter->flags &= ~RNPVF_FLAG_PF_UPDATE_VLAN;
+		goto pf_has_reset;
+	}
+
+	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+	if (need_reset) {
+		adapter->link_up = link_up;
+		adapter->link_speed = link_speed;
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		schedule_work(&adapter->reset_task);
+		goto pf_has_reset;
+	}
+	adapter->link_up = link_up;
+	adapter->link_speed = link_speed;
+
+	if (test_bit(__RNPVF_DOWN, &adapter->state)) {
+		if (test_bit(__RNPVF_LINK_DOWN, &adapter->state)) {
+			clear_bit(__RNPVF_LINK_DOWN, &adapter->state);
+			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+		}
+		goto skip_link_check;
+	}
+
+	if (link_up) {
+		if (!netif_carrier_ok(netdev)) {
+			char *link_speed_string;
+
+			switch (link_speed) {
+			case RNPGBE_LINK_SPEED_40GB_FULL:
+				link_speed_string = "40 Gbps";
+				break;
+			case RNPGBE_LINK_SPEED_25GB_FULL:
+				link_speed_string = "25 Gbps";
+				break;
+			case RNPGBE_LINK_SPEED_10GB_FULL:
+				link_speed_string = "10 Gbps";
+				break;
+			case RNPGBE_LINK_SPEED_1GB_FULL:
+				link_speed_string = "1 Gbps";
+				break;
+			case RNPGBE_LINK_SPEED_100_FULL:
+				link_speed_string = "100 Mbps";
+				break;
+			default:
+				link_speed_string = "unknown speed";
+				break;
+			}
+			dev_info(&adapter->pdev->dev, "NIC Link is Up, %s\n",
+				 link_speed_string);
+			netif_carrier_on(netdev);
+			netif_tx_wake_all_queues(netdev);
+		}
+	} else {
+		adapter->link_up = false;
+		adapter->link_speed = 0;
+		if (netif_carrier_ok(netdev)) {
+			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
+			netif_carrier_off(netdev);
+			netif_tx_stop_all_queues(netdev);
+		}
+	}
+skip_link_check:
+	if (adapter->flags & RNPVF_FLAG_PF_UPDATE_MTU) {
+		adapter->flags &= ~RNPVF_FLAG_PF_UPDATE_MTU;
+		if (netdev->mtu > hw->mtu) {
+			netdev->mtu = hw->mtu;
+			rtnl_lock();
+			call_netdevice_notifiers(NETDEV_CHANGEMTU,
+						 adapter->netdev);
+			rtnl_unlock();
+		}
+	}
+	if (adapter->flags & RNPVF_FLAG_PF_UPDATE_VLAN) {
+		adapter->flags &= ~RNPVF_FLAG_PF_UPDATE_VLAN;
+		rnpgbevf_set_rx_mode(adapter->netdev);
+	}
+
+	rnpgbevf_check_hang_subtask(adapter);
+	rnpgbevf_update_stats(adapter);
+	rnpgbevf_auto_itr_moderation(adapter);
+
+pf_has_reset:
+	/* Reset the timer */
+	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + (2 * HZ)));
+	adapter->flags &= ~RNPVF_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * rnpgbevf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void rnpgbevf_free_tx_resources(struct rnpgbevf_adapter *adapter,
+				struct rnpgbevf_ring *tx_ring)
+{
+	BUG_ON(tx_ring == NULL);
+
+	rnpgbevf_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * rnpgbevf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void rnpgbevf_free_all_tx_resources(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		rnpgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ * rnpgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int rnpgbevf_setup_tx_resources(struct rnpgbevf_adapter *adapter,
+				struct rnpgbevf_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size;
+
+	size = sizeof(struct rnpgbevf_tx_buffer) * tx_ring->count;
+
+	if (tx_ring->q_vector)
+		numa_node = tx_ring->q_vector->numa_node;
+
+	dbg("%s size:%d count:%d\n", __func__, size, tx_ring->count);
+	tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
+	if (!tx_ring->tx_buffer_info)
+		tx_ring->tx_buffer_info = vzalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err_buffer;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct rnp_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+					   GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!tx_ring->desc)
+		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+						   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc)
+		goto err;
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	DPRINTK(IFUP, INFO,
+		"%d TxRing:%d, vector:%d ItemCounts:%d "
+		"desc:%p(0x%llx) node:%d\n",
+		tx_ring->queue_index, tx_ring->rnpgbevf_queue_idx,
+		tx_ring->q_vector->v_idx, tx_ring->count, tx_ring->desc,
+		tx_ring->dma, numa_node);
+	return 0;
+
+err:
+	rnpgbevf_err(
+		"%s [SetupTxResources] ERROR: #%d TxRing:%d, vector:%d ItemCounts:%d\n",
+		tx_ring->netdev->name, tx_ring->queue_index,
+		tx_ring->rnpgbevf_queue_idx, tx_ring->q_vector->v_idx,
+		tx_ring->count);
+	vfree(tx_ring->tx_buffer_info);
+err_buffer:
+	tx_ring->tx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * rnpgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int rnpgbevf_setup_all_tx_resources(struct rnpgbevf_adapter *adapter)
+{
+	int i, err = 0;
+
+	dbg("adapter->num_tx_queues:%d, adapter->tx_ring[0]:%p\n",
+	    adapter->num_tx_queues, adapter->tx_ring[0]);
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		BUG_ON(adapter->tx_ring[i] == NULL);
+		err = rnpgbevf_setup_tx_resources(adapter, adapter->tx_ring[i]);
+		if (!err)
+			continue;
+		hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
+		goto err_setup_tx;
+	}
+
+	return 0;
+
+err_setup_tx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		rnpgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+	return err;
+}
+
+/**
+ * rnpgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int rnpgbevf_setup_rx_resources(struct rnpgbevf_adapter *adapter,
+				struct rnpgbevf_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size;
+
+	BUG_ON(rx_ring == NULL);
+
+	size = sizeof(struct rnpgbevf_rx_buffer) * rx_ring->count;
+
+	if (rx_ring->q_vector)
+		numa_node = rx_ring->q_vector->numa_node;
+
+	rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
+	if (!rx_ring->rx_buffer_info)
+		rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
+		goto alloc_buffer;
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union rnp_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	rx_ring->desc = dma_alloc_coherent(&adapter->pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!rx_ring->desc) {
+		vfree(rx_ring->rx_buffer_info);
+		rx_ring->rx_buffer_info = NULL;
+		goto alloc_failed;
+	}
+
+	memset(rx_ring->desc, 0, rx_ring->size);
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	DPRINTK(IFUP, INFO,
+		"%d RxRing:%d, vector:%d ItemCounts:%d "
+		"desc:%p(0x%llx) node:%d\n",
+		rx_ring->queue_index, rx_ring->rnpgbevf_queue_idx,
+		rx_ring->q_vector->v_idx, rx_ring->count, rx_ring->desc,
+		rx_ring->dma, numa_node);
+
+	return 0;
+alloc_failed:
+	rnpgbevf_err(
+		"%s [SetupRxResources] ERROR: #%d RxRing:%d, vector:%d ItemCounts:%d\n",
+		rx_ring->netdev->name, rx_ring->queue_index,
+		rx_ring->rnpgbevf_queue_idx, rx_ring->q_vector->v_idx,
+		rx_ring->count);
+	vfree(rx_ring->rx_buffer_info);
+alloc_buffer:
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+
+	return -ENOMEM;
+}
+
+/**
+ * rnpgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int rnpgbevf_setup_all_rx_resources(struct rnpgbevf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		BUG_ON(adapter->rx_ring[i] == NULL);
+
+		err = rnpgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]);
+		if (!err)
+			continue;
+		hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
+		goto err_setup_rx;
+	}
+
+	return 0;
+
+err_setup_rx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		rnpgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+	return err;
+}
+
+/**
+ * rnpgbevf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void rnpgbevf_free_rx_resources(struct rnpgbevf_adapter *adapter,
+				struct rnpgbevf_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	rnpgbevf_clean_rx_ring(rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * rnpgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void rnpgbevf_free_all_rx_resources(struct rnpgbevf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+}
+
+/**
+ * rnpgbevf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int rnpgbevf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	if (new_mtu > hw->mtu) {
+		dev_info(&adapter->pdev->dev,
+			 "requested MTU exceeds the PF limit of %d\n",
+			 hw->mtu);
+		return -EINVAL;
+	}
+
+	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+	       netdev->mtu, new_mtu);
+	/* must set new MTU before calling down or up */
+	netdev->mtu = new_mtu;
+
+	if (netif_running(netdev))
+		rnpgbevf_reinit_locked(adapter);
+
+	return 0;
+}
+
+/**
+ * rnpgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+int rnpgbevf_open(struct net_device *netdev)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	int err;
+
+	DPRINTK(IFUP, INFO, "ifup\n");
+
+	/* A previous failure to open the device because of a lack of
+	 * available MSIX vector resources may have reset the number
+	 * of msix vectors variable to zero.  The only way to recover
+	 * is to unload/reload the driver and hope that the system has
+	 * been able to recover some MSIX vector resources.
+	 */
+	if (!adapter->num_msix_vectors)
+		return -ENOMEM;
+
+	/* disallow open during test */
+	if (test_bit(__RNPVF_TESTING, &adapter->state))
+		return -EBUSY;
+
+	if (hw->adapter_stopped) {
+		rnpgbevf_reset(adapter);
+		/* if adapter is still stopped then PF isn't up and
+		 * the vf can't start.
+		 */
+		if (hw->adapter_stopped) {
+			err = RNPGBE_ERR_MBX;
+			dev_err(&hw->pdev->dev,
+				"%s(%s):error: Unable to start - perhaps the PF Driver isn't "
+				"up yet\n",
+				adapter->name, netdev->name);
+			goto err_setup_reset;
+		}
+	}
+
+	netif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = rnpgbevf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = rnpgbevf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	rnpgbevf_configure(adapter);
+
+	/* clear any pending interrupts, may auto mask */
+	err = rnpgbevf_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+	if (err)
+		goto err_set_queues;
+
+	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+	if (err)
+		goto err_set_queues;
+
+	rnpgbevf_up_complete(adapter);
+
+#ifdef CONFIG_NET_NCSI
+	if (adapter->ncsi_dev) {
+		err = ncsi_start_dev(adapter->ncsi_dev);
+		if (err)
+			netdev_err(netdev, "ncsi start faild!\n");
+	}
+#endif
+
+	return 0;
+
+err_set_queues:
+	rnpgbevf_free_irq(adapter);
+err_req_irq:
+
+err_setup_rx:
+	rnpgbevf_free_all_rx_resources(adapter);
+err_setup_tx:
+	rnpgbevf_free_all_tx_resources(adapter);
+
+err_setup_reset:
+
+	return err;
+}
+
+/**
+ * rnpgbevf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+int rnpgbevf_close(struct net_device *netdev)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+
+	DPRINTK(IFDOWN, INFO, "ifdown\n");
+
+#ifdef CONFIG_NET_NCSI
+	if (adapter->ncsi_dev)
+		ncsi_stop_dev(adapter->ncsi_dev);
+#endif
+
+	rnpgbevf_down(adapter);
+	rnpgbevf_free_irq(adapter);
+
+	rnpgbevf_free_all_tx_resources(adapter);
+	rnpgbevf_free_all_rx_resources(adapter);
+
+	return 0;
+}
+
+void rnpgbevf_tx_ctxtdesc(struct rnpgbevf_ring *tx_ring, u16 mss_seg_len,
+			  u8 l4_hdr_len, u8 tunnel_hdr_len, int ignore_vlan,
+			  u16 type_tucmd, bool crc_pad)
+{
+	struct rnp_tx_ctx_desc *context_desc;
+	u16 i = tx_ring->next_to_use;
+	struct rnpgbevf_adapter *adapter = RING2ADAPT(tx_ring);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+
+	context_desc = RNPVF_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= RNPGBE_TXD_CTX_CTRL_DESC;
+
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_TX_PADDING) {
+		if (!crc_pad)
+			type_tucmd |= RNPGBE_TXD_MTI_CRC_PAD_CTRL;
+	}
+
+	context_desc->mss_len = cpu_to_le16(mss_seg_len);
+	context_desc->vfnum = 0x80 | vfnum;
+	context_desc->l4_hdr_len = l4_hdr_len;
+
+	if (ignore_vlan)
+		context_desc->vf_veb_flags |= VF_IGNORE_VLAN;
+
+	context_desc->tunnel_hdr_len = tunnel_hdr_len;
+	context_desc->rev = 0;
+	context_desc->cmd = cpu_to_le16(type_tucmd);
+	buf_dump_line("ctx  ", __LINE__, context_desc, sizeof(*context_desc));
+}
+
+static int rnpgbevf_tso(struct rnpgbevf_ring *tx_ring,
+			struct rnpgbevf_tx_buffer *first, u8 *hdr_len)
+{
+	struct sk_buff *skb = first->skb;
+	struct net_device *netdev = tx_ring->netdev;
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
+	int err;
+	u8 *inner_mac;
+	u16 gso_segs, gso_size;
+	u16 gso_need_pad;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
+
+	inner_mac = skb->data;
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = 0x0000;
+	} else {
+		ip.v6->payload_len = 0;
+	}
+	if (skb_shinfo(skb)->gso_type &
+	    (SKB_GSO_GRE |
+	     SKB_GSO_GRE_CSUM |
+	     SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) {
+		inner_mac = skb_inner_mac_header(skb);
+		first->tunnel_hdr_len = inner_mac - skb->data;
+
+		if (skb_shinfo(skb)->gso_type &
+		    (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) {
+			first->cmd_flags |= RNPGBE_TXD_TUNNEL_VXLAN;
+			l4.udp->check = 0;
+		} else {
+			first->cmd_flags |= RNPGBE_TXD_TUNNEL_NVGRE;
+		}
+		dbg("set outer l4.udp to 0\n");
+
+		/* reset pointers to inner headers */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+	}
+
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the IP header itself (this is now
+		 * the inner header when a tunnel is present)
+		 */
+		ip.v4->check = 0x0000;
+
+	} else {
+		ip.v6->payload_len = 0;
+		/* set ipv6 type */
+		first->cmd_flags |= (RNPGBE_TXD_FLAG_IPv6);
+	}
+
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	paylen = skb->len - l4_offset;
+	dbg("before l4 checksum is %x\n", l4.tcp->check);
+
+	if (skb->csum_offset == offsetof(struct tcphdr, check)) {
+		dbg("tcp before l4 checksum is %x\n", l4.tcp->check);
+		first->cmd_flags |= RNPGBE_TXD_L4_TYPE_TCP;
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+		dbg("tcp l4 checksum is %x\n", l4.tcp->check);
+		l4.tcp->psh = 0;
+	} else {
+		dbg("paylen is %x\n", paylen);
+		first->cmd_flags |= RNPGBE_TXD_L4_TYPE_UDP;
+		/* compute length of segmentation header */
+		dbg("udp before l4 checksum is %x\n", l4.udp->check);
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+		dbg("udp l4 checksum is %x\n", l4.udp->check);
+	}
+
+	dbg("l4 checksum is %x\n", l4.tcp->check);
+
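+	/*
+	 * mac_ip_len packs the IP header length in the low 9 bits and the
+	 * MAC header length in the bits above it (rnpgbevf_tx_csum uses the
+	 * same encoding).
+	 */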
+	first->mac_ip_len = l4.hdr - ip.hdr;
+	first->mac_ip_len |= (ip.hdr - inner_mac) << 9;
+
+	/* compute header lengths */
+	/* pull values out of skb_shinfo */
+	gso_size = skb_shinfo(skb)->gso_size;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
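+	/*
+	 * When hardware tx padding is enabled, work out whether the last
+	 * TSO segment ((skb->len - hdr_len) % gso_size bytes of payload)
+	 * would end up shorter than 60 bytes on the wire; if so, mark the
+	 * packet so the context descriptor asks hardware to pad it.
+	 */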
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_TX_PADDING) {
+		gso_need_pad = (first->skb->len - *hdr_len) % gso_size;
+		if (gso_need_pad) {
+			if ((gso_need_pad + *hdr_len) <= 60) {
+				gso_need_pad = 60 - (gso_need_pad + *hdr_len);
+				first->gso_need_padding = !!gso_need_pad;
+			}
+		}
+	}
+
+	/* update gso size and bytecount with header size */
+	/* to fix tx status */
+	first->gso_segs = gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
+	first->mss_len_vf_num |= (gso_size | ((l4.tcp->doff * 4) << 24));
+	first->cmd_flags |=
+		RNPGBE_TXD_FLAG_TSO | RNPGBE_TXD_IP_CSUM | RNPGBE_TXD_L4_CSUM;
+	first->ctx_flag = true;
+	return 1;
+}
+
+static int rnpgbevf_tx_csum(struct rnpgbevf_ring *tx_ring,
+			    struct rnpgbevf_tx_buffer *first)
+{
+	struct sk_buff *skb = first->skb;
+	u8 l4_proto = 0;
+	u8 ip_len = 0;
+	u8 mac_len = 0;
+	u8 *inner_mac = skb->data;
+	u8 *exthdr;
+	__be16 frag_off;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	inner_mac = skb->data;
+
+	/* outer protocol */
+	if (skb->encapsulation) {
+		/* define outer network header type */
+		if (ip.v4->version == 4) {
+			l4_proto = ip.v4->protocol;
+		} else {
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		}
+
+		/* define outer transport */
+		switch (l4_proto) {
+		case IPPROTO_UDP:
+			l4.udp->check = 0;
+			first->cmd_flags |= RNPGBE_TXD_TUNNEL_VXLAN;
+
+			break;
+		case IPPROTO_GRE:
+
+			first->cmd_flags |= RNPGBE_TXD_TUNNEL_NVGRE;
+			/* There was a long-standing issue in GRE where GSO
+			 * was not setting the outer transport header unless
+			 * a GRE checksum was requested. This was fixed in
+			 * the 4.6 version of the kernel.  In the 4.7 kernel
+			 * support for GRE over IPv6 was added to GSO.  So we
+			 * can assume this workaround for all IPv4 headers
+			 * without impacting later versions of the GRE.
+			 */
+			if (ip.v4->version == 4)
+				l4.hdr = ip.hdr + (ip.v4->ihl * 4);
+			break;
+		default:
+			skb_checksum_help(skb);
+			return -1;
+		}
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+
+		inner_mac = skb_inner_mac_header(skb);
+		first->tunnel_hdr_len = inner_mac - skb->data;
+		first->ctx_flag = true;
+		dbg("tunnel length is %d\n", first->tunnel_hdr_len);
+	}
+
+	mac_len = (ip.hdr - inner_mac);
+	dbg("inner checksum needed %d", skb_checksum_start_offset(skb));
+	dbg("skb->encapsulation %d\n", skb->encapsulation);
+	ip_len = (l4.hdr - ip.hdr);
+	if (ip.v4->version == 4) {
+		l4_proto = ip.v4->protocol;
+	} else {
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
+					 &frag_off);
+		first->cmd_flags |= RNPGBE_TXD_FLAG_IPv6;
+	}
+	/* Enable L4 checksum offloads */
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		first->cmd_flags |= RNPGBE_TXD_L4_TYPE_TCP | RNPGBE_TXD_L4_CSUM;
+		break;
+	case IPPROTO_SCTP:
+		first->cmd_flags |=
+			RNPGBE_TXD_L4_TYPE_SCTP | RNPGBE_TXD_L4_CSUM;
+		break;
+	case IPPROTO_UDP:
+		first->cmd_flags |= RNPGBE_TXD_L4_TYPE_UDP | RNPGBE_TXD_L4_CSUM;
+		break;
+	default:
+		skb_checksum_help(skb);
+		return 0;
+	}
+	if ((tx_ring->ring_flags & RNPVF_RING_NO_TUNNEL_SUPPORT) &&
+	    (first->ctx_flag)) {
+		/* if not support tunnel */
+		first->cmd_flags &= (~RNPGBE_TXD_TUNNEL_MASK);
+		mac_len += first->tunnel_hdr_len;
+		first->tunnel_hdr_len = 0;
+		first->ctx_flag = false;
+	}
+
+	dbg("mac length is %d\n", mac_len);
+	dbg("ip length is %d\n", ip_len);
+	first->mac_ip_len = (mac_len << 9) | ip_len;
+	return 0;
+}
+
+static void rnpgbevf_tx_map(struct rnpgbevf_ring *tx_ring,
+			    struct rnpgbevf_tx_buffer *first, const u8 hdr_len)
+{
+	struct sk_buff *skb = first->skb;
+	struct rnpgbevf_tx_buffer *tx_buffer;
+	struct rnp_tx_desc *tx_desc;
+	skb_frag_t *frag;
+	dma_addr_t dma;
+	unsigned int data_len, size;
+	u16 vlan = first->vlan;
+	u16 cmd = first->cmd_flags;
+	u16 i = tx_ring->next_to_use;
+	u64 fun_id = ((u64)(tx_ring->vfnum) << (32 + 24));
+
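+	/*
+	 * fun_id carries the VF number in the top bits of every buffer
+	 * address written to a descriptor; buffers larger than
+	 * RNPVF_MAX_DATA_PER_TXD are split across several descriptors below.
+	 */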
+	tx_desc = RNPVF_TX_DESC(tx_ring, i);
+	tx_desc->blen = cpu_to_le16(skb->len - hdr_len); /* maybe no-use */
+	tx_desc->vlan = cpu_to_le16(vlan);
+	tx_desc->cmd = cpu_to_le16(cmd);
+	tx_desc->mac_ip_len = first->mac_ip_len;
+
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+	tx_buffer = first;
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		/* 1st desc */
+		tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+
+		while (unlikely(size > RNPVF_MAX_DATA_PER_TXD)) {
+			tx_desc->cmd = cpu_to_le16(cmd);
+			tx_desc->blen = cpu_to_le16(RNPVF_MAX_DATA_PER_TXD);
+			/* ==== desc== */
+			buf_dump_line("tx0  ", __LINE__, tx_desc,
+				      sizeof(*tx_desc));
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = RNPVF_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+
+			dma += RNPVF_MAX_DATA_PER_TXD;
+			size -= RNPVF_MAX_DATA_PER_TXD;
+
+			tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+		}
+
+		buf_dump_line("tx1  ", __LINE__, tx_desc, sizeof(*tx_desc));
+		if (likely(!data_len))
+			break;
+		tx_desc->cmd = cpu_to_le16(cmd);
+		tx_desc->blen = cpu_to_le16(size);
+		buf_dump_line("tx2  ", __LINE__, tx_desc, sizeof(*tx_desc));
+
+		/* ==== frag== */
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = RNPVF_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+		tx_desc->cmd = cpu_to_le16(RNPGBE_TXD_CMD_RS);
+		tx_desc->mac_ip_len = 0;
+
+		size = skb_frag_size(frag);
+
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+	}
+
+	/* write last descriptor with RS and EOP bits */
+	tx_desc->cmd =
+		cpu_to_le16(cmd | RNPGBE_TXD_CMD_EOP | RNPGBE_TXD_CMD_RS);
+	tx_desc->blen = cpu_to_le16(size);
+	buf_dump_line("tx3  ", __LINE__, tx_desc, sizeof(*tx_desc));
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	/*
+	 * Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	buf_dump_line("tx4  ", __LINE__, tx_desc, sizeof(*tx_desc));
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	/* notify HW of packet */
+	rnpgbevf_wr_reg(tx_ring->tail, i);
+
+	return;
+dma_error:
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		rnpgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
+	tx_ring->next_to_use = i;
+}
+
+static int __rnpgbevf_maybe_stop_tx(struct rnpgbevf_ring *tx_ring, int size)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+	dbg("stop subqueue\n");
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (likely(rnpgbevf_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++adapter->restart_queue;
+	return 0;
+}
+
+void rnpgbevf_maybe_tx_ctxtdesc(struct rnpgbevf_ring *tx_ring,
+				struct rnpgbevf_tx_buffer *first,
+				int ignore_vlan, u16 type_tucmd)
+{
+	if (first->ctx_flag) {
+		rnpgbevf_tx_ctxtdesc(tx_ring, first->mss_len, first->l4_hdr_len,
+				     first->tunnel_hdr_len, ignore_vlan,
+				     type_tucmd, first->gso_need_padding);
+	}
+}
+
+static int rnpgbevf_maybe_stop_tx(struct rnpgbevf_ring *tx_ring, int size)
+{
+	if (likely(RNPVF_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __rnpgbevf_maybe_stop_tx(tx_ring, size);
+}
+
+netdev_tx_t rnpgbevf_xmit_frame_ring(struct sk_buff *skb,
+				     struct rnpgbevf_adapter *adapter,
+				     struct rnpgbevf_ring *tx_ring,
+				     bool tx_padding)
+{
+	struct rnpgbevf_tx_buffer *first;
+	int tso;
+	u16 cmd = RNPGBE_TXD_CMD_RS;
+	u16 vlan = 0;
+	unsigned short f;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+	__be16 protocol = skb->protocol;
+	u8 hdr_len = 0;
+	int ignore_vlan = 0;
+
+	dbg("=== begin ====\n");
+
+	rnpgbevf_skb_dump(skb, true);
+
+	dbg("skb:%p, skb->len:%d  headlen:%d, data_len:%d, tx_ring->next_to_use:%d "
+	    "count:%d\n",
+	    skb, skb->len, skb_headlen(skb), skb->data_len,
+	    tx_ring->next_to_use, tx_ring->count);
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/RNPVF_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/RNPVF_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+		skb_frag_t *frag_temp = &skb_shinfo(skb)->frags[f];
+		count += TXD_USE_COUNT(skb_frag_size(frag_temp));
+		dbg(" #%d frag: size:%d\n", f, skb_shinfo(skb)->frags[f].size);
+	}
+
+	if (rnpgbevf_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+	dbg("xx %p\n", tx_ring->tx_buffer_info);
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	first->bytecount = skb->len;
+	first->gso_segs = 1;
+
+	first->mss_len_vf_num = 0;
+	first->inner_vlan_tunnel_len = 0;
+
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_TX_PADDING) {
+		first->ctx_flag = true;
+		first->gso_need_padding = tx_padding;
+	}
+
+	/* if we have a HW VLAN tag being added default to the HW one */
+	if (adapter->flags & RNPVF_FLAG_PF_SET_VLAN) {
+		/* in this mode, the driver inserts the vlan tag */
+		vlan |= adapter->vf_vlan;
+		cmd |= RNPGBE_TXD_VLAN_VALID | RNPGBE_TXD_VLAN_CTRL_INSERT_VLAN;
+
+	} else {
+		if (skb_vlan_tag_present(skb)) {
+			if (skb->vlan_proto != htons(ETH_P_8021Q)) {
+				/* veb only use ctags */
+				vlan |= skb_vlan_tag_get(skb);
+				cmd |= RNPGBE_TXD_SVLAN_TYPE |
+				       RNPGBE_TXD_VLAN_CTRL_INSERT_VLAN;
+			} else {
+				vlan |= skb_vlan_tag_get(skb);
+				cmd |= RNPGBE_TXD_VLAN_VALID |
+				       RNPGBE_TXD_VLAN_CTRL_INSERT_VLAN;
+			}
+			tx_ring->tx_stats.vlan_add++;
+			/* else if it is a SW VLAN check the next protocol and store the tag */
+		} else if (protocol == htons(ETH_P_8021Q)) {
+			struct vlan_hdr *vhdr, _vhdr;
+
+			vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr),
+						  &_vhdr);
+			if (!vhdr)
+				goto out_drop;
+
+			protocol = vhdr->h_vlan_encapsulated_proto;
+			vlan = ntohs(vhdr->h_vlan_TCI);
+			cmd |= RNPGBE_TXD_VLAN_VALID | RNPGBE_TXD_VLAN_CTRL_NOP;
+			ignore_vlan = 1;
+		}
+	}
+
+	/* record initial flags and protocol */
+	first->cmd_flags = cmd;
+	first->vlan = vlan;
+	first->protocol = protocol;
+	/* default len should not be 0 (hw requirement) */
+	first->mac_ip_len = 20;
+	first->tunnel_hdr_len = 0;
+
+	tso = rnpgbevf_tso(tx_ring, first, &hdr_len);
+	if (tso < 0)
+		goto out_drop;
+	else if (!tso)
+		rnpgbevf_tx_csum(tx_ring, first);
+	/* vf should always send a ctx desc carrying vf_num */
+	first->ctx_flag = true;
+	/* add control desc */
+	rnpgbevf_maybe_tx_ctxtdesc(tx_ring, first, ignore_vlan, 0);
+	rnpgbevf_tx_map(tx_ring, first, hdr_len);
+	rnpgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	dbg("=== end ====\n\n\n\n");
+	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+
+	return NETDEV_TX_OK;
+}
+
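+/*
+ * SCTP frames carry a CRC32c over the payload, so padding them with zeros
+ * in software would corrupt the checksum.  Detect SCTP here so the caller
+ * can leave short SCTP frames to the hardware padding path instead of
+ * skb_put_padto().
+ */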
+static bool check_sctp_no_padding(struct sk_buff *skb)
+{
+	bool no_padding = false;
+	u8 l4_proto = 0;
+	u8 *exthdr;
+	__be16 frag_off;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	if (ip.v4->version == 4) {
+		l4_proto = ip.v4->protocol;
+	} else {
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
+					 &frag_off);
+	}
+	switch (l4_proto) {
+	case IPPROTO_SCTP:
+		no_padding = true;
+		break;
+	default:
+
+		break;
+	}
+	return no_padding;
+}
+
+static int rnpgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_ring *tx_ring;
+	bool tx_padding = false;
+
+	/*
+	 * The minimum packet size for olinfo paylen is 17, so pad the skb
+	 * to meet this minimum size requirement.  SCTP packets must not be
+	 * zero-padded in software (that would change the CRC32c), so short
+	 * SCTP frames are left to the hardware padding path instead.
+	 */
+
+	if (!netif_carrier_ok(netdev)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_TX_PADDING) {
+		if (skb->len < 60) {
+			if (!check_sctp_no_padding(skb)) {
+				if (skb_put_padto(skb, 60))
+					return NETDEV_TX_OK;
+
+			} else {
+				tx_padding = true;
+			}
+		}
+	} else {
+		if (skb_put_padto(skb, 17))
+			return NETDEV_TX_OK;
+	}
+
+	tx_ring = adapter->tx_ring[skb->queue_mapping];
+	dbg("xmi:queue_mapping:%d ring:%p\n", skb->queue_mapping, tx_ring);
+	return rnpgbevf_xmit_frame_ring(skb, adapter, tx_ring, tx_padding);
+}
+
+/**
+ * rnpgbevf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int rnpgbevf_set_mac(struct net_device *netdev, void *p)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+	s32 ret_val;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	ret_val = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+	if (ret_val != 0) {
+		/* set mac failed */
+		dev_err(&adapter->pdev->dev, "pf not allowed reset mac\n");
+		return -EADDRNOTAVAIL;
+	}
+	eth_hw_addr_set(netdev, addr->sa_data);
+	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+	rnpgbevf_configure_veb(adapter);
+
+	return 0;
+}
+
+void remove_mbx_irq(struct rnpgbevf_adapter *adapter)
+{
+	u32 msgbuf[2];
+	struct rnpgbevf_hw *hw = &adapter->hw;
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	msgbuf[0] = RNPGBE_PF_REMOVE;
+	adapter->hw.mbx.ops.write_posted(hw, msgbuf, 1, false);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	mdelay(100);
+
+	/* mbx */
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		adapter->hw.mbx.ops.configure(
+			&adapter->hw, adapter->msix_entries[0].entry, false);
+		free_irq(adapter->msix_entries[0].vector, adapter);
+	}
+}
+
+static void rnp_get_link_status(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	u32 msgbuf[3];
+	s32 ret_val = -1;
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	msgbuf[0] = RNPGBE_PF_GET_LINK;
+	adapter->hw.mbx.ops.write_posted(hw, msgbuf, 1, false);
+	mdelay(2);
+	ret_val = adapter->hw.mbx.ops.read_posted(hw, msgbuf, 2, false);
+	if (ret_val == 0) {
+		if (msgbuf[1] & RNPGBE_PF_LINK_UP) {
+			hw->link = true;
+			hw->speed = msgbuf[1] & 0xffff;
+
+		} else {
+			hw->link = false;
+			hw->speed = 0;
+		}
+	} else {
+		printk(KERN_DEBUG "[rpnvf] error! mbx GET_LINK faild!\n");
+	}
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+}
+
+int register_mbx_irq(struct rnpgbevf_adapter *adapter)
+{
+	struct rnpgbevf_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	/* for mbx:vector0 */
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		err = request_irq(adapter->msix_entries[0].vector,
+				  rnpgbevf_msix_other, 0, netdev->name,
+				  adapter);
+		if (err) {
+			dev_err(&adapter->pdev->dev,
+				"request_irq for msix_other failed: %d\n", err);
+			goto err_mbx;
+		}
+		hw->mbx.ops.configure(hw, adapter->msix_entries[0].entry, true);
+	}
+
+	rnp_get_link_status(adapter);
+err_mbx:
+	return err;
+}
+
+static int rnpgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct rnpgbevf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		rtnl_lock();
+		rnpgbevf_down(adapter);
+		rnpgbevf_free_irq(adapter);
+		rnpgbevf_free_all_tx_resources(adapter);
+		rnpgbevf_free_all_rx_resources(adapter);
+		rtnl_unlock();
+	}
+
+	remove_mbx_irq(adapter);
+	rnpgbevf_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+#endif
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int rnpgbevf_resume(struct pci_dev *pdev)
+{
+	struct rnpgbevf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/*
+	 * pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	rtnl_lock();
+	err = rnpgbevf_init_interrupt_scheme(adapter);
+	rtnl_unlock();
+	register_mbx_irq(adapter);
+
+	if (err) {
+		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
+		return err;
+	}
+
+	rnpgbevf_reset(adapter);
+
+	if (netif_running(netdev)) {
+		err = rnpgbevf_open(netdev);
+		if (err)
+			return err;
+	}
+
+	netif_device_attach(netdev);
+
+	return err;
+}
+#endif /* CONFIG_PM */
+
+static void rnpgbevf_shutdown(struct pci_dev *pdev)
+{
+	rnpgbevf_suspend(pdev, PMSG_SUSPEND);
+}
+
+static void rnpgbevf_get_stats64(struct net_device *netdev,
+				 struct rtnl_link_stats64 *stats)
+{
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+	int i;
+	u64 ring_csum_err = 0;
+	u64 ring_csum_good = 0;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct rnpgbevf_ring *ring = adapter->rx_ring[i];
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+				ring_csum_err += ring->rx_stats.csum_err;
+				ring_csum_good += ring->rx_stats.csum_good;
+			} while (u64_stats_fetch_retry(&ring->syncp, start));
+			stats->rx_packets += packets;
+			stats->rx_bytes += bytes;
+		}
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpgbevf_ring *ring = adapter->tx_ring[i];
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes += bytes;
+		}
+	}
+	rcu_read_unlock();
+	/* following stats updated by rnp_watchdog_task() */
+	stats->multicast = netdev->stats.multicast;
+	stats->rx_errors = netdev->stats.rx_errors;
+	stats->rx_length_errors = netdev->stats.rx_length_errors;
+	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+}
+
+#define RNPGBE_MAX_TUNNEL_HDR_LEN 80
+#define RNPGBE_MAX_MAC_HDR_LEN 127
+#define RNPGBE_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t rnpgbevf_features_check(struct sk_buff *skb,
+						 struct net_device *dev,
+						 netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > RNPGBE_MAX_MAC_HDR_LEN))
+		return features &
+		       ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC |
+			 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO | NETIF_F_TSO6);
+
+	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len > RNPGBE_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO | NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+
+static const struct net_device_ops rnpgbevf_netdev_ops = {
+	.ndo_open = rnpgbevf_open,
+	.ndo_stop = rnpgbevf_close,
+	.ndo_start_xmit = rnpgbevf_xmit_frame,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_get_stats64 = rnpgbevf_get_stats64,
+	.ndo_set_rx_mode = rnpgbevf_set_rx_mode,
+	.ndo_set_mac_address = rnpgbevf_set_mac,
+	.ndo_change_mtu = rnpgbevf_change_mtu,
+	.ndo_vlan_rx_add_vid = rnpgbevf_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = rnpgbevf_vlan_rx_kill_vid,
+	.ndo_features_check = rnpgbevf_features_check,
+	.ndo_set_features = rnpgbevf_set_features,
+	.ndo_fix_features = rnpgbevf_fix_features,
+};
+
+void rnpgbevf_assign_netdev_ops(struct net_device *dev)
+{
+	/* different hw variants can assign different ops here */
+	dev->netdev_ops = &rnpgbevf_netdev_ops;
+	rnpgbevf_set_ethtool_ops(dev);
+	dev->watchdog_timeo = 5 * HZ;
+}
+
+static u8 rnpgbevf_vfnum_n500(struct rnpgbevf_hw *hw)
+{
+	u16 vf_num;
+
+	vf_num = readl(hw->hw_addr + VF_NUM_REG_N500);
+#define VF_NUM_MASK_N500 (0xff)
+
+	return (vf_num & VF_NUM_MASK_N500);
+}
+
+#ifdef CONFIG_NET_NCSI
+static void rnpgbevf_ncsi_notify_handler(struct ncsi_dev *nd)
+{
+	if (unlikely(nd->state != ncsi_dev_state_functional))
+		return;
+
+	netdev_dbg(nd->dev, "NCSI interface %s\n", nd->link_up ? "up" : "down");
+}
+#endif
+
+static inline unsigned long rnpgbevf_tso_features(struct rnpgbevf_hw *hw)
+{
+	unsigned long features = 0;
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TSO)
+		features |= NETIF_F_TSO;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TSO)
+		features |= NETIF_F_TSO6;
+	features |= NETIF_F_GSO_PARTIAL;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TX_UDP_TUNNEL)
+		features |= RNPVF_GSO_PARTIAL_FEATURES;
+
+	return features;
+}
+
+static int rnpgbevf_add_adpater(struct pci_dev *pdev,
+				const struct rnpgbevf_info *ii,
+				struct rnpgbevf_adapter **padapter)
+{
+	int err = 0;
+	struct rnpgbevf_adapter *adapter = NULL;
+	struct net_device *netdev;
+	struct rnpgbevf_hw *hw;
+	unsigned int queues = MAX_TX_QUEUES;
+	static int pf0_cards_found;
+	static int pf1_cards_found;
+	static int pf2_cards_found;
+	static int pf3_cards_found;
+
+	pr_info("====  add adapter queues:%d ====", queues);
+
+	netdev = alloc_etherdev_mq(sizeof(struct rnpgbevf_adapter), queues);
+	if (!netdev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	/* setup some status */
+
+	if (padapter)
+		*padapter = adapter;
+	pci_set_drvdata(pdev, adapter);
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+	hw->pdev = pdev;
+	hw->board_type = ii->board_type;
+	adapter->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV
+#ifdef MSG_PROBE_ENABLE
+							    | NETIF_MSG_PROBE
+#endif
+#ifdef MSG_IFUP_ENABLE
+							    | NETIF_MSG_IFUP
+#endif
+#ifdef MSG_IFDOWN_ENABLE
+							    | NETIF_MSG_IFDOWN
+#endif
+	);
+
+	switch (ii->mac) {
+	case rnp_mac_2port_10G:
+		hw->mode = MODE_NIC_MODE_2PORT_10G;
+		break;
+	case rnp_mac_2port_40G:
+		hw->mode = MODE_NIC_MODE_2PORT_40G;
+		break;
+	case rnp_mac_4port_10G:
+		hw->mode = MODE_NIC_MODE_4PORT_10G;
+		break;
+	case rnp_mac_8port_10G:
+		hw->mode = MODE_NIC_MODE_8PORT_10G;
+		break;
+	default:
+		break;
+	}
+
+	switch (hw->board_type) {
+	case rnp_board_n500:
+#define RNPGBE_N500_BAR 2
+		hw->hw_addr = pcim_iomap(pdev, RNPGBE_N500_BAR, 0);
+		if (!hw->hw_addr) {
+			err = -EIO;
+			goto err_ioremap;
+		}
+		dev_info(&pdev->dev, "[bar%d]:%p %llx len=%d kB\n",
+			 RNPGBE_N500_BAR, hw->hw_addr,
+			 (unsigned long long)pci_resource_start(
+				 pdev, RNPGBE_N500_BAR),
+			 (int)pci_resource_len(pdev, RNPGBE_N500_BAR) / 1024);
+		hw->vfnum = rnpgbevf_vfnum_n500(hw);
+		hw->ring_msix_base = hw->hw_addr + 0x24700;
+
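+		/*
+		 * Bits 5-6 of vfnum select which PF this VF belongs to;
+		 * keep a separate board counter per PF when building the
+		 * interface name.
+		 */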
+		switch ((hw->vfnum & 0x60) >> 5) {
+		case 0x00:
+			adapter->port = adapter->bd_number = pf0_cards_found++;
+			if (pf0_cards_found == 1000)
+				pf0_cards_found = 0;
+			break;
+		case 0x01:
+			adapter->port = adapter->bd_number = pf1_cards_found++;
+			if (pf1_cards_found == 1000)
+				pf1_cards_found = 0;
+			break;
+		case 0x02:
+			adapter->port = adapter->bd_number = pf2_cards_found++;
+			if (pf2_cards_found == 1000)
+				pf2_cards_found = 0;
+			break;
+		case 0x03:
+			adapter->port = adapter->bd_number = pf3_cards_found++;
+			if (pf3_cards_found == 1000)
+				pf3_cards_found = 0;
+			break;
+		}
+		snprintf(adapter->name, sizeof(netdev->name), "%s%d%d",
+			 rnpgbevf_driver_name, (hw->vfnum & 0x60) >> 5,
+			 adapter->bd_number);
+
+		adapter->irq_mode = irq_mode_msix;
+		break;
+
+	case rnp_board_n210:
+#define RNPGBE_N210_BAR 2
+		hw->hw_addr = pcim_iomap(pdev, RNPGBE_N210_BAR, 0);
+		if (!hw->hw_addr) {
+			err = -EIO;
+			goto err_ioremap;
+		}
+		dev_info(&pdev->dev, "[bar%d]:%p %llx len=%d kB\n",
+			 RNPGBE_N210_BAR, hw->hw_addr,
+			 (unsigned long long)pci_resource_start(
+				 pdev, RNPGBE_N210_BAR),
+			 (int)pci_resource_len(pdev, RNPGBE_N210_BAR) / 1024);
+		hw->vfnum = rnpgbevf_vfnum_n500(hw);
+		hw->ring_msix_base = hw->hw_addr + 0x25000;
+
+		switch ((hw->vfnum & 0x60) >> 5) {
+		case 0x00:
+			adapter->port = adapter->bd_number = pf0_cards_found++;
+			if (pf0_cards_found == 1000)
+				pf0_cards_found = 0;
+			break;
+		case 0x01:
+			adapter->port = adapter->bd_number = pf1_cards_found++;
+			if (pf1_cards_found == 1000)
+				pf1_cards_found = 0;
+			break;
+		case 0x02:
+			adapter->port = adapter->bd_number = pf2_cards_found++;
+			if (pf2_cards_found == 1000)
+				pf2_cards_found = 0;
+			break;
+		case 0x03:
+			adapter->port = adapter->bd_number = pf3_cards_found++;
+			if (pf3_cards_found == 1000)
+				pf3_cards_found = 0;
+			break;
+		}
+		snprintf(adapter->name, sizeof(netdev->name), "%s%d%d",
+			 rnpgbevf_driver_name, (hw->vfnum & 0x60) >> 5,
+			 adapter->bd_number);
+
+		adapter->irq_mode = irq_mode_msix;
+		break;
+	default:
+		printk("card type error\n");
+		goto err_ioremap;
+
+		break;
+	}
+
+	pr_info("%s %s: vfnum:0x%x\n", adapter->name, pci_name(pdev),
+		hw->vfnum);
+
+	rnpgbevf_assign_netdev_ops(netdev);
+	strncpy(netdev->name, adapter->name, sizeof(netdev->name) - 1);
+
+	/* Setup hw api */
+	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+	hw->mac.type = ii->mac;
+
+	ii->get_invariants(hw);
+
+	memcpy(&hw->mbx.ops, &rnpgbevf_mbx_ops,
+	       sizeof(struct rnp_mbx_operations));
+
+	/* setup the private structure */
+	err = rnpgbevf_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	/* The HW MAC address was set and/or determined in sw_init */
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		pr_err("invalid MAC address\n");
+		err = -EIO;
+		goto err_sw_init;
+	}
+	/* MTU range: 68 - 9710 */
+	netdev->min_mtu = hw->min_length;
+	netdev->max_mtu = hw->max_length - (ETH_HLEN + 2 * ETH_FCS_LEN);
+
+	netdev->mtu = hw->mtu;
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_SG)
+		netdev->features |= NETIF_F_SG;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TSO)
+		netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_RX_HASH)
+		netdev->features |= NETIF_F_RXHASH;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_RX_CHECKSUM) {
+		netdev->features |= NETIF_F_RXCSUM;
+		adapter->flags |= RNPVF_FLAG_RX_CHKSUM_ENABLED;
+	}
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TX_CHECKSUM)
+		netdev->features |= NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_USO)
+		netdev->features |= NETIF_F_GSO_UDP_L4;
+
+	netdev->features |= NETIF_F_HIGHDMA;
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TX_UDP_TUNNEL) {
+		netdev->gso_partial_features = RNPVF_GSO_PARTIAL_FEATURES;
+		netdev->features |=
+			NETIF_F_GSO_PARTIAL | RNPVF_GSO_PARTIAL_FEATURES;
+	}
+
+	netdev->hw_features |= netdev->features;
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_VLAN_FILTER) {
+		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_FILTER;
+	}
+	if (hw->feature_flags & RNPVF_NET_FEATURE_VLAN_OFFLOAD) {
+		if (!(hw->pf_feature & PF_NCSI_EN)) {
+			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+		}
+	}
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+		if (!(hw->pf_feature & PF_NCSI_EN)) {
+			netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+			netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+		}
+	}
+	netdev->hw_features |= NETIF_F_RXALL;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_RX_NTUPLE_FILTER)
+		netdev->hw_features |= NETIF_F_NTUPLE;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_RX_FCS)
+		netdev->hw_features |= NETIF_F_RXFCS;
+
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->hw_enc_features |= netdev->vlan_features;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+
+	/* some fixed feature control by pf */
+	if (hw->pf_feature & PF_FEATURE_VLAN_FILTER) {
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+		netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+	}
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_VLAN_OFFLOAD) {
+		if (!(hw->pf_feature & PF_NCSI_EN)) {
+			netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+			netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+		}
+	}
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+		if (!(hw->pf_feature & PF_NCSI_EN)) {
+			netdev->features |= NETIF_F_HW_VLAN_STAG_RX;
+			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
+		}
+	}
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+	timer_setup(&adapter->watchdog_timer, rnpgbevf_watchdog, 0);
+	INIT_WORK(&adapter->watchdog_task, rnpgbevf_watchdog_task);
+	err = rnpgbevf_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = register_mbx_irq(adapter);
+	if (err)
+		goto err_register;
+
+	if (fix_eth_name)
+		strscpy(netdev->name, adapter->name, sizeof(netdev->name));
+	else
+		strscpy(netdev->name, "eth%d", sizeof(netdev->name));
+	err = register_netdev(netdev);
+	if (err) {
+		rnpgbevf_err("register_netdev faild!\n");
+		dev_err(&pdev->dev,
+			"%s %s: vfnum:0x%x. register_netdev faild!\n",
+			adapter->name, pci_name(pdev), hw->vfnum);
+		goto err_register;
+	}
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+
+	rnpgbevf_sysfs_init(netdev);
+
+#ifdef CONFIG_NET_NCSI
+	if (be_ncsi_mc &&
+	    (hw->vfnum & BIT(6)) == 0) {
+		adapter->ncsi_dev =
+			ncsi_register_dev(netdev, rnpgbevf_ncsi_notify_handler);
+		if (!adapter->ncsi_dev) {
+			dev_err(&pdev->dev, "ncsi register failed!\n");
+			err = -ENODEV;
+			goto err_register;
+		}
+	}
+#endif
+
+	/* print the MAC address */
+	hw_dbg(hw, "%pM\n", netdev->dev_addr);
+
+	hw_dbg(hw, "Mucse(R) n10 Virtual Function\n");
+
+	return 0;
+err_register:
+	remove_mbx_irq(adapter);
+	rnpgbevf_clear_interrupt_scheme(adapter);
+err_sw_init:
+err_ioremap:
+	free_netdev(netdev);
+
+	dev_err(&pdev->dev, "%s faild. err:%d\n", __func__, err);
+	return err;
+}
+
+static int rnpgbevf_rm_adpater(struct rnpgbevf_adapter *adapter)
+{
+	struct net_device *netdev;
+
+	if (!adapter)
+		return -EINVAL;
+
+	rnpgbevf_info("= remove adapter:%s =\n", adapter->name);
+	netdev = adapter->netdev;
+
+#ifdef CONFIG_NET_NCSI
+	if (adapter->ncsi_dev) {
+		ncsi_unregister_dev(adapter->ncsi_dev);
+		adapter->ncsi_dev = NULL;
+	}
+#endif
+
+	if (netdev) {
+		netif_carrier_off(netdev);
+		rnpgbevf_sysfs_exit(netdev);
+	}
+
+	set_bit(__RNPVF_REMOVE, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+
+	cancel_work_sync(&adapter->watchdog_task);
+
+	if (netdev) {
+		if (netdev->reg_state == NETREG_REGISTERED)
+			unregister_netdev(netdev);
+	}
+
+	remove_mbx_irq(adapter);
+	rnpgbevf_clear_interrupt_scheme(adapter);
+	rnpgbevf_reset_interrupt_capability(adapter);
+
+	free_netdev(netdev);
+
+	rnpgbevf_info("remove %s  complete\n", adapter->name);
+
+	return 0;
+}
+
+/**
+ * rnpgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in rnpgbevf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * rnpgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int rnpgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct rnpgbevf_adapter *adapter = NULL;
+	const struct rnpgbevf_info *ii = rnpgbevf_info_tbl[ent->driver_data];
+	int err;
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	/* prefer the device's 56-bit DMA mask, fall back to 32-bit */
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(56))) {
+		pci_using_hi_dma = 1;
+	} else {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
+			goto err_dma;
+		}
+		pci_using_hi_dma = 0;
+	}
+
+	err = pci_request_mem_regions(pdev, rnpgbevf_driver_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_selected_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+
+	err = rnpgbevf_add_adpater(pdev, ii, &adapter);
+	if (err) {
+		dev_err(&pdev->dev, "ERROR %s: %d\n", __func__, __LINE__);
+		goto err_regions;
+	}
+
+	return 0;
+
+err_regions:
+	pci_release_mem_regions(pdev);
+err_dma:
+err_pci_reg:
+	return err;
+}
+
+/**
+ * rnpgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * rnpgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void rnpgbevf_remove(struct pci_dev *pdev)
+{
+	struct rnpgbevf_adapter *adapter = pci_get_drvdata(pdev);
+
+	rnpgbevf_rm_adpater(adapter);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * rnpgbevf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t rnpgbevf_io_error_detected(struct pci_dev *pdev,
+						   pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		rnpgbevf_down(adapter);
+
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * rnpgbevf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the rnpgbevf_resume routine.
+ */
+static pci_ers_result_t rnpgbevf_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+
+	if (pci_enable_device_mem(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+
+	rnpgbevf_reset(adapter);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * rnpgbevf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the rnpgbevf_resume routine.
+ */
+static void rnpgbevf_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct rnpgbevf_adapter *adapter = netdev_priv(netdev);
+
+	if (netif_running(netdev))
+		rnpgbevf_up(adapter);
+
+	netif_device_attach(netdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static const struct pci_error_handlers rnpgbevf_err_handler = {
+	.error_detected = rnpgbevf_io_error_detected,
+	.slot_reset = rnpgbevf_io_slot_reset,
+	.resume = rnpgbevf_io_resume,
+};
+
+static struct pci_driver rnpgbevf_driver = {
+	.name = rnpgbevf_driver_name,
+	.id_table = rnpgbevf_pci_tbl,
+	.probe = rnpgbevf_probe,
+	.remove = rnpgbevf_remove,
+#ifdef CONFIG_PM
+	/* Power Management Hooks */
+	.suspend = rnpgbevf_suspend,
+	.resume = rnpgbevf_resume,
+#endif
+	.shutdown = rnpgbevf_shutdown,
+	.err_handler = &rnpgbevf_err_handler,
+};
+
+/**
+ * rnpgbevf_init_module - Driver Registration Routine
+ *
+ * rnpgbevf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init rnpgbevf_init_module(void)
+{
+	int ret;
+
+	pr_info("%s - version %s\n", rnpgbevf_driver_string,
+		rnpgbevf_driver_version);
+
+	pr_info("%s\n", rnpgbevf_copyright);
+
+	ret = pci_register_driver(&rnpgbevf_driver);
+	return ret;
+}
+
+module_init(rnpgbevf_init_module);
+
+/**
+ * rnpgbevf_exit_module - Driver Exit Cleanup Routine
+ *
+ * rnpgbevf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit rnpgbevf_exit_module(void)
+{
+	pci_unregister_driver(&rnpgbevf_driver);
+}
+
+module_exit(rnpgbevf_exit_module);
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_mbx.c b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_mbx.c
new file mode 100644
index 0000000000000..2708bfe662698
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_mbx.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include "rnpgbevf_mbx.h"
+#include "rnpgbevf.h"
+
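+/*
+ * Mirrors the two counter words at the start of a mailbox shared-memory
+ * block: PF (or firmware CPU) req/ack counts in the first word, VF
+ * req/ack counts in the second.
+ */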
+struct counter {
+	union {
+		struct {
+			unsigned short pf_req;
+			unsigned short pf_ack;
+		};
+		struct {
+			unsigned short cpu_req;
+			unsigned short cpu_ack;
+		};
+	};
+	unsigned short vf_req;
+	unsigned short vf_ack;
+} __packed;
+
+static s32 rnpgbevf_poll_for_msg(struct rnpgbevf_hw *hw, bool to_cm3);
+static s32 rnpgbevf_poll_for_ack(struct rnpgbevf_hw *hw, bool to_cm3);
+
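+/*
+ * Each VF owns a 64-byte shared-memory block per mailbox: a PF->VF (or
+ * CPU->VF) req/ack counter word at offset 0, a VF->PF (or VF->CPU)
+ * req/ack counter word at offset 4 and the message data area at offset 8.
+ * A separate control register acts both as a lock (MBOX_CTRL_VF_HOLD_SHM
+ * reserves the block for the VF) and as the doorbell that interrupts the
+ * PF/CM3 (MBOX_CTRL_REQ).
+ */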
+/* == VEC == */
+#define PF2VF_MBOX_VEC(mbx, vf) (mbx->pf2vf_mbox_vec_base + 4 * (vf))
+#define CPU2VF_MBOX_VEC(mbx, vf) (mbx->cpu2vf_mbox_vec_base + 4 * (vf))
+/* == PF <--> VF mailbox ==== */
+#define PF_VF_SHM(mbx, vf)                                                     \
+	((mbx->pf_vf_shm_base) +                                               \
+	 (64 * (vf)))
+#define PF2VF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 0)
+#define VF2PF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 4)
+#define PF_VF_SHM_DATA(mbx, vf) (PF_VF_SHM(mbx, vf) + 8)
+#define VF2PF_MBOX_CTRL(mbx, vf) ((mbx->vf2pf_mbox_ctrl_base) + (4 * (vf)))
+/* === CPU <--> VF === */
+#define CPU_VF_SHM(mbx, vf) (mbx->cpu_vf_shm_base + (64 * (vf)))
+#define CPU2VF_COUNTER(mbx, vf) (CPU_VF_SHM(mbx, vf) + 0)
+#define VF2CPU_COUNTER(mbx, vf) (CPU_VF_SHM(mbx, vf) + 4)
+#define CPU_VF_SHM_DATA(mbx, vf) (CPU_VF_SHM(mbx, vf) + 8)
+#define VF2CPU_MBOX_CTRL(mbx, vf) (mbx->vf2cpu_mbox_ctrl_base + 64 * (vf))
+#define CPU_VF_MBOX_MASK_LO(mbx, vf) (mbx->cpu_vf_mbox_mask_lo_base + 64 * (vf))
+#define CPU_VF_MBOX_MASK_HI(mbx, vf) (mbx->cpu_vf_mbox_mask_hi_base + 64 * (vf))
+#define MBOX_CTRL_REQ (1 << 0) /* WO */
+#define MBOX_CTRL_VF_HOLD_SHM (1 << 2) /* VF:WR, PF:RO */
+#define MBOX_IRQ_EN 0
+#define MBOX_IRQ_DISABLE 1
+
+/**
+ *  rnpgbevf_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @to_cm3: true to use the CM3 (firmware CPU) mailbox instead of the PF mailbox
+ *
+ *  returns 0 if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static s32 rnpgbevf_read_posted_mbx(struct rnpgbevf_hw *hw, u32 *msg, u16 size,
+				    bool to_cm3)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNPGBE_ERR_MBX;
+
+	if (!mbx->ops.read)
+		goto out;
+
+	ret_val = rnpgbevf_poll_for_msg(hw, to_cm3);
+
+	/* if ack received read message, otherwise we timed out */
+	if (!ret_val)
+		ret_val = mbx->ops.read(hw, msg, size, to_cm3);
+	else
+		printk(KERN_DEBUG "poll read timeout\n");
+out:
+	return ret_val;
+}
+
+/**
+ *  rnpgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @to_cm3: true to use the CM3 (firmware CPU) mailbox instead of the PF mailbox
+ *
+ *  returns 0 if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static s32 rnpgbevf_write_posted_mbx(struct rnpgbevf_hw *hw, u32 *msg, u16 size,
+				     bool to_cm3)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNPGBE_ERR_MBX;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
+		goto out;
+
+	/* send msg */
+	ret_val = mbx->ops.write(hw, msg, size, to_cm3);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = rnpgbevf_poll_for_ack(hw, to_cm3);
+out:
+	return ret_val;
+}
+
+static inline u16 rnpgbevf_mbx_get_req(struct rnpgbevf_hw *hw, int reg)
+{
+	/* memory barrier */
+	mb();
+	return mbx_rd32(hw, reg) & 0xffff;
+}
+
+static inline u16 rnpgbevf_mbx_get_ack(struct rnpgbevf_hw *hw, int reg)
+{
+	/* memory barrier */
+	mb();
+	return (mbx_rd32(hw, reg) >> 16) & 0xffff;
+}
+
+static inline void rnpgbevf_mbx_inc_vfreq(struct rnpgbevf_hw *hw, bool to_cm3)
+{
+	u16 req;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	int reg =
+		to_cm3 ? VF2CPU_COUNTER(mbx, vfnum) : VF2PF_COUNTER(mbx, vfnum);
+	u32 v = mbx_rd32(hw, reg);
+
+	req = (v & 0xffff);
+	req++;
+	v &= ~(0x0000ffff);
+	v |= req;
+	/* memory barrier */
+	mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+}
+
+static inline void rnpgbevf_mbx_inc_vfack(struct rnpgbevf_hw *hw, bool to_cm3)
+{
+	u16 ack;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	int reg =
+		to_cm3 ? VF2CPU_COUNTER(mbx, vfnum) : VF2PF_COUNTER(mbx, vfnum);
+	u32 v = mbx_rd32(hw, reg);
+
+	ack = (v >> 16) & 0xffff;
+	ack++;
+	v &= ~(0xffff0000);
+	v |= (ack << 16);
+	/* memory barrier */
+	mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+}
+
+/**
+ *  rnpgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @to_cm3: true to check the CM3 (firmware CPU) mailbox instead of the PF mailbox
+ *
+ *  returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 rnpgbevf_check_for_msg_vf(struct rnpgbevf_hw *hw, bool to_cm3)
+{
+	s32 ret_val = RNPGBE_ERR_MBX;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+
+	if (to_cm3 == true) {
+		if (rnpgbevf_mbx_get_req(hw, CPU2VF_COUNTER(mbx, vfnum)) !=
+		    hw->mbx.cpu_req) {
+			ret_val = 0;
+			hw->mbx.stats.reqs++;
+		}
+	} else {
+		if (rnpgbevf_mbx_get_req(hw, PF2VF_COUNTER(mbx, vfnum)) !=
+		    hw->mbx.pf_req) {
+			ret_val = 0;
+			hw->mbx.stats.reqs++;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbevf_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *
+ *  returns 0 if it successfully received a message notification
+ **/
+static s32 rnpgbevf_poll_for_msg(struct rnpgbevf_hw *hw, bool to_cm3)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	while (countdown && mbx->ops.check_for_msg(hw, to_cm3)) {
+		countdown--;
+		udelay(mbx->udelay);
+	}
+
+	return countdown ? 0 : RNPGBE_ERR_MBX;
+}
+
+/**
+ *  rnpgbevf_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *
+ *  returns 0 if it successfully received a message acknowledgement
+ **/
+static s32 rnpgbevf_poll_for_ack(struct rnpgbevf_hw *hw, bool to_cm3)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	while (countdown && mbx->ops.check_for_ack(hw, to_cm3)) {
+		countdown--;
+		udelay(mbx->udelay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown) {
+		mbx->timeout = 0;
+		dbg("%s timeout\n", __func__);
+	}
+
+	return countdown ? 0 : RNPGBE_ERR_MBX;
+}
+
+/**
+ *  rnpgbevf_check_for_rst_msg_vf - check for and handle control messages from the PF
+ *  @hw: pointer to the HW structure
+ *  @to_cm3: true to use the CM3 (firmware CPU) mailbox instead of the PF mailbox
+ *
+ *  Reads a pending message, applies any pushed setting (FCS, pause, padding,
+ *  VLAN, link, MTU, reset) to the adapter and returns RNPGBE_ERR_MBX if the
+ *  message type is unknown
+ **/
+static s32 rnpgbevf_check_for_rst_msg_vf(struct rnpgbevf_hw *hw, bool to_cm3)
+{
+	struct rnpgbevf_adapter *adapter = hw->back;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNPGBE_ERR_MBX;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	u32 DATA_REG = (to_cm3) ? CPU_VF_SHM_DATA(mbx, vfnum) :
+				  PF_VF_SHM_DATA(mbx, vfnum);
+	u32 data;
+	int ret = 1;
+
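+	/*
+	 * A pending PF message here is an unsolicited notification (FCS,
+	 * pause, padding, VLAN filter/ID, link, MTU or reset); decode it
+	 * and update the adapter state accordingly.
+	 */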
+	ret_val = rnpgbevf_check_for_msg_vf(hw, to_cm3);
+	if (!ret_val) {
+		mbx->ops.read(hw, &data, 1, 0);
+		data &= ~RNPGBE_PF_VFNUM_MASK;
+		dbg("mbx %x\n", data);
+		/* add other mailbox setup */
+		if (((data) & (~RNPGBE_VT_MSGTYPE_CTS)) ==
+		    RNPGBE_PF_CONTROL_PRING_MSG) {
+			/* pure control message: nothing to update */
+		} else if ((data) == RNPGBE_PF_SET_FCS) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			if (data) {
+				adapter->priv_flags |= RNPVF_PRIV_FLAG_FCS_ON;
+				adapter->netdev->features |= NETIF_F_RXFCS;
+			} else {
+				adapter->priv_flags &=
+					(~RNPVF_PRIV_FLAG_FCS_ON);
+				adapter->netdev->features &= (~NETIF_F_RXFCS);
+			}
+			/* if fcs on we must turn off rx-chksum */
+			if ((adapter->priv_flags & RNPVF_PRIV_FLAG_FCS_ON) &&
+			    (adapter->netdev->features & NETIF_F_RXCSUM))
+				adapter->netdev->features &= (~NETIF_F_RXCSUM);
+			else {
+				/* set back rx-chksum status */
+				if (adapter->flags &
+				    RNPVF_FLAG_RX_CHKSUM_ENABLED)
+					adapter->netdev->features |=
+						NETIF_F_RXCSUM;
+				else
+					adapter->netdev->features &=
+						(~NETIF_F_RXCSUM);
+			}
+
+		} else if ((data) == RNPGBE_PF_SET_PAUSE) {
+			hw->fc.current_mode = mbx_rd32(hw, DATA_REG + 4);
+		} else if ((data) == RNPGBE_PF_SET_FT_PADDING) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			if (data) {
+				adapter->priv_flags |=
+					RNPVF_PRIV_FLAG_FT_PADDING;
+			} else {
+				adapter->priv_flags &=
+					(~RNPVF_PRIV_FLAG_FT_PADDING);
+			}
+		} else if ((data) == RNPGBE_PF_SET_VLAN_FILTER) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			if (data) {
+				if (hw->feature_flags &
+				    RNPVF_NET_FEATURE_VLAN_OFFLOAD) {
+					adapter->netdev->features |=
+						NETIF_F_HW_VLAN_CTAG_FILTER;
+				}
+
+				if (hw->feature_flags &
+				    RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+					adapter->netdev->features |=
+						NETIF_F_HW_VLAN_STAG_FILTER;
+				}
+			} else {
+				if (hw->feature_flags &
+				    RNPVF_NET_FEATURE_VLAN_OFFLOAD) {
+					adapter->netdev->features &=
+						~NETIF_F_HW_VLAN_CTAG_FILTER;
+				}
+				if (hw->feature_flags &
+				    RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+					adapter->netdev->features &=
+						~NETIF_F_HW_VLAN_STAG_FILTER;
+				}
+			}
+		} else if ((data) == RNPGBE_PF_SET_VLAN) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			/* pf set vlan for this vf */
+			adapter->flags |= RNPVF_FLAG_PF_UPDATE_VLAN;
+			if (data) {
+				adapter->flags |= RNPVF_FLAG_PF_SET_VLAN;
+				adapter->vf_vlan = data;
+				/* should close tx vlan offload */
+				/* should open rx vlan offload */
+				if (adapter->netdev->features &
+				    NETIF_F_HW_VLAN_CTAG_RX)
+					adapter->priv_flags |=
+						RNPVF_FLAG_RX_CVLAN_OFFLOAD;
+				else
+					adapter->priv_flags &=
+						~RNPVF_FLAG_RX_CVLAN_OFFLOAD;
+
+				adapter->netdev->features |=
+					NETIF_F_HW_VLAN_CTAG_RX;
+				if (adapter->netdev->features &
+				    NETIF_F_HW_VLAN_CTAG_TX)
+					adapter->priv_flags |=
+						RNPVF_FLAG_TX_CVLAN_OFFLOAD;
+				else
+					adapter->priv_flags &=
+						~RNPVF_FLAG_TX_CVLAN_OFFLOAD;
+				adapter->netdev->features &=
+					~NETIF_F_HW_VLAN_CTAG_TX;
+				if (adapter->netdev->features &
+				    NETIF_F_HW_VLAN_STAG_RX)
+					adapter->priv_flags |=
+						RNPVF_FLAG_RX_SVLAN_OFFLOAD;
+				else
+					adapter->priv_flags &=
+						~RNPVF_FLAG_RX_SVLAN_OFFLOAD;
+
+				adapter->netdev->features |=
+					NETIF_F_HW_VLAN_STAG_RX;
+				if (adapter->netdev->features &
+				    NETIF_F_HW_VLAN_STAG_TX)
+					adapter->priv_flags |=
+						RNPVF_FLAG_TX_SVLAN_OFFLOAD;
+				else
+					adapter->priv_flags &=
+						~RNPVF_FLAG_TX_SVLAN_OFFLOAD;
+
+				adapter->netdev->features &=
+					~NETIF_F_HW_VLAN_STAG_TX;
+			} else {
+				adapter->flags &= (~RNPVF_FLAG_PF_SET_VLAN);
+				adapter->vf_vlan = 0;
+				if (adapter->priv_flags &
+				    RNPVF_FLAG_RX_CVLAN_OFFLOAD)
+					adapter->netdev->features |=
+						NETIF_F_HW_VLAN_CTAG_RX;
+				else
+					adapter->netdev->features &=
+						~NETIF_F_HW_VLAN_CTAG_RX;
+
+				if (adapter->priv_flags &
+				    RNPVF_FLAG_TX_CVLAN_OFFLOAD)
+					adapter->netdev->features |=
+						NETIF_F_HW_VLAN_CTAG_TX;
+				else
+					adapter->netdev->features &=
+						~NETIF_F_HW_VLAN_CTAG_TX;
+
+				if (adapter->priv_flags &
+				    RNPVF_FLAG_RX_SVLAN_OFFLOAD)
+					adapter->netdev->features |=
+						NETIF_F_HW_VLAN_STAG_RX;
+				else
+					adapter->netdev->features &=
+						~NETIF_F_HW_VLAN_STAG_RX;
+
+				if (adapter->priv_flags &
+				    RNPVF_FLAG_TX_SVLAN_OFFLOAD)
+					adapter->netdev->features |=
+						NETIF_F_HW_VLAN_STAG_TX;
+				else
+					adapter->netdev->features &=
+						~NETIF_F_HW_VLAN_STAG_TX;
+
+			}
+			hw->ops.set_veb_vlan(hw, data, VFNUM(mbx, hw->vfnum));
+		} else if ((data) == RNPGBE_PF_SET_LINK) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			if (data & RNPGBE_PF_LINK_UP) {
+				hw->link = true;
+				hw->speed = data & 0xffff;
+			} else {
+				hw->link = false;
+				hw->speed = 0;
+			}
+		} else if ((data) == RNPGBE_PF_SET_MTU) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			hw->mtu = data;
+			adapter->flags |= RNPVF_FLAG_PF_UPDATE_MTU;
+		} else if ((data) == RNPGBE_PF_SET_RESET) {
+			adapter->flags |= RNPVF_FLAG_PF_RESET;
+		} else {
+			return RNPGBE_ERR_MBX;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ *  rnpgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ *  @hw: pointer to the HW structure
+ *  @to_cm3: true to check the CM3 (firmware CPU) mailbox instead of the PF mailbox
+ *
+ *  returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 rnpgbevf_check_for_ack_vf(struct rnpgbevf_hw *hw, bool to_cm3)
+{
+	s32 ret_val = RNPGBE_ERR_MBX;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+
+	if (to_cm3 == true) {
+		if (rnpgbevf_mbx_get_ack(hw, CPU2VF_COUNTER(mbx, vfnum)) !=
+		    hw->mbx.cpu_ack) {
+			ret_val = 0;
+			hw->mbx.stats.acks++;
+		}
+	} else {
+		if (rnpgbevf_mbx_get_ack(hw, PF2VF_COUNTER(mbx, vfnum)) !=
+		    hw->mbx.pf_ack) {
+			ret_val = 0;
+			hw->mbx.stats.acks++;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *
+ *  return 0 if we obtained the mailbox lock
+ **/
+static s32 rnpgbevf_obtain_mbx_lock_vf(struct rnpgbevf_hw *hw, bool to_cm3)
+{
+	int try_cnt = 2 * 1000;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	struct rnpgbevf_adapter *adapter = hw->back;
+	u32 CTRL_REG = (to_cm3) ? VF2CPU_MBOX_CTRL(mbx, vfnum) :
+				  VF2PF_MBOX_CTRL(mbx, vfnum);
+
+	while (try_cnt-- > 0) {
+		/* Take ownership of the buffer */
+		mbx_wr32(hw, CTRL_REG, MBOX_CTRL_VF_HOLD_SHM);
+		/* memory barrier */
+		mb();
+		/* reserve mailbox for vf use */
+		if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_VF_HOLD_SHM)
+			return 0;
+		udelay(500);
+	}
+
+	printk(KERN_DEBUG "[rnpvf] %s: faild to get mbx-lock\n", adapter->name);
+	return RNPGBE_ERR_MBX;
+}
+
+/**
+ *  rnpgbevf_write_mbx_vf - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @to_cm3: true to use the CM3 (firmware CPU) mailbox instead of the PF mailbox
+ *
+ *  returns 0 if it successfully copied message into the buffer
+ **/
+static s32 rnpgbevf_write_mbx_vf(struct rnpgbevf_hw *hw, u32 *msg, u16 size,
+				 bool to_cm3)
+{
+	s32 ret_val;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 i;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	u32 DATA_REG = (to_cm3) ? CPU_VF_SHM_DATA(mbx, vfnum) :
+				  PF_VF_SHM_DATA(mbx, vfnum);
+	u32 CTRL_REG = (to_cm3) ? VF2CPU_MBOX_CTRL(mbx, vfnum) :
+				  VF2PF_MBOX_CTRL(mbx, vfnum);
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = rnpgbevf_obtain_mbx_lock_vf(hw, to_cm3);
+	if (ret_val) {
+		printk(KERN_DEBUG
+		       "%s: get mbx wlock failed. ret:%d. req:0x%08x-0x%08x\n",
+		       __func__, ret_val, msg[0], msg[1]);
+		goto out_no_write;
+	}
+
+	/* add mailbox_id [27:21] */
+#define VF_NUM_OFFSET (21)
+	if (!to_cm3)
+		msg[0] |= ((hw->vfnum & 0x3f) << VF_NUM_OFFSET);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		mbx_wr32(hw, DATA_REG + i * 4, msg[i]);
+
+	/* update acks. used by rnpgbevf_check_for_ack_vf  */
+	if (to_cm3 == true)
+		hw->mbx.cpu_ack =
+			rnpgbevf_mbx_get_ack(hw, CPU2VF_COUNTER(mbx, vfnum));
+	else
+		hw->mbx.pf_ack =
+			rnpgbevf_mbx_get_ack(hw, PF2VF_COUNTER(mbx, vfnum));
+	rnpgbevf_mbx_inc_vfreq(hw, to_cm3);
+
+	/* Drop VFU and interrupt the PF/CM3 to
+	 * tell it a message has been sent
+	 */
+	mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ);
+
+out_no_write:
+	return ret_val;
+}
+
+/**
+ *  rnpgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @to_cm3: true to use the CM3 (firmware CPU) mailbox instead of the PF mailbox
+ *
+ *  returns 0 if it successfully read message from buffer
+ **/
+static s32 rnpgbevf_read_mbx_vf(struct rnpgbevf_hw *hw, u32 *msg, u16 size,
+				bool to_cm3)
+{
+	s32 ret_val = 0;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 i;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	u32 BUF_REG = (to_cm3) ? CPU_VF_SHM_DATA(mbx, vfnum) :
+				 PF_VF_SHM_DATA(mbx, vfnum);
+	u32 CTRL_REG = (to_cm3) ? VF2CPU_MBOX_CTRL(mbx, vfnum) :
+				  VF2PF_MBOX_CTRL(mbx, vfnum);
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = rnpgbevf_obtain_mbx_lock_vf(hw, to_cm3);
+	if (ret_val)
+		goto out_no_read;
+
+	/* we need this */
+	mb();
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = mbx_rd32(hw, BUF_REG + 4 * i);
+
+	/* clear vf_num */
+#define RNPGBE_VF_NUM_MASK (0x7f << 21)
+	msg[0] &= (~RNPGBE_VF_NUM_MASK);
+
+	/* update req. used by rnpgbevf_check_for_msg_vf  */
+	if (to_cm3 == true)
+		hw->mbx.cpu_req =
+			rnpgbevf_mbx_get_req(hw, CPU2VF_COUNTER(mbx, vfnum));
+	else
+		hw->mbx.pf_req =
+			rnpgbevf_mbx_get_req(hw, PF2VF_COUNTER(mbx, vfnum));
+	/* Acknowledge receipt and release mailbox, then we're done */
+	rnpgbevf_mbx_inc_vfack(hw, to_cm3);
+
+	/* free ownership of the buffer */
+	mbx_wr32(hw, CTRL_REG, 0);
+
+out_no_read:
+	return ret_val;
+}
+
+static void rnpgbevf_reset_mbx(struct rnpgbevf_hw *hw)
+{
+	u32 v;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+
+	/* release vfu */
+	mbx_wr32(hw, VF2CPU_MBOX_CTRL(mbx, vfnum), 0);
+	mbx_wr32(hw, VF2PF_MBOX_CTRL(mbx, vfnum), 0);
+
+	/* fetch mbx counter values */
+	v = mbx_rd32(hw, PF2VF_COUNTER(mbx, vfnum));
+	hw->mbx.pf_req = v & 0xffff;
+	hw->mbx.pf_ack = (v >> 16) & 0xffff;
+
+	v = mbx_rd32(hw, CPU2VF_COUNTER(mbx, vfnum));
+	hw->mbx.cpu_req = v & 0xffff;
+	hw->mbx.cpu_ack = (v >> 16) & 0xffff;
+}
+
+static s32 rnpgbevf_mbx_configure_vf(struct rnpgbevf_hw *hw, int nr_vec,
+				     bool enable)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int mbx_vec_reg, vfnum = VFNUM(mbx, hw->vfnum);
+
+	/* PF --> VF */
+	mbx_vec_reg = PF2VF_MBOX_VEC(mbx, vfnum);
+	mbx_wr32(hw, mbx_vec_reg, nr_vec);
+
+	return 0;
+}
+
+/**
+ *  rnpgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+static s32 rnpgbevf_init_mbx_params_vf(struct rnpgbevf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+
+	/* start mailbox as timed out and let the reset_hw call set the timeout
+	 * value to begin communications
+	 */
+	mbx->timeout = 0;
+	mbx->udelay = RNPGBE_VF_MBX_INIT_DELAY;
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+	mbx->size = RNPGBE_VFMAILBOX_SIZE;
+	rnpgbevf_reset_mbx(hw);
+	return 0;
+}
+
+const struct rnp_mbx_operations rnpgbevf_mbx_ops = {
+	.init_params = rnpgbevf_init_mbx_params_vf,
+	.read = rnpgbevf_read_mbx_vf,
+	.write = rnpgbevf_write_mbx_vf,
+	.read_posted = rnpgbevf_read_posted_mbx,
+	.write_posted = rnpgbevf_write_posted_mbx,
+	.check_for_msg = rnpgbevf_check_for_msg_vf,
+	.check_for_ack = rnpgbevf_check_for_ack_vf,
+	.check_for_rst = rnpgbevf_check_for_rst_msg_vf,
+	.configure = rnpgbevf_mbx_configure_vf,
+};
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_mbx.h b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_mbx.h
new file mode 100644
index 0000000000000..bae2bb18ae6bb
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_mbx.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBEVF_MBX_H_
+#define _RNPGBEVF_MBX_H_
+
+#include "vf.h"
+
+#define RNPGBE_VFMAILBOX_SIZE 14 /* 14 32-bit data words of the 64-byte mailbox */
+#define RNPGBE_ERR_MBX -100
+
+struct mbx_shm {
+	u32 stat;
+#define MBX_ST_PF_ACK (1 << 0)
+#define MBX_ST_PF_STS (1 << 1)
+#define MBX_ST_PF_RST (1 << 2)
+#define MBX_ST_VF_ACK (1 << 3)
+#define MBX_ST_VF_REQ (1 << 4)
+#define MBX_ST_VF_RST (1 << 5)
+#define MBX_ST_CPU_ACK (1 << 6)
+#define MBX_ST_CPU_REQ (1 << 7)
+
+	u32 data[RNPGBE_VFMAILBOX_SIZE];
+} __aligned(4);
+
+/* If it's a RNPGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is RNPGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+/* Messages below or'd with this are the ACK */
+#define RNPGBE_VT_MSGTYPE_ACK 0x80000000
+/* Messages below or'd with this are the NACK */
+#define RNPGBE_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that VF is still clear to send requests */
+#define RNPGBE_VT_MSGTYPE_CTS 0x20000000
+#define RNPGBE_VT_MSGINFO_SHIFT 14
+/* bits [21:14] carry extra info for certain messages */
+#define RNPGBE_VT_MSGINFO_MASK (0xFF << RNPGBE_VT_MSGINFO_SHIFT)
+
+/* mailbox API, legacy requests */
+#define RNPGBE_VF_RESET 0x01 /* VF requests reset */
+#define RNPGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define RNPGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define RNPGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define RNPGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define RNPGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define RNPGBE_VF_GET_MACVLAN 0x07 /* VF requests mac */
+#define RNPGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define RNPGBE_VF_GET_QUEUE 0x09 /* get queue configuration */
+#define RNPGBE_VF_SET_VLAN_STRIP 0x0a /* VF requests PF to set VLAN STRIP */
+#define RNPGBE_VF_REG_RD 0x0b /* vf read reg */
+#define RNPGBE_VF_GET_MTU 0x0c /* vf requests mtu from pf */
+#define RNPGBE_VF_SET_MTU 0x0d /* vf requests pf to set mtu */
+#define RNPGBE_VF_GET_FW 0x0e /* vf requests firmware version */
+#define RNPGBE_VF_RESET_PF 0x13 /* vf requests pf reset */
+#define RNPGBE_PF_VFNUM_MASK (0x3f << 21)
+#define RNPGBE_PF_SET_FCS 0x10 /* PF set fcs status */
+#define RNPGBE_PF_SET_PAUSE 0x11 /* PF set pause status */
+#define RNPGBE_PF_SET_FT_PADDING 0x12 /* PF set ft padding status */
+#define RNPGBE_PF_SET_VLAN_FILTER 0x13
+#define RNPGBE_PF_SET_VLAN 0x14
+#define RNPGBE_PF_SET_LINK 0x15
+#define RNPGBE_PF_SET_MTU 0x16
+#define RNPGBE_PF_SET_RESET 0x17
+#define RNPGBE_PF_LINK_UP (1 << 31)
+#define RNPGBE_PF_REMOVE 0x0f
+#define RNPGBE_PF_GET_LINK 0x10
+/* GET_QUEUES return data indices within the mailbox */
+#define RNPGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define RNPGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define RNPGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define RNPGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+
+/* length of permanent address message returned from PF */
+#define RNPGBE_VF_PERMADDR_MSG_LEN 11
+/* word in permanent address message with the current multicast type */
+#define RNPGBE_VF_MC_TYPE_WORD 3
+#define RNPGBE_VF_DMA_VERSION_WORD 4
+#define RNPGBE_VF_VLAN_WORD 5
+#define RNPGBE_VF_PHY_TYPE_WORD 6
+#define RNPGBE_VF_FW_VERSION_WORD 7
+#define RNPGBE_VF_LINK_STATUS_WORD 8
+#define RNPGBE_VF_AXI_MHZ 9
+#define RNPGBE_VF_FEATURE 10
+#define RNPGBE_PF_CONTROL_PRING_MSG 0x0100 /* PF control message */
+#define RNPGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define RNPGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct rnpgbevf_hw;
+
+enum MBX_ID {
+	MBX_VF0 = 0,
+	MBX_VF1,
+	//...
+	MBX_VF63,
+	MBX_CM3CPU,
+	MBX_VFCNT
+};
+
+#endif /* _RNPGBEVF_MBX_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_regs.h b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_regs.h
new file mode 100644
index 0000000000000..187a1705a4f1f
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_regs.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPGBEVF_REGS_H_
+#define _RNPGBEVF_REGS_H_
+
+enum NIC_MODE {
+	MODE_NIC_MODE_2PORT_40G = 0,
+	MODE_NIC_MODE_2PORT_10G = 1,
+	MODE_NIC_MODE_4PORT_10G = 2,
+	MODE_NIC_MODE_8PORT_10G = 3,
+};
+
+/* RNP-Ring Registers */
+#define RNPGBE_DMA_RING_BASE 0x8000
+#define RNPGBE_DMA_RX_DESC_TIMEOUT_TH 0x8000
+#define RNPGBE_DMA_TX_DESC_FETCH_CTL 0x8004
+#define RNPGBE_DMA_TX_FLOW_CTRL_TM 0x8008
+/* DMA-ENABLE-IRQ */
+#define RNPGBE_RING_BASE_N10 (0x8000)
+#define RNPGBE_RING_BASE_N500 (0x1000)
+#define RNPGBE_RING_OFFSET(i) (0x100 * (i))
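+/*
+ * The registers below are small offsets inside one ring's register block
+ * (see RNPGBE_RING_BASE_* and RNPGBE_RING_OFFSET()); they are accessed
+ * through ring_rd32()/ring_wr32() relative to ring->ring_addr.
+ */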
+#define RNPGBE_DMA_RX_START (0x10)
+#define RNPGBE_DMA_RX_READY (0x14)
+#define RNPGBE_DMA_TX_START (0x18)
+#define RNPGBE_DMA_TX_READY (0x1c)
+#define RNPGBE_DMA_INT_STAT (0x20)
+#define RNPGBE_DMA_INT_MASK (0x24)
+#define TX_INT_MASK (1 << 1)
+#define RX_INT_MASK (1 << 0)
+#define RNPGBE_DMA_INT_CLR (0x28)
+#define RNPGBE_DMA_INT_TRIG (0x2c)
+/* RX-Queue Registers */
+#define RNPGBE_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI (0x30)
+#define RNPGBE_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO (0x34)
+#define RNPGBE_DMA_REG_RX_DESC_BUF_LEN (0x38)
+#define RNPGBE_DMA_REG_RX_DESC_BUF_HEAD (0x3c)
+#define RNPGBE_DMA_REG_RX_DESC_BUF_TAIL (0x40)
+#define RNPGBE_DMA_REG_RX_DESC_FETCH_CTRL (0x44)
+#define RNPGBE_DMA_REG_RX_INT_DELAY_TIMER (0x48)
+#define RNPGBE_DMA_REG_RX_INT_DELAY_PKTCNT (0x4c)
+#define RNPGBE_DMA_REG_RX_ARB_DEF_LVL (0x50)
+#define PCI_DMA_REG_RX_DESC_TIMEOUT_TH (0x54)
+#define PCI_DMA_REG_RX_SCATTER_LENGH (0x58)
+/* TX-Queue Registers */
+#define RNPGBE_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI (0x60)
+#define RNPGBE_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO (0x64)
+#define RNPGBE_DMA_REG_TX_DESC_BUF_LEN (0x68)
+#define RNPGBE_DMA_REG_TX_DESC_BUF_HEAD (0x6c)
+#define RNPGBE_DMA_REG_TX_DESC_BUF_TAIL (0x70)
+#define RNPGBE_DMA_REG_TX_DESC_FETCH_CTRL (0x74)
+#define RNPGBE_DMA_REG_TX_INT_DELAY_TIMER (0x78)
+#define RNPGBE_DMA_REG_TX_INT_DELAY_PKTCNT (0x7c)
+#define RNPGBE_DMA_REG_TX_ARB_DEF_LVL (0x80)
+#define RNPGBE_DMA_REG_TX_FLOW_CTRL_TH (0x84)
+#define RNPGBE_DMA_REG_TX_FLOW_CTRL_TM (0x88)
+
+/* VEB Registers */
+#define VEB_TBL_CNTS 64
+#define RNPGBE_DMA_PORT_VBE_MAC_LO_TBL_N10(port, vf)                           \
+	(0x80A0 + 4 * (port) + 0x100 * (vf))
+#define RNPGBE_DMA_PORT_VBE_MAC_HI_TBL_N10(port, vf)                           \
+	(0x80B0 + 4 * (port) + 0x100 * (vf))
+#define RNPGBE_DMA_PORT_VEB_VID_TBL_N10(port, vf)                              \
+	(0x80C0 + 4 * (port) + 0x100 * (vf))
+#define RNPGBE_DMA_PORT_VEB_VF_RING_TBL_N10(port, vf)                          \
+	(0x80D0 + 4 * (port) +                                                 \
+	 0x100 * (vf))
+
+#define RNPGBE_DMA_PORT_VBE_MAC_LO_TBL_N500 (0x10c0)
+#define RNPGBE_DMA_PORT_VBE_MAC_HI_TBL_N500 (0x10c4)
+#define RNPGBE_DMA_PORT_VEB_VID_TBL_N500 (0x10c8)
+#define RNPGBE_DMA_PORT_VEB_VF_RING_TBL_N500 (0x10cc)
+
+#define RNPGBE_DMA_STATS_DMA_TO_MAC (0x1a0)
+#define RNPGBE_DMA_STATS_DMA_TO_SWITCH (0x1a4)
+#define RNPGBE_DMA_STATS_MAC_TO_MAC (0x1b0)
+#define RNPGBE_DMA_STATS_SWITCH_TO_SWITCH (0x1a4)
+#define RNPGBE_DMA_STATS_MAC_TO_DMA (0x1a8)
+#define RNPGBE_DMA_STATS_SWITCH_TO_DMA (0x1ac)
+
+#define RNPVF500_VEB_VFMPRC(i) (0x4018 + 0x100 * (i))
+/* =====  PF-VF Functions ==== */
+#define VF_NUM_REG 0xa3000
+#define VF_NUM_REG_N10 0x75f000
+#define VF_NUM_REG_N500 (0xe000)
+/* 8bit: 7:vf_active 6:fun0/fun1 [5:0]:vf_num */
+#define VF_NUM(vfnum, fun) ((1 << 7) | (((fun) & 0x1) << 6) | ((vfnum) & 0x3f))
+#define PF_NUM(fun) (((fun) & 0x1) << 6)
+
+/* ==== Ring-MSIX Registers (MSI-X_module_design.docs) === */
+#define RING_VECTOR(n) (0x4000 + 0x04 * (n))
+
+static inline unsigned int p_rnpgbevf_rd_reg(void *reg)
+{
+	unsigned int v = ioread32((void *)(reg));
+
+	printk(KERN_DEBUG " rd-reg: %p ==> 0x%08x\n", reg, v);
+	return v;
+}
+#define p_rnpgbevf_wr_reg(reg, val)                                            \
+	do {                                                                   \
+		printk(KERN_DEBUG " wr-reg: %p <== 0x%08x \t#%-4d %s\n",       \
+		       (reg), (val), __LINE__, __FILE__);                      \
+		iowrite32((val), (void *)(reg));                               \
+	} while (0)
+
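+/*
+ * IO_PRINT and CONFIG_RNPGBE_MBX_DEBUG select the tracing accessors above
+ * instead of plain readl()/writel() for register and mailbox I/O.
+ */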
+#ifdef IO_PRINT
+#define rnpgbevf_rd_reg(reg) p_rnpgbevf_rd_reg(reg)
+#define rnpgbevf_wr_reg(reg, val) p_rnpgbevf_wr_reg(reg, val)
+#else
+#define rnpgbevf_rd_reg(reg) readl((void *)(reg))
+#define rnpgbevf_wr_reg(reg, val) writel((val), (void *)(reg))
+#endif
+
+#ifdef CONFIG_RNPGBE_MBX_DEBUG
+#define mbx_rd32(hw, reg) p_rnpgbevf_rd_reg((hw)->hw_addr + (reg))
+#define mbx_wr32(hw, reg, val) p_rnpgbevf_wr_reg((hw)->hw_addr + (reg), (val))
+#else
+#define mbx_rd32(hw, reg) rnpgbevf_rd_reg((hw)->hw_addr + (reg))
+#define mbx_wr32(hw, reg, val) rnpgbevf_wr_reg((hw)->hw_addr + (reg), (val))
+#endif
+
+#define rd32(hw, off) rnpgbevf_rd_reg((hw)->hw_addr + (off))
+#define wr32(hw, off, val) rnpgbevf_wr_reg((hw)->hw_addr + (off), (val))
+
+#define ring_rd32(ring, off) rnpgbevf_rd_reg((ring)->ring_addr + (off))
+#define ring_wr32(ring, off, val)                                              \
+	rnpgbevf_wr_reg((ring)->ring_addr + (off), (val))
+
+#define pwr32(hw, reg, val)                                                    \
+	do {                                                                   \
+		printk(KERN_DEBUG " wr-reg: %p <== 0x%08x \t#%-4d %s\n",       \
+		       (hw)->hw_addr + (reg), (val), __LINE__, __FILE__);      \
+		iowrite32((val), (hw)->hw_addr + (reg));                       \
+	} while (0)
+
+/* ==== log helper === */
+#ifdef DEBUG
+#define hw_dbg(hw, fmt, args...) printk(KERN_DEBUG "hw-dbg : " fmt, ##args)
+#else
+#define hw_dbg(hw, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _RNPGBEVF_REGS_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_sysfs.c b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_sysfs.c
new file mode 100644
index 0000000000000..ef06eb43fa0d3
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/rnpgbevf_sysfs.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "rnpgbevf.h"
+
+
+int rnpgbevf_sysfs_init(struct net_device *ndev)
+{
+	return 0;
+}
+
+void rnpgbevf_sysfs_exit(struct net_device *ndev)
+{
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/vf.c b/drivers/net/ethernet/mucse/rnpgbevf/vf.c
new file mode 100644
index 0000000000000..9208e10ac5e39
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/vf.c
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include "vf.h"
+#include "rnpgbevf.h"
+
+static int rnpgbevf_reset_pf(struct rnpgbevf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = RNPGBE_VF_RESET_PF;
+
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 2, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 2, false);
+
+	return ret_val;
+}
+
+static int rnpgbevf_get_mtu(struct rnpgbevf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = RNPGBE_VF_GET_MTU;
+
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 2, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 2, false);
+
+	msgbuf[0] &= ~RNPGBE_VT_MSGTYPE_CTS;
+
+	/* a NACK means the PF refused to report the MTU */
+	if (!ret_val &&
+	    (msgbuf[0] == (RNPGBE_VF_GET_MTU | RNPGBE_VT_MSGTYPE_NACK))) {
+		/* get mtu failed */
+		return -1;
+	}
+	hw->mtu = msgbuf[1];
+
+	return ret_val;
+}
+
+static int rnpgbevf_set_mtu(struct rnpgbevf_hw *hw, int mtu)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = RNPGBE_VF_SET_MTU;
+	msgbuf[1] = mtu;
+
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 2, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 2, false);
+
+	msgbuf[0] &= ~RNPGBE_VT_MSGTYPE_CTS;
+
+	/* a NACK means the PF rejected the requested MTU */
+	if (!ret_val &&
+	    (msgbuf[0] == (RNPGBE_VF_SET_MTU | RNPGBE_VT_MSGTYPE_NACK))) {
+		/* set mtu failed */
+		return -1;
+	}
+
+	return ret_val;
+}
+
+static int rnpgbevf_read_eth_reg(struct rnpgbevf_hw *hw, int reg, u32 *value)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	int err;
+
+	msgbuf[0] = RNPGBE_VF_REG_RD;
+	msgbuf[1] = reg;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2, false);
+	if (err)
+		goto mbx_err;
+
+	err = mbx->ops.read_posted(hw, msgbuf, 2, false);
+	if (err)
+		goto mbx_err;
+
+	/* remove extra bits from the message */
+	msgbuf[0] &= ~RNPGBE_VT_MSGTYPE_CTS;
+	msgbuf[0] &= ~(0xFF << RNPGBE_VT_MSGINFO_SHIFT);
+
+	if (msgbuf[0] != (RNPGBE_VF_REG_RD | RNPGBE_VT_MSGTYPE_ACK))
+		err = RNPGBE_ERR_INVALID_ARGUMENT;
+
+	*value = msgbuf[1];
+
+mbx_err:
+	return err;
+}
+
+/**
+ *  rnpgbevf_start_hw_vf - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware by clearing the adapter_stopped flag.  Ring, filter
+ *  and link configuration are performed later, so the transmit and receive
+ *  units are left untouched here
+ **/
+static s32 rnpgbevf_start_hw_vf(struct rnpgbevf_hw *hw)
+{
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = false;
+
+	return 0;
+}
+
+/**
+ *  rnpgbevf_init_hw_vf - virtual function hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by starting it and reading the permanent MAC
+ *  address from the PF
+ **/
+static s32 rnpgbevf_init_hw_vf(struct rnpgbevf_hw *hw)
+{
+	s32 status;
+
+	status = hw->mac.ops.start_hw(hw);
+
+	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+	return status;
+}
+
+/**
+ *  rnpgbevf_reset_hw_vf - Performs hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the VF by posting a reset request to the PF over the mailbox and
+ *  reading back the permanent address message with the initial configuration.
+ **/
+static s32 rnpgbevf_reset_hw_vf(struct rnpgbevf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	struct rnpgbevf_adapter *adapter = hw->back;
+	s32 ret_val = RNPGBE_ERR_INVALID_MAC_ADDR;
+	u32 msgbuf[RNPGBE_VF_PERMADDR_MSG_LEN];
+	u8 *addr = (u8 *)(&msgbuf[1]);
+	u32 vlan;
+	int try_cnt = 10;
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	hw->mac.ops.stop_adapter(hw);
+
+	/* reset the api version */
+	hw->api_version = 0;
+
+	/* mailbox timeout can now become active */
+	mbx->timeout = RNPGBE_VF_MBX_INIT_TIMEOUT;
+
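+	/*
+	 * Reset handshake: post RNPGBE_VF_RESET, then read back the
+	 * permanent-address message, which also carries the mc filter type,
+	 * flow-control mode, DMA/firmware versions, port VLAN, link state,
+	 * AXI clock and PF feature flags.
+	 */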
+	while (try_cnt--) {
+		msgbuf[0] = RNPGBE_VF_RESET;
+		mbx->ops.write_posted(hw, msgbuf, 1, false);
+		/* give the PF a moment to write the reply before polling */
+		mdelay(20);
+
+		/* set our "perm_addr" based on info provided by PF */
+		/* also set up the mc_filter_type which is piggy backed
+		 * on the mac address in word 3
+		 */
+		ret_val = mbx->ops.read_posted(
+			hw, msgbuf, RNPGBE_VF_PERMADDR_MSG_LEN, false);
+		if (ret_val == 0)
+			break;
+	}
+	if (ret_val) {
+		dev_info(&hw->pdev->dev, "echo vf reset timeout\n");
+		return ret_val;
+	}
+
+	/* New versions of the PF may NACK the reset return message
+	 * to indicate that no MAC address has yet been assigned for
+	 * the VF.
+	 */
+	if (msgbuf[0] != (RNPGBE_VF_RESET | RNPGBE_VT_MSGTYPE_ACK) &&
+	    msgbuf[0] != (RNPGBE_VF_RESET | RNPGBE_VT_MSGTYPE_NACK))
+		return RNPGBE_ERR_INVALID_MAC_ADDR;
+	/* we get mac address from mailbox */
+
+	memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
+	hw->mac.mc_filter_type = msgbuf[RNPGBE_VF_MC_TYPE_WORD] & 0xff;
+
+	/* ft padding */
+	if ((msgbuf[RNPGBE_VF_MC_TYPE_WORD] >> 8) & 0xff)
+		adapter->priv_flags |= RNPVF_PRIV_FLAG_FT_PADDING;
+	else
+		adapter->priv_flags = 0;
+	/* fc mode */
+	hw->fc.current_mode = (msgbuf[RNPGBE_VF_MC_TYPE_WORD] >> 16) & 0xff;
+
+	/* phy status */
+	hw->phy_type = (msgbuf[RNPGBE_VF_PHY_TYPE_WORD] & 0xffff);
+
+	hw->dma_version = hw->mac.dma_version =
+		msgbuf[RNPGBE_VF_DMA_VERSION_WORD];
+
+	/* vlan status */
+	vlan = msgbuf[RNPGBE_VF_VLAN_WORD];
+	if (vlan & 0xffff) {
+		adapter->vf_vlan = vlan & 0xffff;
+		adapter->flags |= RNPVF_FLAG_PF_SET_VLAN;
+	}
+
+	hw->ops.set_veb_vlan(hw, vlan, VFNUM(mbx, hw->vfnum));
+	hw->fw_version = msgbuf[RNPGBE_VF_FW_VERSION_WORD];
+
+	if (msgbuf[RNPGBE_VF_LINK_STATUS_WORD] & RNPGBE_PF_LINK_UP) {
+		hw->link = true;
+		hw->speed = msgbuf[RNPGBE_VF_LINK_STATUS_WORD] & 0xffff;
+	} else {
+		hw->link = false;
+		hw->speed = 0;
+	}
+
+	hw->usecstocount = msgbuf[RNPGBE_VF_AXI_MHZ];
+
+	DPRINTK(PROBE, INFO, "dma_versioin:%x vlan %d\n", hw->mac.dma_version,
+		adapter->vf_vlan);
+	DPRINTK(PROBE, INFO, "axi:%x\n", hw->usecstocount);
+	DPRINTK(PROBE, INFO, "firmware :%x\n", hw->fw_version);
+	DPRINTK(PROBE, INFO, "link speed :%x\n", hw->speed);
+	DPRINTK(PROBE, INFO, "link status :%s\n", hw->link ? "up" : "down");
+	hw->pf_feature = msgbuf[RNPGBE_VF_FEATURE];
+
+	return 0;
+}
+
+/**
+ *  rnpgbevf_stop_hw_vf - Generic stop Tx/Rx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within rnpgbevf_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+static s32 rnpgbevf_stop_hw_vf(struct rnpgbevf_hw *hw)
+{
+	u32 number_of_queues;
+	u16 i;
+	struct rnpgbevf_adapter *adapter = hw->back;
+	struct rnpgbevf_ring *ring;
+
+	/*
+	 * Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = true;
+
+	/* Disable the receive unit by stopped each queue */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		ring = adapter->rx_ring[i];
+		ring_wr32(ring, RNPGBE_DMA_RX_START, 0);
+	}
+
+	/* Disable the transmit unit.  Each queue must be disabled. */
+	number_of_queues = hw->mac.max_tx_queues;
+
+	return 0;
+}
+
+/**
+ *  rnpgbevf_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts the 12 bits, from a multicast address, to determine which
+ *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ *  incoming rx multicast addresses, to determine the bit-vector to check in
+ *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
+ *  by the MO field of the MCSTCTRL. The MO field is set during initialization
+ *  to mc_filter_type.
+ **/
+static s32 rnpgbevf_mta_vector(struct rnpgbevf_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
+	switch (hw->mac.mc_filter_type) {
+	case 0: /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] << 8) | (((u16)mc_addr[5])));
+		break;
+	case 1: /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] << 7) | (((u16)mc_addr[5]) >> 1));
+		break;
+	case 2: /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] << 6) | (((u16)mc_addr[5]) >> 2));
+		break;
+	case 3: /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) << 4 | (((u16)mc_addr[5]) >> 4));
+		break;
+	case 4: /* hash on mc_addr[0..1], shifted right by 4 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 4);
+		break;
+	case 5: /* hash on mc_addr[0..1], shifted right by 3 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 3);
+		break;
+	case 6: /* hash on mc_addr[0..1], shifted right by 2 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 2);
+		break;
+	case 7: /* hash on mc_addr[0..1], no shift */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		break;
+	default: /* Invalid mc_filter_type */
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+/**
+ *  rnpgbevf_get_mac_addr_vf - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *  @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 rnpgbevf_get_mac_addr_vf(struct rnpgbevf_hw *hw, u8 *mac_addr)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val = 0;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	/*
+	 * If index is one then this is the start of a new list and needs
+	 * indication to the PF so it can do it's own list management.
+	 * If it is zero then that tells the PF to just clear all of
+	 * this VF's macvlans and there is no new list.
+	 */
+	msgbuf[0] |= RNPGBE_VF_SET_MACVLAN;
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 1, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, false);
+
+	msgbuf[0] &= ~RNPGBE_VT_MSGTYPE_CTS;
+
+	if (!ret_val)
+		if (msgbuf[0] ==
+		    (RNPGBE_VF_GET_MACVLAN | RNPGBE_VT_MSGTYPE_NACK))
+			ret_val = -ENOMEM;
+
+	memcpy(mac_addr, msg_addr, ETH_ALEN);
+
+	return ret_val;
+}
+
+/**
+ *  rnpgbevf_get_queues_vf - Read ring/queue configuration from the PF
+ *  @hw: pointer to the HW structure
+ **/
+static s32 rnpgbevf_get_queues_vf(struct rnpgbevf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = 0;
+	u32 msgbuf[7];
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] |= RNPGBE_VF_GET_QUEUE;
+
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 1, false);
+
+	mdelay(10);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 7, false);
+
+	msgbuf[0] &= ~RNPGBE_VT_MSGTYPE_CTS;
+
+	if (!ret_val)
+		if (msgbuf[0] == (RNPGBE_VF_GET_QUEUE | RNPGBE_VT_MSGTYPE_NACK))
+			ret_val = -ENOMEM;
+#define MSG_TX_NUM_WORD 1
+#define MSG_RX_NUM_WORD 2
+#define MSG_RING_BASE_WORD 5
+#define MSG_RING_DEPTH 6
+
+	hw->queue_ring_base = msgbuf[MSG_RING_BASE_WORD];
+	hw->mac.max_tx_queues = msgbuf[MSG_TX_NUM_WORD];
+	hw->mac.max_rx_queues = msgbuf[MSG_RX_NUM_WORD];
+	hw->tx_items_count = 0xffff & (msgbuf[MSG_RING_DEPTH] >> 16);
+	hw->rx_items_count = 0xffff & (msgbuf[MSG_RING_DEPTH] >> 0);
+
+	return 0;
+}
+
+static s32 rnpgbevf_set_uc_addr_vf(struct rnpgbevf_hw *hw, u32 index, u8 *addr)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val = 0;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	/*
+	 * If index is one then this is the start of a new list and needs
+	 * indication to the PF so it can do it's own list management.
+	 * If it is zero then that tells the PF to just clear all of
+	 * this VF's macvlans and there is no new list.
+	 */
+	msgbuf[0] |= index << RNPGBE_VT_MSGINFO_SHIFT;
+	msgbuf[0] |= RNPGBE_VF_SET_MACVLAN;
+	if (addr)
+		memcpy(msg_addr, addr, 6);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, false);
+
+	msgbuf[0] &= ~RNPGBE_VT_MSGTYPE_CTS;
+
+	if (!ret_val)
+		if (msgbuf[0] ==
+		    (RNPGBE_VF_SET_MACVLAN | RNPGBE_VT_MSGTYPE_NACK))
+			ret_val = -ENOMEM;
+	return ret_val;
+}
+
+/**
+ *  rnpgbevf_set_rar_vf - set device MAC address
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: Unused in this implementation
+ **/
+static s32 rnpgbevf_set_rar_vf(struct rnpgbevf_hw *hw, u32 index, u8 *addr,
+			       u32 vmdq)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = RNPGBE_VF_SET_MAC_ADDR;
+	memcpy(msg_addr, addr, 6);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, false);
+
+	msgbuf[0] &= ~RNPGBE_VT_MSGTYPE_CTS;
+
+	/* if nacked the address was rejected, use "perm_addr" */
+	if (!ret_val &&
+	    (msgbuf[0] == (RNPGBE_VF_SET_MAC_ADDR | RNPGBE_VT_MSGTYPE_NACK))) {
+		rnpgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+		return -1;
+	}
+
+	return ret_val;
+}
+
+static void rnpgbevf_write_msg_read_ack(struct rnpgbevf_hw *hw, u32 *msg,
+					u16 size)
+{
+	u32 retmsg[RNPGBE_VFMAILBOX_SIZE];
+	s32 retval;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+
+	retval = mbx->ops.write_posted(hw, msg, size, false);
+	if (!retval)
+		mbx->ops.read_posted(hw, retmsg, size, false);
+}
+
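+/*
+ * Walk the netdev multicast list: return the current entry's address and
+ * advance *mc_addr_ptr to the next entry's address (or NULL at the end).
+ */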
+u8 *rnpgbevf_addr_list_itr(struct rnpgbevf_hw __maybe_unused *hw,
+			   u8 **mc_addr_ptr)
+{
+	struct netdev_hw_addr *mc_ptr;
+	u8 *addr = *mc_addr_ptr;
+
+	mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
+	if (mc_ptr->list.next) {
+		struct netdev_hw_addr *ha;
+
+		ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list);
+		*mc_addr_ptr = ha->addr;
+	} else
+		*mc_addr_ptr = NULL;
+
+	return addr;
+}
+
+/**
+ *  rnpgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @netdev: pointer to net device structure
+ *
+ *  Updates the Multicast Table Array.
+ **/
+static s32 rnpgbevf_update_mc_addr_list_vf(struct rnpgbevf_hw *hw,
+					   struct net_device *netdev)
+{
+	struct netdev_hw_addr *ha;
+	u32 msgbuf[RNPGBE_VFMAILBOX_SIZE];
+	u16 *vector_list = (u16 *)&msgbuf[1];
+	u32 cnt, i;
+	int addr_count = 0;
+	u8 *addr_list = NULL;
+
+	/* Each entry in the list uses 1 16 bit word.  We have 30
+	 * 16 bit words available in our HW msg buffer (minus 1 for the
+	 * msg type).  That's 30 hash values if we pack 'em right.  If
+	 * there are more than 30 MC addresses to add then punt the
+	 * extras for now and then add code to handle more than 30 later.
+	 * It would be unusual for a server to request that many multi-cast
+	 * addresses except for in large enterprise network environments.
+	 */
+
+	cnt = netdev_mc_count(netdev);
+	if (cnt > 30)
+		cnt = 30;
+	msgbuf[0] = RNPGBE_VF_SET_MULTICAST;
+	msgbuf[0] |= cnt << RNPGBE_VT_MSGINFO_SHIFT;
+
+	addr_count = netdev_mc_count(netdev);
+
+	ha = list_first_entry(&netdev->mc.list, struct netdev_hw_addr, list);
+	addr_list = ha->addr;
+	for (i = 0; i < addr_count; i++) {
+		vector_list[i] = rnpgbevf_mta_vector(
+			hw, rnpgbevf_addr_list_itr(hw, &addr_list));
+	}
+
+	rnpgbevf_write_msg_read_ack(hw, msgbuf, RNPGBE_VFMAILBOX_SIZE);
+
+	return 0;
+}
+
+/**
+ *  rnpgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ *  @hw: pointer to the HW structure
+ *  @vlan: 12 bit VLAN ID
+ *  @vind: unused by VF drivers
+ *  @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 rnpgbevf_set_vfta_vf(struct rnpgbevf_hw *hw, u32 vlan, u32 vind,
+				bool vlan_on)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 err;
+
+	msgbuf[0] = RNPGBE_VF_SET_VLAN;
+	msgbuf[1] = vlan;
+	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+	msgbuf[0] |= vlan_on << RNPGBE_VT_MSGINFO_SHIFT;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2, false);
+	if (err) {
+		printk(KERN_DEBUG "vlan write_posted failed\n");
+		goto mbx_err;
+	}
+
+	err = mbx->ops.read_posted(hw, msgbuf, 2, false);
+	if (err) {
+		printk(KERN_DEBUG "vlan read_posted failed\n");
+		goto mbx_err;
+	}
+
+	/* remove extra bits from the message */
+	msgbuf[0] &= ~RNPGBE_VT_MSGTYPE_CTS;
+	msgbuf[0] &= ~(0xFF << RNPGBE_VT_MSGINFO_SHIFT);
+
+	if (msgbuf[0] != (RNPGBE_VF_SET_VLAN | RNPGBE_VT_MSGTYPE_ACK))
+		err = RNPGBE_ERR_INVALID_ARGUMENT;
+
+mbx_err:
+	return err;
+}
+
+static s32 rnpgbevf_set_vlan_strip(struct rnpgbevf_hw *hw, bool vlan_on)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	struct rnpgbevf_adapter *adapter = (struct rnpgbevf_adapter *)hw->back;
+	u32 msgbuf[4];
+	s32 err;
+	int i;
+
+	if (adapter->num_rx_queues > 2) {
+		err = -EINVAL;
+		goto mbx_err;
+	}
+
+	msgbuf[0] = RNPGBE_VF_SET_VLAN_STRIP;
+	msgbuf[1] = (vlan_on << 31) | adapter->num_rx_queues;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		msgbuf[2 + i] = adapter->rx_ring[i]->rnpgbevf_queue_idx;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2 + adapter->num_rx_queues,
+				    false);
+	if (err)
+		goto mbx_err;
+
+	err = mbx->ops.read_posted(hw, msgbuf, 1, false);
+	if (err)
+		goto mbx_err;
+
+	/* remove extra bits from the message */
+	msgbuf[0] &= ~RNPGBE_VT_MSGTYPE_CTS;
+	msgbuf[0] &= ~(0xFF << RNPGBE_VT_MSGINFO_SHIFT);
+
+	if (msgbuf[0] != (RNPGBE_VF_SET_VLAN_STRIP | RNPGBE_VT_MSGTYPE_ACK))
+		err = RNPGBE_ERR_INVALID_ARGUMENT;
+
+mbx_err:
+	return err;
+}
+
+/**
+ *  rnpgbevf_setup_mac_link_vf - Setup MAC link settings
+ *  @hw: pointer to hardware structure
+ *  @speed: Unused in this implementation
+ *  @autoneg: Unused in this implementation
+ *  @autoneg_wait_to_complete: Unused in this implementation
+ *
+ *  Do nothing and return success.  VF drivers are not allowed to change
+ *  global settings.  Maintained for driver compatibility.
+ **/
+static s32 rnpgbevf_setup_mac_link_vf(struct rnpgbevf_hw *hw,
+				      rnp_link_speed speed, bool autoneg,
+				      bool autoneg_wait_to_complete)
+{
+	return 0;
+}
+
+/**
+ *  rnpgbevf_check_mac_link_vf - Get link/speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true if link is up, false otherwise
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+static s32 rnpgbevf_check_mac_link_vf(struct rnpgbevf_hw *hw,
+				      rnp_link_speed *speed, bool *link_up,
+				      bool autoneg_wait_to_complete)
+{
+	*speed = hw->speed;
+	*link_up = hw->link;
+
+	return 0;
+}
+
+/**
+ *  rnpgbevf_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ **/
+void rnpgbevf_rlpml_set_vf(struct rnpgbevf_hw *hw, u16 max_size)
+{
+	u32 msgbuf[2];
+
+	msgbuf[0] = RNPGBE_VF_SET_LPE;
+	msgbuf[1] = max_size;
+	rnpgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ *  rnpgbevf_negotiate_api_version - Negotiate supported API version
+ *  @hw: pointer to the HW structure
+ *  @api: integer containing requested API version
+ **/
+int rnpgbevf_negotiate_api_version(struct rnpgbevf_hw *hw, int api)
+{
+	return 0;
+}
+
+int rnpgbevf_get_queues(struct rnpgbevf_hw *hw, unsigned int *num_tcs,
+			unsigned int *default_tc)
+{
+	return -1;
+}
+
+void rnpgbevf_set_veb_mac_n500(struct rnpgbevf_hw *hw, u8 *mac, u32 vf_num,
+			       u32 ring)
+{
+	u32 maclow, machi;
+
+	maclow = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
+	machi = (mac[0] << 8) | mac[1];
+
+	wr32(hw, RNPGBE_DMA_PORT_VBE_MAC_LO_TBL_N500, maclow);
+	wr32(hw, RNPGBE_DMA_PORT_VBE_MAC_HI_TBL_N500, machi);
+	wr32(hw, RNPGBE_DMA_PORT_VEB_VF_RING_TBL_N500, ring);
+}
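As a concrete illustration of the packing above, for a made-up MAC address 00:11:22:33:44:55 the two table words work out as follows:

/* Worked example (illustrative address only) of the VEB MAC table packing:
 *   mac    = 00:11:22:33:44:55
 *   maclow = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55 = 0x22334455
 *   machi  = (0x00 << 8)  | 0x11                              = 0x00000011
 */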
+
+void rnpgbevf_set_vlan_n500(struct rnpgbevf_hw *hw, u16 vid, u32 vf_num)
+{
+	wr32(hw, RNPGBE_DMA_PORT_VEB_VID_TBL_N500, vid);
+}
+
+static const struct rnpgbevf_hw_operations rnpgbevf_hw_ops_n500 = {
+	.set_veb_mac = rnpgbevf_set_veb_mac_n500,
+	.set_veb_vlan = rnpgbevf_set_vlan_n500,
+};
+
+void rnpgbevf_set_veb_mac_n10(struct rnpgbevf_hw *hw, u8 *mac, u32 vfnum,
+			      u32 ring)
+{
+	int port;
+	u32 maclow, machi;
+
+	maclow = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
+	machi = (mac[0] << 8) | mac[1];
+	for (port = 0; port < 4; port++) {
+		wr32(hw, RNPGBE_DMA_PORT_VBE_MAC_LO_TBL_N10(port, vfnum),
+		     maclow);
+		wr32(hw, RNPGBE_DMA_PORT_VBE_MAC_HI_TBL_N10(port, vfnum),
+		     machi);
+		wr32(hw, RNPGBE_DMA_PORT_VEB_VF_RING_TBL_N10(port, vfnum),
+		     ring);
+	}
+}
+
+void rnpgbevf_set_vlan_n10(struct rnpgbevf_hw *hw, u16 vid, u32 vf_num)
+{
+	int port;
+
+	for (port = 0; port < 4; port++)
+		wr32(hw, RNPGBE_DMA_PORT_VEB_VID_TBL_N10(port, vf_num), vid);
+}
+
+static const struct rnpgbevf_hw_operations rnpgbevf_hw_ops_n10 = {
+	.set_veb_mac = rnpgbevf_set_veb_mac_n10,
+	.set_veb_vlan = rnpgbevf_set_vlan_n10,
+};
+
+static s32 rnpgbevf_get_invariants_n500(struct rnpgbevf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+
+	/* set up hw feature */
+	hw->feature_flags |=
+		RNPVF_NET_FEATURE_SG | RNPVF_NET_FEATURE_TX_CHECKSUM |
+		RNPVF_NET_FEATURE_RX_CHECKSUM | RNPVF_NET_FEATURE_TSO |
+		RNPVF_NET_FEATURE_VLAN_OFFLOAD |
+		RNPVF_NET_FEATURE_STAG_OFFLOAD | RNPVF_NET_FEATURE_USO |
+		RNPVF_NET_FEATURE_RX_HASH;
+
+	/* mbx setup */
+	mbx->pf2vf_mbox_vec_base = 0x28800;
+	mbx->vf2pf_mbox_vec_base = 0x28900;
+	mbx->cpu2vf_mbox_vec_base = 0x28a00;
+	mbx->cpu2pf_mbox_vec = 0x28b00;
+	mbx->pf_vf_shm_base = 0x29000;
+	mbx->cpu_vf_shm_base = 0x2b000;
+	mbx->vf2cpu_mbox_ctrl_base = 0x2c000;
+	mbx->cpu_vf_mbox_mask_lo_base = 0x2c200;
+	mbx->cpu_vf_mbox_mask_hi_base = 0;
+	mbx->mbx_mem_size = 64;
+	mbx->vf2pf_mbox_ctrl_base = 0x2a000;
+	mbx->pf2vf_mbox_ctrl_base = 0x2a100;
+	mbx->pf_vf_mbox_mask_lo = 0x2a200;
+	mbx->pf_vf_mbox_mask_hi = 0;
+	mbx->cpu_pf_shm_base = 0x2d040;
+	mbx->pf2cpu_mbox_ctrl = 0x2e000;
+	mbx->pf2cpu_mbox_mask = 0x2e200;
+	mbx->vf_num_mask = 0x1f;
+
+	hw->min_length = RNPVF_MIN_MTU;
+	hw->max_length = RNPVF_N500_MAX_JUMBO_FRAME_SIZE;
+
+	memcpy(&hw->ops, &rnpgbevf_hw_ops_n500, sizeof(hw->ops));
+
+	return 0;
+}
+
+static s32 rnpgbevf_get_invariants_n210(struct rnpgbevf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+
+	hw->feature_flags |=
+		RNPVF_NET_FEATURE_SG | RNPVF_NET_FEATURE_TX_CHECKSUM |
+		RNPVF_NET_FEATURE_RX_CHECKSUM | RNPVF_NET_FEATURE_TSO |
+		RNPVF_NET_FEATURE_VLAN_OFFLOAD |
+		RNPVF_NET_FEATURE_STAG_OFFLOAD | RNPVF_NET_FEATURE_USO |
+		RNPVF_NET_FEATURE_RX_HASH;
+
+	mbx->pf2vf_mbox_vec_base = 0x29100;
+	mbx->vf2pf_mbox_vec_base = 0x29200;
+	mbx->cpu2vf_mbox_vec_base = 0x29300;
+	mbx->cpu2pf_mbox_vec = 0x29400;
+	mbx->pf_vf_shm_base = 0x29900;
+	mbx->cpu_vf_shm_base = 0x2b900;
+	mbx->vf2cpu_mbox_ctrl_base = 0x2c900;
+	mbx->cpu_vf_mbox_mask_lo_base = 0x2cb00;
+	mbx->cpu_vf_mbox_mask_hi_base = 0;
+	mbx->mbx_mem_size = 64;
+	mbx->vf2pf_mbox_ctrl_base = 0x2a900;
+	mbx->pf2vf_mbox_ctrl_base = 0x2aa00;
+	mbx->pf_vf_mbox_mask_lo = 0x2a200;
+	mbx->pf_vf_mbox_mask_hi = 0;
+	mbx->cpu_pf_shm_base = 0x2d940;
+	mbx->pf2cpu_mbox_ctrl = 0x2e900;
+	mbx->pf2cpu_mbox_mask = 0x2eb00;
+	mbx->vf_num_mask = 0x1f;
+
+	hw->min_length = RNPVF_MIN_MTU;
+	hw->max_length = RNPVF_N500_MAX_JUMBO_FRAME_SIZE;
+
+	memcpy(&hw->ops, &rnpgbevf_hw_ops_n500, sizeof(hw->ops));
+
+	return 0;
+}
+
+static const struct rnp_mac_operations rnpgbevf_mac_ops = {
+	.init_hw = rnpgbevf_init_hw_vf,
+	.reset_hw = rnpgbevf_reset_hw_vf,
+	.start_hw = rnpgbevf_start_hw_vf,
+	.get_mac_addr = rnpgbevf_get_mac_addr_vf,
+	.get_queues = rnpgbevf_get_queues_vf,
+	.stop_adapter = rnpgbevf_stop_hw_vf,
+	.setup_link = rnpgbevf_setup_mac_link_vf,
+	.check_link = rnpgbevf_check_mac_link_vf,
+	.set_rar = rnpgbevf_set_rar_vf,
+	.update_mc_addr_list = rnpgbevf_update_mc_addr_list_vf,
+	.set_uc_addr = rnpgbevf_set_uc_addr_vf,
+	.set_vfta = rnpgbevf_set_vfta_vf,
+	.set_vlan_strip = rnpgbevf_set_vlan_strip,
+	.read_eth_reg = rnpgbevf_read_eth_reg,
+	.get_mtu = rnpgbevf_get_mtu,
+	.set_mtu = rnpgbevf_set_mtu,
+	.req_reset_pf = rnpgbevf_reset_pf,
+};
+
+const struct rnpgbevf_info rnp_n500_vf_info = {
+	.mac = rnp_mac_2port_40G,
+	.mac_ops = &rnpgbevf_mac_ops,
+	.board_type = rnp_board_n500,
+	.get_invariants = &rnpgbevf_get_invariants_n500,
+};
+
+const struct rnpgbevf_info rnp_n210_vf_info = {
+	.mac = rnp_mac_2port_40G,
+	.mac_ops = &rnpgbevf_mac_ops,
+	.board_type = rnp_board_n210,
+	.get_invariants = &rnpgbevf_get_invariants_n210,
+};
diff --git a/drivers/net/ethernet/mucse/rnpgbevf/vf.h b/drivers/net/ethernet/mucse/rnpgbevf/vf.h
new file mode 100644
index 0000000000000..972f3abf2883a
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbevf/vf.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef __RNPGBE_VF_H__
+#define __RNPGBE_VF_H__
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "defines.h"
+#include "rnpgbevf_regs.h"
+#include "rnpgbevf_mbx.h"
+
+struct rnpgbevf_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8 *(*rnp_mc_addr_itr)(struct rnpgbevf_hw *hw, u8 **mc_addr_ptr,
+			       u32 *vmdq);
+struct rnp_mac_operations {
+	s32 (*init_hw)(struct rnpgbevf_hw *hw);
+	s32 (*reset_hw)(struct rnpgbevf_hw *hw);
+	s32 (*start_hw)(struct rnpgbevf_hw *hw);
+	s32 (*clear_hw_cntrs)(struct rnpgbevf_hw *hw);
+	enum rnp_media_type (*get_media_type)(struct rnpgbevf_hw *hw);
+	u32 (*get_supported_physical_layer)(struct rnpgbevf_hw *hw);
+	s32 (*get_mac_addr)(struct rnpgbevf_hw *hw, u8 *mac);
+	s32 (*get_queues)(struct rnpgbevf_hw *hw);
+	s32 (*stop_adapter)(struct rnpgbevf_hw *hw);
+	s32 (*get_bus_info)(struct rnpgbevf_hw *hw);
+	int (*read_eth_reg)(struct rnpgbevf_hw *hw, int reg, u32 *value);
+	int (*get_mtu)(struct rnpgbevf_hw *hw);
+	int (*set_mtu)(struct rnpgbevf_hw *hw, int mtu);
+	int (*req_reset_pf)(struct rnpgbevf_hw *hw);
+	/* Link */
+	s32 (*setup_link)(struct rnpgbevf_hw *hw, rnp_link_speed speed,
+			  bool autoneg, bool autoneg_wait_to_complete);
+	s32 (*check_link)(struct rnpgbevf_hw *hw, rnp_link_speed *speed,
+			  bool *link_up, bool autoneg_wait_to_complete);
+	s32 (*get_link_capabilities)(struct rnpgbevf_hw *hw,
+				     rnp_link_speed *speed,
+				     bool *autoneg_wait_to_complete);
+	/* RAR, Multicast, VLAN */
+	s32 (*set_rar)(struct rnpgbevf_hw *hw, u32 index, u8 *addr, u32 vmdq);
+	s32 (*set_uc_addr)(struct rnpgbevf_hw *hw, u32 index, u8 *addr);
+	s32 (*init_rx_addrs)(struct rnpgbevf_hw *hw);
+	s32 (*update_mc_addr_list)(struct rnpgbevf_hw *hw,
+				   struct net_device *netdev);
+	s32 (*enable_mc)(struct rnpgbevf_hw *hw);
+	s32 (*disable_mc)(struct rnpgbevf_hw *hw);
+	s32 (*clear_vfta)(struct rnpgbevf_hw *hw);
+	s32 (*set_vfta)(struct rnpgbevf_hw *hw, u32 vlan, u32 vind,
+			bool vlan_on);
+	s32 (*set_vlan_strip)(struct rnpgbevf_hw *hw, bool vlan_on);
+};
+
+enum rnp_mac_type {
+	rnp_mac_unknown = 0,
+	rnp_mac_2port_10G,
+	rnp_mac_2port_40G,
+	rnp_mac_4port_10G,
+	rnp_mac_8port_10G,
+	rnp_num_macs
+};
+
+enum rnp_board_type {
+	rnp_board_n10,
+	rnp_board_n500,
+	rnp_board_n210,
+};
+
+struct rnp_mac_info {
+	struct rnp_mac_operations ops;
+	u8 addr[6];
+	u8 perm_addr[6];
+	enum rnp_mac_type type;
+	s32 mc_filter_type;
+	u32 dma_version;
+	bool get_link_status;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 max_msix_vectors;
+};
+
+#define RNP_MAX_TRAFFIC_CLASS 4
+enum rnp_fc_mode {
+	rnp_fc_none = 0,
+	rnp_fc_rx_pause,
+	rnp_fc_tx_pause,
+	rnp_fc_full,
+	rnp_fc_default
+};
+struct rnp_fc_info {
+	u32 high_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
+	u32 low_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+	enum rnp_fc_mode current_mode; /* FC mode in effect */
+	enum rnp_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct rnp_mbx_operations {
+	s32 (*init_params)(struct rnpgbevf_hw *hw);
+	s32 (*read)(struct rnpgbevf_hw *hw, u32 *msg, u16 size, bool to_cm3);
+	s32 (*write)(struct rnpgbevf_hw *hw, u32 *msg, u16 size, bool to_cm3);
+	s32 (*read_posted)(struct rnpgbevf_hw *hw, u32 *msg, u16 size,
+			   bool to_cm3);
+	s32 (*write_posted)(struct rnpgbevf_hw *hw, u32 *msg, u16 size,
+			    bool to_cm3);
+	s32 (*check_for_msg)(struct rnpgbevf_hw *hw, bool to_cm3);
+	s32 (*check_for_ack)(struct rnpgbevf_hw *hw, bool to_cm3);
+	s32 (*check_for_rst)(struct rnpgbevf_hw *hw, bool to_cm3);
+	s32 (*configure)(struct rnpgbevf_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnpgbevf_hw_operations {
+	void (*set_veb_mac)(struct rnpgbevf_hw *hw, u8 *mac, u32 vf_num,
+			    u32 ring);
+	void (*set_veb_vlan)(struct rnpgbevf_hw *hw, u16 vid, u32 vf_num);
+};
+
+struct rnp_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnp_mbx_info {
+	struct rnp_mbx_operations ops;
+	struct rnp_mbx_stats stats;
+	u32 timeout;
+	u32 udelay;
+	u32 v2p_mailbox;
+	u16 size;
+	u16 pf_req;
+	u16 pf_ack;
+	u16 cpu_req;
+	u16 cpu_ack;
+	u32 vf_num_mask;
+	int mbx_size;
+	int mbx_mem_size;
+	/* cm3 <-> pf mbx */
+	u32 cpu_pf_shm_base;
+	u32 pf2cpu_mbox_ctrl;
+	u32 pf2cpu_mbox_mask;
+	u32 cpu_pf_mbox_mask;
+	u32 cpu2pf_mbox_vec;
+	/* cm3 <-> vf mbx */
+	u32 cpu_vf_shm_base;
+	u32 cpu2vf_mbox_vec_base;
+	u32 cpu_vf_mbox_mask_lo_base;
+	u32 cpu_vf_mbox_mask_hi_base;
+	/* pf <--> vf mbx */
+	u32 pf_vf_shm_base;
+	u32 vf2cpu_mbox_ctrl_base;
+	u32 pf2vf_mbox_ctrl_base;
+	u32 pf_vf_mbox_mask_lo;
+	u32 pf_vf_mbox_mask_hi;
+	u32 pf2vf_mbox_vec_base;
+	u32 vf2pf_mbox_vec_base;
+	u32 vf2pf_mbox_ctrl_base;
+};
+
+struct rnpgbevf_hw_stats_own {
+	u64 vlan_add_cnt;
+	u64 vlan_strip_cnt;
+	u64 csum_err;
+	u64 csum_good;
+};
+
+struct rnpgbevf_hw_stats {
+	u64 base_vfgprc;
+	u64 base_vfgptc;
+	u64 base_vfgorc;
+	u64 base_vfgotc;
+	u64 base_vfmprc;
+	u64 last_vfgprc;
+	u64 last_vfgptc;
+	u64 last_vfgorc;
+	u64 last_vfgotc;
+	u64 last_vfmprc;
+	u64 vfgprc;
+	u64 vfgptc;
+	u64 vfgorc;
+	u64 vfgotc;
+	u64 vfmprc;
+	u64 saved_reset_vfgprc;
+	u64 saved_reset_vfgptc;
+	u64 saved_reset_vfgorc;
+	u64 saved_reset_vfgotc;
+	u64 saved_reset_vfmprc;
+};
+
+struct rnpgbevf_info {
+	enum rnp_mac_type mac;
+	enum rnp_board_type board_type;
+	const struct rnp_mac_operations *mac_ops;
+	s32 (*get_invariants)(struct rnpgbevf_hw *hw);
+};
+
+void rnpgbevf_rlpml_set_vf(struct rnpgbevf_hw *hw, u16 max_size);
+#endif /* __RNPGBE_VF_H__ */
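The rnpgbevf_info structures above are the glue between the PCI device table and the hardware layer: probe code selects the entry for the detected board, copies its generic MAC ops and then lets get_invariants() program the board-specific mailbox offsets, feature flags and hw->ops. A minimal sketch of that flow, assuming hw exposes a board_type field and a struct rnp_mac_info mac member as the rest of this patch suggests (example_setup_hw is a hypothetical helper; the real probe path lives in rnpgbevf_main.c):

/* Sketch only: typical consumption of rnpgbevf_info at probe time. */
static int example_setup_hw(struct rnpgbevf_hw *hw,
			    const struct rnpgbevf_info *ii)
{
	hw->board_type = ii->board_type;
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	/* board hook fills in mailbox offsets, feature flags and hw->ops */
	return ii->get_invariants(hw);
}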
diff --git a/drivers/net/ethernet/mucse/rnpvf/Makefile b/drivers/net/ethernet/mucse/rnpvf/Makefile
new file mode 100644
index 0000000000000..a9593401e4d7e
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2022 - 2024 Mucse Corporation
+#
+# Makefile for the Mucse(R) n10 10GbE PCI Express Virtual Function ethernet driver
+#
+#
+
+obj-$(CONFIG_MXGBEVF) += rnpvf.o
+rnpvf-objs := vf.o \
+              mbx.o \
+              ethtool.o \
+              sysfs.o \
+              rnpvf_main.o
+
diff --git a/drivers/net/ethernet/mucse/rnpvf/defines.h b/drivers/net/ethernet/mucse/rnpvf/defines.h
new file mode 100644
index 0000000000000..33dc3deb02cc2
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/defines.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPVF_DEFINES_H_
+#define _RNPVF_DEFINES_H_
+
+#include 
+#include 
+/* Device IDs */
+#define RNP_DEV_ID_N10_PF0_VF 0x8001
+#define RNP_DEV_ID_N10_PF1_VF 0x8002
+
+#define RNP_DEV_ID_N10_PF0_VF_N 0x1010
+#define RNP_DEV_ID_N10_PF1_VF_N 0x1011
+
+#define RNP_VF_IRQ_CLEAR_MASK 7
+#define RNP_VF_MAX_TX_QUEUES 8
+#define RNP_VF_MAX_RX_QUEUES 8
+
+/* DCB define */
+#define RNP_VF_MAX_TRAFFIC_CLASS 8
+
+/* Link speed */
+typedef u32 rnp_link_speed;
+#define RNP_LINK_SPEED_UNKNOWN 0
+#define RNP_LINK_SPEED_10_FULL BIT(2)
+#define RNP_LINK_SPEED_100_FULL BIT(3)
+#define RNP_LINK_SPEED_1GB_FULL BIT(4)
+#define RNP_LINK_SPEED_10GB_FULL BIT(5)
+#define RNP_LINK_SPEED_40GB_FULL BIT(6)
+#define RNP_LINK_SPEED_25GB_FULL BIT(7)
+#define RNP_LINK_SPEED_50GB_FULL BIT(8)
+#define RNP_LINK_SPEED_100GB_FULL BIT(9)
+#define RNP_LINK_SPEED_10_HALF BIT(10)
+#define RNP_LINK_SPEED_100_HALF BIT(11)
+#define RNP_LINK_SPEED_1GB_HALF BIT(12)
+#define RNP_SFP_MODE_10G_LR BIT(13)
+#define RNP_SFP_MODE_10G_SR BIT(14)
+#define RNP_SFP_MODE_10G_LRM BIT(15)
+#define RNP_SFP_MODE_1G_T BIT(16)
+#define RNP_SFP_MODE_1G_KX BIT(17)
+#define RNP_SFP_MODE_1G_SX BIT(18)
+#define RNP_SFP_MODE_1G_LX BIT(19)
+#define RNP_SFP_MODE_40G_SR4 BIT(20)
+#define RNP_SFP_MODE_40G_CR4 BIT(21)
+#define RNP_SFP_MODE_40G_LR4 BIT(22)
+#define RNP_SFP_MODE_1G_CX BIT(23)
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define RNP_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define RNP_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define RNP_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Interrupt Vector Allocation Registers */
+#define RNP_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+#define RNP_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Transmit Descriptor - Advanced */
+struct rnp_tx_desc {
+	union {
+		__le64 pkt_addr; /* Packet buffer address */
+		struct {
+			__le32 adr_lo;
+			__le32 adr_hi;
+		};
+	};
+	__le16 blen;
+	union {
+		struct {
+			__le16 ip_len : 9;
+			__le16 mac_len : 7;
+		};
+		__le16 mac_ip_len;
+	};
+	__le16 vlan;
+#define RNP_TXD_FLAGS_VLAN_PRIO_MASK 0xe000
+#define RNP_TX_FLAGS_VLAN_PRIO_SHIFT 13
+#define RNP_TX_FLAGS_VLAN_CFI_SHIFT 12
+
+	__le16 cmd;
+#define RNP_TXD_VLAN_VALID (1 << 15)
+#define RNP_TXD_SVLAN_TYPE (1 << 14)
+#define RNP_TXD_VLAN_CTRL_NOP (0x00 << 13)
+#define RNP_TXD_VLAN_CTRL_RM_VLAN (0x01 << 13)
+#define RNP_TXD_VLAN_CTRL_INSERT_VLAN (0x02 << 13)
+#define RNP_TXD_L4_CSUM (1 << 12)
+#define RNP_TXD_IP_CSUM (1 << 11)
+#define RNP_TXD_TUNNEL_MASK (0x3000000)
+#define RNP_TXD_TUNNEL_VXLAN (0x01 << 8)
+#define RNP_TXD_TUNNEL_NVGRE (0x02 << 8)
+#define RNP_TXD_L4_TYPE_UDP (0x03 << 6)
+#define RNP_TXD_L4_TYPE_TCP (0x01 << 6)
+#define RNP_TXD_L4_TYPE_SCTP (0x02 << 6)
+#define RNP_TXD_FLAG_IPv4 (0 << 5)
+#define RNP_TXD_FLAG_IPv6 (1 << 5)
+#define RNP_TXD_FLAG_TSO (1 << 4)
+#define RNP_TXD_CMD_RS (1 << 2)
+#define RNP_TXD_STAT_DD (1 << 1)
+#define RNP_TXD_CMD_EOP (1 << 0)
+} __packed;
+
+struct rnp_tx_ctx_desc {
+	__le16 mss_len;
+	u8 vfnum;
+	u8 l4_hdr_len;
+	u8 tunnel_hdr_len;
+	__le16 inner_vlan;
+	u8 vf_veb_flags;
+#define VF_IGNORE_VLAN (1 << 1) /* bit 57 */
+#define VF_VEB_MARK (1 << 0) /* bit 56 */
+	__le32 res;
+	__le32 resv_cmd;
+#define RNP_TXD_FLAG_TO_RPU (1 << 15)
+#define RNP_TXD_SMAC_CTRL_NOP (0x00 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR0 (0x02 << 12)
+#define RNP_TXD_SMAC_CTRL_REPLACE_MACADDR1 (0x06 << 12)
+#define RNP_TXD_CTX_VLAN_CTRL_NOP (0x00 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_RM_VLAN (0x01 << 10)
+#define RNP_TXD_CTX_VLAN_CTRL_INSERT_VLAN (0x02 << 10)
+#define RNP_TXD_MTI_CRC_PAD_CTRL (0x01000000)
+#define RNP_TXD_CTX_CTRL_DESC (0x080000)
+#define RNP_TXD_CTX_CMD_RS (1 << 2)
+#define RNP_TXD_STAT_DD (1 << 1)
+} __packed;
+
+/* Receive Descriptor - Advanced */
+union rnp_rx_desc {
+	struct {
+		union {
+			__le64 pkt_addr; /* Packet buffer address */
+			struct {
+				__le32 addr_lo;
+				__le32 addr_hi;
+			};
+		};
+		u8 dumy[6];
+		__le16 cmd; /* DD back */
+#define RNP_RXD_FLAG_RS (1 << 2)
+	};
+
+	struct {
+		__le32 rss_hash;
+		__le16 mark;
+		__le16 rev1;
+#define RNP_RX_L3_TYPE_MASK (1 << 15) /* 1 is ipv4 */
+#define VEB_VF_PKG (1 << 0) /* bit 48 */
+#define VEB_VF_IGNORE_VLAN (1 << 1) /* bit 49 */
+		__le16 len;
+		__le16 padding_len;
+		__le16 vlan;
+		__le16 cmd;
+#define RNP_RXD_STAT_VLAN_VALID (1 << 15)
+#define RNP_RXD_STAT_STAG (0x01 << 14)
+#define RNP_RXD_STAT_TUNNEL_NVGRE (0x02 << 13)
+#define RNP_RXD_STAT_TUNNEL_VXLAN (0x01 << 13)
+#define RNP_RXD_STAT_ERR_MASK (0x1f << 8)
+#define RNP_RXD_STAT_TUNNEL_MASK (0x03 << 13)
+#define RNP_RXD_STAT_SCTP_MASK (0x04 << 8)
+#define RNP_RXD_STAT_L4_MASK (0x02 << 8)
+#define RNP_RXD_STAT_ERR_MASK_NOSCTP (0x1b << 8)
+#define RNP_RXD_STAT_L4_SCTP (0x02 << 6)
+#define RNP_RXD_STAT_L4_TCP (0x01 << 6)
+#define RNP_RXD_STAT_L4_UDP (0x03 << 6)
+#define RNP_RXD_STAT_IPV6 (1 << 5)
+#define RNP_RXD_STAT_IPV4 (0 << 5)
+#define RNP_RXD_STAT_PTP (1 << 4)
+#define RNP_RXD_STAT_DD (1 << 1)
+#define RNP_RXD_STAT_EOP (1 << 0)
+	} wb;
+} __packed;
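The write-back half of this union is what the RX clean path inspects: DD says the hardware has written the descriptor back, EOP marks the last descriptor of a frame. A minimal sketch of that test, assuming the clean loop in rnpvf_main.c (not shown here) follows the usual pattern:

/* Sketch only: testing the write-back status of union rnp_rx_desc. */
static inline bool example_rx_frame_complete(const union rnp_rx_desc *rx_desc)
{
	u16 cmd = le16_to_cpu(rx_desc->wb.cmd);

	if (!(cmd & RNP_RXD_STAT_DD))
		return false;	/* not written back by hardware yet */

	/* the frame is complete only once the EOP descriptor is seen */
	return !!(cmd & RNP_RXD_STAT_EOP);
}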
+
+/* Interrupt register bitmasks */
+
+#define RNP_EITR_CNT_WDIS 0x80000000
+#define RNP_MAX_EITR 0x00000FF8
+#define RNP_MIN_EITR 8
+
+/* Error Codes */
+#define RNP_ERR_INVALID_MAC_ADDR -1
+#define RNP_ERR_RESET_FAILED -2
+#define RNP_ERR_INVALID_ARGUMENT -3
+
+#ifdef DEBUG
+#define dbg(fmt, args...) \
+	printk(KERN_DEBUG "[ %s:%d ] " fmt, __func__, __LINE__, ##args)
+#else
+#define dbg(fmt, args...)
+#endif
+
+#define rnpvf_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#define rnpvf_info(fmt, args...) \
+	printk(KERN_INFO "rnpvf-info: " fmt, ##args)
+#define rnpvf_warn(fmt, args...) \
+	printk(KERN_WARNING "rnpvf-warn: " fmt, ##args)
+#define rnpvf_err(fmt, args...) printk(KERN_ERR "rnpvf-err: " fmt, ##args)
+
+#define DPRINTK(nlevel, klevel, fmt, args...)                         \
+	((NETIF_MSG_##nlevel & adapter->msg_enable) ?                 \
+		 (void)(netdev_printk(KERN_##klevel, adapter->netdev, \
+				      fmt, ##args)) :                 \
+		 NULL)
+
+#ifdef CONFIG_RNP_TX_DEBUG
+static inline void buf_dump_line(const char *msg, int line, void *buf,
+				 int len)
+{
+	int i, offset = 0;
+	int msg_len = 1024;
+	u8 msg_buf[1024];
+	u8 *ptr = (u8 *)buf;
+
+	offset += snprintf(msg_buf + offset, msg_len - offset,
+			   "=== %s #%d line:%d buf:%p==\n000: ", msg, len,
+			   line, buf);
+
+	for (i = 0; i < len; ++i) {
+		if ((i != 0) && (i % 16) == 0 &&
+		    (offset >= (msg_len - 10 * 16))) {
+			printk(KERN_DEBUG "%s\n", msg_buf);
+			offset = 0;
+		}
+
+		if ((i != 0) && (i % 16) == 0) {
+			offset += snprintf(msg_buf + offset,
+					   msg_len - offset, "\n%03x: ", i);
+		}
+		offset += snprintf(msg_buf + offset, msg_len - offset,
+				   "%02x ", ptr[i]);
+	}
+
+	offset += snprintf(msg_buf + offset, msg_len - offset, "\n");
+	printk(KERN_DEBUG "%s\n", msg_buf);
+}
+#else
+#define buf_dump_line(msg, line, buf, len)
+#endif
+
+static inline void buf_dump(const char *msg, void *buf, int len)
+{
+	int i, offset = 0;
+	int msg_len = 1024;
+	u8 msg_buf[1024];
+	u8 *ptr = (u8 *)buf;
+
+	offset += snprintf(msg_buf + offset, msg_len - offset,
+			   "=== %s #%d ==\n000: ", msg, len);
+
+	for (i = 0; i < len; ++i) {
+		if ((i != 0) && (i % 16) == 0 &&
+		    (offset >= (msg_len - 10 * 16))) {
+			printk(KERN_DEBUG "%s\n", msg_buf);
+			offset = 0;
+		}
+
+		if ((i != 0) && (i % 16) == 0) {
+			offset += snprintf(msg_buf + offset,
+					   msg_len - offset, "\n%03x: ", i);
+		}
+		offset += snprintf(msg_buf + offset, msg_len - offset,
+				   "%02x ", ptr[i]);
+	}
+
+	offset += snprintf(msg_buf + offset, msg_len - offset,
+			   "\n=== done ==\n");
+	printk(KERN_DEBUG "%s\n", msg_buf);
+}
+
+static inline void _rnp_skb_dump(const struct sk_buff *skb, bool full_pkt)
+{
+	static atomic_t can_dump_full = ATOMIC_INIT(5);
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	struct net_device *dev = skb->dev;
+	struct sk_buff *list_skb;
+	bool has_mac, has_trans;
+	int headroom, tailroom;
+	int i, len, seg_len;
+	const char *level = KERN_WARNING;
+
+	if (full_pkt)
+		full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;
+
+	if (full_pkt)
+		len = skb->len;
+	else
+		len = min_t(int, skb->len, MAX_HEADER + 128);
+
+	headroom = skb_headroom(skb);
+	tailroom = skb_tailroom(skb);
+
+	has_mac = skb_mac_header_was_set(skb);
+	has_trans = skb_transport_header_was_set(skb);
+
+	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
+	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
+	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
+	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
+	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
+	       level, skb->len, headroom, skb_headlen(skb), tailroom,
+	       has_mac ? skb->mac_header : -1,
+	       has_mac ? (skb->network_header - skb->mac_header) : -1,
+	       skb->network_header,
+	       has_trans ? skb_network_header_len(skb) : -1,
+	       has_trans ? skb->transport_header : -1, sh->tx_flags,
+	       sh->nr_frags, sh->gso_size, sh->gso_type, sh->gso_segs,
+	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
+	       skb->csum_valid, skb->csum_level, skb->hash, skb->sw_hash,
+	       skb->l4_hash, ntohs(skb->protocol), skb->pkt_type,
+	       skb->skb_iif);
+
+	if (dev)
+		printk(KERN_DEBUG "%sdev name=%s feat=0x%pNF\n", level,
+		       dev->name, &dev->features);
+
+	seg_len = min_t(int, skb_headlen(skb), len);
+	if (seg_len)
+		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
+			       16, 1, skb->data, seg_len, false);
+	len -= seg_len;
+
+	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		u32 p_len;
+		struct page *p;
+		u8 *vaddr;
+
+		p = skb_frag_page(frag);
+		p_len = skb_frag_size(frag);
+		seg_len = min_t(int, p_len, len);
+		vaddr = kmap_atomic(p);
+		print_hex_dump(level, "skb frag:     ", DUMP_PREFIX_OFFSET,
+			       16, 1, vaddr, seg_len, false);
+		kunmap_atomic(vaddr);
+		len -= seg_len;
+		if (!len)
+			break;
+	}
+
+	if (full_pkt && skb_has_frag_list(skb)) {
+		printk(KERN_DEBUG "skb fraglist:\n");
+		skb_walk_frags(skb, list_skb)
+			_rnp_skb_dump(list_skb, true);
+	}
+}
+
+#define TRACE() printk(KERN_DEBUG "=[%s] %d ==\n", __func__, __LINE__)
+
+#ifdef CONFIG_RNP_TX_DEBUG
+#define desc_hex_dump(msg, buf, len)                                 \
+	print_hex_dump(KERN_WARNING, msg, DUMP_PREFIX_OFFSET, 16, 1, \
+		       (buf), (len), false)
+#define rnpvf_skb_dump _rnp_skb_dump
+#else
+#define desc_hex_dump(msg, buf, len)
+#define rnpvf_skb_dump(skb, full_pkt)
+#endif
+
+#ifdef CONFIG_RNP_RX_DEBUG
+#define rx_debug_printk printk
+#define rx_buf_dump buf_dump
+#else
+#define rx_debug_printk(fmt, args...)
+#define rx_buf_dump(a, b, c)
+#endif /* CONFIG_RNP_RX_DEBUG */
+
+#endif /* _RNPVF_DEFINES_H_ */
diff --git a/drivers/net/ethernet/mucse/rnpvf/ethtool.c b/drivers/net/ethernet/mucse/rnpvf/ethtool.c
new file mode 100644
index 0000000000000..9bac619f03599
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/ethtool.c
@@ -0,0 +1,781 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpvf.h"
+
+#define RNP_ALL_RAR_ENTRIES 16
+
+struct rnpvf_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+	int base_stat_offset;
+	int saved_reset_offset;
+};
+
+#define RNPVF_NUM_RX_QUEUES netdev->real_num_rx_queues
+#define RNPVF_NUM_TX_QUEUES netdev->real_num_tx_queues
+
+#define RNP_NETDEV_STAT(_net_stat)                                        \
+	{                                                                 \
+		.stat_string = #_net_stat,                                \
+		.sizeof_stat =                                            \
+			sizeof_field(struct net_device_stats, _net_stat), \
+		.stat_offset =                                            \
+			offsetof(struct net_device_stats, _net_stat)      \
+	}
+
+static const struct rnpvf_stats rnp_gstrings_net_stats[] = {
+	RNP_NETDEV_STAT(rx_packets),
+	RNP_NETDEV_STAT(tx_packets),
+	RNP_NETDEV_STAT(rx_bytes),
+	RNP_NETDEV_STAT(tx_bytes),
+	RNP_NETDEV_STAT(rx_errors),
+	RNP_NETDEV_STAT(tx_errors),
+	RNP_NETDEV_STAT(rx_dropped),
+	RNP_NETDEV_STAT(tx_dropped),
+	RNP_NETDEV_STAT(collisions),
+	RNP_NETDEV_STAT(rx_over_errors),
+	RNP_NETDEV_STAT(rx_crc_errors),
+	RNP_NETDEV_STAT(rx_frame_errors),
+	RNP_NETDEV_STAT(rx_fifo_errors),
+	RNP_NETDEV_STAT(rx_missed_errors),
+	RNP_NETDEV_STAT(tx_aborted_errors),
+	RNP_NETDEV_STAT(tx_carrier_errors),
+	RNP_NETDEV_STAT(tx_fifo_errors),
+	RNP_NETDEV_STAT(tx_heartbeat_errors),
+};
+
+#define RNPVF_GLOBAL_STATS_LEN ARRAY_SIZE(rnp_gstrings_net_stats)
+
+#define RNPVF_HW_STAT(_name, _stat)                                       \
+	{                                                                 \
+		.stat_string = _name,                                     \
+		.sizeof_stat = sizeof_field(struct rnpvf_adapter, _stat), \
+		.stat_offset = offsetof(struct rnpvf_adapter, _stat)      \
+	}
+
+static struct rnpvf_stats rnpvf_hwstrings_stats[] = {
+	RNPVF_HW_STAT("vlan_add_cnt", hw_stats.vlan_add_cnt),
+	RNPVF_HW_STAT("vlan_strip_cnt", hw_stats.vlan_strip_cnt),
+	RNPVF_HW_STAT("rx_csum_offload_errors", hw_stats.csum_err),
+	RNPVF_HW_STAT("rx_csum_offload_good", hw_stats.csum_good),
+	RNPVF_HW_STAT("tx_spoof_dropped", hw_stats.spoof_dropped),
+};
+
+#define RNPVF_HWSTRINGS_STATS_LEN ARRAY_SIZE(rnpvf_hwstrings_stats)
+
+struct rnpvf_tx_queue_ring_stat {
+	u64 hw_head;
+	u64 hw_tail;
+	u64 sw_to_clean;
+};
+
+struct rnpvf_rx_queue_ring_stat {
+	u64 hw_head;
+	u64 hw_tail;
+	u64 sw_to_use;
+};
+
+#define RNP_QUEUE_STATS_LEN                                           \
+	(RNPVF_NUM_TX_QUEUES *                                        \
+		 (sizeof(struct rnpvf_tx_queue_stats) / sizeof(u64) + \
+		  sizeof(struct rnpvf_queue_stats) / sizeof(u64) +    \
+		  sizeof(struct rnpvf_tx_queue_ring_stat) /           \
+			  sizeof(u64)) +                              \
+	 RNPVF_NUM_RX_QUEUES *                                        \
+		 (sizeof(struct rnpvf_rx_queue_stats) / sizeof(u64) + \
+		  sizeof(struct rnpvf_queue_stats) / sizeof(u64) +    \
+		  sizeof(struct rnpvf_rx_queue_ring_stat) / sizeof(u64)))
+
+#define RNPVF_STATS_LEN                                 \
+	(RNPVF_GLOBAL_STATS_LEN + RNP_QUEUE_STATS_LEN + \
+	 RNPVF_HWSTRINGS_STATS_LEN)
+
+static const char rnp_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Link test   (on/offline)"
+};
+#define RNPVF_TEST_LEN (sizeof(rnp_gstrings_test) / ETH_GSTRING_LEN)
+
+enum priv_bits {
+	padding_enable = 0,
+};
+
+static const char rnpvf_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define RNPVF_FT_PADDING BIT(0)
+#define RNPVF_FCS_ON BIT(1)
+	"ft_padding", "fcs"
+};
+#define RNPVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnpvf_priv_flags_strings)
+
+#define ADVERTISED_MASK_10G                                        \
+	(SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full | \
+	 SUPPORTED_10000baseKR_Full)
+static int rnpvf_get_link_ksettings(struct net_device *netdev,
+				    struct ethtool_link_ksettings *cmd)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+	bool autoneg = false;
+	bool link_up;
+	u32 supported, advertising = 0;
+	u32 link_speed = 0;
+
+	ethtool_convert_link_mode_to_legacy_u32(&supported,
+						cmd->link_modes.supported);
+
+	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+	switch (link_speed) {
+	case RNP_LINK_SPEED_1GB_FULL:
+		supported |= SUPPORTED_1000baseT_Full;
+		supported |= SUPPORTED_FIBRE;
+		advertising |= ADVERTISED_FIBRE |
+			       ADVERTISED_1000baseKX_Full;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	case RNP_LINK_SPEED_10GB_FULL:
+		supported |= SUPPORTED_10000baseT_Full;
+		supported |= SUPPORTED_FIBRE;
+		advertising |= ADVERTISED_FIBRE |
+			       ADVERTISED_10000baseT_Full;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	case RNP_LINK_SPEED_25GB_FULL:
+		supported |= SUPPORTED_40000baseKR4_Full;
+		supported |= SUPPORTED_FIBRE;
+		advertising |= ADVERTISED_FIBRE |
+			       ADVERTISED_40000baseKR4_Full;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	case RNP_LINK_SPEED_40GB_FULL:
+		supported |= SUPPORTED_40000baseCR4_Full |
+			     SUPPORTED_40000baseSR4_Full |
+			     SUPPORTED_40000baseLR4_Full;
+		supported |= SUPPORTED_FIBRE;
+		advertising |= ADVERTISED_FIBRE;
+		cmd->base.port = PORT_FIBRE;
+		break;
+	}
+
+	if (autoneg) {
+		supported |= SUPPORTED_Autoneg;
+		advertising |= ADVERTISED_Autoneg;
+		cmd->base.autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->base.autoneg = AUTONEG_DISABLE;
+	}
+
+	/* set pause support */
+	supported |= SUPPORTED_Pause;
+
+	switch (hw->fc.current_mode) {
+	case rnp_fc_full:
+		advertising |= ADVERTISED_Pause;
+		break;
+	case rnp_fc_rx_pause:
+		advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+		break;
+	case rnp_fc_tx_pause:
+		advertising |= ADVERTISED_Asym_Pause;
+		break;
+	default:
+		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+	}
+
+	if (link_up) {
+		switch (link_speed) {
+		case RNP_LINK_SPEED_40GB_FULL:
+			cmd->base.speed = SPEED_40000;
+			break;
+		case RNP_LINK_SPEED_25GB_FULL:
+			cmd->base.speed = SPEED_25000;
+			break;
+		case RNP_LINK_SPEED_10GB_FULL:
+			cmd->base.speed = SPEED_10000;
+			break;
+		case RNP_LINK_SPEED_1GB_FULL:
+			cmd->base.speed = SPEED_1000;
+			break;
+		case RNP_LINK_SPEED_100_FULL:
+			cmd->base.speed = SPEED_100;
+			break;
+		default:
+			break;
+		}
+		cmd->base.duplex = DUPLEX_FULL;
+	} else {
+		cmd->base.speed = SPEED_UNKNOWN;
+		cmd->base.duplex = DUPLEX_UNKNOWN;
+	}
+
+	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+						supported);
+	ethtool_convert_legacy_u32_to_link_mode(
+		cmd->link_modes.advertising, advertising);
+	return 0;
+}
+
+static void rnpvf_get_drvinfo(struct net_device *netdev,
+			      struct ethtool_drvinfo *drvinfo)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	strlcpy(drvinfo->driver, rnpvf_driver_name,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, rnpvf_driver_version,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
+	if (hw->board_type == rnp_board_n10) {
+		snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+			 "%d.%d.%d.%d", ((char *)&(hw->fw_version))[3],
+			 ((char *)&(hw->fw_version))[2],
+			 ((char *)&(hw->fw_version))[1],
+			 ((char *)&(hw->fw_version))[0]);
+	}
+	drvinfo->n_priv_flags = RNPVF_PRIV_FLAGS_STR_LEN;
+}
+
+void rnpvf_get_ringparam(
+	struct net_device *netdev, struct ethtool_ringparam *ring,
+	struct kernel_ethtool_ringparam __always_unused *ker,
+	struct netlink_ext_ack __always_unused *extack)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+
+	ring->rx_max_pending = RNPVF_MAX_RXD;
+	ring->tx_max_pending = RNPVF_MAX_TXD;
+	ring->rx_pending = adapter->rx_ring_item_count;
+	ring->tx_pending = adapter->tx_ring_item_count;
+}
+
+static void rnpvf_get_strings(struct net_device *netdev, u32 stringset,
+			      u8 *data)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	char *p = (char *)data;
+	int i;
+	struct rnpvf_ring *ring;
+	u16 queue_idx;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < RNPVF_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, rnp_gstrings_net_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+
+		for (i = 0; i < RNPVF_HWSTRINGS_STATS_LEN; i++) {
+			memcpy(p, rnpvf_hwstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+
+		BUG_ON(RNPVF_NUM_TX_QUEUES != RNPVF_NUM_RX_QUEUES);
+
+		for (i = 0; i < RNPVF_NUM_TX_QUEUES; i++) {
+			/* ====  tx ======== */
+			ring = adapter->tx_ring[i];
+			queue_idx = ring->rnpvf_queue_idx;
+			sprintf(p, "\n     queue%u_tx_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_bytes", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_restart", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_busy", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_done_old", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_clean_desc", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_poll_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_irq_more", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_tx_hw_head", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_hw_tail", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_sw_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_added_vlan_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_irq_miss", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_tx_equal_count", i);
+			p += ETH_GSTRING_LEN;
+
+			/* ====  rx ======== */
+			ring = adapter->rx_ring[i];
+			queue_idx = ring->rnpvf_queue_idx;
+			sprintf(p, "\n     queue%u_rx_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_bytes", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_driver_drop_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rsc", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rsc_flush", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_non_eop_descs", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_page_failed", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_buff_failed", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_alloc_page", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_csum_err", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_csum_good", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_poll_again_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_poll_count", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_rm_vlan_packets", i);
+			p += ETH_GSTRING_LEN;
+
+			sprintf(p, "queue%u_rx_hw_head", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_hw_tail", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_sw_next_to_use", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_irq_miss", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_next_to_clean", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "queue%u_rx_equal_count", i);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(data, rnpvf_priv_flags_strings,
+		       RNPVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+		break;
+	}
+}
+
+static int rnpvf_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return RNPVF_STATS_LEN;
+	case ETH_SS_PRIV_FLAGS:
+		return RNPVF_PRIV_FLAGS_STR_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static u32 rnpvf_get_priv_flags(struct net_device *netdev)
+{
+	struct rnpvf_adapter *adapter =
+		(struct rnpvf_adapter *)netdev_priv(netdev);
+	u32 priv_flags = 0;
+
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_FT_PADDING)
+		priv_flags |= RNPVF_FT_PADDING;
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_FCS_ON)
+		priv_flags |= RNPVF_FCS_ON;
+
+	return priv_flags;
+}
+
+static int rnpvf_get_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *coal,
+			      struct kernel_ethtool_coalesce *kernel_coal,
+			      struct netlink_ext_ack *extack)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+
+	coal->use_adaptive_tx_coalesce = adapter->adaptive_tx_coal;
+	coal->tx_coalesce_usecs = adapter->tx_usecs;
+	coal->tx_coalesce_usecs_irq = 0;
+	coal->tx_max_coalesced_frames = adapter->tx_frames;
+	coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
+
+	coal->use_adaptive_rx_coalesce = adapter->adaptive_rx_coal;
+	coal->rx_coalesce_usecs_irq = 0;
+	coal->rx_coalesce_usecs = adapter->rx_usecs;
+	coal->rx_max_coalesced_frames = adapter->rx_frames;
+	coal->rx_max_coalesced_frames_irq = adapter->napi_budge;
+
+	/* these parameters are not supported */
+	coal->pkt_rate_low = 0;
+	coal->pkt_rate_high = 0;
+	coal->rx_coalesce_usecs_low = 0;
+	coal->rx_max_coalesced_frames_low = 0;
+	coal->tx_coalesce_usecs_low = 0;
+	coal->tx_max_coalesced_frames_low = 0;
+	coal->rx_coalesce_usecs_high = 0;
+	coal->rx_max_coalesced_frames_high = 0;
+	coal->tx_coalesce_usecs_high = 0;
+	coal->tx_max_coalesced_frames_high = 0;
+	coal->rate_sample_interval = 0;
+	return 0;
+}
+
+static int rnpvf_set_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *ec,
+			      struct kernel_ethtool_coalesce *kernel_coal,
+			      struct netlink_ext_ack *extack)
+{
+	int reset = 0;
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	u32 value;
+	/* disabling adaptive tx/rx coalescing is not supported */
+	if (!(ec->use_adaptive_tx_coalesce) ||
+	    !(ec->use_adaptive_rx_coalesce)) {
+		return -EINVAL;
+	}
+
+	if ((ec->tx_max_coalesced_frames_irq < RNPVF_MIN_TX_WORK) ||
+			(ec->tx_max_coalesced_frames_irq > RNPVF_MAX_TX_WORK))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->tx_max_coalesced_frames_irq,
+			RNPVF_MIN_TX_WORK, RNPVF_MAX_TX_WORK);
+	value = ALIGN(value, RNPVF_WORK_ALIGN);
+
+	if (adapter->tx_work_limit != value) {
+		reset = 1;
+		adapter->tx_work_limit = value;
+	}
+
+	if ((ec->tx_max_coalesced_frames < RNPVF_MIN_TX_FRAME) ||
+			(ec->tx_max_coalesced_frames > RNPVF_MAX_TX_FRAME))
+		return -EINVAL;
+
+	value = clamp_t(u32, ec->tx_max_coalesced_frames,
+			RNPVF_MIN_TX_FRAME, RNPVF_MAX_TX_FRAME);
+	if (adapter->tx_frames != value) {
+		reset = 1;
+		adapter->tx_frames = value;
+	}
+
+	if ((ec->tx_coalesce_usecs < RNPVF_MIN_TX_USEC) ||
+			(ec->tx_coalesce_usecs > RNPVF_MAX_TX_USEC))
+		return -EINVAL;
+	value = clamp_t(u32, ec->tx_coalesce_usecs,
+			RNPVF_MIN_TX_USEC, RNPVF_MAX_TX_USEC);
+	if (adapter->tx_usecs != value) {
+		reset = 1;
+		adapter->tx_usecs = value;
+	}
+
+	if ((ec->rx_max_coalesced_frames_irq < RNPVF_MIN_RX_WORK) ||
+			(ec->rx_max_coalesced_frames_irq > RNPVF_MAX_RX_WORK))
+		return -EINVAL;
+	value = clamp_t(u32, ec->rx_max_coalesced_frames_irq,
+			RNPVF_MIN_RX_WORK, RNPVF_MAX_RX_WORK);
+	value = ALIGN(value, RNPVF_WORK_ALIGN);
+
+	if (adapter->napi_budge != value) {
+		reset = 1;
+		adapter->napi_budge = value;
+	}
+
+	if ((ec->rx_max_coalesced_frames < RNPVF_MIN_RX_FRAME) ||
+			(ec->rx_max_coalesced_frames > RNPVF_MAX_RX_FRAME))
+		return -EINVAL;
+	value = clamp_t(u32, ec->rx_max_coalesced_frames,
+			RNPVF_MIN_RX_FRAME, RNPVF_MAX_RX_FRAME);
+	if (adapter->rx_frames != value) {
+		reset = 1;
+		adapter->rx_frames = value;
+	}
+
+	if ((ec->rx_coalesce_usecs < RNPVF_MIN_RX_USEC) ||
+			(ec->rx_coalesce_usecs > RNPVF_MAX_RX_USEC))
+		return -EINVAL;
+	value = clamp_t(u32, ec->rx_coalesce_usecs,
+			RNPVF_MIN_RX_USEC, RNPVF_MAX_RX_USEC);
+
+	if (adapter->rx_usecs != value) {
+		reset = 1;
+		adapter->rx_usecs = value;
+	}
+
+	/* other setup is not supported */
+	if ((ec->pkt_rate_low) || (ec->pkt_rate_high) ||
+			(ec->rx_coalesce_usecs_low) ||
+			(ec->rx_max_coalesced_frames_low) ||
+			(ec->tx_coalesce_usecs_low) ||
+			(ec->tx_max_coalesced_frames_low) ||
+			(ec->rx_coalesce_usecs_high) ||
+			(ec->rx_max_coalesced_frames_high) ||
+			(ec->tx_coalesce_usecs_high) ||
+			(ec->tx_max_coalesced_frames_high) ||
+			(ec->rate_sample_interval) || (ec->tx_coalesce_usecs_irq) ||
+			(ec->rx_coalesce_usecs_irq))
+		return -EINVAL;
+
+	if (reset) {
+		if (netif_running(netdev))
+			rnpvf_close(netdev);
+		remove_mbx_irq(adapter);
+		rnpvf_clear_interrupt_scheme(adapter);
+		rnpvf_init_interrupt_scheme(adapter);
+		register_mbx_irq(adapter);
+		if (netif_running(netdev))
+			return rnpvf_open(netdev);
+	}
+	return 0;
+}
+
+static void rnpvf_get_ethtool_stats(struct net_device *netdev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &netdev->stats;
+	struct rnpvf_ring *ring;
+	int i = 0, j;
+	char *p = NULL;
+
+	rnpvf_update_stats(adapter);
+
+	for (i = 0; i < RNPVF_GLOBAL_STATS_LEN; i++) {
+		p = (char *)net_stats +
+		    rnp_gstrings_net_stats[i].stat_offset;
+		data[i] = (rnp_gstrings_net_stats[i].sizeof_stat ==
+			   sizeof(u64)) ?
+				  *(u64 *)p :
+				  *(u32 *)p;
+	}
+	for (j = 0; j < RNPVF_HWSTRINGS_STATS_LEN; j++, i++) {
+		p = (char *)adapter + rnpvf_hwstrings_stats[j].stat_offset;
+		data[i] = (rnpvf_hwstrings_stats[j].sizeof_stat ==
+			   sizeof(u64)) ?
+				  *(u64 *)p :
+				  *(u32 *)p;
+	}
+
+	BUG_ON(RNPVF_NUM_TX_QUEUES != RNPVF_NUM_RX_QUEUES);
+
+	for (j = 0; j < RNPVF_NUM_TX_QUEUES; j++) {
+		/* ===== tx-ring == */
+		ring = adapter->tx_ring[j];
+
+		if (!ring) {
+			data[i++] = 0;
+			data[i++] = 0;
+
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+
+			/* rnpvf_tx_queue_ring_stat */
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+
+			/* ===== rx-ring == */
+			data[i++] = 0;
+			data[i++] = 0;
+
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			continue;
+		}
+
+		data[i++] = ring->stats.packets;
+		data[i++] = ring->stats.bytes;
+
+		data[i++] = ring->tx_stats.restart_queue;
+		data[i++] = ring->tx_stats.tx_busy;
+		data[i++] = ring->tx_stats.tx_done_old;
+		data[i++] = ring->tx_stats.clean_desc;
+		data[i++] = ring->tx_stats.poll_count;
+		data[i++] = ring->tx_stats.irq_more_count;
+
+		/* rnpvf_tx_queue_ring_stat */
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_TAIL);
+		data[i++] = ring->next_to_clean;
+		data[i++] = ring->tx_stats.vlan_add;
+		data[i++] = ring->tx_stats.tx_irq_miss;
+		if (ring->tx_stats.tx_next_to_clean == -1)
+			data[i++] = ring->count;
+		else
+			data[i++] = ring->tx_stats.tx_next_to_clean;
+		data[i++] = ring->tx_stats.tx_equal_count;
+
+		/* ===== rx-ring == */
+		ring = adapter->rx_ring[j];
+
+		if (!ring) {
+			/* ===== rx-ring == */
+			data[i++] = 0;
+			data[i++] = 0;
+
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			continue;
+		}
+
+		data[i++] = ring->stats.packets;
+		data[i++] = ring->stats.bytes;
+
+		data[i++] = ring->rx_stats.driver_drop_packets;
+		data[i++] = ring->rx_stats.rsc_count;
+		data[i++] = ring->rx_stats.rsc_flush;
+		data[i++] = ring->rx_stats.non_eop_descs;
+		data[i++] = ring->rx_stats.alloc_rx_page_failed;
+		data[i++] = ring->rx_stats.alloc_rx_buff_failed;
+		data[i++] = ring->rx_stats.alloc_rx_page;
+		data[i++] = ring->rx_stats.csum_err;
+		data[i++] = ring->rx_stats.csum_good;
+		data[i++] = ring->rx_stats.poll_again_count;
+		data[i++] = ring->rx_stats.poll_count;
+		data[i++] = ring->rx_stats.vlan_remove;
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD);
+		data[i++] = ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_TAIL);
+		data[i++] = ring->next_to_clean;
+
+		data[i++] = ring->rx_stats.rx_irq_miss;
+		if (ring->rx_stats.rx_next_to_clean == -1)
+			data[i++] = ring->count;
+		else
+			data[i++] = ring->rx_stats.rx_next_to_clean;
+		data[i++] = ring->rx_stats.rx_equal_count;
+	}
+}
+
+static void rnpvf_get_channels(struct net_device *dev,
+			       struct ethtool_channels *ch)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(dev);
+
+	/* report maximum channels */
+	ch->max_combined = min_t(int, adapter->hw.mac.max_tx_queues,
+				 adapter->hw.mac.max_rx_queues);
+
+	/* report info for other vector */
+	ch->max_other = NON_Q_VECTORS;
+	ch->other_count = NON_Q_VECTORS;
+
+	/* record RSS queues */
+	ch->combined_count = adapter->dma_channels;
+}
+
+static u32 rnpvf_get_msglevel(struct net_device *netdev)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+
+	return adapter->msg_enable;
+}
+
+static void rnpvf_get_pauseparam(struct net_device *netdev,
+				 struct ethtool_pauseparam *pause)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	/* we don't support autoneg */
+	pause->autoneg = 0;
+
+	if (hw->fc.current_mode == rnp_fc_rx_pause) {
+		pause->rx_pause = 1;
+	} else if (hw->fc.current_mode == rnp_fc_tx_pause) {
+		pause->tx_pause = 1;
+	} else if (hw->fc.current_mode == rnp_fc_full) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+static void rnpvf_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+
+	adapter->msg_enable = data;
+}
+
+static const struct ethtool_ops rnpvf_ethtool_ops = {
+	.get_link_ksettings = rnpvf_get_link_ksettings,
+	.get_drvinfo = rnpvf_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_ringparam = rnpvf_get_ringparam,
+	.get_strings = rnpvf_get_strings,
+	.get_pauseparam = rnpvf_get_pauseparam,
+	.get_msglevel = rnpvf_get_msglevel,
+	.set_msglevel = rnpvf_set_msglevel,
+	.get_sset_count = rnpvf_get_sset_count,
+	.get_priv_flags = rnpvf_get_priv_flags,
+	.get_ethtool_stats = rnpvf_get_ethtool_stats,
+	.get_coalesce = rnpvf_get_coalesce,
+	.set_coalesce = rnpvf_set_coalesce,
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
+				     ETHTOOL_COALESCE_MAX_FRAMES,
+	.get_channels = rnpvf_get_channels,
+};
+
+void rnpvf_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &rnpvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/mucse/rnpvf/mbx.c b/drivers/net/ethernet/mucse/rnpvf/mbx.c
new file mode 100644
index 0000000000000..d73ed4b4e2e8a
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/mbx.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include "mbx.h"
+#include "rnpvf.h"
+
+struct counter {
+	union {
+		struct {
+			unsigned short pf_req;
+			unsigned short pf_ack;
+		};
+		struct {
+			unsigned short cpu_req;
+			unsigned short cpu_ack;
+		};
+	};
+	unsigned short vf_req;
+	unsigned short vf_ack;
+} __packed;
+
+static s32 rnpvf_poll_for_msg(struct rnpvf_hw *hw, bool to_cm3);
+static s32 rnpvf_poll_for_ack(struct rnpvf_hw *hw, bool to_cm3);
+
+#define PF2VF_MBOX_VEC(mbx, vf) (mbx->pf2vf_mbox_vec_base + 4 * (vf))
+#define CPU2VF_MBOX_VEC(mbx, vf) (mbx->cpu2vf_mbox_vec_base + 4 * (vf))
+#define PF_VF_SHM(mbx, vf) ((mbx->pf_vf_shm_base) + (64 * (vf)))
+#define PF2VF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 0)
+#define VF2PF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 4)
+#define PF_VF_SHM_DATA(mbx, vf) (PF_VF_SHM(mbx, vf) + 8)
+#define VF2PF_MBOX_CTRL(mbx, vf) ((mbx->vf2pf_mbox_ctrl_base) + (4 * (vf)))
+#define CPU_VF_SHM(mbx, vf) (mbx->cpu_vf_shm_base + (64 * (vf)))
+#define CPU2VF_COUNTER(mbx, vf) (CPU_VF_SHM(mbx, vf) + 0)
+#define VF2CPU_COUNTER(mbx, vf) (CPU_VF_SHM(mbx, vf) + 4)
+#define CPU_VF_SHM_DATA(mbx, vf) (CPU_VF_SHM(mbx, vf) + 8)
+#define VF2CPU_MBOX_CTRL(mbx, vf) (mbx->vf2cpu_mbox_ctrl_base + 64 * (vf))
+#define CPU_VF_MBOX_MASK_LO(mbx, vf) \
+	(mbx->cpu_vf_mbox_mask_lo_base + 64 * (vf))
+#define CPU_VF_MBOX_MASK_HI(mbx, vf) \
+	(mbx->cpu_vf_mbox_mask_hi_base + 64 * (vf))
+#define MBOX_CTRL_REQ (1 << 0)
+#define MBOX_CTRL_VF_HOLD_SHM (1 << 2)
+#define MBOX_IRQ_EN 0
+#define MBOX_IRQ_DISABLE 1
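These macros all carve per-VF windows out of the base offsets that the board-specific invariants code stores in struct rnp_mbx_info: each VF owns a 64-byte shared-memory slot whose first two words are the request/ack counters and whose message payload starts at byte 8. A minimal sketch of the same address arithmetic (the helper name is illustrative only):

/* Sketch only: resolving the PF<->VF shared-memory data register for one
 * VF, equivalent to PF_VF_SHM_DATA() above.
 */
static inline u32 example_pf_vf_data_reg(const struct rnp_mbx_info *mbx,
					 u8 vfnum)
{
	/* 64-byte window per VF: counters at +0/+4, message data at +8 */
	return mbx->pf_vf_shm_base + 64 * vfnum + 8;
}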
+
+/**
+ *  rnpvf_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @to_cm3: true to use the firmware (CM3) mailbox, false for the PF mailbox
+ *
+ *  returns 0 if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static s32 rnpvf_read_posted_mbx(struct rnpvf_hw *hw, u32 *msg, u16 size,
+				 bool to_cm3)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -RNP_ERR_MBX;
+
+	if (!mbx->ops.read)
+		goto out;
+
+	ret_val = rnpvf_poll_for_msg(hw, to_cm3);
+
+	/* if ack received read message, otherwise we timed out */
+	if (!ret_val)
+		ret_val = mbx->ops.read(hw, msg, size, to_cm3);
+out:
+	return ret_val;
+}
+
+/**
+ *  rnpvf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @to_cm3: true to use the firmware (CM3) mailbox, false for the PF mailbox
+ *
+ *  returns 0 if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static s32 rnpvf_write_posted_mbx(struct rnpvf_hw *hw, u32 *msg, u16 size,
+				  bool to_cm3)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -RNP_ERR_MBX;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
+		goto out;
+
+	/* send msg */
+	ret_val = mbx->ops.write(hw, msg, size, to_cm3);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = rnpvf_poll_for_ack(hw, to_cm3);
+out:
+	return ret_val;
+}
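Together these two posted helpers implement the request/response pattern used throughout the VF drivers in this patch (compare rnpgbevf_write_msg_read_ack earlier): post a request, wait for the ack, then poll for and read the PF's reply. A minimal sketch of a caller, with an illustrative function name:

/* Sketch only: a typical mailbox round trip built on the posted helpers
 * above; example_vf_request() is not part of the driver.
 */
static s32 example_vf_request(struct rnpvf_hw *hw, u32 *msg, u16 len)
{
	struct rnp_mbx_info *mbx = &hw->mbx;
	s32 err;

	err = mbx->ops.write_posted(hw, msg, len, false);	/* to the PF */
	if (err)
		return err;

	/* the PF's reply overwrites the request buffer */
	return mbx->ops.read_posted(hw, msg, len, false);
}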
+
+static inline u16 rnpvf_mbx_get_req(struct rnpvf_hw *hw, int reg)
+{
+	mb();
+	return mbx_rd32(hw, reg) & 0xffff;
+}
+
+static inline u16 rnpvf_mbx_get_ack(struct rnpvf_hw *hw, int reg)
+{
+	mb();
+	return (mbx_rd32(hw, reg) >> 16) & 0xffff;
+}
+
+static inline void rnpvf_mbx_inc_vfreq(struct rnpvf_hw *hw, bool to_cm3)
+{
+	u16 req;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	int reg = to_cm3 ? VF2CPU_COUNTER(mbx, vfnum) :
+			   VF2PF_COUNTER(mbx, vfnum);
+	u32 v = mbx_rd32(hw, reg);
+
+	req = (v & 0xffff);
+	req++;
+	v &= ~(0x0000ffff);
+	v |= req;
+	mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+}
+
+static inline void rnpvf_mbx_inc_vfack(struct rnpvf_hw *hw, bool to_cm3)
+{
+	u16 ack;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	int reg = to_cm3 ? VF2CPU_COUNTER(mbx, vfnum) :
+			   VF2PF_COUNTER(mbx, vfnum);
+	u32 v = mbx_rd32(hw, reg);
+
+	ack = (v >> 16) & 0xffff;
+	ack++;
+	v &= ~(0xffff0000);
+	v |= (ack << 16);
+	mb();
+	mbx_wr32(hw, reg, v);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+}
+
+/**
+ *  rnpvf_check_for_msg_vf - checks to see if the PF (or firmware) has sent mail
+ *  @hw: pointer to the HW structure
+ *  @to_cm3: true to check the firmware (CM3) mailbox, false for the PF mailbox
+ *
+ *  returns 0 if the other side has posted a new request, else ERR_MBX
+ **/
+static s32 rnpvf_check_for_msg_vf(struct rnpvf_hw *hw, bool to_cm3)
+{
+	s32 ret_val = RNP_ERR_MBX;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+
+	if (to_cm3) {
+		if (rnpvf_mbx_get_req(hw, CPU2VF_COUNTER(mbx, vfnum)) !=
+		    hw->mbx.cpu_req) {
+			ret_val = 0;
+			hw->mbx.stats.reqs++;
+		}
+	} else {
+		if (rnpvf_mbx_get_req(hw, PF2VF_COUNTER(mbx, vfnum)) !=
+		    hw->mbx.pf_req) {
+			ret_val = 0;
+			hw->mbx.stats.reqs++;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnpvf_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @to_cm3: true to poll the firmware (CM3) mailbox, false for the PF mailbox
+ *
+ *  returns 0 if it successfully received a message notification
+ **/
+static s32 rnpvf_poll_for_msg(struct rnpvf_hw *hw, bool to_cm3)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	while (countdown && mbx->ops.check_for_msg(hw, to_cm3)) {
+		countdown--;
+		udelay(mbx->udelay);
+	}
+
+	return countdown ? 0 : RNP_ERR_MBX;
+}
+
+/**
+ *  rnpvf_poll_for_ack - Wait for message acknowledgment
+ *  @hw: pointer to the HW structure
+ *  @to_cm3: true to poll the firmware (CM3) mailbox, false for the PF mailbox
+ *
+ *  returns 0 if it successfully received a message acknowledgment
+ **/
+static s32 rnpvf_poll_for_ack(struct rnpvf_hw *hw, bool to_cm3)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	while (countdown && mbx->ops.check_for_ack(hw, to_cm3)) {
+		countdown--;
+		udelay(mbx->udelay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown) {
+		mbx->timeout = 0;
+		dbg("%s timeout\n", __func__);
+	}
+
+	return countdown ? 0 : RNP_ERR_MBX;
+}
+
+/**
+ *  rnpvf_check_for_rst_msg_vf - check for and handle control messages from the PF
+ *  @hw: pointer to the HW structure
+ *  @to_cm3: true to check the firmware (CM3) mailbox, false for the PF mailbox
+ *
+ *  returns 0 if a pending message was found (and handles any control
+ *  message it carries), else ERR_MBX
+ **/
+static s32 rnpvf_check_for_rst_msg_vf(struct rnpvf_hw *hw, bool to_cm3)
+{
+	struct rnpvf_adapter *adapter = hw->back;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = RNP_ERR_MBX;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	u32 DATA_REG = (to_cm3) ? CPU_VF_SHM_DATA(mbx, vfnum) :
+				  PF_VF_SHM_DATA(mbx, vfnum);
+	u32 data;
+	int ret = 1;
+
+	ret_val = rnpvf_check_for_msg_vf(hw, to_cm3);
+	if (!ret_val) {
+		data = mbx_rd32(hw, DATA_REG);
+
+		data &= ~RNP_PF_VFNUM_MASK;
+		dbg("mbx %x\n", data);
+		/* add other mailbox setup */
+		if (((data) & (~RNP_VT_MSGTYPE_CTS)) ==
+		    RNP_PF_CONTROL_PRING_MSG) {
+			/* ring control message: nothing more to do here */
+		} else if ((data) == RNP_PF_SET_FCS) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			if (data) {
+				adapter->priv_flags |=
+					RNPVF_PRIV_FLAG_FCS_ON;
+				adapter->netdev->features |= NETIF_F_RXFCS;
+			} else {
+				adapter->priv_flags &=
+					(~RNPVF_PRIV_FLAG_FCS_ON);
+				adapter->netdev->features &=
+					(~NETIF_F_RXFCS);
+			}
+			if ((adapter->priv_flags &
+			     RNPVF_PRIV_FLAG_FCS_ON) &&
+			    (adapter->netdev->features & NETIF_F_RXCSUM))
+				adapter->netdev->features &=
+					(~NETIF_F_RXCSUM);
+			else {
+				if (adapter->flags &
+				    RNPVF_FLAG_RX_CHKSUM_ENABLED)
+					adapter->netdev->features |=
+						NETIF_F_RXCSUM;
+				else
+					adapter->netdev->features &=
+						(~NETIF_F_RXCSUM);
+			}
+
+		} else if ((data) == RNP_PF_SET_PAUSE) {
+			hw->fc.current_mode = mbx_rd32(hw, DATA_REG + 4);
+		} else if ((data) == RNP_PF_SET_FT_PADDING) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			if (data) {
+				adapter->priv_flags |=
+					RNPVF_PRIV_FLAG_FT_PADDING;
+			} else {
+				adapter->priv_flags &=
+					(~RNPVF_PRIV_FLAG_FT_PADDING);
+			}
+		} else if ((data) == RNP_PF_SET_VLAN_FILTER) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			if (data) {
+				if (hw->feature_flags &
+				    RNPVF_NET_FEATURE_VLAN_OFFLOAD) {
+					adapter->netdev->features |=
+						NETIF_F_HW_VLAN_CTAG_FILTER;
+				}
+
+				if (hw->feature_flags &
+				    RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+					adapter->netdev->features |=
+						NETIF_F_HW_VLAN_STAG_FILTER;
+				}
+
+			} else {
+				if (hw->feature_flags &
+				    RNPVF_NET_FEATURE_VLAN_OFFLOAD) {
+					adapter->netdev->features &=
+						~NETIF_F_HW_VLAN_CTAG_FILTER;
+				}
+				if (hw->feature_flags &
+				    RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+					adapter->netdev->features &=
+						~NETIF_F_HW_VLAN_STAG_FILTER;
+				}
+			}
+		} else if ((data) == RNP_PF_SET_VLAN) {
+			struct rnp_mbx_info *mbx = &hw->mbx;
+
+			data = mbx_rd32(hw, DATA_REG + 4);
+			/* pf set vlan for this vf */
+			adapter->flags |= RNPVF_FLAG_PF_UPDATE_VLAN;
+			if (data) {
+				adapter->flags |= RNPVF_FLAG_PF_SET_VLAN;
+				adapter->vf_vlan = data;
+				/* we should record old value */
+				/* we should open rx vlan offload */
+				if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+					adapter->priv_flags |= RNPVF_FLAG_RX_VLAN_OFFLOAD;
+				else
+					adapter->priv_flags &= ~RNPVF_FLAG_RX_VLAN_OFFLOAD;
+				adapter->netdev->features |=
+					NETIF_F_HW_VLAN_CTAG_RX;
+				/* should close tx vlan offload */
+				if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
+					adapter->priv_flags |= RNPVF_FLAG_TX_VLAN_OFFLOAD;
+				else
+					adapter->priv_flags &= ~RNPVF_FLAG_TX_VLAN_OFFLOAD;
+				adapter->netdev->features &=
+					~NETIF_F_HW_VLAN_CTAG_TX;
+			} else {
+				adapter->flags &=
+					(~RNPVF_FLAG_PF_SET_VLAN);
+				adapter->vf_vlan = 0;
+				/* write back old value */
+				if (adapter->priv_flags & RNPVF_FLAG_RX_VLAN_OFFLOAD)
+					adapter->netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+				else
+					adapter->netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
+				if (adapter->priv_flags & RNPVF_FLAG_TX_VLAN_OFFLOAD)
+					adapter->netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+				else
+					adapter->netdev->features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+			}
+			hw->ops.set_veb_vlan(hw, data,
+					     VFNUM(mbx, hw->vfnum));
+		} else if ((data) == RNP_PF_SET_LINK) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			if (data & RNP_PF_LINK_UP) {
+				hw->link = true;
+				hw->speed = data & 0xffff;
+			} else {
+				hw->link = false;
+				hw->speed = 0;
+			}
+		} else if ((data) == RNP_PF_SET_MTU) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			hw->mtu = data;
+			adapter->flags |= RNPVF_FLAG_PF_UPDATE_MTU;
+		} else if ((data) == RNP_PF_SET_RESET) {
+			adapter->flags |= RNPVF_FLAG_PF_RESET;
+		} else if ((data) == RNP_PF_SET_MAC_SPOOF) {
+			data = mbx_rd32(hw, DATA_REG + 4);
+			if (data)
+				hw->pf_feature |= PF_MAC_SPOOF;
+			else
+				hw->pf_feature &= (~PF_MAC_SPOOF);
+
+		} else {
+			return RNP_ERR_MBX;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ *  rnpvf_check_for_ack_vf - checks to see if the PF has ACK'd
+ *  @hw: pointer to the HW structure
+ *  @to_cm3: true to check the CM3 mailbox, false to check the PF mailbox
+ *
+ *  returns 0 if the PF/CM3 has acknowledged the last message, else RNP_ERR_MBX
+ **/
+static s32 rnpvf_check_for_ack_vf(struct rnpvf_hw *hw, bool to_cm3)
+{
+	s32 ret_val = RNP_ERR_MBX;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+
+	if (to_cm3 == true) {
+		if (rnpvf_mbx_get_ack(hw, CPU2VF_COUNTER(mbx, vfnum)) !=
+		    hw->mbx.cpu_ack) {
+			ret_val = 0;
+			hw->mbx.stats.acks++;
+		}
+	} else {
+		if (rnpvf_mbx_get_ack(hw, PF2VF_COUNTER(mbx, vfnum)) !=
+		    hw->mbx.pf_ack) {
+			ret_val = 0;
+			hw->mbx.stats.acks++;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  rnpvf_obtain_mbx_lock_vf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @to_cm3: true to lock the CM3 mailbox, false to lock the PF mailbox
+ *
+ *  returns 0 if we obtained the mailbox lock
+ **/
+static s32 rnpvf_obtain_mbx_lock_vf(struct rnpvf_hw *hw, bool to_cm3)
+{
+	int try_cnt = 2 * 1000; /* 2000 tries * 500us = 1s */
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	struct rnpvf_adapter *adapter = hw->back;
+	u32 CTRL_REG = (to_cm3) ? VF2CPU_MBOX_CTRL(mbx, vfnum) :
+				  VF2PF_MBOX_CTRL(mbx, vfnum);
+
+	while (try_cnt-- > 0) {
+		/* Take ownership of the buffer */
+		mbx_wr32(hw, CTRL_REG, MBOX_CTRL_VF_HOLD_SHM);
+		mb();
+		/* reserve mailbox for vf use */
+		if (mbx_rd32(hw, CTRL_REG) & MBOX_CTRL_VF_HOLD_SHM)
+			return 0;
+		udelay(500);
+	}
+
+	printk("[rnpvf] %s: failed to get mbx-lock \n", adapter->name);
+	return RNP_ERR_MBX;
+}
+
+/**
+ *  rnpvf_write_mbx_vf - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @to_cm3: true to write to the CM3 mailbox, false to write to the PF mailbox
+ *
+ *  returns 0 if it successfully copied message into the buffer
+ **/
+static s32 rnpvf_write_mbx_vf(struct rnpvf_hw *hw, u32 *msg, u16 size,
+			      bool to_cm3)
+{
+	s32 ret_val;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 i;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	u32 DATA_REG = (to_cm3) ? CPU_VF_SHM_DATA(mbx, vfnum) :
+				  PF_VF_SHM_DATA(mbx, vfnum);
+	u32 CTRL_REG = (to_cm3) ? VF2CPU_MBOX_CTRL(mbx, vfnum) :
+				  VF2PF_MBOX_CTRL(mbx, vfnum);
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = rnpvf_obtain_mbx_lock_vf(hw, to_cm3);
+	if (ret_val) {
+		printk("%s: get mbx wlock failed. ret:%d. req:0x%08x-0x%08x\n",
+		       __func__, ret_val, msg[0], msg[1]);
+		goto out_no_write;
+	}
+
+	/* add mailbox_id [27:21] */
+#define VF_NUM_OFFSET (21)
+	if (!to_cm3)
+		msg[0] |= ((hw->vfnum & 0x3f) << VF_NUM_OFFSET);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		mbx_wr32(hw, DATA_REG + i * 4, msg[i]);
+
+	/* update acks. used by rnpvf_check_for_ack_vf  */
+	if (to_cm3 == true)
+		hw->mbx.cpu_ack =
+			rnpvf_mbx_get_ack(hw, CPU2VF_COUNTER(mbx, vfnum));
+	else
+		hw->mbx.pf_ack =
+			rnpvf_mbx_get_ack(hw, PF2VF_COUNTER(mbx, vfnum));
+	rnpvf_mbx_inc_vfreq(hw, to_cm3);
+
+	/* Drop VFU and interrupt the PF/CM3 to
+	 * tell it a message has been sent
+	 */
+	mbx_wr32(hw, CTRL_REG, MBOX_CTRL_REQ);
+
+out_no_write:
+	return ret_val;
+}
+
+/**
+ *  rnpvf_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @to_cm3: true to read from the CM3 mailbox, false to read from the PF mailbox
+ *
+ *  returns 0 if it successfully read message from buffer
+ **/
+static s32 rnpvf_read_mbx_vf(struct rnpvf_hw *hw, u32 *msg, u16 size,
+			     bool to_cm3)
+{
+	s32 ret_val = 0;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 i;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	u32 BUF_REG = (to_cm3) ? CPU_VF_SHM_DATA(mbx, vfnum) :
+				 PF_VF_SHM_DATA(mbx, vfnum);
+	u32 CTRL_REG = (to_cm3) ? VF2CPU_MBOX_CTRL(mbx, vfnum) :
+				  VF2PF_MBOX_CTRL(mbx, vfnum);
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = rnpvf_obtain_mbx_lock_vf(hw, to_cm3);
+	if (ret_val)
+		goto out_no_read;
+
+	mb();
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = mbx_rd32(hw, BUF_REG + 4 * i);
+
+	/* clear vf_num */
+#define RNP_VF_NUM_MASK (0x7f << 21)
+	msg[0] &= (~RNP_VF_NUM_MASK);
+
+	/* update req. used by rnpvf_check_for_msg_vf  */
+	if (to_cm3 == true)
+		hw->mbx.cpu_req =
+			rnpvf_mbx_get_req(hw, CPU2VF_COUNTER(mbx, vfnum));
+	else
+		hw->mbx.pf_req =
+			rnpvf_mbx_get_req(hw, PF2VF_COUNTER(mbx, vfnum));
+	/* Acknowledge receipt and release mailbox, then we're done */
+	rnpvf_mbx_inc_vfack(hw, to_cm3);
+
+	/* free ownership of the buffer */
+	mbx_wr32(hw, CTRL_REG, 0);
+
+out_no_read:
+	return ret_val;
+}
+
+static void rnpvf_reset_mbx(struct rnpvf_hw *hw)
+{
+	u32 v;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+
+	/* release vfu */
+	mbx_wr32(hw, VF2CPU_MBOX_CTRL(mbx, vfnum), 0);
+	mbx_wr32(hw, VF2PF_MBOX_CTRL(mbx, vfnum), 0);
+
+	/* fetch mbx counter values */
+	v = mbx_rd32(hw, PF2VF_COUNTER(mbx, vfnum));
+	hw->mbx.pf_req = v & 0xffff;
+	hw->mbx.pf_ack = (v >> 16) & 0xffff;
+
+	v = mbx_rd32(hw, CPU2VF_COUNTER(mbx, vfnum));
+	hw->mbx.cpu_req = v & 0xffff;
+	hw->mbx.cpu_ack = (v >> 16) & 0xffff;
+}
+
+static s32 rnpvf_mbx_configure_vf(struct rnpvf_hw *hw, int nr_vec,
+				  bool enable)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int mbx_vec_reg, vfnum = VFNUM(mbx, hw->vfnum);
+
+	mbx_vec_reg = PF2VF_MBOX_VEC(mbx, vfnum);
+	mbx_wr32(hw, mbx_vec_reg, nr_vec);
+
+	return 0;
+}
+
+/**
+ *  rnpvf_init_mbx_params_vf - set initial values for vf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+static s32 rnpvf_init_mbx_params_vf(struct rnpvf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+
+	/* start mailbox as timed out and let the reset_hw call set the timeout
+	 * value to begin communications
+	 */
+	mbx->timeout = 0;
+	mbx->udelay = RNP_VF_MBX_INIT_DELAY;
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+	mbx->size = RNP_VFMAILBOX_SIZE;
+	rnpvf_reset_mbx(hw);
+	return 0;
+}
+
+const struct rnp_mbx_operations rnpvf_mbx_ops = {
+	.init_params = rnpvf_init_mbx_params_vf,
+	.read = rnpvf_read_mbx_vf,
+	.write = rnpvf_write_mbx_vf,
+	.read_posted = rnpvf_read_posted_mbx,
+	.write_posted = rnpvf_write_posted_mbx,
+	.check_for_msg = rnpvf_check_for_msg_vf,
+	.check_for_ack = rnpvf_check_for_ack_vf,
+	.check_for_rst = rnpvf_check_for_rst_msg_vf,
+	.configure = rnpvf_mbx_configure_vf,
+};
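For context, a minimal caller sketch (not part of the patch) of how the rnpvf_mbx_ops table above is typically driven: a request is posted with write_posted and the reply collected with read_posted. This assumes the posted helpers share the (hw, msg, size, to_cm3) signature of the read/write ops shown here and that mbx->timeout has already been set after reset; example_vf_get_queues is a hypothetical name.

	/* Hypothetical usage sketch, not part of the patch. */
	static int example_vf_get_queues(struct rnpvf_hw *hw)
	{
		struct rnp_mbx_info *mbx = &hw->mbx;
		u32 msg[8] = { RNP_VF_GET_QUEUE, };
		int err;

		/* send the request to the PF and wait for its ACK */
		err = mbx->ops.write_posted(hw, msg, 1, false);
		if (err)
			return err;

		/* wait for the reply and copy it out of the shared memory */
		err = mbx->ops.read_posted(hw, msg, 8, false);
		if (err)
			return err;

		/* the PF echoes the opcode or'd with an ACK or NACK flag */
		if (msg[0] & RNP_VT_MSGTYPE_NACK)
			return RNP_ERR_MBX;

		return 0;
	}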
diff --git a/drivers/net/ethernet/mucse/rnpvf/mbx.h b/drivers/net/ethernet/mucse/rnpvf/mbx.h
new file mode 100644
index 0000000000000..bb9eb55a9bc36
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/mbx.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNP_MBX_H_
+#define _RNP_MBX_H_
+
+#include "vf.h"
+
+#define RNP_VFMAILBOX_SIZE 14 /* 14 32 bit words - 56 bytes */
+#define RNP_ERR_MBX -100
+
+struct mbx_shm {
+	u32 stat;
+#define MBX_ST_PF_ACK (1 << 0)
+#define MBX_ST_PF_STS (1 << 1)
+#define MBX_ST_PF_RST (1 << 2)
+
+#define MBX_ST_VF_ACK (1 << 3)
+#define MBX_ST_VF_REQ (1 << 4)
+#define MBX_ST_VF_RST (1 << 5)
+
+#define MBX_ST_CPU_ACK (1 << 6)
+#define MBX_ST_CPU_REQ (1 << 7)
+
+	u32 data[RNP_VFMAILBOX_SIZE];
+} __aligned(4);
+
+/* If it's a RNP_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is RNP_PF_*.
+ * Message ACKs are the opcode or'd with RNP_VT_MSGTYPE_ACK.
+ */
+/* Messages or'd with this value are ACKs */
+#define RNP_VT_MSGTYPE_ACK 0x80000000
+/* Messages or'd with this value are NACKs */
+#define RNP_VT_MSGTYPE_NACK 0x40000000
+/* Indicates that the VF is still clear to send requests */
+#define RNP_VT_MSGTYPE_CTS 0x20000000
+#define RNP_VT_MSGINFO_SHIFT 14
+/* bits 21:14 are used for extra info for certain messages */
+#define RNP_VT_MSGINFO_MASK (0xFF << RNP_VT_MSGINFO_SHIFT)
+
+/* mailbox API, legacy requests */
+#define RNP_VF_RESET 0x01 /* VF requests reset */
+#define RNP_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define RNP_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define RNP_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
+#define RNP_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define RNP_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define RNP_VF_GET_MACVLAN 0x07 /* VF requests mac */
+#define RNP_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define RNP_VF_GET_QUEUE 0x09 /* get queue configuration */
+#define RNP_VF_SET_VLAN_STRIP 0x0a /* VF requests PF to set VLAN STRIP */
+#define RNP_VF_REG_RD 0x0b /* VF reads a register */
+#define RNP_VF_GET_MTU 0x0c /* VF requests current MTU */
+#define RNP_VF_SET_MTU 0x0d /* VF requests PF to set MTU */
+#define RNP_VF_GET_FW 0x0e /* VF requests firmware version */
+#define RNP_VF_RESET_PF 0x11 /* VF requests PF reset */
+#define RNP_VF_SET_PROMISCE 0x16
+
+#define RNP_PF_VFNUM_MASK (0x3f << 21)
+#define RNP_PF_SET_FCS 0x10 /* PF set fcs status */
+#define RNP_PF_SET_PAUSE 0x11 /* PF set pause status */
+#define RNP_PF_SET_FT_PADDING 0x12 /* PF set ft padding status */
+#define RNP_PF_SET_VLAN_FILTER 0x13
+#define RNP_PF_SET_VLAN 0x14
+#define RNP_PF_SET_LINK 0x15
+#define RNP_PF_SET_MTU 0x16
+#define RNP_PF_SET_RESET 0x17
+#define RNP_PF_SET_MAC_SPOOF 0x18
+#define RNP_PF_LINK_UP (1 << 31)
+
+#define RNP_PF_REMOVE 0x0f
+#define RNP_PF_GET_LINK 0x10
+/* GET_QUEUES return data indices within the mailbox */
+#define RNP_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define RNP_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define RNP_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define RNP_VF_DEF_QUEUE 4 /* Default queue offset */
+
+/* length of permanent address message returned from PF */
+#define RNP_VF_PERMADDR_MSG_LEN 11
+/* word in permanent address message with the current multicast type */
+#define RNP_VF_MC_TYPE_WORD 3
+#define RNP_VF_DMA_VERSION_WORD 4
+#define RNP_VF_VLAN_WORD 5
+#define RNP_VF_PHY_TYPE_WORD 6
+#define RNP_VF_FW_VERSION_WORD 7
+#define RNP_VF_LINK_STATUS_WORD 8
+#define RNP_VF_AXI_MHZ 9
+#define RNP_VF_FEATURE 10
+
+#define RNP_PF_CONTROL_PRING_MSG 0x0100 /* PF control message */
+
+#define RNP_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define RNP_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct rnpvf_hw;
+
+enum MBX_ID {
+	MBX_VF0 = 0,
+	MBX_VF1,
+	/* ... */
+	MBX_VF63,
+	MBX_CM3CPU,
+	MBX_VFCNT
+};
+
+#endif /* _RNP_MBX_H_ */
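As a rough illustration of the message layout these defines describe (not part of the patch): the low bits carry the opcode, bits 21:14 carry optional extra info, and the PF reply echoes the opcode or'd with an ACK/NACK flag. The helper names and the 'slot' argument below are hypothetical.

	/* Illustrative helpers only, assuming the defines above. */
	static inline u32 example_build_req(u32 opcode, u32 slot)
	{
		return opcode |
		       ((slot << RNP_VT_MSGINFO_SHIFT) & RNP_VT_MSGINFO_MASK);
	}

	static inline bool example_reply_nacked(u32 reply)
	{
		return !!(reply & RNP_VT_MSGTYPE_NACK);
	}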
diff --git a/drivers/net/ethernet/mucse/rnpvf/regs.h b/drivers/net/ethernet/mucse/rnpvf/regs.h
new file mode 100644
index 0000000000000..4ed8eff2437a3
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/regs.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPVF_REGS_H_
+#define _RNPVF_REGS_H_
+
+enum NIC_MODE {
+	MODE_NIC_MODE_2PORT_40G = 0,
+	MODE_NIC_MODE_2PORT_10G = 1,
+	MODE_NIC_MODE_4PORT_10G = 2,
+	MODE_NIC_MODE_8PORT_10G = 3,
+};
+
+/* RNP-Ring Registers */
+#define RNP_DMA_RING_BASE 0x8000
+#define RNP_DMA_RX_DESC_TIMEOUT_TH 0x8000
+#define RNP_DMA_TX_DESC_FETCH_CTL 0x8004
+#define RNP_DMA_TX_FLOW_CTRL_TM 0x8008
+/* DMA-ENABLE-IRQ */
+#define RNP_RING_BASE_N10 (0x8000)
+#define RNP_RING_BASE_N500 (0x1000)
+#define RNP_RING_OFFSET(i) (0x100 * (i))
+#define RNP_DMA_RX_START (0x10)
+#define RNP_DMA_RX_READY (0x14)
+#define RNP_DMA_TX_START (0x18)
+#define RNP_DMA_TX_READY (0x1c)
+#define RNP_DMA_INT_STAT (0x20)
+#define RNP_DMA_INT_MASK (0x24)
+#define TX_INT_MASK (1 << 1)
+#define RX_INT_MASK (1 << 0)
+#define RNP_DMA_INT_CLR (0x28)
+#define RNP_DMA_INT_TRIG (0x2c)
+/* RX-Queue Registers */
+#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI (0x30)
+#define RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO (0x34)
+#define RNP_DMA_REG_RX_DESC_BUF_LEN (0x38)
+#define RNP_DMA_REG_RX_DESC_BUF_HEAD (0x3c)
+#define RNP_DMA_REG_RX_DESC_BUF_TAIL (0x40)
+#define RNP_DMA_REG_RX_DESC_FETCH_CTRL (0x44)
+#define RNP_DMA_REG_RX_INT_DELAY_TIMER (0x48)
+#define RNP_DMA_REG_RX_INT_DELAY_PKTCNT (0x4c)
+#define RNP_DMA_REG_RX_ARB_DEF_LVL (0x50)
+#define PCI_DMA_REG_RX_DESC_TIMEOUT_TH (0x54)
+#define PCI_DMA_REG_RX_SCATTER_LENGTH (0x58)
+/* TX-Queue Registers */
+#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI (0x60)
+#define RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO (0x64)
+#define RNP_DMA_REG_TX_DESC_BUF_LEN (0x68)
+#define RNP_DMA_REG_TX_DESC_BUF_HEAD (0x6c)
+#define RNP_DMA_REG_TX_DESC_BUF_TAIL (0x70)
+#define RNP_DMA_REG_TX_DESC_FETCH_CTRL (0x74)
+#define RNP_DMA_REG_TX_INT_DELAY_TIMER (0x78)
+#define RNP_DMA_REG_TX_INT_DELAY_PKTCNT (0x7c)
+#define RNP_DMA_REG_TX_ARB_DEF_LVL (0x80)
+#define RNP_DMA_REG_TX_FLOW_CTRL_TH (0x84)
+#define RNP_DMA_REG_TX_FLOW_CTRL_TM (0x88)
+/* VEB Registers */
+#define VEB_TBL_CNTS 64
+#define RNP_DMA_PORT_VBE_MAC_LO_TBL_N10(port, vf) \
+	(0x80A0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_PORT_VBE_MAC_HI_TBL_N10(port, vf) \
+	(0x80B0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_PORT_VEB_VID_TBL_N10(port, vf) \
+	(0x80C0 + 4 * (port) + 0x100 * (vf))
+#define RNP_DMA_PORT_VEB_VF_RING_TBL_N10(port, vf) \
+	(0x80D0 + 4 * (port) +                     \
+	 0x100 * (vf))
+/* [0:7]:Ring_id,[8:15]:vf_num,vf_num[7]=1=vf valid */
+#define RNP_DMA_PORT_VBE_MAC_LO_TBL_N500 (0x10c0)
+#define RNP_DMA_PORT_VBE_MAC_HI_TBL_N500 (0x10c4)
+#define RNP_DMA_PORT_VEB_VID_TBL_N500 (0x10c8)
+#define RNP_DMA_PORT_VEB_VF_RING_TBL_N500 (0x10cc)
+#define RNP_DMA_STATS_DMA_TO_MAC (0x1a0)
+#define RNP_DMA_STATS_DMA_TO_SWITCH (0x1a4)
+#define RNP_DMA_STATS_MAC_TO_MAC (0x1b0)
+#define RNP_DMA_STATS_SWITCH_TO_SWITCH (0x1a4)
+#define RNP_DMA_STATS_MAC_TO_DMA (0x1a8)
+#define RNP_DMA_STATS_SWITCH_TO_DMA (0x1ac)
+/* =====  PF-VF Functions ==== */
+#define VF_NUM_REG 0xa3000
+#define VF_NUM_REG_N10 0x75f000
+#define VF_NUM_REG_N500 (0xe000)
+/* 8bit: 7:vf_active 6:fun0/fun1 [5:0]:vf_num */
+#define VF_NUM(vfnum, fun) \
+	((1 << 7) | (((fun) & 0x1) << 6) | ((vfnum) & 0x3f))
+#define PF_NUM(fun) (((fun) & 0x1) << 6)
+/* ==== Ring-MSIX Registers (MSI-X_module_design.docs) === */
+#define RING_VECTOR(n) (0x4000 + 0x04 * (n))
+
+static inline unsigned int p_rnpvf_rd_reg(void *reg)
+{
+	unsigned int v = ioread32((void *)(reg));
+
+	printk(" rd-reg: %p ==> 0x%08x\n", reg, v);
+	return v;
+}
+#define p_rnpvf_wr_reg(reg, val)                                     \
+	do {                                                         \
+		printk(" wr-reg: %p <== 0x%08x \t#%-4d %s\n", (reg), \
+		       (val), __LINE__, __FILE__);                   \
+		iowrite32((val), (void *)(reg));                     \
+	} while (0)
+
+#ifdef IO_PRINT
+#define rnpvf_rd_reg(reg) p_rnpvf_rd_reg(reg)
+#define rnpvf_wr_reg(reg, val) p_rnpvf_wr_reg(reg, val)
+#else
+#define rnpvf_rd_reg(reg) readl((void *)(reg))
+#define rnpvf_wr_reg(reg, val) writel((val), (void *)(reg))
+#endif
+
+#ifdef CONFIG_RNP_MBX_DEBUG
+#define mbx_rd32(hw, reg) p_rnpvf_rd_reg((hw)->hw_addr + (reg))
+#define mbx_wr32(hw, reg, val) p_rnpvf_wr_reg((hw)->hw_addr + (reg), (val))
+#else
+#define mbx_rd32(hw, reg) rnpvf_rd_reg((hw)->hw_addr + (reg))
+#define mbx_wr32(hw, reg, val) rnpvf_wr_reg((hw)->hw_addr + (reg), (val))
+#endif
+
+#define rd32(hw, off) rnpvf_rd_reg((hw)->hw_addr + (off))
+#define wr32(hw, off, val) rnpvf_wr_reg((hw)->hw_addr + (off), (val))
+
+#define ring_rd32(ring, off) rnpvf_rd_reg((ring)->ring_addr + (off))
+#define ring_wr32(ring, off, val) \
+	rnpvf_wr_reg((ring)->ring_addr + (off), (val))
+
+#define pwr32(hw, reg, val)                                               \
+	do {                                                              \
+		printk(" wr-reg: %p <== 0x%08x \t#%-4d %s\n",             \
+		       (hw)->hw_addr + (reg), (val), __LINE__, __FILE__); \
+		iowrite32((val), (hw)->hw_addr + (reg));                  \
+	} while (0)
+
+/* ==== log helper === */
+#ifdef DEBUG
+#define hw_dbg(hw, fmt, args...) printk("hw-dbg : " fmt, ##args)
+#else
+#define hw_dbg(hw, fmt, args...)
+#endif
+
+#endif /* _RNPVF_REGS_H_ */
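A small usage sketch (not part of the patch) of the per-ring accessors above: masking or unmasking a ring's TX/RX interrupts through RNP_DMA_INT_MASK. It assumes a struct rnpvf_ring whose ring_addr is already ioremapped, as declared in rnpvf.h; example_ring_irq_mask is a hypothetical name.

	/* Hypothetical sketch, assuming struct rnpvf_ring from rnpvf.h. */
	static inline void example_ring_irq_mask(struct rnpvf_ring *ring,
						 bool mask)
	{
		u32 val = mask ? (TX_INT_MASK | RX_INT_MASK) : 0;

		/* set bits are assumed to mask the corresponding IRQ source */
		ring_wr32(ring, RNP_DMA_INT_MASK, val);
	}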
diff --git a/drivers/net/ethernet/mucse/rnpvf/rnpvf.h b/drivers/net/ethernet/mucse/rnpvf/rnpvf.h
new file mode 100644
index 0000000000000..99ce052f06ace
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/rnpvf.h
@@ -0,0 +1,738 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef _RNPVF_H_
+#define _RNPVF_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "vf.h"
+
+#define RNPVF_ALLOC_PAGE_ORDER 0
+#define RNPVF_PAGE_BUFFER_NUMS(ring) \
+	(((1 << RNPVF_ALLOC_PAGE_ORDER) * PAGE_SIZE) >> 11)
+
+#define RNPVF_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+#if defined(CONFIG_MXGBEVF_FIX_VF_QUEUE) && !defined(FIX_VF_QUEUE)
+#define FIX_VF_BUG
+#endif
+
+#if defined(CONFIG_MXGBEVF_FIX_MAC_PADDING) && !defined(FIX_MAC_PADDING)
+#define FIX_MAC_PADDIN
+#endif
+
+#if defined(CONFIG_MXGBEVF_OPTM_WITH_LARGE) && !defined(OPTM_WITH_LARGE)
+#define OPTM_WITH_LPAGE
+#endif
+
+#if (PAGE_SIZE < 8192)
+/* OPTM_WITH_LPAGE cannot be used when PAGE_SIZE is smaller than 8192 */
+#ifdef OPTM_WITH_LPAGE
+#undef OPTM_WITH_LPAGE
+#endif
+#endif
+
+struct rnpvf_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+struct rnpvf_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 tx_done_old;
+	u64 clean_desc;
+	u64 poll_count;
+	u64 irq_more_count;
+	u64 vlan_add;
+	u64 tx_irq_miss;
+	u64 tx_next_to_clean;
+	u64 tx_equal_count;
+};
+
+struct rnpvf_rx_queue_stats {
+	u64 driver_drop_packets;
+	u64 rsc_count;
+	u64 rsc_flush;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+	u64 alloc_rx_page;
+	u64 csum_err;
+	u64 csum_good;
+	u64 poll_again_count;
+	u64 poll_count;
+	u64 vlan_remove;
+	u64 rx_irq_miss;
+	u64 rx_next_to_clean;
+	u64 rx_equal_count;
+};
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct rnpvf_tx_buffer {
+	struct rnp_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct sk_buff *skb;
+	unsigned int bytecount;
+	unsigned short gso_segs;
+	bool gso_need_padding;
+
+	__be16 protocol;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
+	union {
+		u32 tx_flags;
+		struct {
+			u16 vlan;
+			u16 cmd_flags;
+		};
+	};
+	__le32 mac_ip_len;
+	/* for control desc */
+	union {
+		u32 mss_len_vf_num;
+		struct {
+			__le16 mss_len;
+			u8 vf_num;
+			u8 l4_hdr_len;
+		};
+	};
+	union {
+		u32 inner_vlan_tunnel_len;
+		struct {
+			u8 tunnel_hdr_len;
+			u8 inner_vlan_l;
+			u8 inner_vlan_h;
+			u8 resv;
+		};
+	};
+	bool ctx_flag;
+};
+
+struct rnpvf_rx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
+};
+
+enum rnpvf_ring_state_t {
+	__RNPVF_RX_3K_BUFFER,
+	__RNPVF_RX_BUILD_SKB_ENABLED,
+	__RNPVF_TX_FDIR_INIT_DONE,
+	__RNPVF_TX_XPS_INIT_DONE,
+	__RNPVF_TX_DETECT_HANG,
+	__RNPVF_HANG_CHECK_ARMED,
+	__RNPVF_RX_CSUM_UDP_ZERO_ERR,
+	__RNPVF_RX_FCOE,
+};
+
+#define ring_uses_build_skb(ring) \
+	test_bit(__RNPVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
+/* now tx max 4k for one desc */
+#define RNPVF_MAX_TXD_PWR 12
+#define RNPVF_MAX_DATA_PER_TXD (1 << RNPVF_MAX_TXD_PWR)
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), RNPVF_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
+struct rnpvf_ring {
+	struct rnpvf_ring *next; /* pointer to next ring in q_vector */
+	struct rnpvf_q_vector *q_vector; /* backpointer to host q_vector */
+	struct net_device *netdev; /* netdev ring belongs to */
+	struct device *dev; /* device for DMA mapping */
+	void *desc; /* descriptor ring memory */
+	union {
+		struct rnpvf_tx_buffer *tx_buffer_info;
+		struct rnpvf_rx_buffer *rx_buffer_info;
+	};
+	unsigned long last_rx_timestamp;
+	unsigned long state;
+	u8 __iomem *ring_addr;
+	u8 __iomem *hw_addr;
+	u8 __iomem *tail;
+	u8 __iomem *dma_int_stat;
+	u8 __iomem *dma_int_mask;
+	u8 __iomem *dma_int_clr;
+	dma_addr_t dma; /* phys. address of descriptor ring */
+	unsigned int size; /* length in bytes */
+	u32 ring_flags;
+#define RNPVF_RING_FLAG_DELAY_SETUP_RX_LEN ((u32)(1 << 0))
+#define RNPVF_RING_FLAG_CHANGE_RX_LEN ((u32)(1 << 1))
+#define RNPVF_RING_FLAG_DO_RESET_RX_LEN ((u32)(1 << 2))
+#define RNPVF_RING_SKIP_TX_START ((u32)(1 << 3))
+#define RNPVF_RING_NO_TUNNEL_SUPPORT ((u32)(1 << 4))
+#define RNPVF_RING_SIZE_CHANGE_FIX ((u32)(1 << 5))
+#define RNPVF_RING_SCATER_SETUP ((u32)(1 << 6))
+#define RNPVF_RING_STAGS_SUPPORT ((u32)(1 << 7))
+#define RNPVF_RING_DOUBLE_VLAN_SUPPORT ((u32)(1 << 8))
+#define RNPVF_RING_VEB_MULTI_FIX ((u32)(1 << 9))
+#define RNPVF_RING_IRQ_MISS_FIX ((u32)(1 << 10))
+#define RNPVF_RING_CHKSM_FIX ((u32)(1 << 11))
+
+	u8 vfnum;
+	u8 rnpvf_msix_off;
+
+	u16 count; /* amount of descriptors */
+
+	u8 queue_index; /* queue index needed for multiqueue management */
+	u8 rnpvf_queue_idx;
+
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	u16 device_id;
+#ifdef OPTM_WITH_LPAGE
+	u16 rx_page_buf_nums;
+	u32 rx_per_buf_mem;
+	struct sk_buff *skb;
+#endif
+	union {
+		u16 next_to_alloc;
+		struct {
+			u8 atr_sample_rate;
+			u8 atr_count;
+		};
+	};
+
+	u8 dcb_tc;
+	struct rnpvf_queue_stats stats;
+	struct u64_stats_sync syncp;
+	union {
+		struct rnpvf_tx_queue_stats tx_stats;
+		struct rnpvf_rx_queue_stats rx_stats;
+	};
+} ____cacheline_internodealigned_in_smp;
+
+#define RNPVF_ITR_ADAPTIVE_MIN_INC 2
+#define RNPVF_ITR_ADAPTIVE_MIN_USECS 5
+#define RNPVF_ITR_ADAPTIVE_MAX_USECS 800
+#define RNPVF_ITR_ADAPTIVE_LATENCY 0x400
+#define RNPVF_ITR_ADAPTIVE_BULK 0x00
+#define RNPVF_ITR_ADAPTIVE_MASK_USECS \
+	(RNPVF_ITR_ADAPTIVE_LATENCY - RNPVF_ITR_ADAPTIVE_MIN_INC)
+
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#define RNPVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define RNPVF_VF_MAX_TX_QUEUES 32
+#define RNPVF_VF_MAX_RX_QUEUES 32
+#define MAX_RX_QUEUES RNPVF_VF_MAX_RX_QUEUES
+#define MAX_TX_QUEUES RNPVF_VF_MAX_TX_QUEUES
+
+#ifndef RNPVF_PKT_TIMEOUT
+#define RNPVF_PKT_TIMEOUT 30
+#endif
+
+#ifndef RNPVF_RX_PKT_POLL_BUDGET
+#define RNPVF_RX_PKT_POLL_BUDGET 64
+#endif
+
+#ifndef RNPVF_TX_PKT_POLL_BUDGET
+#define RNPVF_TX_PKT_POLL_BUDGET 0x30
+#endif
+
+#ifndef RNPVF_PKT_TIMEOUT_TX
+#define RNPVF_PKT_TIMEOUT_TX 100
+#endif
+
+#define RNPVF_MIN_RX_WORK (32)
+#define RNPVF_DEFAULT_RX_WORK (64)
+#define RNPVF_MAX_RX_WORK (512)
+#define RNPVF_WORK_ALIGN (2)
+#define RNPVF_MIN_TX_FRAME (1)
+#define RNPVF_MAX_TX_FRAME (256)
+#define RNPVF_MIN_TX_USEC (30)
+#define RNPVF_MAX_TX_USEC (10000)
+
+#define RNPVF_MIN_RX_FRAME (1)
+#define RNPVF_MAX_RX_FRAME (256)
+#define RNPVF_MIN_RX_USEC (10)
+#define RNPVF_MAX_RX_USEC (10000)
+
+#define RNPVF_MIN_TX_WORK (32)
+#define RNPVF_MAX_TX_WORK (512)
+#define RNPVF_DEFAULT_TX_WORK 256
+#define RNPVF_DEFAULT_TXD 512
+#define RNPVF_DEFAULT_RXD 512
+#define RNPVF_MAX_TXD 4096
+#define RNPVF_MIN_TXD 256
+#define RNPVF_MAX_RXD 4096
+#define RNPVF_MIN_RXD 256
+
+#ifndef TSRN10_RX_DEFAULT_BURST
+#define TSRN10_RX_DEFAULT_BURST 16
+#endif
+
+#ifndef TSRN10_RX_DEFAULT_LINE
+#define TSRN10_RX_DEFAULT_LINE 64
+#endif
+
+#define TSRN10_TX_DEFAULT_BURST 8
+
+/* Supported Rx Buffer Sizes */
+#define RNPVF_RXBUFFER_256 256 /* Used for packet split */
+#define RNPVF_RXBUFFER_2K 2048
+#define RNPVF_RXBUFFER_1536 1536
+#define RNPVF_RXBUFFER_3K 3072
+#define RNPVF_RXBUFFER_4K 4096
+#define RNPVF_RXBUFFER_8K 8192
+#define RNPVF_RXBUFFER_10K 10240
+
+#define RNPVF_RX_HDR_SIZE RNPVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define RNPVF_TX_FLAGS_CSUM ((u32)(1))
+#define RNPVF_TX_FLAGS_VLAN ((u32)(1 << 1))
+#define RNPVF_TX_FLAGS_TSO ((u32)(1 << 2))
+#define RNPVF_TX_FLAGS_IPV4 ((u32)(1 << 3))
+#define RNPVF_TX_FLAGS_FCOE ((u32)(1 << 4))
+#define RNPVF_TX_FLAGS_FSO ((u32)(1 << 5))
+#define RNPVF_TX_FLAGS_VLAN_MASK 0xffff0000
+#define RNPVF_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
+#define RNPVF_TX_FLAGS_VLAN_SHIFT 16
+
+#define RNPVF_GSO_PARTIAL_FEATURES                \
+	(NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | \
+	 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+struct rnpvf_ring_container {
+	struct rnpvf_ring *ring; /* pointer to linked list of rings */
+	unsigned long next_update; /* jiffies value of last update */
+	unsigned int total_bytes; /* total bytes processed this int */
+	unsigned int total_packets; /* total packets processed this int */
+	unsigned int total_packets_old;
+	u8 count; /* total number of rings in vector */
+	u8 itr; /* current ITR setting for ring */
+	u16 add_itr;
+};
+
+/* iterator for handling rings in ring container */
+#define rnpvf_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct rnpvf_q_vector {
+	int old_rx_count;
+	int new_rx_count;
+	int large_times;
+	int small_times;
+	int too_small_times;
+	int middle_time;
+	struct rnpvf_adapter *adapter;
+	u16 v_idx;
+	/* index of q_vector within array, also used for
+	 * finding the bit in EICR and friends that
+	 * represents the vector for these rings
+	 */
+	struct rnpvf_ring_container rx, tx;
+
+	struct napi_struct napi;
+	cpumask_t affinity_mask;
+	int numa_node;
+	u16 itr_rx;
+	u16 itr_tx;
+	struct rcu_head rcu; /* to avoid race with update stats on free */
+	u32 vector_flags;
+#define RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK ((u32)(1 << 0))
+#define RNPVF_QVECTOR_FLAG_ITR_FEATURE ((u32)(1 << 1))
+#define RNPVF_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS ((u32)(1 << 2))
+
+	int irq_check_usecs;
+	struct hrtimer irq_miss_check_timer;
+
+	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct rnpvf_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+static inline __le16 rnpvf_test_staterr(union rnp_rx_desc *rx_desc,
+					const u16 stat_err_bits)
+{
+	return rx_desc->wb.cmd & cpu_to_le16(stat_err_bits);
+}
+
+static inline __le16 rnpvf_get_stat(union rnp_rx_desc *rx_desc,
+				    const u16 stat_mask)
+{
+	return rx_desc->wb.cmd & cpu_to_le16(stat_mask);
+}
+
+static inline u16 rnpvf_desc_unused(struct rnpvf_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+/*
+ * microsecond values for various ITR rates, shifted by 2 to fit the ITR
+ * register, with the first 3 bits reserved as 0
+ */
+#define RNPVF_MIN_RSC_ITR 24
+#define RNPVF_100K_ITR 40
+#define RNPVF_20K_ITR 200
+#define RNPVF_10K_ITR 400
+#define RNPVF_8K_ITR 500
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways.  The lowest value
+ * supported by all of the rnp hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
+
+#define RNPVF_DESC_UNUSED(R)                                          \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	 (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define RNPVF_RX_DESC(R, i) (&(((union rnp_rx_desc *)((R)->desc))[i]))
+#define RNPVF_TX_DESC(R, i) (&(((struct rnp_tx_desc *)((R)->desc))[i]))
+#define RNPVF_TX_CTXTDESC(R, i) \
+	(&(((struct rnp_tx_ctx_desc *)((R)->desc))[i]))
+
+#define RNPVF_N10_MAX_JUMBO_FRAME_SIZE \
+	9590 /* maximum supported frame size: 9590 bytes */
+#define RNPVF_N500_MAX_JUMBO_FRAME_SIZE \
+	9722 /* maximum supported frame size: 9722 bytes */
+#define RNPVF_MIN_MTU 68
+
+#define MAX_MSIX_VECTORS 4
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+
+#define MIN_MSIX_Q_VECTORS 1
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+enum phy_type {
+	PHY_TYPE_NONE = 0,
+	PHY_TYPE_1G_BASE_KX,
+	PHY_TYPE_RGMII,
+	PHY_TYPE_10G_BASE_KR,
+	PHY_TYPE_25G_BASE_KR,
+	PHY_TYPE_40G_BASE_KR4,
+};
+
+struct rnpvf_hw {
+	void *back;
+	u8 __iomem *hw_addr;
+	u8 __iomem *hw_addr_bar0;
+	u8 __iomem *ring_msix_base;
+	u8 vfnum;
+#define VF_NUM_MASK 0x3f
+	struct pci_dev *pdev;
+
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+
+	enum rnp_board_type board_type;
+
+	u32 dma_version;
+	u16 min_length;
+	u16 max_length;
+
+	u16 queue_ring_base;
+	u32 tx_items_count;
+	u32 rx_items_count;
+	u16 mac_type;
+	u16 phy_type;
+	int mtu;
+	u16 link;
+	u16 speed;
+
+	struct rnpvf_hw_operations ops;
+	struct rnp_mac_info mac;
+	struct rnp_fc_info fc;
+	struct rnp_mbx_info mbx;
+
+	bool adapter_stopped;
+	u32 api_version;
+	int fw_version;
+	int usecstocount;
+#define PF_FEATURE_VLAN_FILTER BIT(0)
+#define PF_NCSI_EN BIT(1)
+#define PF_MAC_SPOOF BIT(2)
+	u32 pf_feature;
+
+	int mode;
+#define RNPVF_NET_FEATURE_SG ((u32)(1 << 0))
+#define RNPVF_NET_FEATURE_TX_CHECKSUM ((u32)(1 << 1))
+#define RNPVF_NET_FEATURE_RX_CHECKSUM ((u32)(1 << 2))
+#define RNPVF_NET_FEATURE_TSO ((u32)(1 << 3))
+#define RNPVF_NET_FEATURE_TX_UDP_TUNNEL (1 << 4)
+#define RNPVF_NET_FEATURE_VLAN_FILTER (1 << 5)
+#define RNPVF_NET_FEATURE_VLAN_OFFLOAD (1 << 6)
+#define RNPVF_NET_FEATURE_RX_NTUPLE_FILTER (1 << 7)
+#define RNPVF_NET_FEATURE_TCAM (1 << 8)
+#define RNPVF_NET_FEATURE_RX_HASH (1 << 9)
+#define RNPVF_NET_FEATURE_RX_FCS (1 << 10)
+#define RNPVF_NET_FEATURE_HW_TC (1 << 11)
+#define RNPVF_NET_FEATURE_USO (1 << 12)
+#define RNPVF_NET_FEATURE_STAG_FILTER (1 << 13)
+#define RNPVF_NET_FEATURE_STAG_OFFLOAD (1 << 14)
+
+	u32 feature_flags;
+};
+#define VFNUM(mbx, num) ((num) & (mbx)->vf_num_mask)
+
+enum irq_mode_enum {
+	irq_mode_msix,
+	irq_mode_msi,
+	irq_mode_legency,
+};
+
+/* board specific private data structure */
+struct rnpvf_adapter {
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+#define GET_VFNUM_FROM_BAR0 BIT(0)
+	u16 status;
+	u16 vf_vlan;
+	struct timer_list watchdog_timer;
+	u16 bd_number;
+	struct work_struct reset_task;
+	bool promisc_mode;
+
+	/* Interrupt Throttle Rate */
+	u16 rx_itr_setting;
+	u16 tx_itr_setting;
+
+	u16 rx_usecs;
+	u16 rx_frames;
+	u16 tx_usecs;
+	u16 tx_frames;
+	u32 pkt_rate_low;
+	u16 rx_usecs_low;
+	u32 pkt_rate_high;
+	u16 rx_usecs_high;
+	u32 sample_interval;
+	u32 adaptive_rx_coal;
+	u32 adaptive_tx_coal;
+	u32 auto_rx_coal;
+	u32 napi_budge;
+	u32 tx_work_limit;
+	/* TX */
+	struct rnpvf_ring
+		*tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
+	int tx_ring_item_count;
+	int num_q_vectors;
+	int num_tx_queues;
+	u64 restart_queue;
+	u64 hw_csum_tx_good;
+	u64 lsc_int;
+	u64 hw_tso_ctxt;
+	u64 hw_tso6_ctxt;
+	u32 tx_timeout_count;
+
+	/* RX */
+	struct rnpvf_ring *rx_ring[MAX_RX_QUEUES];
+	int rx_ring_item_count;
+	int num_rx_queues;
+	u64 hw_csum_rx_error;
+	u64 hw_rx_no_dma_resources;
+	u64 hw_csum_rx_good;
+	u64 non_eop_descs;
+
+	u32 alloc_rx_page_failed;
+	u32 alloc_rx_buff_failed;
+
+	int vector_off;
+	int num_other_vectors;
+	int irq_mode;
+	struct rnpvf_q_vector *q_vector[MAX_MSIX_VECTORS];
+
+	int num_msix_vectors;
+	struct msix_entry *msix_entries;
+
+	/* number of DMA ring channels actually in use */
+	u32 dma_channels;
+
+	/* Some features need tri-state capability,
+	 * thus the additional *_CAPABLE flags.
+	 */
+	u32 flags;
+#define RNPVF_FLAG_IN_WATCHDOG_TASK ((u32)(1))
+#define RNPVF_FLAG_IN_NETPOLL ((u32)(1 << 1))
+#define RNPVF_FLAG_PF_SET_VLAN ((u32)(1 << 2))
+#define RNPVF_FLAG_PF_UPDATE_MTU ((u32)(1 << 3))
+#define RNPVF_FLAG_PF_UPDATE_MAC ((u32)(1 << 4))
+#define RNPVF_FLAG_PF_UPDATE_VLAN ((u32)(1 << 5))
+#define RNPVF_FLAG_PF_RESET ((u32)(1 << 6))
+#define RNPVF_FLAG_PF_RESET_REQ ((u32)(1 << 7))
+#define RNPVF_FLAG_MSI_CAPABLE ((u32)(1 << 8))
+#define RNPVF_FLAG_MSI_ENABLED ((u32)(1 << 9))
+#define RNPVF_FLAG_MSIX_CAPABLE ((u32)(1 << 10))
+#define RNPVF_FLAG_MSIX_ENABLED ((u32)(1 << 11))
+#define RNPVF_FLAG_RX_CHKSUM_ENABLED ((u32)(1 << 12))
+#define RNPVF_FLAG_TX_VLAN_OFFLOAD ((u32)(1 << 13))
+#define RNPVF_FLAG_RX_VLAN_OFFLOAD ((u32)(1 << 14))
+
+	u32 priv_flags;
+#define RNPVF_PRIV_FLAG_FT_PADDING BIT(0)
+#define RNPVF_PRIV_FLAG_PADDING_DEBUG BIT(1)
+#define RNPVF_PRIV_FLAG_FCS_ON BIT(2)
+#define RNPVF_PRIV_FLAG_TX_PADDING BIT(3)
+
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+
+	/* structs defined in rnp_vf.h */
+	struct rnpvf_hw hw;
+	u16 msg_enable;
+	struct rnpvf_hw_stats stats;
+	struct rnpvf_hw_stats_own hw_stats;
+	u64 zero_base;
+	/* Interrupt Throttle Rate */
+	u32 eitr_param;
+
+	unsigned long state;
+	u64 tx_busy;
+	u32 link_speed;
+	bool link_up;
+
+	struct work_struct watchdog_task;
+
+	u8 port;
+
+	spinlock_t mbx_lock;
+	char name[60];
+};
+
+enum rnpvf_state_t {
+	__RNPVF_TESTING,
+	__RNPVF_RESETTING,
+	__RNPVF_DOWN,
+	__RNPVF_REMOVE,
+	__RNPVF_MBX_POLLING,
+	__RNPVF_LINK_DOWN
+};
+
+struct rnpvf_cb {
+	union { /* Union defining head/tail partner */
+		struct sk_buff *head;
+		struct sk_buff *tail;
+	};
+	dma_addr_t dma;
+	u16 append_cnt;
+	bool page_released;
+};
+#define RNPVF_CB(skb) ((struct rnpvf_cb *)(skb)->cb)
+
+#define RING2ADAPT(ring) netdev_priv((ring)->netdev)
+
+enum rnpvf_boards {
+	board_n10,
+	board_n500,
+};
+
+extern const struct rnpvf_info rnpvf_82599_vf_info;
+extern const struct rnpvf_info rnpvf_X540_vf_info;
+extern const struct rnp_mbx_operations rnpvf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char rnpvf_driver_name[];
+extern const char rnpvf_driver_version[];
+
+extern void rnpvf_up(struct rnpvf_adapter *adapter);
+extern void rnpvf_down(struct rnpvf_adapter *adapter);
+extern void rnpvf_reinit_locked(struct rnpvf_adapter *adapter);
+extern void rnpvf_reset(struct rnpvf_adapter *adapter);
+extern void rnpvf_set_ethtool_ops(struct net_device *netdev);
+extern int rnpvf_setup_rx_resources(struct rnpvf_adapter *,
+				    struct rnpvf_ring *);
+extern int rnpvf_setup_tx_resources(struct rnpvf_adapter *,
+				    struct rnpvf_ring *);
+extern void rnpvf_free_rx_resources(struct rnpvf_adapter *,
+				    struct rnpvf_ring *);
+extern void rnpvf_free_tx_resources(struct rnpvf_adapter *,
+				    struct rnpvf_ring *);
+extern void rnpvf_update_stats(struct rnpvf_adapter *adapter);
+extern int ethtool_ioctl(struct ifreq *ifr);
+extern void remove_mbx_irq(struct rnpvf_adapter *adapter);
+extern void rnpvf_clear_interrupt_scheme(struct rnpvf_adapter *adapter);
+extern int register_mbx_irq(struct rnpvf_adapter *adapter);
+extern int rnpvf_init_interrupt_scheme(struct rnpvf_adapter *adapter);
+extern int rnpvf_close(struct net_device *netdev);
+extern int rnpvf_open(struct net_device *netdev);
+
+extern void rnp_napi_add_all(struct rnpvf_adapter *adapter);
+extern void rnp_napi_del_all(struct rnpvf_adapter *adapter);
+
+extern int rnpvf_sysfs_init(struct net_device *ndev);
+extern void rnpvf_sysfs_exit(struct net_device *ndev);
+
+static inline int rnpvf_is_pf1(struct pci_dev *pdev)
+{
+	return ((pdev->devfn) ? 1 : 0);
+}
+
+static inline struct netdev_queue *
+txring_txq(const struct rnpvf_ring *ring)
+{
+	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+/*
+ * Each Rx descriptor uses a fixed 1536-byte buffer; jumbo frames are
+ * received with scatter-gather across multiple descriptors, so no larger
+ * page order is needed here.
+ */
+static inline unsigned int rnpvf_rx_bufsz(struct rnpvf_ring *ring)
+{
+	/* one rx-desc carries at most half a page; SG covers jumbo frames */
+	return RNPVF_RXBUFFER_1536;
+}
+
+/* SG , 1 rx-desc use one page */
+static inline unsigned int rnpvf_rx_pg_order(struct rnpvf_ring *ring)
+{
+	return 0;
+}
+#define rnpvf_rx_pg_size(_ring) (PAGE_SIZE << rnpvf_rx_pg_order(_ring))
+
+static inline u32 rnpvf_rx_desc_used_hw(struct rnpvf_hw *hw,
+					struct rnpvf_ring *rx_ring)
+{
+	u32 head = ring_rd32(rx_ring, RNP_DMA_REG_RX_DESC_BUF_HEAD);
+	u32 tail = ring_rd32(rx_ring, RNP_DMA_REG_RX_DESC_BUF_TAIL);
+	u16 count = rx_ring->count;
+
+	return ((tail >= head) ? (count - tail + head) : (head - tail));
+}
+
+static inline u32 rnpvf_tx_desc_unused_hw(struct rnpvf_hw *hw,
+					  struct rnpvf_ring *tx_ring)
+{
+	u32 head = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+	u32 tail = ring_rd32(tx_ring, RNP_DMA_REG_TX_DESC_BUF_TAIL);
+	u16 count = tx_ring->count;
+
+	return ((tail > head) ? (count - tail + head) : (head - tail));
+}
+
+#define IS_VALID_VID(vid) ((vid) >= 0 && (vid) < 4096)
+
+#endif /* _RNPVF_H_ */
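As a worked example of the EITR conversion macros above (not part of the patch): the register counts in 256ns units, so a requested 20000 interrupts/sec becomes 1000000000 / (20000 * 256) = 195 register units, and feeding 195 back through the same formula returns roughly 20032 ints/sec, which is why the comment notes the math is identical in both directions. The function name below is hypothetical.

	/* Hypothetical sanity check, not part of the patch. */
	static inline void example_eitr_roundtrip(void)
	{
		u32 reg = EITR_INTS_PER_SEC_TO_REG(20000);	/* 195 */
		u32 ints = EITR_REG_TO_INTS_PER_SEC(reg);	/* ~20032 */

		(void)ints;
	}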
diff --git a/drivers/net/ethernet/mucse/rnpvf/rnpvf_main.c b/drivers/net/ethernet/mucse/rnpvf/rnpvf_main.c
new file mode 100644
index 0000000000000..85a25bc5a2f7b
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/rnpvf_main.c
@@ -0,0 +1,6434 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "rnpvf.h"
+
+#ifdef FIX_VF_BUG
+#define CONFIG_BAR4_PFVFNUM 0
+#else
+#define CONFIG_BAR4_PFVFNUM 1
+#endif
+char rnpvf_driver_name[] = "rnpvf";
+static const char rnpvf_driver_string[] =
+	"Mucse(R) 10/40G Gigabit PCI Express Virtual Function Network Driver";
+
+#define DRV_VERSION "1.0.1-rc7"
+const char rnpvf_driver_version[] = DRV_VERSION;
+static const char rnpvf_copyright[] =
+	"Copyright (c) 2020 - 2024 Mucse Corporation.";
+
+extern const struct rnpvf_info rnp_n10_vf_info;
+
+static const struct rnpvf_info *rnpvf_info_tbl[] = {
+	[board_n10] = &rnp_n10_vf_info,
+};
+
+#define N10_BOARD board_n10
+
+static unsigned int fix_eth_name;
+module_param(fix_eth_name, uint, 0000);
+MODULE_PARM_DESC(fix_eth_name, "set eth adapter name to rnpvfXX");
+static const struct pci_device_id rnpvf_pci_tbl[] = {
+	{ PCI_DEVICE(0x8848, 0x1080), .driver_data = N10_BOARD },
+	{ PCI_DEVICE(0x8848, 0x1084), .driver_data = N10_BOARD },
+	{ PCI_DEVICE(0x8848, 0x1081), .driver_data = N10_BOARD },
+	{ PCI_DEVICE(0x8848, 0x1083), .driver_data = N10_BOARD },
+	{ PCI_DEVICE(0x8848, 0x1C80), .driver_data = N10_BOARD },
+	{ PCI_DEVICE(0x8848, 0x1C81), .driver_data = N10_BOARD },
+	{ PCI_DEVICE(0x8848, 0x1C83), .driver_data = N10_BOARD },
+	/* required last entry */
+	{
+		0,
+	},
+};
+
+MODULE_DEVICE_TABLE(pci, rnpvf_pci_tbl);
+MODULE_AUTHOR("Mucse Corporation, ");
+MODULE_DESCRIPTION("Mucse(R) N10/N400 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_MSG_ENABLE \
+	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0000);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int pci_using_hi_dma;
+
+/* forward decls */
+static void rnpvf_set_itr(struct rnpvf_q_vector *q_vector);
+static void rnpvf_free_all_rx_resources(struct rnpvf_adapter *adapter);
+
+#define RNPVF_XDP_PASS 0
+#define RNPVF_XDP_CONSUMED 1
+#define RNPVF_XDP_TX 2
+
+static void rnpvf_pull_tail(struct sk_buff *skb);
+#ifdef OPTM_WITH_LPAGE
+static bool rnpvf_alloc_mapped_page(struct rnpvf_ring *rx_ring,
+				    struct rnpvf_rx_buffer *bi,
+				    union rnp_rx_desc *rx_desc, u16 bufsz,
+				    u64 fun_id);
+
+static void rnpvf_put_rx_buffer(struct rnpvf_ring *rx_ring,
+				struct rnpvf_rx_buffer *rx_buffer);
+#else /* OPTM_WITH_LPAGE */
+static bool rnpvf_alloc_mapped_page(struct rnpvf_ring *rx_ring,
+				    struct rnpvf_rx_buffer *bi);
+static void rnpvf_put_rx_buffer(struct rnpvf_ring *rx_ring,
+				struct rnpvf_rx_buffer *rx_buffer,
+				struct sk_buff *skb);
+#endif /* OPTM_WITH_LPAGE */
+
+/**
+ * rnpvf_set_ring_vector - map a ring's Rx/Tx interrupts to an MSI-X vector
+ * @adapter: pointer to adapter struct
+ * @rnpvf_queue: queue to map the corresponding interrupt to
+ * @rnpvf_msix_vector: the vector to map to the corresponding queue
+ */
+static void rnpvf_set_ring_vector(struct rnpvf_adapter *adapter,
+				  u8 rnpvf_queue, u8 rnpvf_msix_vector)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	u32 data = 0;
+
+	data = hw->vfnum << 24;
+	data |= (rnpvf_msix_vector << 8);
+	data |= (rnpvf_msix_vector << 0);
+	DPRINTK(IFUP, INFO,
+		"Set Ring-Vector queue:%d (reg:0x%x) <-- Rx-MSIX:%d, Tx-MSIX:%d\n",
+		rnpvf_queue, RING_VECTOR(rnpvf_queue), rnpvf_msix_vector,
+		rnpvf_msix_vector);
+
+	rnpvf_wr_reg(hw->ring_msix_base + RING_VECTOR(rnpvf_queue), data);
+}
+
+void rnpvf_unmap_and_free_tx_resource(struct rnpvf_ring *ring,
+				      struct rnpvf_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_single(ring->dev,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
+					 DMA_TO_DEVICE);
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len),
+			       DMA_TO_DEVICE);
+	}
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
+}
+
+static void rnpvf_tx_timeout(struct net_device *netdev);
+
+/**
+ * rnpvf_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ **/
+static bool rnpvf_clean_tx_irq(struct rnpvf_q_vector *q_vector,
+			       struct rnpvf_ring *tx_ring)
+{
+	struct rnpvf_adapter *adapter = q_vector->adapter;
+	struct rnpvf_tx_buffer *tx_buffer;
+	struct rnp_tx_desc *tx_desc;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = adapter->tx_work_limit;
+	unsigned int i = tx_ring->next_to_clean;
+
+	if (test_bit(__RNPVF_DOWN, &adapter->state))
+		return true;
+	tx_ring->tx_stats.poll_count++;
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = RNPVF_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
+
+	do {
+		struct rnp_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		rmb();
+
+		/* if eop DD is not set pending work has not been completed */
+		if (!(eop_desc->cmd & cpu_to_le16(RNP_TXD_STAT_DD)))
+			break;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
+
+		/* free the skb */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
+		/* clear tx_buffer data */
+		tx_buffer->skb = NULL;
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = RNPVF_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len)) {
+				dma_unmap_page(
+					tx_ring->dev,
+					dma_unmap_addr(tx_buffer, dma),
+					dma_unmap_len(tx_buffer, len),
+					DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buffer, len, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = RNPVF_TX_DESC(tx_ring, 0);
+		}
+
+		/* issue prefetch for next Tx descriptor */
+		prefetch(tx_desc);
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->stats.bytes += total_bytes;
+	tx_ring->stats.packets += total_packets;
+	u64_stats_update_end(&tx_ring->syncp);
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
+
+	netdev_tx_completed_queue(txring_txq(tx_ring), total_packets,
+				  total_bytes);
+
+	if (!(q_vector->vector_flags &
+	      RNPVF_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS)) {
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+		if (unlikely(total_packets &&
+			     netif_carrier_ok(tx_ring->netdev) &&
+			     (rnpvf_desc_unused(tx_ring) >=
+			      TX_WAKE_THRESHOLD))) {
+			/* Make sure that anybody stopping the queue after this
+			 * sees the new next_to_clean.
+			 */
+			smp_mb();
+			if (__netif_subqueue_stopped(
+				    tx_ring->netdev,
+				    tx_ring->queue_index) &&
+			    !test_bit(__RNPVF_DOWN, &adapter->state)) {
+				netif_wake_subqueue(tx_ring->netdev,
+						    tx_ring->queue_index);
+				++tx_ring->tx_stats.restart_queue;
+			}
+		}
+	}
+
+	return !!budget;
+}
+
+static inline void rnpvf_rx_hash(struct rnpvf_ring *ring,
+				 union rnp_rx_desc *rx_desc,
+				 struct sk_buff *skb)
+{
+	int rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+#define RNPVF_RSS_TYPE_MASK 0xc0
+	rss_type = rx_desc->wb.cmd & RNPVF_RSS_TYPE_MASK;
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash),
+		     rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
+/**
+ * rnpvf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
+ * @skb: skb currently being received and modified
+ **/
+static inline void rnpvf_rx_checksum(struct rnpvf_ring *ring,
+				     union rnp_rx_desc *rx_desc,
+				     struct sk_buff *skb)
+{
+	bool encap_pkt = false;
+
+	skb_checksum_none_assert(skb);
+
+	/* Rx csum disabled */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* vxlan packet handle ? */
+	if (!(ring->ring_flags & RNPVF_RING_NO_TUNNEL_SUPPORT)) {
+		if (rnpvf_get_stat(rx_desc, RNP_RXD_STAT_TUNNEL_MASK) ==
+		    RNP_RXD_STAT_TUNNEL_VXLAN) {
+			encap_pkt = true;
+			skb->encapsulation = 1;
+			skb->ip_summed = CHECKSUM_NONE;
+		}
+	}
+
+	/* if L3/L4  error:ignore errors from veb(other vf) */
+	if (unlikely(rnpvf_test_staterr(rx_desc, RNP_RXD_STAT_ERR_MASK))) {
+		ring->rx_stats.csum_err++;
+		return;
+	}
+	ring->rx_stats.csum_good++;
+	/* It must be a TCP or UDP packet with a valid checksum */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	if (encap_pkt) {
+		/* If we checked the outer header let the stack know */
+		skb->csum_level = 1;
+	}
+}
+
+static inline void rnpvf_update_rx_tail(struct rnpvf_ring *rx_ring,
+					u32 val)
+{
+	rx_ring->next_to_use = val;
+
+	/* update next to alloc since we have filled the ring */
+	rx_ring->next_to_alloc = val;
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	rnpvf_wr_reg(rx_ring->tail, val);
+}
+
+#ifndef OPTM_WITH_LPAGE
+/**
+ * rnpvf_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void rnpvf_alloc_rx_buffers(struct rnpvf_ring *rx_ring, u16 cleaned_count)
+{
+	union rnp_rx_desc *rx_desc;
+	struct rnpvf_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+	u64 fun_id = ((u64)(rx_ring->vfnum) << (32 + 24));
+	u16 bufsz;
+
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = RNPVF_RX_DESC(rx_ring, i);
+	BUG_ON(rx_desc == NULL);
+	bi = &rx_ring->rx_buffer_info[i];
+	BUG_ON(bi == NULL);
+	i -= rx_ring->count;
+	bufsz = rnpvf_rx_bufsz(rx_ring);
+
+	do {
+		if (!rnpvf_alloc_mapped_page(rx_ring, bi))
+			break;
+
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE);
+
+		/*
+		 * Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->pkt_addr =
+			cpu_to_le64(bi->dma + bi->page_offset + fun_id);
+		/* clean dd */
+		rx_desc->cmd = 0;
+
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = RNPVF_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the hdr_addr for the next_to_use descriptor */
+		cleaned_count--;
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i)
+		rnpvf_update_rx_tail(rx_ring, i);
+}
+#endif
+
+/**
+ * rnpvf_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void rnpvf_reuse_rx_page(struct rnpvf_ring *rx_ring,
+				struct rnpvf_rx_buffer *old_buff)
+{
+	struct rnpvf_rx_buffer *new_buff;
+	u16 nta = rx_ring->next_to_alloc;
+
+	new_buff = &rx_ring->rx_buffer_info[nta];
+
+	/* update, and store next to alloc */
+	nta++;
+	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+	/*
+	 * Transfer page from old buffer to new buffer.
+	 * Move each member individually to avoid possible store
+	 * forwarding stalls and unnecessary copy of skb.
+	 */
+	new_buff->dma = old_buff->dma;
+	new_buff->page = old_buff->page;
+	new_buff->page_offset = old_buff->page_offset;
+	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static inline bool rnpvf_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) ||
+	       page_is_pfmemalloc(page);
+}
+
+static bool rnpvf_can_reuse_rx_page(struct rnpvf_rx_buffer *rx_buffer)
+{
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+	struct page *page = rx_buffer->page;
+
+#ifdef OPTM_WITH_LPAGE
+	return false;
+#endif
+	/* avoid re-using remote pages */
+	if (unlikely(rnpvf_page_is_reserved(page)))
+		return false;
+
+#if (PAGE_SIZE < 8192)
+	/* if we are only owner of page we can reuse it */
+	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+		return false;
+#else
+	/*
+	 * The last offset is a bit aggressive in that we assume the
+	 * worst case of FCoE being enabled and using a 3K buffer.
+	 * However this should have minimal impact as the 1K extra is
+	 * still less than one buffer in size.
+	 */
+#define RNPVF_LAST_OFFSET \
+	(SKB_WITH_OVERHEAD(PAGE_SIZE) - RNPVF_RXBUFFER_2K)
+	if (rx_buffer->page_offset > RNPVF_LAST_OFFSET)
+		return false;
+#endif
+
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
+	 */
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
+
+	return true;
+}
+
+#if (PAGE_SIZE < 8192)
+#define RNPVF_MAX_2K_FRAME_BUILD_SKB (RNPVF_RXBUFFER_1536 - NET_IP_ALIGN)
+#define RNPVF_2K_TOO_SMALL_WITH_PADDING        \
+	((NET_SKB_PAD + RNPVF_RXBUFFER_1536) > \
+	 SKB_WITH_OVERHEAD(RNPVF_RXBUFFER_2K))
+
+static inline int rnpvf_compute_pad(int rx_buf_len)
+{
+	int page_size, pad_size;
+
+	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+	return pad_size;
+}
+
+static inline int rnpvf_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (RNPVF_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len =
+			RNPVF_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = RNPVF_RXBUFFER_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+	return rnpvf_compute_pad(rx_buf_len);
+}
+
+#define RNPVF_SKB_PAD rnpvf_skb_pad()
+#else /* PAGE_SIZE < 8192 */
+#define RNPVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#endif
+
+/**
+ * rnpvf_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void rnpvf_clean_rx_ring(struct rnpvf_ring *rx_ring)
+{
+	u16 i = rx_ring->next_to_clean;
+	struct rnpvf_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
+
+	/* Free all the Rx ring sk_buffs */
+	while (i != rx_ring->next_to_alloc) {
+		if (rx_buffer->skb) {
+			struct sk_buff *skb = rx_buffer->skb;
+
+			dev_kfree_skb(skb);
+			rx_buffer->skb = NULL;
+		}
+
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      rnpvf_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpvf_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     RNPVF_RX_DMA_ATTR);
+
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+		/* now this page is not used */
+		rx_buffer->page = NULL;
+		i++;
+		rx_buffer++;
+		if (i == rx_ring->count) {
+			i = 0;
+			rx_buffer = rx_ring->rx_buffer_info;
+		}
+	}
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+static inline unsigned int rnpvf_rx_offset(struct rnpvf_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? RNPVF_SKB_PAD : 0;
+}
+
+#ifdef OPTM_WITH_LPAGE
+static bool rnpvf_alloc_mapped_page(struct rnpvf_ring *rx_ring,
+				    struct rnpvf_rx_buffer *bi,
+				    union rnp_rx_desc *rx_desc, u16 bufsz,
+				    u64 fun_id)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	page = dev_alloc_pages(RNPVF_ALLOC_PAGE_ORDER);
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	bi->page_offset = rnpvf_rx_offset(rx_ring);
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, bi->page_offset,
+				 bufsz, DMA_FROM_DEVICE,
+				 RNPVF_RX_DMA_ATTR);
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, RNPVF_ALLOC_PAGE_ORDER);
+		printk("map failed\n");
+
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = rnpvf_rx_offset(rx_ring);
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
+	rx_ring->rx_stats.alloc_rx_page++;
+
+	/* sync the buffer for use by the device */
+	dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, bufsz,
+					 DMA_FROM_DEVICE);
+
+	/*
+	 * Refresh the desc even if buffer_addrs didn't change
+	 * because each write-back erases this info.
+	 */
+	rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id);
+
+	return true;
+}
+
+static void rnpvf_put_rx_buffer(struct rnpvf_ring *rx_ring,
+				struct rnpvf_rx_buffer *rx_buffer)
+{
+	if (rnpvf_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		rnpvf_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpvf_rx_bufsz(rx_ring),
+				     DMA_FROM_DEVICE,
+				     RNPVF_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+}
+
+/**
+ * rnpvf_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void rnpvf_alloc_rx_buffers(struct rnpvf_ring *rx_ring, u16 cleaned_count)
+{
+	union rnp_rx_desc *rx_desc;
+	struct rnpvf_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+	u64 fun_id = ((u64)(rx_ring->vfnum) << (32 + 24));
+	u16 bufsz;
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = RNPVF_RX_DESC(rx_ring, i);
+
+	BUG_ON(rx_desc == NULL);
+
+	bi = &rx_ring->rx_buffer_info[i];
+
+	BUG_ON(bi == NULL);
+
+	i -= rx_ring->count;
+	bufsz = rnpvf_rx_bufsz(rx_ring);
+
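+	/*
+	 * In OPTM_WITH_LPAGE mode one high-order page is carved into
+	 * rx_page_buf_nums slices of bufsz bytes each: the first slice is
+	 * mapped by rnpvf_alloc_mapped_page() above, the remaining slices
+	 * are mapped in the inner loop below, one descriptor per slice.
+	 */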
+	do {
+		int count = 1;
+		struct page *page;
+
+		if (!rnpvf_alloc_mapped_page(rx_ring, bi, rx_desc, bufsz,
+					     fun_id))
+			break;
+		page = bi->page;
+
+		rx_desc->cmd = 0;
+
+		rx_desc++;
+		i++;
+		bi++;
+
+		if (unlikely(!i)) {
+			rx_desc = RNPVF_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		rx_desc->cmd = 0;
+
+		cleaned_count--;
+
+		while (count < rx_ring->rx_page_buf_nums &&
+		       cleaned_count) {
+			dma_addr_t dma;
+
+			bi->page_offset = rx_ring->rx_per_buf_mem * count +
+					  rnpvf_rx_offset(rx_ring);
+			/* map page for use */
+			dma = dma_map_page_attrs(rx_ring->dev, page,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE,
+						 RNPVF_RX_DMA_ATTR);
+
+			if (dma_mapping_error(rx_ring->dev, dma)) {
+				printk("map second error\n");
+				rx_ring->rx_stats.alloc_rx_page_failed++;
+				break;
+			}
+
+			bi->dma = dma;
+			bi->page = page;
+
+			page_ref_add(page, USHRT_MAX);
+			bi->pagecnt_bias = USHRT_MAX;
+
+			/* sync the buffer for use by the device */
+			dma_sync_single_range_for_device(rx_ring->dev,
+							 bi->dma, 0, bufsz,
+							 DMA_FROM_DEVICE);
+
+			/*
+			 * Refresh the desc even if buffer_addrs didn't change
+			 * because each write-back erases this info.
+			 */
+			rx_desc->pkt_addr = cpu_to_le64(bi->dma + fun_id);
+			/* clean dd */
+			rx_desc->cmd = 0;
+
+			rx_desc++;
+			bi++;
+			i++;
+			if (unlikely(!i)) {
+				rx_desc = RNPVF_RX_DESC(rx_ring, 0);
+				bi = rx_ring->rx_buffer_info;
+				i -= rx_ring->count;
+			}
+			count++;
+			/* clear the hdr_addr for the next_to_use descriptor */
+			cleaned_count--;
+		}
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i)
+		rnpvf_update_rx_tail(rx_ring, i);
+}
+
+#else
+
+static bool rnpvf_alloc_mapped_page(struct rnpvf_ring *rx_ring,
+				    struct rnpvf_rx_buffer *bi)
+{
+	struct page *page = bi->page;
+	dma_addr_t dma;
+
+	/* since we are recycling buffers we should seldom need to alloc */
+	if (likely(page))
+		return true;
+
+	page = dev_alloc_pages(rnpvf_rx_pg_order(rx_ring));
+	if (unlikely(!page)) {
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+
+	/* map page for use */
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+				 rnpvf_rx_pg_size(rx_ring),
+				 DMA_FROM_DEVICE,
+				 RNPVF_RX_DMA_ATTR);
+
+	/*
+	 * if mapping failed free memory back to system since
+	 * there isn't much point in holding memory we can't use
+	 */
+	if (dma_mapping_error(rx_ring->dev, dma)) {
+		__free_pages(page, rnpvf_rx_pg_order(rx_ring));
+		printk("map failed\n");
+
+		rx_ring->rx_stats.alloc_rx_page_failed++;
+		return false;
+	}
+	bi->dma = dma;
+	bi->page = page;
+	bi->page_offset = rnpvf_rx_offset(rx_ring);
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
+	rx_ring->rx_stats.alloc_rx_page++;
+
+	return true;
+}
+
+static void rnpvf_put_rx_buffer(struct rnpvf_ring *rx_ring,
+				struct rnpvf_rx_buffer *rx_buffer,
+				struct sk_buff *skb)
+{
+	if (rnpvf_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		rnpvf_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* we are not reusing the buffer so unmap it */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     rnpvf_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     RNPVF_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+	rx_buffer->skb = NULL;
+}
+
+#endif /* OPTM_WITH_LPAGE */
+
+/* drop the packet if the hardware reports a checksum error */
+static bool rnpvf_check_csum_error(struct rnpvf_ring *rx_ring,
+				   union rnp_rx_desc *rx_desc,
+				   unsigned int size,
+				   unsigned int *driver_drop_packets)
+{
+	bool err = false;
+
+	struct net_device *netdev = rx_ring->netdev;
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+
+	if ((netdev->features & NETIF_F_RXCSUM) &&
+	    (!(adapter->priv_flags & RNPVF_PRIV_FLAG_FCS_ON))) {
+		if (unlikely(rnpvf_test_staterr(rx_desc,
+						RNP_RXD_STAT_ERR_MASK))) {
+			/* count the error; in promisc/rx-all mode the
+			 * packet is still pushed to the stack
+			 */
+			rx_ring->rx_stats.csum_err++;
+
+			if ((!(netdev->flags & IFF_PROMISC) &&
+			     (!(netdev->features & NETIF_F_RXALL)))) {
+				if (rx_ring->ring_flags &
+				    RNPVF_RING_CHKSM_FIX) {
+					err = true;
+					goto skip_fix;
+				}
+				if (unlikely(rnpvf_test_staterr(
+					     rx_desc,
+					     RNP_RXD_STAT_L4_MASK) &&
+					    (!(rx_desc->wb.rev1 &
+					       RNP_RX_L3_TYPE_MASK)))) {
+					rx_ring->rx_stats.csum_err--;
+					goto skip_fix;
+				}
+
+				if (unlikely(rnpvf_test_staterr(
+					    rx_desc,
+					    RNP_RXD_STAT_SCTP_MASK))) {
+					if (size > 60) {
+						err = true;
+					} else {
+						/* hw mistakenly flags SCTP packets < 60 bytes */
+						rx_ring->rx_stats
+							.csum_err--;
+					}
+				} else {
+					err = true;
+				}
+			}
+		}
+	}
+
+skip_fix:
+	if (err) {
+		u32 ntc = rx_ring->next_to_clean + 1;
+		struct rnpvf_rx_buffer *rx_buffer;
+#if (PAGE_SIZE < 8192)
+		unsigned int truesize = rnpvf_rx_pg_size(rx_ring) / 2;
+#else
+		unsigned int truesize =
+			ring_uses_build_skb(rx_ring) ?
+				SKB_DATA_ALIGN(RNPVF_SKB_PAD + size) :
+				SKB_DATA_ALIGN(size);
+#endif
+
+		if (likely(rnpvf_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+			*driver_drop_packets = *driver_drop_packets + 1;
+
+		/* we are reusing so sync this buffer for CPU use */
+		rx_buffer =
+			&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+					      rx_buffer->page_offset, size,
+					      DMA_FROM_DEVICE);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+#ifdef OPTM_WITH_LPAGE
+		rnpvf_put_rx_buffer(rx_ring, rx_buffer);
+#else
+		rnpvf_put_rx_buffer(rx_ring, rx_buffer, NULL);
+#endif
+		ntc = (ntc < rx_ring->count) ? ntc : 0;
+		rx_ring->next_to_clean = ntc;
+	}
+	return err;
+}
+
+/**
+ * rnpvf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void rnpvf_process_skb_fields(struct rnpvf_ring *rx_ring,
+				     union rnp_rx_desc *rx_desc,
+				     struct sk_buff *skb)
+{
+	struct net_device *dev = rx_ring->netdev;
+	struct rnpvf_adapter *adapter = netdev_priv(dev);
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	rnpvf_rx_hash(rx_ring, rx_desc, skb);
+	rnpvf_rx_checksum(rx_ring, rx_desc, skb);
+
+	/* If this is an NCSI card and the PF has set a VLAN, check the
+	 * VLAN id here; in that case rx vlan offload must be off.
+	 */
+	if ((hw->pf_feature & PF_NCSI_EN) &&
+	    (adapter->flags & RNPVF_FLAG_PF_SET_VLAN)) {
+		u16 vid_pf;
+		u8 header[ETH_ALEN + ETH_ALEN];
+		u8 *data = skb->data;
+
+		if (__vlan_get_tag(skb, &vid_pf))
+			goto skip_vf_vlan;
+
+		if (vid_pf == adapter->vf_vlan) {
+			memcpy(header, data, ETH_ALEN + ETH_ALEN);
+			memcpy(skb->data + VLAN_HLEN, header,
+			       ETH_ALEN + ETH_ALEN);
+			skb->len -= VLAN_HLEN;
+			skb->data += VLAN_HLEN;
+			goto skip_vf_vlan;
+		}
+	}
+
+	/* remove vlan if pf set a vlan */
+	if (((dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+	     || (dev->features & NETIF_F_HW_VLAN_STAG_RX)) &&
+	    rnpvf_test_staterr(rx_desc, RNP_RXD_STAT_VLAN_VALID) &&
+	    !(cpu_to_le16(rx_desc->wb.rev1) & VEB_VF_IGNORE_VLAN)) {
+		u16 vid = le16_to_cpu(rx_desc->wb.vlan);
+
+		if ((adapter->vf_vlan) && (adapter->vf_vlan == vid))
+			goto skip_vf_vlan;
+
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+		rx_ring->rx_stats.vlan_remove++;
+	}
+skip_vf_vlan:
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+
+	skb->protocol = eth_type_trans(skb, dev);
+}
+
+static void rnpvf_rx_skb(struct rnpvf_q_vector *q_vector,
+			 struct sk_buff *skb)
+{
+	struct rnpvf_adapter *adapter = q_vector->adapter;
+
+	if (!(adapter->flags & RNPVF_FLAG_IN_NETPOLL))
+		napi_gro_receive(&q_vector->napi, skb);
+	else
+		netif_rx(skb);
+}
+
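+/*
+ * Multicast frames whose source MAC is one of our own addresses are most
+ * likely our own transmits echoed back by the embedded switch, so drop
+ * them; only used when RNPVF_RING_VEB_MULTI_FIX is not set on the ring.
+ */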
+static bool rnpvf_check_src_mac(struct sk_buff *skb,
+				struct net_device *netdev)
+{
+	u8 *data = skb->data;
+	struct netdev_hw_addr *ha;
+
+	if (is_multicast_ether_addr(data)) {
+		/* drop the frame if its source MAC is our own address */
+		if (!memcmp(data + netdev->addr_len, netdev->dev_addr,
+			    netdev->addr_len)) {
+			dev_kfree_skb_any(skb);
+			return true;
+		}
+		/* ... or matches one of our unicast filter addresses */
+		netdev_for_each_uc_addr(ha, netdev) {
+			if (!memcmp(data + netdev->addr_len, ha->addr,
+				    netdev->addr_len)) {
+				dev_kfree_skb_any(skb);
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+/**
+ * rnpvf_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int rnpvf_get_headlen(unsigned char *data,
+				      unsigned int max_len)
+{
+	union {
+		unsigned char *network;
+		/* l2 headers */
+		struct ethhdr *eth;
+		struct vlan_hdr *vlan;
+		/* l3 headers */
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
+	__be16 protocol;
+	u8 nexthdr = 0; /* default to not TCP */
+	u8 hlen;
+
+	/* this should never happen, but better safe than sorry */
+	if (max_len < ETH_HLEN)
+		return max_len;
+
+	/* initialize network frame pointer */
+	hdr.network = data;
+
+	/* set first protocol and move network header forward */
+	protocol = hdr.eth->h_proto;
+	hdr.network += ETH_HLEN;
+
+	/* handle any vlan tag if present */
+	if (protocol == htons(ETH_P_8021Q)) {
+		if ((hdr.network - data) > (max_len - VLAN_HLEN))
+			return max_len;
+
+		protocol = hdr.vlan->h_vlan_encapsulated_proto;
+		hdr.network += VLAN_HLEN;
+	}
+
+	/* handle L3 protocols */
+	if (protocol == htons(ETH_P_IP)) {
+		if ((hdr.network - data) >
+		    (max_len - sizeof(struct iphdr)))
+			return max_len;
+
+		/* access ihl as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[0] & 0x0F) << 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct iphdr))
+			return hdr.network - data;
+
+		/* record next protocol if header is present */
+		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
+			nexthdr = hdr.ipv4->protocol;
+	} else if (protocol == htons(ETH_P_IPV6)) {
+		if ((hdr.network - data) >
+		    (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hlen = sizeof(struct ipv6hdr);
+	} else {
+		return hdr.network - data;
+	}
+
+	/* relocate pointer to start of L4 header */
+	hdr.network += hlen;
+
+	/* finally sort out TCP/UDP */
+	if (nexthdr == IPPROTO_TCP) {
+		if ((hdr.network - data) >
+		    (max_len - sizeof(struct tcphdr)))
+			return max_len;
+
+		/* access doff as a u8 to avoid unaligned access on ia64 */
+		hlen = (hdr.network[12] & 0xF0) >> 2;
+
+		/* verify hlen meets minimum size requirements */
+		if (hlen < sizeof(struct tcphdr))
+			return hdr.network - data;
+
+		hdr.network += hlen;
+	} else if (nexthdr == IPPROTO_UDP) {
+		if ((hdr.network - data) >
+		    (max_len - sizeof(struct udphdr)))
+			return max_len;
+
+		hdr.network += sizeof(struct udphdr);
+	}
+
+	/*
+	 * If everything has gone correctly hdr.network should be the
+	 * data section of the packet and will be the end of the header.
+	 * If not then it probably represents the end of the last recognized
+	 * header.
+	 */
+	if ((hdr.network - data) < max_len)
+		return hdr.network - data;
+	else
+		return max_len;
+}
+
+/**
+ * rnpvf_pull_tail - rnp specific version of skb_pull_tail
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an rnp specific version of __pskb_pull_tail.  The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void rnpvf_pull_tail(struct sk_buff *skb)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+	unsigned char *va;
+	unsigned int pull_len;
+
+	/*
+	 * it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lomem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	/*
+	 * we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = rnpvf_get_headlen(va, RNPVF_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	skb_frag_off_add(frag, pull_len);
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * rnpvf_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right.  These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool rnpvf_cleanup_headers(struct rnpvf_ring *rx_ring,
+				  union rnp_rx_desc *rx_desc,
+				  struct sk_buff *skb)
+{
+#ifndef OPTM_WITH_LPAGE
+	/* XDP packets use error pointer so abort at this point */
+	if (IS_ERR(skb))
+		return true;
+#endif
+
+	/* place header in linear portion of buffer */
+	if (!skb_headlen(skb))
+		rnpvf_pull_tail(skb);
+
+	if (eth_skb_pad(skb))
+		return true;
+	if (!(rx_ring->ring_flags & RNPVF_RING_VEB_MULTI_FIX))
+		return rnpvf_check_src_mac(skb, rx_ring->netdev);
+
+	return false;
+}
+
+/**
+ * rnpvf_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: size of data
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function then updates the page offset so that the space just handed
+ * to the stack is not reused before the buffer is recycled.
+ **/
+static void rnpvf_add_rx_frag(struct rnpvf_ring *rx_ring,
+			      struct rnpvf_rx_buffer *rx_buffer,
+			      struct sk_buff *skb, unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpvf_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize =
+		ring_uses_build_skb(rx_ring) ?
+			SKB_DATA_ALIGN(RNPVF_SKB_PAD + size) :
+			SKB_DATA_ALIGN(size);
+#endif
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+			rx_buffer->page_offset, size, truesize);
+
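+	/*
+	 * Move past the region just handed to the stack: flip between the
+	 * two halves of the page on small pages, or advance the offset by
+	 * truesize on larger pages.
+	 */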
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
+#ifdef OPTM_WITH_LPAGE
+static struct sk_buff *rnpvf_build_skb(struct rnpvf_ring *rx_ring,
+				       struct rnpvf_rx_buffer *rx_buffer,
+				       union rnp_rx_desc *rx_desc,
+				       unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	unsigned int truesize =
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+		SKB_DATA_ALIGN(size + RNPVF_SKB_PAD);
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(va - RNPVF_SKB_PAD, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, RNPVF_SKB_PAD);
+	__skb_put(skb, size);
+
+	return skb;
+}
+
+static struct rnpvf_rx_buffer *
+rnpvf_get_rx_buffer(struct rnpvf_ring *rx_ring, union rnp_rx_desc *rx_desc,
+		    const unsigned int size)
+{
+	struct rnpvf_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+
+	rx_buf_dump("rx buf",
+		    page_address(rx_buffer->page) + rx_buffer->page_offset,
+		    rx_desc->wb.len);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, 0,
+				      size, DMA_FROM_DEVICE);
+	/* skip_sync: */
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+/**
+ * rnpvf_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it clears the descriptor
+ * command word and returns true indicating that this is in fact a
+ * non-EOP buffer.
+ **/
+static bool rnpvf_is_non_eop(struct rnpvf_ring *rx_ring,
+			     union rnp_rx_desc *rx_desc)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(RNPVF_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpvf_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+		return false;
+	/* place skb in next buffer to be received */
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+
+	return true;
+}
+
+static struct sk_buff *
+rnpvf_construct_skb(struct rnpvf_ring *rx_ring,
+		    struct rnpvf_rx_buffer *rx_buffer,
+		    union rnp_rx_desc *rx_desc, unsigned int size)
+{
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	unsigned int truesize = SKB_DATA_ALIGN(size);
+	unsigned int headlen;
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+	/* Note, we get here by enabling legacy-rx via:
+	 *
+	 *    ethtool --set-priv-flags  legacy-rx on
+	 *
+	 * In this mode, we currently get 0 extra XDP headroom as
+	 * opposed to having legacy-rx off, where we process XDP
+	 * packets going to stack via rnpvf_build_skb(). The latter
+	 * provides us currently with 192 bytes of headroom.
+	 *
+	 * For rnpvf_construct_skb() mode it means that the
+	 * xdp->data_meta will always point to xdp->data, since
+	 * the helper cannot expand the head. Should this ever
+	 * change in future for legacy-rx mode on, then lets also
+	 * add xdp->data_meta handling here.
+	 */
+
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNPVF_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	prefetchw(skb->data);
+
+	/* Determine available headroom for copy */
+	headlen = size;
+	if (headlen > RNPVF_RX_HDR_SIZE)
+		headlen = rnpvf_get_headlen(va, RNPVF_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+	/* update all of the pointers */
+	size -= headlen;
+
+	if (size) {
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				(va + headlen) -
+					page_address(rx_buffer->page),
+				size, truesize);
+		rx_buffer->page_offset += truesize;
+	} else {
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+/**
+ * rnpvf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int rnpvf_clean_rx_irq(struct rnpvf_q_vector *q_vector,
+			      struct rnpvf_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int err_packets = 0;
+	unsigned int driver_drop_packets = 0;
+	struct sk_buff *skb = rx_ring->skb;
+	struct rnpvf_adapter *adapter = q_vector->adapter;
+	u16 cleaned_count = rnpvf_desc_unused(rx_ring);
+
+	while (likely(total_rx_packets < budget)) {
+		union rnp_rx_desc *rx_desc;
+		struct rnpvf_rx_buffer *rx_buffer;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNPVF_RX_BUFFER_WRITE) {
+			rnpvf_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+		rx_desc = RNPVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
+		rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc));
+		rx_debug_printk("  dd set: %s\n",
+				(rx_desc->wb.cmd & RNP_RXD_STAT_DD) ?
+					"Yes" :
+					"No");
+
+		if (!rnpvf_test_staterr(rx_desc, RNP_RXD_STAT_DD))
+			break;
+
+		rx_debug_printk(
+			"queue:%d  rx-desc:%d has-data len:%d next_to_clean %d\n",
+			rx_ring->rnp_queue_idx, rx_ring->next_to_clean,
+			rx_desc->wb.len, rx_ring->next_to_clean);
+
+		/* handle padding */
+		if ((adapter->priv_flags & RNPVF_PRIV_FLAG_FT_PADDING) &&
+		    (!(adapter->priv_flags &
+		       RNPVF_PRIV_FLAG_PADDING_DEBUG))) {
+			if (likely(rnpvf_test_staterr(rx_desc,
+						      RNP_RXD_STAT_EOP))) {
+				size = le16_to_cpu(rx_desc->wb.len) -
+				       le16_to_cpu(
+					       rx_desc->wb.padding_len);
+			} else {
+				size = le16_to_cpu(rx_desc->wb.len);
+			}
+		} else {
+			/* size should not be zero */
+			size = le16_to_cpu(rx_desc->wb.len);
+		}
+
+		if (!size)
+			break;
+
+		/*
+		 * Check for checksum errors first.  A packet can span
+		 * multiple descriptors and the hardware sets the error bits
+		 * in every descriptor of that packet, so this is safe; the
+		 * only remaining corner case is an SCTP packet whose last
+		 * descriptor is shorter than 60 bytes.
+		 */
+		if (rnpvf_check_csum_error(rx_ring, rx_desc, size,
+					   &driver_drop_packets)) {
+			cleaned_count++;
+			err_packets++;
+			if (err_packets + total_rx_packets > budget)
+				break;
+			continue;
+		}
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		rx_buffer = rnpvf_get_rx_buffer(rx_ring, rx_desc, size);
+
+		if (skb) {
+			rnpvf_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		} else if (ring_uses_build_skb(rx_ring)) {
+			skb = rnpvf_build_skb(rx_ring, rx_buffer, rx_desc,
+					      size);
+		} else {
+			skb = rnpvf_construct_skb(rx_ring, rx_buffer,
+						  rx_desc, size);
+		}
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
+			break;
+		}
+
+		rnpvf_put_rx_buffer(rx_ring, rx_buffer);
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (rnpvf_is_non_eop(rx_ring, rx_desc))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (rnpvf_cleanup_headers(rx_ring, rx_desc, skb)) {
+			/* we should clean it since we used all info in it */
+			rx_desc->wb.cmd = 0;
+			skb = NULL;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpvf_process_skb_fields(rx_ring, rx_desc, skb);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
+		rnpvf_rx_skb(q_vector, skb);
+		skb = NULL;
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	rx_ring->skb = skb;
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+
+	return total_rx_packets;
+}
+
+#else
+
+/**
+ * rnpvf_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean.  If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool rnpvf_is_non_eop(struct rnpvf_ring *rx_ring,
+			     union rnp_rx_desc *rx_desc,
+			     struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(RNPVF_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpvf_test_staterr(rx_desc, RNP_RXD_STAT_EOP)))
+		return false;
+	/* place skb in next buffer to be received */
+	rx_ring->rx_buffer_info[ntc].skb = skb;
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+	rx_ring->rx_stats.non_eop_descs++;
+
+	return true;
+}
+
+static struct sk_buff *rnpvf_build_skb(struct rnpvf_ring *rx_ring,
+				       struct rnpvf_rx_buffer *rx_buffer,
+				       struct xdp_buff *xdp,
+				       union rnp_rx_desc *rx_desc)
+{
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	void *va = xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpvf_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize =
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+		SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(va);
+#if L1_CACHE_BYTES < 128
+	prefetch(va + L1_CACHE_BYTES);
+#endif
+
+	/* build an skb around the page buffer */
+	skb = build_skb(xdp->data_hard_start, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	__skb_put(skb, xdp->data_end - xdp->data);
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
+
+	return skb;
+}
+
+static void rnpvf_rx_buffer_flip(struct rnpvf_ring *rx_ring,
+				 struct rnpvf_rx_buffer *rx_buffer,
+				 unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpvf_rx_pg_size(rx_ring) / 2;
+
+	rx_buffer->page_offset ^= truesize;
+#else
+	unsigned int truesize =
+		ring_uses_build_skb(rx_ring) ?
+			SKB_DATA_ALIGN(RNPVF_SKB_PAD + size) :
+			SKB_DATA_ALIGN(size);
+
+	rx_buffer->page_offset += truesize;
+#endif
+}
+
+static struct rnpvf_rx_buffer *
+rnpvf_get_rx_buffer(struct rnpvf_ring *rx_ring, union rnp_rx_desc *rx_desc,
+		    struct sk_buff **skb, const unsigned int size)
+{
+	struct rnpvf_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+	*skb = rx_buffer->skb;
+
+	rx_buf_dump("rx buf",
+		    page_address(rx_buffer->page) + rx_buffer->page_offset,
+		    rx_desc->wb.len);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+				      rx_buffer->page_offset, size,
+				      DMA_FROM_DEVICE);
+	/* skip_sync: */
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+static struct sk_buff *
+rnpvf_construct_skb(struct rnpvf_ring *rx_ring,
+		    struct rnpvf_rx_buffer *rx_buffer,
+		    struct xdp_buff *xdp, union rnp_rx_desc *rx_desc)
+{
+	unsigned int size = xdp->data_end - xdp->data;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = rnpvf_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize =
+		SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* prefetch first cache line of first page */
+	prefetch(xdp->data);
+#if L1_CACHE_BYTES < 128
+	prefetch(xdp->data + L1_CACHE_BYTES);
+#endif
+	/* Note, we get here by enabling legacy-rx via:
+	 *
+	 *    ethtool --set-priv-flags  legacy-rx on
+	 *
+	 * In this mode, we currently get 0 extra XDP headroom as
+	 * opposed to having legacy-rx off, where we process XDP
+	 * packets going to stack via rnpvf_build_skb(). The latter
+	 * provides us currently with 192 bytes of headroom.
+	 *
+	 * For rnpvf_construct_skb() mode it means that the
+	 * xdp->data_meta will always point to xdp->data, since
+	 * the helper cannot expand the head. Should this ever
+	 * change in future for legacy-rx mode on, then lets also
+	 * add xdp->data_meta handling here.
+	 */
+
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, RNPVF_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
+
+	prefetchw(skb->data);
+
+	if (size > RNPVF_RX_HDR_SIZE) {
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				xdp->data - page_address(rx_buffer->page),
+				size, truesize);
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
+	} else {
+		memcpy(__skb_put(skb, size), xdp->data,
+		       ALIGN(size, sizeof(long)));
+		rx_buffer->pagecnt_bias++;
+	}
+
+	return skb;
+}
+
+/**
+ * rnpvf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing.  The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int rnpvf_clean_rx_irq(struct rnpvf_q_vector *q_vector,
+			      struct rnpvf_ring *rx_ring, int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	unsigned int err_packets = 0;
+	unsigned int driver_drop_packets = 0;
+	struct rnpvf_adapter *adapter = q_vector->adapter;
+	u16 cleaned_count = rnpvf_desc_unused(rx_ring);
+	bool xdp_xmit = false;
+	struct xdp_buff xdp;
+
+	xdp.data = NULL;
+	xdp.data_end = NULL;
+
+	while (likely(total_rx_packets < budget)) {
+		union rnp_rx_desc *rx_desc;
+		struct rnpvf_rx_buffer *rx_buffer;
+		struct sk_buff *skb;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= RNPVF_RX_BUFFER_WRITE) {
+			rnpvf_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+		rx_desc = RNPVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		rx_buf_dump("rx-desc:", rx_desc, sizeof(*rx_desc));
+		rx_debug_printk("  dd set: %s\n",
+				(rx_desc->wb.cmd & RNP_RXD_STAT_DD) ?
+					"Yes" :
+					"No");
+
+		if (!rnpvf_test_staterr(rx_desc, RNP_RXD_STAT_DD))
+			break;
+
+		rx_debug_printk(
+			"queue:%d  rx-desc:%d has-data len:%d next_to_clean %d\n",
+			rx_ring->rnpvf_queue_idx, rx_ring->next_to_clean,
+			rx_desc->wb.len, rx_ring->next_to_clean);
+
+		/* handle padding */
+		if ((adapter->priv_flags & RNPVF_PRIV_FLAG_FT_PADDING) &&
+		    (!(adapter->priv_flags &
+		       RNPVF_PRIV_FLAG_PADDING_DEBUG))) {
+			if (likely(rnpvf_test_staterr(rx_desc,
+						      RNP_RXD_STAT_EOP))) {
+				size = le16_to_cpu(rx_desc->wb.len) -
+				       le16_to_cpu(
+					       rx_desc->wb.padding_len);
+			} else {
+				size = le16_to_cpu(rx_desc->wb.len);
+			}
+		} else {
+			/* size should not be zero */
+			size = le16_to_cpu(rx_desc->wb.len);
+		}
+
+		if (!size)
+			break;
+
+		/*
+		 * Check for checksum errors first.  A packet can span
+		 * multiple descriptors and the hardware sets the error bits
+		 * in every descriptor of that packet, so this is safe; the
+		 * only remaining corner case is an SCTP packet whose last
+		 * descriptor is shorter than 60 bytes.
+		 */
+		if (rnpvf_check_csum_error(rx_ring, rx_desc, size,
+					   &driver_drop_packets)) {
+			cleaned_count++;
+			err_packets++;
+			if (err_packets + total_rx_packets > budget)
+				break;
+			continue;
+		}
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+
+		rx_buffer =
+			rnpvf_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
+		if (!skb) {
+			xdp.data = page_address(rx_buffer->page) +
+				   rx_buffer->page_offset;
+			xdp.data_meta = xdp.data;
+			xdp.data_hard_start =
+				xdp.data - rnpvf_rx_offset(rx_ring);
+			xdp.data_end = xdp.data + size;
+		}
+
+		if (IS_ERR(skb)) {
+			if (PTR_ERR(skb) == -RNPVF_XDP_TX) {
+				xdp_xmit = true;
+				rnpvf_rx_buffer_flip(rx_ring, rx_buffer,
+						     size);
+			} else {
+				rx_buffer->pagecnt_bias++;
+			}
+			total_rx_packets++;
+			total_rx_bytes += size;
+		} else if (skb) {
+			rnpvf_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		} else if (ring_uses_build_skb(rx_ring)) {
+			skb = rnpvf_build_skb(rx_ring, rx_buffer, &xdp,
+					      rx_desc);
+		} else {
+			skb = rnpvf_construct_skb(rx_ring, rx_buffer, &xdp,
+						  rx_desc);
+		}
+
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
+			break;
+		}
+
+		rnpvf_put_rx_buffer(rx_ring, rx_buffer, skb);
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (rnpvf_is_non_eop(rx_ring, rx_desc, skb))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (rnpvf_cleanup_headers(rx_ring, rx_desc, skb))
+			continue;
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpvf_process_skb_fields(rx_ring, rx_desc, skb);
+
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+
+		rnpvf_rx_skb(q_vector, skb);
+
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+	return total_rx_packets;
+}
+#endif
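+
+/*
+ * Note: two receive clean-up paths are compiled above.  With
+ * OPTM_WITH_LPAGE the driver maps several fixed-size slices out of one
+ * high-order page and never recycles them; the default path flips and
+ * recycles half-page buffers and keeps per-buffer skb/xdp state.
+ */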
+
+/**
+ * rnpvf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * rnpvf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void rnpvf_configure_msix(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_q_vector *q_vector;
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpvf_ring *ring;
+
+		q_vector = adapter->q_vector[i];
+
+		rnpvf_for_each_ring(ring, q_vector->rx) {
+			rnpvf_set_ring_vector(adapter,
+					      ring->rnpvf_msix_off,
+					      q_vector->v_idx);
+		}
+	}
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+static inline void rnpvf_irq_enable_queues(struct rnpvf_q_vector *q_vector)
+{
+	struct rnpvf_ring *ring;
+
+	rnpvf_for_each_ring(ring, q_vector->rx) {
+		rnpvf_wr_reg(ring->dma_int_clr, RX_INT_MASK | TX_INT_MASK);
+		/* make sure the clear is posted before unmasking */
+		wmb();
+		rnpvf_wr_reg(ring->dma_int_mask,
+			     ~(RX_INT_MASK | TX_INT_MASK));
+	}
+}
+
+static inline void
+rnpvf_irq_disable_queues(struct rnpvf_q_vector *q_vector)
+{
+	struct rnpvf_ring *ring;
+
+	rnpvf_for_each_ring(ring, q_vector->tx) {
+		rnpvf_wr_reg(ring->dma_int_mask,
+			     (RX_INT_MASK | TX_INT_MASK));
+	}
+}
+
+/**
+ * rnpvf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void rnpvf_irq_enable(struct rnpvf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		rnpvf_irq_enable_queues(adapter->q_vector[i]);
+}
+
+static irqreturn_t rnpvf_msix_other(int irq, void *data)
+{
+	struct rnpvf_adapter *adapter = data;
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	dbg("\n\n !!! %s irq-coming !!!\n", __func__);
+
+	/* link is down by pf */
+	if (test_bit(__RNPVF_MBX_POLLING, &adapter->state))
+		goto NO_WORK_DONE;
+	if (!hw->mbx.ops.check_for_rst(hw, false)) {
+		if (test_bit(__RNPVF_REMOVE, &adapter->state))
+			dev_info(&adapter->pdev->dev, "rnpvf is removed\n");
+	}
+NO_WORK_DONE:
+
+	return IRQ_HANDLED;
+}
+
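+/*
+ * Arm the irq-miss check timer at half the configured check interval,
+ * reusing that half interval as the hrtimer slack.
+ */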
+static void rnpvf_htimer_start(struct rnpvf_q_vector *q_vector)
+{
+	unsigned long ns = q_vector->irq_check_usecs * NSEC_PER_USEC / 2;
+
+	hrtimer_start_range_ns(&q_vector->irq_miss_check_timer,
+			       ns_to_ktime(ns), ns, HRTIMER_MODE_REL);
+}
+
+static void rnpvf_htimer_stop(struct rnpvf_q_vector *q_vector)
+{
+	hrtimer_cancel(&q_vector->irq_miss_check_timer);
+}
+
+static irqreturn_t rnpvf_intr(int irq, void *data)
+{
+	struct rnpvf_adapter *adapter = data;
+	struct rnpvf_q_vector *q_vector = adapter->q_vector[0];
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	if (q_vector->vector_flags & RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK)
+		rnpvf_htimer_stop(q_vector);
+
+	/* disable interrupts on this vector */
+	rnpvf_irq_disable_queues(q_vector);
+
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
+	dbg("\n\n !!! %s irq-coming !!!\n", __func__);
+
+	/* link is down by pf */
+	if (test_bit(__RNPVF_MBX_POLLING, &adapter->state))
+		goto WORK_DONE;
+	if (!hw->mbx.ops.check_for_rst(hw, false)) {
+		if (test_bit(__RNPVF_REMOVE, &adapter->state))
+			dev_info(&adapter->pdev->dev, "rnpvf is removed\n");
+	}
+WORK_DONE:
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t rnpvf_msix_clean_rings(int irq, void *data)
+{
+	struct rnpvf_q_vector *q_vector = data;
+
+	if (q_vector->vector_flags & RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK)
+		rnpvf_htimer_stop(q_vector);
+	/* disable interrupts on this vector */
+	rnpvf_irq_disable_queues(q_vector);
+
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
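+/*
+ * Adaptive rx coalescing: based on how many packets the last poll cleaned,
+ * nudge new_rx_count (the packet-count interrupt threshold written to
+ * RNP_DMA_REG_RX_INT_DELAY_PKTCNT in rnpvf_poll()).  Repeated tiny polls
+ * (fewer than 5 packets) drop the threshold to 1, medium polls walk it
+ * toward roughly the cleaned count in exponentially growing steps, and
+ * polls of 30 or more packets reset it to max(64, adapter->rx_frames).
+ */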
+void update_rx_count(int cleaned, struct rnpvf_q_vector *q_vector)
+{
+	struct rnpvf_adapter *adapter = q_vector->adapter;
+
+	if ((cleaned) && (cleaned != q_vector->new_rx_count)) {
+		if (cleaned < 5) {
+			q_vector->small_times = 0;
+			q_vector->large_times = 0;
+			q_vector->too_small_times++;
+			if (q_vector->too_small_times >= 2) {
+				q_vector->new_rx_count = 1;
+			}
+		} else if (cleaned < 30) {
+			q_vector->too_small_times = 0;
+			q_vector->middle_time++;
+			if (cleaned < q_vector->new_rx_count) {
+				q_vector->small_times = 0;
+				q_vector->new_rx_count -=
+					(1 << (q_vector->large_times++));
+				if (q_vector->new_rx_count < 0)
+					q_vector->new_rx_count = 1;
+			} else {
+				q_vector->large_times = 0;
+
+				if (cleaned > 30) {
+					if (q_vector->new_rx_count !=
+					    (cleaned - 4))
+						q_vector->new_rx_count +=
+							(1 << (q_vector->small_times++));
+					if (q_vector->new_rx_count >=
+					    cleaned) {
+						q_vector->new_rx_count =
+							cleaned - 4;
+						q_vector->small_times = 0;
+					}
+				} else {
+					if (q_vector->new_rx_count !=
+					    (cleaned - 1))
+						q_vector->new_rx_count +=
+							(1 << (q_vector->small_times++));
+					if (q_vector->new_rx_count >=
+					    cleaned) {
+						q_vector->new_rx_count =
+							cleaned - 1;
+						q_vector->small_times = 0;
+					}
+				}
+			}
+		} else {
+			q_vector->too_small_times = 0;
+			q_vector->new_rx_count =
+				max_t(int, 64, adapter->rx_frames);
+			q_vector->small_times = 0;
+			q_vector->large_times = 0;
+		}
+	}
+}
+
+static void rnpvf_check_restart_tx(struct rnpvf_q_vector *q_vector,
+				   struct rnpvf_ring *tx_ring)
+{
+	struct rnpvf_adapter *adapter = q_vector->adapter;
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
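+	/*
+	 * Only wake the queue once at least TX_WAKE_THRESHOLD descriptors
+	 * are free, so it does not bounce between stopped and running.
+	 */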
+	if (likely(netif_carrier_ok(tx_ring->netdev) &&
+		   (rnpvf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index) &&
+		    !test_bit(__RNPVF_DOWN, &adapter->state)) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+			++tx_ring->tx_stats.restart_queue;
+		}
+	}
+}
+
+/**
+ * rnpvf_poll - NAPI polling callback
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function cleans all of the rings associated with a q_vector.
+ **/
+static int rnpvf_poll(struct napi_struct *napi, int budget)
+{
+	struct rnpvf_q_vector *q_vector =
+		container_of(napi, struct rnpvf_q_vector, napi);
+	struct rnpvf_adapter *adapter = q_vector->adapter;
+	struct rnpvf_ring *ring;
+	int per_ring_budget, work_done = 0;
+	bool clean_complete = true;
+	int cleaned_total = 0;
+
+	rnpvf_for_each_ring(ring, q_vector->tx)
+		clean_complete &= !!rnpvf_clean_tx_irq(q_vector, ring);
+
+	/* attempt to distribute budget to each queue fairly, but don't allow
+	 * the budget to go below 1 because we'll exit polling
+	 */
+	if (q_vector->rx.count > 1)
+		per_ring_budget = max(budget / q_vector->rx.count, 1);
+	else
+		per_ring_budget = budget;
+
+	rnpvf_for_each_ring(ring, q_vector->rx) {
+		int cleaned = 0;
+
+		cleaned = rnpvf_clean_rx_irq(q_vector, ring,
+					     per_ring_budget);
+
+		work_done += cleaned;
+		cleaned_total += cleaned;
+
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
+	}
+
+	if (test_bit(__RNPVF_DOWN, &adapter->state))
+		clean_complete = true;
+
+	if (!(q_vector->vector_flags & RNPVF_QVECTOR_FLAG_ITR_FEATURE))
+		update_rx_count(cleaned_total, q_vector);
+
+	/* If all work not completed, return budget and keep polling */
+	if (!clean_complete)
+		return budget;
+
+	/* all work done, exit the polling mode */
+	if (likely(napi_complete_done(napi, work_done))) {
+		/* try to do itr handle */
+		if (q_vector->vector_flags &
+		    RNPVF_QVECTOR_FLAG_ITR_FEATURE)
+			rnpvf_set_itr(q_vector);
+
+		if (!test_bit(__RNPVF_DOWN, &adapter->state)) {
+			rnpvf_irq_enable_queues(q_vector);
+			smp_mb();
+			/* ensure irqs are enabled before restarting tx */
+			if (q_vector->vector_flags &
+			    RNPVF_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS) {
+				rnpvf_for_each_ring(ring, q_vector->tx) {
+					rnpvf_check_restart_tx(q_vector,
+							       ring);
+					if (q_vector->new_rx_count !=
+					    q_vector->old_rx_count) {
+						ring_wr32(
+							ring,
+							RNP_DMA_REG_RX_INT_DELAY_PKTCNT,
+							q_vector->new_rx_count);
+						q_vector->old_rx_count =
+							q_vector->new_rx_count;
+					}
+				}
+			}
+			if (q_vector->vector_flags &
+					RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK)
+				rnpvf_htimer_start(q_vector);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * rnpvf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * rnpvf_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int rnpvf_request_msix_irqs(struct rnpvf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+	int i = 0;
+
+	DPRINTK(IFUP, INFO, "num_q_vectors:%d\n", adapter->num_q_vectors);
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpvf_q_vector *q_vector = adapter->q_vector[i];
+		struct msix_entry *entry =
+			&adapter->msix_entries[i + adapter->vector_off];
+
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name,
+				 sizeof(q_vector->name) - 1, "%s-%s-%d-%d",
+				 netdev->name, "TxRx", i, q_vector->v_idx);
+		} else {
+			WARN(!(q_vector->tx.ring && q_vector->rx.ring),
+			     "%s vector%d tx rx is null, v_idx:%d\n",
+			     netdev->name, i, q_vector->v_idx);
+			/* skip this unused q_vector */
+			continue;
+		}
+		err = request_irq(entry->vector, &rnpvf_msix_clean_rings,
+				  0, q_vector->name, q_vector);
+		if (err) {
+			rnpvf_err(
+				"%s:request_irq failed for MSIX interrupt:%d "
+				"Error: %d\n",
+				netdev->name, entry->vector, err);
+			goto free_queue_irqs;
+		}
+		irq_set_affinity_hint(entry->vector,
+				      &q_vector->affinity_mask);
+	}
+
+	return 0;
+
+free_queue_irqs:
+	while (i) {
+		i--;
+		irq_set_affinity_hint(
+			adapter->msix_entries[i + adapter->vector_off]
+				.vector,
+			NULL);
+		free_irq(adapter->msix_entries[i + adapter->vector_off]
+				 .vector,
+			 adapter->q_vector[i]);
+	}
+	return err;
+}
+
+static int rnpvf_free_msix_irqs(struct rnpvf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpvf_q_vector *q_vector = adapter->q_vector[i];
+		struct msix_entry *entry =
+			&adapter->msix_entries[i + adapter->vector_off];
+
+		/* free only the irqs that were actually requested */
+		if (!q_vector->rx.ring && !q_vector->tx.ring)
+			continue;
+
+		/* clear the affinity_mask in the IRQ descriptor */
+		irq_set_affinity_hint(entry->vector, NULL);
+		DPRINTK(IFDOWN, INFO, "free irq %s\n", q_vector->name);
+		free_irq(entry->vector, q_vector);
+	}
+
+	return 0;
+}
+
+/**
+ * rnpvf_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
+ * @type: 1 to update the Rx state, 0 for Tx
+ *
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ **/
+static void rnpvf_update_itr(struct rnpvf_q_vector *q_vector,
+			     struct rnpvf_ring_container *ring_container,
+			     int type)
+{
+	unsigned int itr = RNPVF_ITR_ADAPTIVE_MIN_USECS |
+			   RNPVF_ITR_ADAPTIVE_LATENCY;
+	unsigned int avg_wire_size, packets, bytes;
+	unsigned int packets_old;
+	unsigned long next_update = jiffies;
+	u32 old_itr;
+	u16 add_itr, add = 0;
+
+	if (type)
+		old_itr = q_vector->itr_rx;
+	else
+		old_itr = q_vector->itr_tx;
+
+	/* If we don't have any rings just leave ourselves set for maximum
+	 * possible latency so we take ourselves out of the equation.
+	 */
+	if (!ring_container->ring)
+		return;
+
+	packets_old = ring_container->total_packets_old;
+	packets = ring_container->total_packets;
+	bytes = ring_container->total_bytes;
+	add_itr = ring_container->add_itr;
+	/* If Rx and there are 1 to 23 packets and bytes are less than
+	 * 12112 assume insufficient data to use bulk rate limiting
+	 * approach. Instead we will focus on simply trying to target
+	 * receiving 8 times as much data in the next interrupt.
+	 */
+
+	if (!packets)
+		return;
+
+	if (packets && packets < 24 && bytes < 12112) {
+		itr = RNPVF_ITR_ADAPTIVE_LATENCY;
+
+		avg_wire_size = (bytes + packets * 24);
+		avg_wire_size =
+			clamp_t(unsigned int, avg_wire_size, 128, 12800);
+
+		goto adjust_for_speed;
+	}
+
+	/* Less than 48 packets we can assume that our current interrupt delay
+	 * is only slightly too low. As such we should increase it by a small
+	 * fixed amount.
+	 */
+	if (packets < 48) {
+		if (add_itr) {
+			if (packets_old < packets) {
+				itr = (old_itr >> 2) +
+				      RNPVF_ITR_ADAPTIVE_MIN_INC;
+				if (itr > RNPVF_ITR_ADAPTIVE_MAX_USECS)
+					itr = RNPVF_ITR_ADAPTIVE_MAX_USECS;
+				add = 1;
+
+				if (packets < 8)
+					itr += RNPVF_ITR_ADAPTIVE_LATENCY;
+				else
+					itr += ring_container->itr &
+					       RNPVF_ITR_ADAPTIVE_LATENCY;
+
+			} else {
+				itr = (old_itr >> 2) -
+				      RNPVF_ITR_ADAPTIVE_MIN_INC;
+				if (itr < RNPVF_ITR_ADAPTIVE_MIN_USECS)
+					itr = RNPVF_ITR_ADAPTIVE_MIN_USECS;
+			}
+
+		} else {
+			add = 1;
+			itr = (old_itr >> 2) + RNPVF_ITR_ADAPTIVE_MIN_INC;
+			if (itr > RNPVF_ITR_ADAPTIVE_MAX_USECS)
+				itr = RNPVF_ITR_ADAPTIVE_MAX_USECS;
+
+			/* If sample size is 0 - 7 we should probably switch
+			 * to latency mode instead of trying to control
+			 * things as though we are in bulk.
+			 *
+			 * Otherwise if the number of packets is less than 48
+			 * we should maintain whatever mode we are currently
+			 * in. The range between 8 and 48 is the cross-over
+			 * point between latency and bulk traffic.
+			 */
+			if (packets < 8)
+				itr += RNPVF_ITR_ADAPTIVE_LATENCY;
+			else
+				itr += ring_container->itr &
+				       RNPVF_ITR_ADAPTIVE_LATENCY;
+		}
+		goto clear_counts;
+	}
+
+	/* Between 48 and 96 is our "goldilocks" zone where we are working
+	 * out "just right". Just report that our current ITR is good for us.
+	 */
+	if (packets < 96) {
+		itr = old_itr >> 2;
+		goto clear_counts;
+	}
+	/* If packet count is 96 or greater we are likely looking at a slight
+	 * overrun of the delay we want. Try halving our delay to see if that
+	 * will cut the number of packets in half per interrupt.
+	 */
+	if (packets < 256) {
+		itr = old_itr >> 3;
+		if (itr < RNPVF_ITR_ADAPTIVE_MIN_USECS)
+			itr = RNPVF_ITR_ADAPTIVE_MIN_USECS;
+		goto clear_counts;
+	}
+
+	/* The paths below assume we are dealing with a bulk ITR since number
+	 * of packets is 256 or greater. We are just going to have to compute
+	 * a value and try to bring the count under control, though for smaller
+	 * packet sizes there isn't much we can do as NAPI polling will likely
+	 * be kicking in sooner rather than later.
+	 */
+	itr = RNPVF_ITR_ADAPTIVE_BULK;
+
+	/* If packet counts are 256 or greater we can assume we have a gross
+	 * overestimation of what the rate should be. Instead of trying to fine
+	 * tune it just use the formula below to try and dial in an exact value
+	 * give the current packet size of the frame.
+	 */
+	avg_wire_size = bytes / packets;
+
+	/* The following is a crude approximation of:
+	 *  wmem_default / (size + overhead) = desired_pkts_per_int
+	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+	 *
+	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
+	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+	 * formula down to
+	 *
+	 *  (170 * (size + 24)) / (size + 640) = ITR
+	 *
+	 * We first do some math on the packet size and then finally bitshift
+	 * by 8 after rounding up. We also have to account for PCIe link speed
+	 * difference as ITR scales based on this.
+	 */
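+	/*
+	 * Example: avg_wire_size = 200 falls in the second bucket below,
+	 * giving 200 * 40 + 2720 = 10720; after the divide-by-256 scaling
+	 * applied in adjust_for_speed that is roughly a 42us ITR, i.e.
+	 * about 24K interrupts per second.
+	 */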
+	if (avg_wire_size <= 60) {
+		/* Start at 50k ints/sec */
+		avg_wire_size = 5120;
+	} else if (avg_wire_size <= 316) {
+		/* 50K ints/sec to 16K ints/sec */
+		avg_wire_size *= 40;
+		avg_wire_size += 2720;
+	} else if (avg_wire_size <= 1084) {
+		/* 16K ints/sec to 9.2K ints/sec */
+		avg_wire_size *= 15;
+		avg_wire_size += 11452;
+	} else if (avg_wire_size <= 1980) {
+		/* 9.2K ints/sec to 8K ints/sec */
+		avg_wire_size *= 5;
+		avg_wire_size += 22420;
+	} else {
+		/* plateau at a limit of 8K ints/sec */
+		avg_wire_size = 32256;
+	}
+
+adjust_for_speed:
+	/* Resultant value is 256 times larger than it needs to be. This
+	 * gives us room to adjust the value as needed to either increase
+	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+	 *
+	 * Use addition as we have already recorded the new latency flag
+	 * for the ITR value.
+	 */
+	switch (q_vector->adapter->link_speed) {
+	case RNP_LINK_SPEED_10GB_FULL:
+	case RNP_LINK_SPEED_100_FULL:
+	default:
+		itr += DIV_ROUND_UP(avg_wire_size,
+				    RNPVF_ITR_ADAPTIVE_MIN_INC * 256) *
+		       RNPVF_ITR_ADAPTIVE_MIN_INC;
+		break;
+	case RNP_LINK_SPEED_1GB_FULL:
+	case RNP_LINK_SPEED_10_FULL:
+		itr += DIV_ROUND_UP(avg_wire_size,
+				    RNPVF_ITR_ADAPTIVE_MIN_INC * 64) *
+		       RNPVF_ITR_ADAPTIVE_MIN_INC;
+		break;
+	}
+
+	/* In the case of a latency specific workload only allow us to
+	 * reduce the ITR by at most 2us. By doing this we should dial
+	 * in so that our number of interrupts is no more than 2x the number
+	 * of packets for the least busy workload. So for example in the case
+	 * of a TCP workload the ack packets being received would set
+	 * the interrupt rate as they are a latency specific workload.
+	 */
+	if ((itr & RNPVF_ITR_ADAPTIVE_LATENCY) &&
+	    itr < ring_container->itr)
+		itr = ring_container->itr - RNPVF_ITR_ADAPTIVE_MIN_INC;
+
+clear_counts:
+	/* write back value */
+	ring_container->itr = itr;
+
+	/* next update should occur within next jiffy */
+	ring_container->next_update = next_update + 1;
+
+	ring_container->total_bytes = 0;
+	ring_container->total_packets_old = packets;
+	ring_container->add_itr = add;
+	ring_container->total_packets = 0;
+}
+
+/**
+ * rnpvf_write_eitr_rx - write EITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update EITR registers at runtime.  Hardware
+ * specific quirks/differences are taken care of here.
+ */
+void rnpvf_write_eitr_rx(struct rnpvf_q_vector *q_vector)
+{
+	struct rnpvf_adapter *adapter = q_vector->adapter;
+	struct rnpvf_hw *hw = &adapter->hw;
+	u32 itr_reg = q_vector->itr_rx >> 2;
+	struct rnpvf_ring *ring;
+
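+	/* itr_rx is stored left-shifted by 2 in rnpvf_set_itr(); the >> 2
+	 * above recovers microseconds, and usecstocount is presumably the
+	 * number of hardware clock ticks per microsecond used to program the
+	 * delay timer register below.
+	 */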
+	itr_reg = itr_reg * hw->usecstocount;
+
+	rnpvf_for_each_ring(ring, q_vector->rx)
+		ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, itr_reg);
+}
+
+static void rnpvf_set_itr(struct rnpvf_q_vector *q_vector)
+{
+	u32 new_itr_rx;
+
+	rnpvf_update_itr(q_vector, &q_vector->rx, 1);
+
+	new_itr_rx = q_vector->rx.itr;
+	/* Clear latency flag if set, shift into correct position */
+	new_itr_rx &= RNPVF_ITR_ADAPTIVE_MASK_USECS;
+	/* in 2us unit */
+	new_itr_rx <<= 2;
+	if (new_itr_rx != q_vector->itr_rx) {
+		/* save the algorithm value here */
+		q_vector->itr_rx = new_itr_rx;
+		rnpvf_write_eitr_rx(q_vector);
+	}
+}
+
+/**
+ * rnpvf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int rnpvf_request_irq(struct rnpvf_adapter *adapter)
+{
+	int err;
+
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		err = rnpvf_request_msix_irqs(adapter);
+	} else if (adapter->flags & RNPVF_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		err = request_irq(adapter->pdev->irq, rnpvf_intr, 0,
+				  adapter->netdev->name, adapter);
+	} else {
+		err = request_irq(adapter->pdev->irq, rnpvf_intr,
+				  IRQF_SHARED, adapter->netdev->name,
+				  adapter);
+	}
+	if (err)
+		rnpvf_err("request_irq failed, Error %d\n", err);
+
+	return err;
+}
+
+static void rnpvf_free_irq(struct rnpvf_adapter *adapter)
+{
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		rnpvf_free_msix_irqs(adapter);
+	} else if (adapter->flags & RNPVF_FLAG_MSI_ENABLED) {
+		/* in this case one for all */
+		free_irq(adapter->pdev->irq, adapter);
+	} else {
+		free_irq(adapter->pdev->irq, adapter);
+	}
+}
+
+/**
+ * rnpvf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void rnpvf_irq_disable(struct rnpvf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		rnpvf_irq_disable_queues(adapter->q_vector[i]);
+		if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+			synchronize_irq(
+				adapter->msix_entries[i +
+						      adapter->vector_off]
+					.vector);
+		} else {
+			synchronize_irq(adapter->pdev->irq);
+		}
+	}
+}
+
+/**
+ * rnpvf_configure_tx_ring - Configure Tx ring after Reset
+ * @adapter: board private structure
+ * @ring: structure containing ring specific data
+ *
+ * Configure the Tx descriptor ring after a reset.
+ **/
+void rnpvf_configure_tx_ring(struct rnpvf_adapter *adapter,
+			     struct rnpvf_ring *ring)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	/* disable queue to avoid issues while updating state */
+	if (!(ring->ring_flags & RNPVF_RING_SKIP_TX_START))
+		ring_wr32(ring, RNP_DMA_TX_START, 0);
+
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_LO,
+		  (u32)ring->dma);
+	/* dma high address is used for vfnum */
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_BASE_ADDR_HI,
+		  (u32)(((u64)ring->dma) >> 32) | (hw->vfnum << 24));
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_BUF_LEN, ring->count);
+
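+	/* The hardware head pointer is preserved across this reconfiguration,
+	 * so resynchronize the software indices and the tail register to it;
+	 * the ring then starts out empty from the hardware's point of view.
+	 */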
+	ring->next_to_clean =
+		ring_rd32(ring, RNP_DMA_REG_TX_DESC_BUF_HEAD);
+	ring->next_to_use = ring->next_to_clean;
+	ring->tail = ring->ring_addr + RNP_DMA_REG_TX_DESC_BUF_TAIL;
+	rnpvf_wr_reg(ring->tail, ring->next_to_use);
+
+	ring_wr32(ring, RNP_DMA_REG_TX_DESC_FETCH_CTRL,
+		  (8 << 0) /* max_water_flow */
+			  | (TSRN10_TX_DEFAULT_BURST
+			     << 16)); /* max num_descs per read */
+
+	ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER,
+		  adapter->tx_usecs * hw->usecstocount);
+	ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_PKTCNT,
+		  adapter->tx_frames);
+
+	if (!(ring->ring_flags & RNPVF_RING_SKIP_TX_START)) {
+		/* n500 should wait for tx_ready before enabling tx start */
+		int timeout = 0;
+		u32 status = 0;
+
+		do {
+			status = ring_rd32(ring, RNP_DMA_TX_READY);
+			usleep_range(100, 200);
+			timeout++;
+			rnpvf_dbg("wait %d tx ready to 1\n",
+				  ring->rnpvf_queue_idx);
+		} while ((status != 1) && (timeout < 100));
+
+		if (timeout >= 100)
+			rnpvf_err("wait tx ready timeout\n");
+		ring_wr32(ring, RNP_DMA_TX_START, 1);
+	}
+}
+
+/**
+ * rnpvf_configure_tx - Configure the VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void rnpvf_configure_tx(struct rnpvf_adapter *adapter)
+{
+	u32 i;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < (adapter->num_tx_queues); i++)
+		rnpvf_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+#define RNP_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+void rnpvf_disable_rx_queue(struct rnpvf_adapter *adapter,
+			    struct rnpvf_ring *ring)
+{
+	ring_wr32(ring, RNP_DMA_RX_START, 0);
+}
+
+void rnpvf_enable_rx_queue(struct rnpvf_adapter *adapter,
+			   struct rnpvf_ring *ring)
+{
+	ring_wr32(ring, RNP_DMA_RX_START, 1);
+}
+
+void rnpvf_configure_rx_ring(struct rnpvf_adapter *adapter,
+			     struct rnpvf_ring *ring)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	u64 desc_phy = ring->dma;
+
+	/* disable queue to avoid issues while updating state */
+	rnpvf_disable_rx_queue(adapter, ring);
+
+	/* set descriptor registers */
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_LO,
+		  (u32)desc_phy);
+	/* dma address high bits are used for vfnum */
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_BASE_ADDR_HI,
+		  ((u32)(desc_phy >> 32)) | (hw->vfnum << 24));
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_BUF_LEN, ring->count);
+
+	ring->tail = ring->ring_addr + RNP_DMA_REG_RX_DESC_BUF_TAIL;
+	ring->next_to_clean =
+		ring_rd32(ring, RNP_DMA_REG_RX_DESC_BUF_HEAD);
+	ring->next_to_use = ring->next_to_clean;
+
+#define SCATER_SIZE (96)
+	if (ring->ring_flags & RNPVF_RING_SCATER_SETUP) {
+		ring_wr32(ring, PCI_DMA_REG_RX_SCATTER_LENGTH,
+			  SCATER_SIZE);
+	}
+
+	ring_wr32(ring, RNP_DMA_REG_RX_DESC_FETCH_CTRL,
+		  0 | (TSRN10_RX_DEFAULT_LINE << 0) /* rx-desc-flow */
+			  | (TSRN10_RX_DEFAULT_BURST
+			     << 16) /* max-read-desc-cnt */
+	);
+
+	if (ring->ring_flags & RNPVF_RING_IRQ_MISS_FIX)
+		ring_wr32(ring, RNP_DMA_INT_TRIG,
+			  TX_INT_MASK | RX_INT_MASK);
+
+	ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER,
+		  adapter->rx_usecs * hw->usecstocount);
+	ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_PKTCNT,
+		  adapter->rx_frames);
+
+	rnpvf_alloc_rx_buffers(ring, rnpvf_desc_unused(ring));
+}
+
+static void rnpvf_set_rx_buffer_len(struct rnpvf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN * 3;
+	struct rnpvf_ring *rx_ring;
+	int i;
+
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		clear_bit(__RNPVF_RX_3K_BUFFER, &rx_ring->state);
+		clear_bit(__RNPVF_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+		set_bit(__RNPVF_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#ifdef OPTM_WITH_LPAGE
+		rx_ring->rx_page_buf_nums =
+			RNPVF_PAGE_BUFFER_NUMS(rx_ring);
+		rx_ring->rx_per_buf_mem = RNPVF_RXBUFFER_2K;
+#endif
+	}
+}
+
+/**
+ * rnpvf_configure_rx - Configure the VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void rnpvf_configure_rx(struct rnpvf_adapter *adapter)
+{
+	int i;
+
+	/* set_rx_buffer_len must be called before ring initialization */
+	rnpvf_set_rx_buffer_len(adapter);
+
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpvf_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+static int rnpvf_vlan_rx_add_vid(struct net_device *netdev,
+				 __always_unused __be16 proto, u16 vid)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int err = 0;
+
+	if ((vid) && (adapter->vf_vlan) && (vid != adapter->vf_vlan)) {
+		dev_err(&adapter->pdev->dev,
+			"only one vlan is supported per vf, or the pf has already set a vlan\n");
+		return 0;
+	}
+	if ((vid) && (!adapter->vf_vlan)) {
+		spin_lock_bh(&adapter->mbx_lock);
+		set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		/* add VID to filter table */
+		err = hw->mac.ops.set_vfta(hw, vid, 0, true);
+		clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		spin_unlock_bh(&adapter->mbx_lock);
+	}
+
+	/* translate error return types so error makes sense */
+	if (err == RNP_ERR_MBX)
+		return -EIO;
+
+	if (err == RNP_ERR_INVALID_ARGUMENT)
+		return -EACCES;
+
+	set_bit(vid, adapter->active_vlans);
+
+	if (vid)
+		hw->ops.set_veb_vlan(hw, vid, VFNUM(mbx, hw->vfnum));
+
+	return err;
+}
+
+static int rnpvf_vlan_rx_kill_vid(struct net_device *netdev,
+				  __always_unused __be16 proto, u16 vid)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	int err = -EOPNOTSUPP;
+
+	if (vid) {
+		spin_lock_bh(&adapter->mbx_lock);
+		set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		/* remove VID from filter table */
+		err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+		clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		spin_unlock_bh(&adapter->mbx_lock);
+		hw->ops.set_veb_vlan(hw, 0, VFNUM(mbx, hw->vfnum));
+	}
+
+	clear_bit(vid, adapter->active_vlans);
+
+	return 0;
+}
+
+/**
+ * rnpvf_vlan_strip_disable - helper to disable hw vlan stripping
+ * @adapter: driver data
+ */
+__maybe_unused static void
+rnpvf_vlan_strip_disable(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	hw->mac.ops.set_vlan_strip(hw, false);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+}
+
+/**
+ * rnpvf_vlan_strip_enable - helper to enable hw vlan stripping
+ * @adapter: driver data
+ */
+__maybe_unused static s32
+rnpvf_vlan_strip_enable(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	int err;
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	err = hw->mac.ops.set_vlan_strip(hw, true);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	return err;
+}
+
+static void rnpvf_restore_vlan(struct rnpvf_adapter *adapter)
+{
+	u16 vid;
+
+	rnpvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) {
+		rnpvf_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q),
+				      vid);
+	}
+}
+
+static int rnpvf_write_uc_addr_list(struct net_device *netdev)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+	int count = 0;
+
+	if ((netdev_uc_count(netdev)) > 10) {
+		pr_err("Too many unicast filters - No Space\n");
+		return -ENOSPC;
+	}
+
+	if (!netdev_uc_empty(netdev)) {
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, netdev) {
+			spin_lock_bh(&adapter->mbx_lock);
+			set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
+			clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+			spin_unlock_bh(&adapter->mbx_lock);
+			udelay(200);
+		}
+	} else {
+		/*
+		 * If the list is empty then send message to PF driver to
+		 * clear all macvlans on this VF.
+		 */
+		spin_lock_bh(&adapter->mbx_lock);
+		set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		hw->mac.ops.set_uc_addr(hw, 0, NULL);
+		clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		spin_unlock_bh(&adapter->mbx_lock);
+		udelay(200);
+	}
+
+	return count;
+}
+
+/**
+ * rnpvf_set_rx_mode - Multicast and unicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_method entry point is called whenever the multicast address
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and configuring requested unicast filters.
+ **/
+static void rnpvf_set_rx_mode(struct net_device *netdev)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+	netdev_features_t features = netdev->features;
+
+	if ((netdev->flags & IFF_PROMISC) && (!adapter->promisc_mode)) {
+		adapter->promisc_mode = true;
+		spin_lock_bh(&adapter->mbx_lock);
+		set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		/* reprogram multicast list */
+		hw->mac.ops.set_promisc_mode(hw, true);
+		clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		spin_unlock_bh(&adapter->mbx_lock);
+	}
+
+	if ((!(netdev->flags & IFF_PROMISC)) && (adapter->promisc_mode)) {
+		adapter->promisc_mode = false;
+		spin_lock_bh(&adapter->mbx_lock);
+		set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		/* reprogram multicast list */
+		hw->mac.ops.set_promisc_mode(hw, false);
+		clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+		spin_unlock_bh(&adapter->mbx_lock);
+	}
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	/* reprogram multicast list */
+	hw->mac.ops.update_mc_addr_list(hw, netdev);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	rnpvf_write_uc_addr_list(netdev);
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		rnpvf_vlan_strip_enable(adapter);
+	else
+		rnpvf_vlan_strip_disable(adapter);
+}
+
+static void rnpvf_napi_enable_all(struct rnpvf_adapter *adapter)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_enable(&adapter->q_vector[q_idx]->napi);
+}
+
+static void rnpvf_napi_disable_all(struct rnpvf_adapter *adapter)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+		napi_disable(&adapter->q_vector[q_idx]->napi);
+}
+
+static void rnpvf_configure_veb(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+	u32 ring;
+	u8 *mac;
+
+	if (is_valid_ether_addr(hw->mac.addr))
+		mac = hw->mac.addr;
+	else
+		mac = hw->mac.perm_addr;
+
+	ring = adapter->rx_ring[0]->rnpvf_queue_idx;
+	ring |= ((0x80 | vfnum) << 8);
+
+	hw->ops.set_veb_mac(hw, mac, vfnum, ring);
+}
+
+static void rnpvf_configure(struct rnpvf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	rnpvf_set_rx_mode(netdev);
+	rnpvf_restore_vlan(adapter);
+	rnpvf_configure_tx(adapter);
+	rnpvf_configure_rx(adapter);
+	rnpvf_configure_veb(adapter);
+}
+
+#define RNP_MAX_RX_DESC_POLL 10
+
+static void rnpvf_save_reset_stats(struct rnpvf_adapter *adapter)
+{
+	/* Only save pre-reset stats if there are some */
+	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
+		adapter->stats.saved_reset_vfgprc +=
+			adapter->stats.vfgprc - adapter->stats.base_vfgprc;
+		adapter->stats.saved_reset_vfgptc +=
+			adapter->stats.vfgptc - adapter->stats.base_vfgptc;
+		adapter->stats.saved_reset_vfgorc +=
+			adapter->stats.vfgorc - adapter->stats.base_vfgorc;
+		adapter->stats.saved_reset_vfgotc +=
+			adapter->stats.vfgotc - adapter->stats.base_vfgotc;
+		adapter->stats.saved_reset_vfmprc +=
+			adapter->stats.vfmprc - adapter->stats.base_vfmprc;
+	}
+}
+
+static void rnpvf_up_complete(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	int i;
+
+	rnpvf_configure_msix(adapter);
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+
+	if (is_valid_ether_addr(hw->mac.addr))
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+	else
+		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	rnpvf_napi_enable_all(adapter);
+
+	/* clear any pending interrupts */
+	rnpvf_irq_enable(adapter);
+
+	/* enable transmits */
+	netif_tx_start_all_queues(adapter->netdev);
+
+	rnpvf_save_reset_stats(adapter);
+
+	hw->mac.get_link_status = 1;
+	mod_timer(&adapter->watchdog_timer, jiffies);
+
+	clear_bit(__RNPVF_DOWN, &adapter->state);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpvf_enable_rx_queue(adapter, adapter->rx_ring[i]);
+}
+
+void rnpvf_reinit_locked(struct rnpvf_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	/* put off any impending NetWatchDogTimeout */
+
+	while (test_and_set_bit(__RNPVF_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+
+	rnpvf_down(adapter);
+	rnpvf_reset(adapter);
+	rnpvf_up(adapter);
+	clear_bit(__RNPVF_RESETTING, &adapter->state);
+}
+
+void rnpvf_up(struct rnpvf_adapter *adapter)
+{
+	rnpvf_configure(adapter);
+	rnpvf_up_complete(adapter);
+}
+
+void rnpvf_reset(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	if (hw->mac.ops.reset_hw(hw))
+		hw_dbg(hw, "PF still resetting\n");
+	else
+		hw->mac.ops.init_hw(hw);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+
+	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+		       netdev->addr_len);
+	}
+}
+
+/**
+ * rnpvf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void rnpvf_clean_tx_ring(struct rnpvf_adapter *adapter,
+				struct rnpvf_ring *tx_ring)
+{
+	struct rnpvf_tx_buffer *tx_buffer_info;
+	unsigned long size;
+	u16 i;
+
+	BUG_ON(tx_ring == NULL);
+
+	/* ring already cleared, nothing to do */
+	if (!tx_ring->tx_buffer_info)
+		return;
+
+	/* Free all the Tx ring sk_buffs */
+	for (i = 0; i < tx_ring->count; i++) {
+		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		rnpvf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+	}
+
+	netdev_tx_reset_queue(txring_txq(tx_ring));
+
+	size = sizeof(struct rnpvf_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+}
+
+/**
+ * rnpvf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void rnpvf_clean_all_rx_rings(struct rnpvf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpvf_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ * rnpvf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void rnpvf_clean_all_tx_rings(struct rnpvf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		rnpvf_clean_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+void rnpvf_down(struct rnpvf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	/* signal that we are down to the interrupt handler */
+	set_bit(__RNPVF_DOWN, &adapter->state);
+	set_bit(__RNPVF_LINK_DOWN, &adapter->state);
+
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpvf_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
+	usleep_range(1000, 2000);
+
+	netif_tx_stop_all_queues(netdev);
+
+	/* call carrier off first to avoid false dev_watchdog timeouts */
+	netif_carrier_off(netdev);
+
+	netif_tx_disable(netdev);
+
+	rnpvf_irq_disable(adapter);
+
+	rnpvf_napi_disable_all(adapter);
+
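+	/* Wait for the hardware to finish transmitting any queued descriptors
+	 * (head catching up with tail) before the queues are torn down; with
+	 * up to 100 polls of 10-20 ms this bounds the wait at roughly 1-2
+	 * seconds per ring.
+	 */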
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpvf_ring *tx_ring = adapter->tx_ring[i];
+
+		if (!(tx_ring->ring_flags & RNPVF_RING_SKIP_TX_START)) {
+			int head, tail;
+			int timeout = 0;
+
+			head = ring_rd32(tx_ring,
+					 RNP_DMA_REG_TX_DESC_BUF_HEAD);
+			tail = ring_rd32(tx_ring,
+					 RNP_DMA_REG_TX_DESC_BUF_TAIL);
+
+			while (head != tail) {
+				usleep_range(10000, 20000);
+
+				head = ring_rd32(
+					tx_ring,
+					RNP_DMA_REG_TX_DESC_BUF_HEAD);
+				tail = ring_rd32(
+					tx_ring,
+					RNP_DMA_REG_TX_DESC_BUF_TAIL);
+				timeout++;
+				if (timeout >= 100) {
+					rnpvf_err("vf wait tx done timeout\n");
+					break;
+				}
+			}
+		}
+	}
+
+	/* disable transmits in the hardware now that interrupts are off */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpvf_ring *tx_ring = adapter->tx_ring[i];
+
+		if (!(tx_ring->ring_flags & RNPVF_RING_SKIP_TX_START))
+			ring_wr32(tx_ring, RNP_DMA_TX_START, 0);
+	}
+
+	netif_carrier_off(netdev);
+	rnpvf_clean_all_tx_rings(adapter);
+	rnpvf_clean_all_rx_rings(adapter);
+}
+
+static netdev_features_t rnpvf_fix_features(struct net_device *netdev,
+					    netdev_features_t features)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+
+	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+	if (!(features & NETIF_F_RXCSUM)) {
+		features &= ~NETIF_F_LRO;
+		adapter->flags &= (~RNPVF_FLAG_RX_CHKSUM_ENABLED);
+	} else {
+		adapter->flags |= RNPVF_FLAG_RX_CHKSUM_ENABLED;
+	}
+
+	/* the vf does not support changing the vlan filter */
+	if ((netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) !=
+	    (features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+		if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+		else
+			features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+	}
+	if (adapter->flags & RNPVF_FLAG_PF_SET_VLAN) {
+		/* in this mode, disable tx/rx vlan offload */
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
+			adapter->priv_flags |= RNPVF_FLAG_RX_VLAN_OFFLOAD;
+		else
+			adapter->priv_flags &= ~RNPVF_FLAG_RX_VLAN_OFFLOAD;
+
+		features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+		if (features & NETIF_F_HW_VLAN_CTAG_TX)
+			adapter->priv_flags |= RNPVF_FLAG_TX_VLAN_OFFLOAD;
+		else
+			adapter->priv_flags &= ~RNPVF_FLAG_TX_VLAN_OFFLOAD;
+
+		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+	}
+
+	/* if the pf turned fcs on, we must disable rx checksum offload */
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_FCS_ON)
+		features &= ~NETIF_F_RXCSUM;
+
+	return features;
+}
+
+static int rnpvf_set_features(struct net_device *netdev,
+			      netdev_features_t features)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	netdev_features_t changed = netdev->features ^ features;
+	bool need_reset = false;
+	int err = 0;
+
+	netdev->features = features;
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+		if (features & NETIF_F_HW_VLAN_CTAG_RX) {
+			/* if enabling vlan stripping fails, drop the flag */
+			if (rnpvf_vlan_strip_enable(adapter))
+				features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+		} else {
+			rnpvf_vlan_strip_disable(adapter);
+		}
+	}
+
+	netdev->features = features;
+
+	if (need_reset)
+		rnpvf_reset(adapter);
+
+	return err;
+}
+
+/**
+ * rnpvf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+__maybe_unused static void rnpvf_tx_timeout(struct net_device *netdev)
+{
+}
+
+/**
+ * rnpvf_sw_init - Initialize general software structures
+ * (struct rnpvf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * rnpvf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int rnpvf_sw_init(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	/* PCI config space info */
+	hw->pdev = pdev;
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+	hw->mbx.ops.init_params(hw);
+
+	/* initialize default pause (flow control) mode */
+	hw->fc.requested_mode = rnp_fc_none;
+	hw->fc.current_mode = rnp_fc_none;
+
+	/* the vf's other irq handler is not registered yet */
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		dev_info(
+			&pdev->dev,
+			"PF still in reset state.  Is the PF interface up?\n");
+		hw->adapter_stopped = false;
+		hw->link = false;
+		hw->speed = 0;
+		hw->usecstocount = 500;
+		return err;
+	} else {
+		err = hw->mac.ops.init_hw(hw);
+		if (err) {
+			pr_err("init_hw failed: %d\n", err);
+			goto out;
+		}
+		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+		if (err)
+			dev_info(&pdev->dev,
+				 "Error reading MAC address\n");
+		else if (is_zero_ether_addr(adapter->hw.mac.addr))
+			dev_info(
+				&pdev->dev,
+				"MAC address not assigned by administrator.\n");
+		eth_hw_addr_set(netdev, hw->mac.addr);
+	}
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		dev_info(&pdev->dev, "Assigning random MAC address\n");
+		eth_hw_addr_random(netdev);
+		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
+	}
+	/* get info from pf */
+	err = hw->mac.ops.get_queues(hw);
+	if (err) {
+		dev_info(&pdev->dev,
+			 "Failed to get queue info, using defaults\n");
+		hw->mac.max_tx_queues = MAX_TX_QUEUES;
+		hw->mac.max_rx_queues = MAX_RX_QUEUES;
+		hw->queue_ring_base =
+			(hw->vfnum & VF_NUM_MASK) * MAX_RX_QUEUES;
+	}
+
+	dev_info(&pdev->dev, "queue_ring_base %d num %d\n",
+		 hw->queue_ring_base, hw->mac.max_tx_queues);
+	err = hw->mac.ops.get_mtu(hw);
+	if (err) {
+		dev_info(&pdev->dev, "Failed to get mtu, using default\n");
+		hw->mtu = 1500;
+	}
+	/* lock to protect mailbox accesses */
+	spin_lock_init(&adapter->mbx_lock);
+
+	/* set default ring sizes */
+	adapter->tx_ring_item_count = hw->tx_items_count;
+	adapter->rx_ring_item_count = hw->rx_items_count;
+	adapter->dma_channels =
+		min_t(int, hw->mac.max_tx_queues, hw->mac.max_rx_queues);
+	DPRINTK(PROBE, INFO, "tx parameters %d, rx parameters %d\n",
+		adapter->tx_ring_item_count, adapter->rx_ring_item_count);
+
+	/* set default tx/rx soft count */
+	adapter->adaptive_rx_coal = 1;
+	adapter->adaptive_tx_coal = 1;
+	adapter->napi_budge = RNPVF_DEFAULT_RX_WORK;
+	adapter->tx_work_limit = RNPVF_DEFAULT_TX_WORK;
+	adapter->rx_usecs = RNPVF_PKT_TIMEOUT;
+	adapter->rx_frames = RNPVF_RX_PKT_POLL_BUDGET;
+	adapter->tx_usecs = RNPVF_PKT_TIMEOUT_TX;
+	adapter->tx_frames = RNPVF_TX_PKT_POLL_BUDGET;
+
+	set_bit(__RNPVF_DOWN, &adapter->state);
+	return 0;
+
+out:
+	return err;
+}
+
+static int rnpvf_acquire_msix_vectors(struct rnpvf_adapter *adapter,
+				      int vectors)
+{
+	int err = 0;
+	int vector_threshold;
+
+	/* We'll want at least 2 (vector_threshold):
+	 * 1) TxQ[0] + RxQ[0] handler
+	 * 2) Other (Link Status Change, etc.)
+	 */
+	vector_threshold = MIN_MSIX_COUNT;
+
+	/* The more we get, the more we will assign to Tx/Rx Cleanup
+	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+	 * Right now, we simply care about how many we'll get; we'll
+	 * set them up later while requesting irq's.
+	 */
+	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+				    vectors, vectors);
+	if (err > 0) {
+		/* success: the return value is the number of vectors granted */
+		vectors = err;
+		err = 0;
+	}
+	DPRINTK(PROBE, INFO, "err:%d, vectors:%d\n", err, vectors);
+	if (err < 0) {
+		dev_err(&adapter->pdev->dev,
+			"Unable to allocate MSI-X interrupts\n");
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else {
+		/*
+		 * Adjust for only the vectors we'll use, which is minimum
+		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+		 * vectors we were allocated.
+		 */
+		adapter->num_msix_vectors = vectors;
+	}
+
+	return err;
+}
+
+/**
+ * rnpvf_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine.  The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features.  This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void rnpvf_set_num_queues(struct rnpvf_adapter *adapter)
+{
+	/* Start with base case */
+	adapter->num_rx_queues = adapter->dma_channels;
+	adapter->num_tx_queues = adapter->dma_channels;
+}
+
+/**
+ * rnpvf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int rnpvf_set_interrupt_capability(struct rnpvf_adapter *adapter)
+{
+	int err = 0;
+	int vector, v_budget;
+	int irq_mode_back = adapter->irq_mode;
+	/*
+	 * It's easy to be greedy for MSI-X vectors, but it really
+	 * doesn't do us much good if we have a lot more vectors
+	 * than CPUs.  So let's be conservative and only ask for
+	 * (roughly) the same number of vectors as there are CPUs.
+	 * The default is to use pairs of vectors.
+	 */
+	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+	v_budget = min_t(int, v_budget, num_online_cpus());
+	v_budget += NON_Q_VECTORS;
+	v_budget = min_t(int, v_budget, MAX_MSIX_VECTORS);
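+	/* For example (assuming NON_Q_VECTORS is 1): 2 tx/rx queue pairs on a
+	 * host with 8 online CPUs request v_budget = min(2, 8) + 1 = 3 MSI-X
+	 * vectors, subject to the MAX_MSIX_VECTORS cap.
+	 */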
+
+	if (adapter->irq_mode == irq_mode_msix) {
+		/* A failure in MSI-X entry allocation isn't fatal, but it
+		 * does mean we disable MSI-X capabilities of the adapter.
+		 */
+		adapter->msix_entries = kcalloc(
+			v_budget, sizeof(struct msix_entry), GFP_KERNEL);
+		if (!adapter->msix_entries) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		for (vector = 0; vector < v_budget; vector++)
+			adapter->msix_entries[vector].entry = vector;
+
+		err = rnpvf_acquire_msix_vectors(adapter, v_budget);
+		if (!err) {
+			adapter->vector_off = NON_Q_VECTORS;
+			adapter->num_q_vectors =
+				adapter->num_msix_vectors - NON_Q_VECTORS;
+			DPRINTK(PROBE, INFO,
+				"adapter%d alloc vectors: cnt:%d [%d~%d] num_msix_vectors:%d\n",
+				adapter->bd_number, v_budget,
+				adapter->vector_off,
+				adapter->vector_off + v_budget - 1,
+				adapter->num_msix_vectors);
+			adapter->flags |= RNPVF_FLAG_MSIX_ENABLED;
+			goto out;
+		}
+		kfree(adapter->msix_entries);
+
+		if (adapter->flags & RNPVF_FLAG_MSI_CAPABLE) {
+			adapter->irq_mode = irq_mode_msi;
+			pr_info("acquire msix failed, try to use msi\n");
+		}
+
+	} else {
+		pr_info("adapter not in msix mode\n");
+	}
+
+	if (adapter->irq_mode == irq_mode_msi) {
+		err = pci_enable_msi(adapter->pdev);
+		if (err) {
+			pr_info("Failed to allocate MSI interrupt, falling back to legacy, error %d\n",
+				err);
+		} else {
+			/* msi mode use only 1 irq */
+			adapter->flags |= RNPVF_FLAG_MSI_ENABLED;
+		}
+	}
+	/* write back origin irq_mode */
+	adapter->irq_mode = irq_mode_back;
+	/* legacy and msi only 1 vectors */
+	adapter->num_q_vectors = 1;
+
+out:
+	return err;
+}
+
+static void rnpvf_add_ring(struct rnpvf_ring *ring,
+			   struct rnpvf_ring_container *head)
+{
+	ring->next = head->ring;
+	head->ring = ring;
+	head->count++;
+}
+
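+/* Safety net for lost interrupts, run from an hrtimer: with the queue
+ * interrupt masked, look for completed TX descriptors or ready RX
+ * descriptors that have not been reported and, if any are found, schedule
+ * NAPI directly; otherwise re-enable the queue interrupt. A zero-length
+ * ready RX descriptor is treated as a fault and a PF reset is requested.
+ */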
+static enum hrtimer_restart irq_miss_check(struct hrtimer *hrtimer)
+{
+	struct rnpvf_q_vector *q_vector;
+	struct rnpvf_ring *ring;
+	struct rnp_tx_desc *eop_desc;
+	struct rnpvf_adapter *adapter;
+	struct rnpvf_hw *hw;
+
+	int tx_next_to_clean;
+	int tx_next_to_use;
+
+	struct rnpvf_tx_buffer *tx_buffer;
+	union rnp_rx_desc *rx_desc;
+
+	q_vector = container_of(hrtimer, struct rnpvf_q_vector,
+				irq_miss_check_timer);
+	adapter = q_vector->adapter;
+	hw = &adapter->hw;
+
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__RNPVF_DOWN, &adapter->state) ||
+	    test_bit(__RNPVF_RESETTING, &adapter->state))
+		goto do_self_napi;
+
+	rnpvf_irq_disable_queues(q_vector);
+	/* check tx irq miss */
+	rnpvf_for_each_ring(ring, q_vector->tx) {
+		tx_next_to_clean = ring->next_to_clean;
+		tx_next_to_use = ring->next_to_use;
+		/* have work to do */
+		if (tx_next_to_use != tx_next_to_clean) {
+			tx_buffer =
+				&ring->tx_buffer_info[tx_next_to_clean];
+			eop_desc = tx_buffer->next_to_watch;
+			/* have tx done */
+			if (eop_desc) {
+				if ((eop_desc->cmd &
+				     cpu_to_le16(RNP_TXD_STAT_DD))) {
+					if (q_vector->new_rx_count !=
+					    q_vector->old_rx_count) {
+						ring_wr32(
+							ring,
+							RNP_DMA_REG_RX_INT_DELAY_PKTCNT,
+							q_vector->new_rx_count);
+						q_vector->old_rx_count =
+							q_vector->new_rx_count;
+					}
+					napi_schedule_irqoff(
+						&q_vector->napi);
+					goto do_self_napi;
+				}
+			}
+		}
+	}
+
+	/* check rx irq */
+	rnpvf_for_each_ring(ring, q_vector->rx) {
+		rx_desc = RNPVF_RX_DESC(ring, ring->next_to_clean);
+
+		if (rx_desc) {
+			if (rnpvf_test_staterr(rx_desc, RNP_RXD_STAT_DD)) {
+				unsigned int size;
+
+				size = le16_to_cpu(rx_desc->wb.len) -
+				       le16_to_cpu(
+					       rx_desc->wb.padding_len);
+
+				if (size) {
+					if (q_vector->new_rx_count !=
+					    q_vector->old_rx_count) {
+						ring_wr32(
+							ring,
+							RNP_DMA_REG_RX_INT_DELAY_PKTCNT,
+							q_vector->new_rx_count);
+						q_vector->old_rx_count =
+							q_vector->new_rx_count;
+					}
+					napi_schedule_irqoff(
+						&q_vector->napi);
+				} else {
+					adapter->flags |=
+						RNPVF_FLAG_PF_RESET_REQ;
+				}
+				goto do_self_napi;
+			}
+		}
+	}
+	rnpvf_irq_enable_queues(q_vector);
+do_self_napi:
+	return HRTIMER_NORESTART;
+}
+
+static int rnpvf_alloc_q_vector(struct rnpvf_adapter *adapter,
+				int eth_queue_idx, int rnpvf_vector,
+				int rnpvf_queue, int r_count, int step)
+{
+	struct rnpvf_q_vector *q_vector;
+	struct rnpvf_ring *ring;
+	struct rnpvf_hw *hw = &adapter->hw;
+	int node = NUMA_NO_NODE;
+	int cpu = -1;
+	int ring_count, size;
+	int txr_count, rxr_count, idx;
+	int rxr_idx = rnpvf_queue, txr_idx = rnpvf_queue;
+
+	DPRINTK(PROBE, INFO,
+		"eth_queue_idx:%d rnpvf_vector:%d(off:%d) ring:%d "
+		"ring_cnt:%d, step:%d\n",
+		eth_queue_idx, rnpvf_vector, adapter->vector_off,
+		rnpvf_queue, r_count, step);
+
+	txr_count = rxr_count = r_count;
+
+	ring_count = txr_count + rxr_count;
+	size = sizeof(struct rnpvf_q_vector) +
+	       (sizeof(struct rnpvf_ring) * ring_count);
+
+	if (cpu_online(rnpvf_vector)) {
+		cpu = rnpvf_vector;
+		node = cpu_to_node(cpu);
+	}
+
+	/* allocate q_vector and rings */
+	q_vector = kzalloc_node(size, GFP_KERNEL, node);
+	if (!q_vector)
+		q_vector = kzalloc(size, GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	/* setup affinity mask and node */
+	if (cpu != -1)
+		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+	q_vector->numa_node = node;
+
+	netif_napi_add_weight(adapter->netdev, &q_vector->napi, rnpvf_poll,
+			      adapter->napi_budge);
+	/* tie q_vector and adapter together */
+	adapter->q_vector[rnpvf_vector - adapter->vector_off] = q_vector;
+	q_vector->adapter = adapter;
+	q_vector->v_idx = rnpvf_vector;
+
+	/* initialize pointer to rings */
+	ring = q_vector->ring;
+
+	for (idx = 0; idx < txr_count; idx++) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		rnpvf_add_ring(ring, &q_vector->tx);
+
+		/* apply Tx specific ring traits */
+		ring->count = adapter->tx_ring_item_count;
+		ring->queue_index = eth_queue_idx + idx;
+		ring->rnpvf_queue_idx = txr_idx;
+
+		if (hw->board_type == rnp_board_n10) {
+			ring->ring_flags |= RNPVF_RING_SKIP_TX_START;
+			ring->ring_addr = hw->hw_addr + RNP_RING_BASE_N10 +
+					  RNP_RING_OFFSET(txr_idx);
+			ring->rnpvf_msix_off = txr_idx;
+		}
+
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->dma_int_stat + 4;
+		ring->dma_int_clr = ring->dma_int_stat + 8;
+		ring->device_id = adapter->pdev->device;
+		ring->vfnum = hw->vfnum;
+
+		/* assign ring to adapter */
+		adapter->tx_ring[ring->queue_index] = ring;
+		dbg("adapter->tx_ringp[%d] <= %p\n", ring->queue_index,
+		    ring);
+
+		/* update count and index */
+		txr_idx += step;
+
+		DPRINTK(PROBE, INFO,
+			"vector[%d] <--RNP TxRing:%d, eth_queue:%d\n",
+			rnpvf_vector, ring->rnpvf_queue_idx,
+			ring->queue_index);
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	for (idx = 0; idx < rxr_count; idx++) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Rx values */
+		rnpvf_add_ring(ring, &q_vector->rx);
+
+		/* apply Rx specific ring traits */
+		ring->count = adapter->rx_ring_item_count;
+		ring->queue_index = eth_queue_idx + idx;
+		ring->rnpvf_queue_idx = rxr_idx;
+
+		if (hw->board_type == rnp_board_n10) {
+			ring->ring_addr = hw->hw_addr + RNP_RING_BASE_N10 +
+					  RNP_RING_OFFSET(rxr_idx);
+			ring->rnpvf_msix_off = rxr_idx;
+		}
+		ring->dma_int_stat = ring->ring_addr + RNP_DMA_INT_STAT;
+		ring->dma_int_mask = ring->dma_int_stat + 4;
+		ring->dma_int_clr = ring->dma_int_stat + 8;
+		ring->device_id = adapter->pdev->device;
+		ring->vfnum = hw->vfnum;
+
+		/* assign ring to adapter */
+		adapter->rx_ring[ring->queue_index] = ring;
+		DPRINTK(PROBE, INFO,
+			"vector[%d] <--RNP RxRing:%d, eth_queue:%d\n",
+			rnpvf_vector, ring->rnpvf_queue_idx,
+			ring->queue_index);
+
+		/* update count and index */
+		rxr_idx += step;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	if (hw->board_type == rnp_board_n10) {
+		q_vector->vector_flags |=
+			RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK;
+		q_vector->vector_flags |=
+			RNPVF_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS;
+		/* initialize timer */
+		q_vector->irq_check_usecs = 1000;
+		hrtimer_init(&q_vector->irq_miss_check_timer,
+			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		q_vector->irq_miss_check_timer.function = irq_miss_check;
+	}
+
+	return 0;
+}
+
+static void rnpvf_free_q_vector(struct rnpvf_adapter *adapter, int v_idx)
+{
+	struct rnpvf_q_vector *q_vector;
+	struct rnpvf_ring *ring;
+
+	dbg("v_idx:%d\n", v_idx);
+
+	q_vector = adapter->q_vector[v_idx];
+
+	rnpvf_for_each_ring(ring, q_vector->tx)
+		adapter->tx_ring[ring->queue_index] = NULL;
+
+	rnpvf_for_each_ring(ring, q_vector->rx)
+		adapter->rx_ring[ring->queue_index] = NULL;
+
+	adapter->q_vector[v_idx] = NULL;
+	netif_napi_del(&q_vector->napi);
+
+	if (q_vector->vector_flags & RNPVF_QVECTOR_FLAG_IRQ_MISS_CHECK)
+		rnpvf_htimer_stop(q_vector);
+
+	/*
+	 * rnpvf_get_stats64() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
+	 */
+	kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * rnpvf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int rnpvf_alloc_q_vectors(struct rnpvf_adapter *adapter)
+{
+	int vector_idx = adapter->vector_off;
+	int ring_idx = adapter->hw.queue_ring_base;
+	int ring_remaing =
+		min_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	int ring_step = 1;
+	int err, ring_cnt,
+		vector_remaing = adapter->num_msix_vectors - NON_Q_VECTORS;
+	int eth_queue_idx = 0;
+
+	BUG_ON(ring_remaing == 0);
+	BUG_ON(vector_remaing == 0);
+
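+	/* Distribute rings as evenly as possible over the available vectors,
+	 * e.g. 4 rings on 3 vectors come out as 2 + 1 + 1 (DIV_ROUND_UP of
+	 * what remains at each step).
+	 */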
+	for (; ring_remaing > 0 && vector_remaing > 0; vector_remaing--) {
+		ring_cnt = DIV_ROUND_UP(ring_remaing, vector_remaing);
+
+		err = rnpvf_alloc_q_vector(adapter, eth_queue_idx,
+					   vector_idx, ring_idx, ring_cnt,
+					   ring_step);
+		if (err)
+			goto err_out;
+
+		ring_idx += ring_step * ring_cnt;
+		ring_remaing -= ring_cnt;
+		vector_idx++;
+		eth_queue_idx += ring_cnt;
+	}
+
+	return 0;
+
+err_out:
+	vector_idx -= adapter->vector_off;
+	while (vector_idx--)
+		rnpvf_free_q_vector(adapter, vector_idx);
+	return -ENOMEM;
+}
+
+/**
+ * rnpvf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void rnpvf_free_q_vectors(struct rnpvf_adapter *adapter)
+{
+	int i, v_idx = adapter->num_q_vectors;
+	struct rnpvf_hw_stats_own *hw_stats = &adapter->hw_stats;
+
+	adapter->num_rx_queues = 0;
+	adapter->num_tx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	for (i = 0; i < v_idx; i++)
+		rnpvf_free_q_vector(adapter, i);
+	hw_stats->spoof_dropped = 0;
+}
+
+/**
+ * rnpvf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void rnpvf_reset_interrupt_capability(struct rnpvf_adapter *adapter)
+{
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & RNPVF_FLAG_MSI_ENABLED) {
+		pci_disable_msi(adapter->pdev);
+	}
+}
+
+/**
+ * rnpvf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+int rnpvf_init_interrupt_scheme(struct rnpvf_adapter *adapter)
+{
+	int err;
+
+	/* Number of supported queues */
+	rnpvf_set_num_queues(adapter);
+
+	err = rnpvf_set_interrupt_capability(adapter);
+	if (err) {
+		hw_dbg(&adapter->hw,
+		       "Unable to setup interrupt capabilities\n");
+		goto err_set_interrupt;
+	}
+
+	err = rnpvf_alloc_q_vectors(adapter);
+	if (err) {
+		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
+				     "vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	hw_dbg(&adapter->hw,
+	       "Multiqueue %s: Rx Queue count = %u, "
+	       "Tx Queue count = %u\n",
+	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
+	       adapter->num_rx_queues, adapter->num_tx_queues);
+
+	set_bit(__RNPVF_DOWN, &adapter->state);
+
+	return 0;
+err_alloc_q_vectors:
+	rnpvf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+	return err;
+}
+
+/**
+ * rnpvf_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void rnpvf_clear_interrupt_scheme(struct rnpvf_adapter *adapter)
+{
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+
+	rnpvf_free_q_vectors(adapter);
+	rnpvf_reset_interrupt_capability(adapter);
+}
+
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)  \
+	{                                                    \
+		u32 current_counter = RNP_READ_REG(hw, reg); \
+		if (current_counter < last_counter)          \
+			counter += 0x100000000LL;            \
+		last_counter = current_counter;              \
+		counter &= 0xFFFFFFFF00000000LL;             \
+		counter |= current_counter;                  \
+	}
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+	{                                                                \
+		u64 current_counter_lsb = RNP_READ_REG(hw, reg_lsb);     \
+		u64 current_counter_msb = RNP_READ_REG(hw, reg_msb);     \
+		u64 current_counter = (current_counter_msb << 32) |      \
+				      current_counter_lsb;               \
+		if (current_counter < last_counter)                      \
+			counter += 0x1000000000LL;                       \
+		last_counter = current_counter;                          \
+		counter &= 0xFFFFFFF000000000LL;                         \
+		counter |= current_counter;                              \
+	}
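+/* Both helpers extend a hardware counter that wraps at 32 (or 36) bits into
+ * a monotonically increasing 64-bit software counter: when the current raw
+ * value is smaller than the last one read, a wrap is assumed and the next
+ * multiple of 2^32 (or 2^36) is added before merging in the new low bits.
+ */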
+
+/**
+ * rnpvf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void rnpvf_update_stats(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw_stats_own *hw_stats = &adapter->hw_stats;
+	int i;
+	struct net_device_stats *net_stats = &adapter->netdev->stats;
+
+	net_stats->tx_packets = 0;
+	net_stats->tx_bytes = 0;
+	net_stats->rx_packets = 0;
+	net_stats->rx_bytes = 0;
+	net_stats->rx_dropped = 0;
+	net_stats->rx_errors = 0;
+
+	hw_stats->vlan_add_cnt = 0;
+	hw_stats->vlan_strip_cnt = 0;
+	hw_stats->csum_err = 0;
+	hw_stats->csum_good = 0;
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct rnpvf_ring *ring;
+		struct rnpvf_q_vector *q_vector = adapter->q_vector[i];
+
+		rnpvf_for_each_ring(ring, q_vector->tx) {
+			hw_stats->vlan_add_cnt += ring->tx_stats.vlan_add;
+			net_stats->tx_packets += ring->stats.packets;
+			net_stats->tx_bytes += ring->stats.bytes;
+		}
+
+		rnpvf_for_each_ring(ring, q_vector->rx) {
+			hw_stats->csum_err += ring->rx_stats.csum_err;
+			hw_stats->csum_good += ring->rx_stats.csum_good;
+			hw_stats->vlan_strip_cnt +=
+				ring->rx_stats.vlan_remove;
+			net_stats->rx_packets += ring->stats.packets;
+			net_stats->rx_bytes += ring->stats.bytes;
+			net_stats->rx_errors += ring->rx_stats.csum_err;
+		}
+	}
+}
+
+static void rnpvf_reset_pf_request(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	if (!(adapter->flags & RNPVF_FLAG_PF_RESET_REQ))
+		return;
+
+	adapter->flags &= (~RNPVF_FLAG_PF_RESET_REQ);
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	hw->mac.ops.req_reset_pf(hw);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+}
+
+static int rnpvf_reset_subtask(struct rnpvf_adapter *adapter)
+{
+	if (!(adapter->flags & RNPVF_FLAG_PF_RESET))
+		return 0;
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__RNPVF_DOWN, &adapter->state) ||
+	    test_bit(__RNPVF_RESETTING, &adapter->state))
+		return 0;
+
+	adapter->tx_timeout_count++;
+
+	rtnl_lock();
+	rnpvf_reinit_locked(adapter);
+	rtnl_unlock();
+
+	adapter->flags &= (~RNPVF_FLAG_PF_RESET);
+
+	return 1;
+}
+
+/**
+ * rnpvf_watchdog - Timer Call-back
+ * @t: pointer to the watchdog timer embedded in the adapter
+ **/
+static void rnpvf_watchdog(struct timer_list *t)
+{
+	struct rnpvf_adapter *adapter =
+		from_timer(adapter, t, watchdog_timer);
+
+	/*
+	 * Do the watchdog outside of interrupt context due to the lovely
+	 * delays that some of the newer hardware requires
+	 */
+
+	if (test_bit(__RNPVF_DOWN, &adapter->state))
+		goto watchdog_short_circuit;
+
+watchdog_short_circuit:
+	if (!test_bit(__RNPVF_REMOVE, &adapter->state))
+		schedule_work(&adapter->watchdog_task);
+}
+
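+/* Watchdog-driven complement to irq_miss_check(): track next_to_clean per
+ * ring across watchdog runs and, if a ring has pending work but has not
+ * advanced for several consecutive passes, count a missed interrupt and
+ * kick NAPI by hand so the ring does not stall.
+ */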
+static void rnpvf_check_hang_subtask(struct rnpvf_adapter *adapter)
+{
+	int i;
+	struct rnpvf_ring *tx_ring;
+	u64 tx_next_to_clean_old;
+	u64 tx_next_to_clean;
+	u64 tx_next_to_use;
+	struct rnpvf_ring *rx_ring;
+	u64 rx_next_to_clean_old;
+	u64 rx_next_to_clean;
+	union rnp_rx_desc *rx_desc;
+
+	/* If we're down or resetting, just bail */
+	if (test_bit(__RNPVF_DOWN, &adapter->state) ||
+	    test_bit(__RNPVF_RESETTING, &adapter->state))
+		return;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		tx_ring = adapter->tx_ring[i];
+		/* get the last next_to_clean */
+		tx_next_to_clean_old = tx_ring->tx_stats.tx_next_to_clean;
+		tx_next_to_clean = tx_ring->next_to_clean;
+		tx_next_to_use = tx_ring->next_to_use;
+
+		/* if we have tx desc to clean */
+		if (tx_next_to_use != tx_next_to_clean) {
+			if (tx_next_to_clean == tx_next_to_clean_old) {
+				tx_ring->tx_stats.tx_equal_count++;
+				if (tx_ring->tx_stats.tx_equal_count > 2) {
+					/* maybe not so good */
+					struct rnpvf_q_vector *q_vector =
+						tx_ring->q_vector;
+
+					/* stats */
+					if (q_vector->rx.ring ||
+					    q_vector->tx.ring)
+						napi_schedule_irqoff(
+							&q_vector->napi);
+
+					tx_ring->tx_stats.tx_irq_miss++;
+					tx_ring->tx_stats.tx_equal_count =
+						0;
+				}
+			} else {
+				tx_ring->tx_stats.tx_equal_count = 0;
+			}
+			/* update */
+			/* record this next_to_clean */
+			tx_ring->tx_stats.tx_next_to_clean =
+				tx_next_to_clean;
+		} else {
+			/* clean record to -1 */
+			tx_ring->tx_stats.tx_next_to_clean = -1;
+		}
+	}
+	/* check if we lost rx irq */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_ring = adapter->rx_ring[i];
+		/* get the last next_to_clean */
+		rx_next_to_clean_old = rx_ring->rx_stats.rx_next_to_clean;
+		/* get the now clean */
+		rx_next_to_clean = rx_ring->next_to_clean;
+
+		if (rx_next_to_clean == rx_next_to_clean_old) {
+			rx_ring->rx_stats.rx_equal_count++;
+
+			if ((rx_ring->rx_stats.rx_equal_count > 2) &&
+			    (rx_ring->rx_stats.rx_equal_count < 5)) {
+				rx_desc = RNPVF_RX_DESC(
+					rx_ring, rx_ring->next_to_clean);
+				if (rnpvf_test_staterr(rx_desc,
+						       RNP_RXD_STAT_DD)) {
+					struct rnpvf_q_vector *q_vector =
+						rx_ring->q_vector;
+					unsigned int size;
+
+					size = le16_to_cpu(
+						       rx_desc->wb.len) -
+					       le16_to_cpu(
+						       rx_desc->wb
+							       .padding_len);
+					if (size) {
+						rx_ring->rx_stats
+							.rx_irq_miss++;
+						if (q_vector->rx.ring ||
+						    q_vector->tx.ring)
+							napi_schedule_irqoff(
+								&q_vector->napi);
+					}
+				}
+			}
+			if (rx_ring->rx_stats.rx_equal_count > 1000)
+				rx_ring->rx_stats.rx_equal_count = 0;
+		} else {
+			rx_ring->rx_stats.rx_equal_count = 0;
+		}
+		/* update new clean */
+		rx_ring->rx_stats.rx_next_to_clean = rx_next_to_clean;
+	}
+}
+
+/**
+ * rnpvf_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void rnpvf_watchdog_task(struct work_struct *work)
+{
+	struct rnpvf_adapter *adapter =
+		container_of(work, struct rnpvf_adapter, watchdog_task);
+	struct net_device *netdev = adapter->netdev;
+	struct rnpvf_hw *hw = &adapter->hw;
+	u32 link_speed = adapter->link_speed;
+	bool link_up = adapter->link_up;
+	s32 need_reset;
+
+	adapter->flags |= RNPVF_FLAG_IN_WATCHDOG_TASK;
+
+	rnpvf_reset_pf_request(adapter);
+
+	if (rnpvf_reset_subtask(adapter)) {
+		adapter->flags &= ~RNPVF_FLAG_PF_UPDATE_MTU;
+		adapter->flags &= ~RNPVF_FLAG_PF_UPDATE_VLAN;
+		goto pf_has_reset;
+	}
+
+	need_reset =
+		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+	if (need_reset) {
+		adapter->link_up = link_up;
+		adapter->link_speed = link_speed;
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		schedule_work(&adapter->reset_task);
+		goto pf_has_reset;
+	}
+	adapter->link_up = link_up;
+	adapter->link_speed = link_speed;
+
+	/* if we are already down */
+	if (test_bit(__RNPVF_DOWN, &adapter->state)) {
+		if (test_bit(__RNPVF_LINK_DOWN, &adapter->state)) {
+			clear_bit(__RNPVF_LINK_DOWN, &adapter->state);
+			dev_info(&adapter->pdev->dev,
+				 "NIC Link is Down\n");
+		}
+		goto skip_link_check;
+	}
+
+	if (link_up) {
+		if (!netif_carrier_ok(netdev)) {
+			const char *link_speed_string;
+
+			switch (link_speed) {
+			case RNP_LINK_SPEED_40GB_FULL:
+				link_speed_string = "40 Gbps";
+				break;
+			case RNP_LINK_SPEED_25GB_FULL:
+				link_speed_string = "25 Gbps";
+				break;
+			case RNP_LINK_SPEED_10GB_FULL:
+				link_speed_string = "10 Gbps";
+				break;
+			case RNP_LINK_SPEED_1GB_FULL:
+				link_speed_string = "1 Gbps";
+				break;
+			case RNP_LINK_SPEED_100_FULL:
+				link_speed_string = "100 Mbps";
+				break;
+			default:
+				link_speed_string = "unknown speed";
+				break;
+			}
+			dev_info(&adapter->pdev->dev,
+				 "NIC Link is Up, %s\n",
+				 link_speed_string);
+			netif_carrier_on(netdev);
+			netif_tx_wake_all_queues(netdev);
+		}
+	} else {
+		adapter->link_up = false;
+		adapter->link_speed = 0;
+		if (netif_carrier_ok(netdev)) {
+			dev_info(&adapter->pdev->dev,
+				 "NIC Link is Down\n");
+			netif_carrier_off(netdev);
+			netif_tx_stop_all_queues(netdev);
+		}
+	}
+skip_link_check:
+	if (adapter->flags & RNPVF_FLAG_PF_UPDATE_MTU) {
+		adapter->flags &= ~RNPVF_FLAG_PF_UPDATE_MTU;
+		if (netdev->mtu > hw->mtu) {
+			netdev->mtu = hw->mtu;
+			rtnl_lock();
+			call_netdevice_notifiers(NETDEV_CHANGEMTU,
+						 adapter->netdev);
+			rtnl_unlock();
+		}
+	}
+	if (adapter->flags & RNPVF_FLAG_PF_UPDATE_VLAN) {
+		adapter->flags &= ~RNPVF_FLAG_PF_UPDATE_VLAN;
+		rnpvf_set_rx_mode(adapter->netdev);
+	}
+
+	rnpvf_check_hang_subtask(adapter);
+	rnpvf_update_stats(adapter);
+
+pf_has_reset:
+	/* Reset the timer */
+	mod_timer(&adapter->watchdog_timer,
+		  round_jiffies(jiffies + (2 * HZ)));
+
+	adapter->flags &= ~RNPVF_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * rnpvf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void rnpvf_free_tx_resources(struct rnpvf_adapter *adapter,
+			     struct rnpvf_ring *tx_ring)
+{
+	BUG_ON(tx_ring == NULL);
+
+	rnpvf_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * rnpvf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void rnpvf_free_all_tx_resources(struct rnpvf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		rnpvf_free_tx_resources(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ * rnpvf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int rnpvf_setup_tx_resources(struct rnpvf_adapter *adapter,
+			     struct rnpvf_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size;
+
+	size = sizeof(struct rnpvf_tx_buffer) * tx_ring->count;
+
+	if (tx_ring->q_vector)
+		numa_node = tx_ring->q_vector->numa_node;
+
+	dbg("%s size:%d count:%d\n", __func__, size, tx_ring->count);
+	tx_ring->tx_buffer_info = vzalloc_node(size, numa_node);
+	if (!tx_ring->tx_buffer_info)
+		tx_ring->tx_buffer_info = vzalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err_buffer;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct rnp_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!tx_ring->desc)
+		tx_ring->desc = dma_alloc_coherent(
+			dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc)
+		goto err;
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	DPRINTK(IFUP, INFO,
+		"%d TxRing:%d, vector:%d ItemCounts:%d "
+		"desc:%p(0x%llx) node:%d\n",
+		tx_ring->queue_index, tx_ring->rnpvf_queue_idx,
+		tx_ring->q_vector->v_idx, tx_ring->count, tx_ring->desc,
+		tx_ring->dma, numa_node);
+	return 0;
+
+err:
+	rnpvf_err(
+		"%s [SetupTxResources] ERROR: #%d TxRing:%d, vector:%d ItemCounts:%d\n",
+		tx_ring->netdev->name, tx_ring->queue_index,
+		tx_ring->rnpvf_queue_idx, tx_ring->q_vector->v_idx,
+		tx_ring->count);
+	vfree(tx_ring->tx_buffer_info);
+err_buffer:
+	tx_ring->tx_buffer_info = NULL;
+	dev_err(dev,
+		"Unable to allocate memory for the Tx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ * rnpvf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int rnpvf_setup_all_tx_resources(struct rnpvf_adapter *adapter)
+{
+	int i, err = 0;
+
+	dbg("adapter->num_tx_queues:%d, adapter->tx_ring[0]:%p\n",
+	    adapter->num_tx_queues, adapter->tx_ring[0]);
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		BUG_ON(adapter->tx_ring[i] == NULL);
+		err = rnpvf_setup_tx_resources(adapter,
+					       adapter->tx_ring[i]);
+		if (!err)
+			continue;
+		hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n",
+		       i);
+		goto err_setup_tx;
+	}
+
+	return 0;
+
+err_setup_tx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		rnpvf_free_tx_resources(adapter, adapter->tx_ring[i]);
+	return err;
+}
+
+/**
+ * rnpvf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int rnpvf_setup_rx_resources(struct rnpvf_adapter *adapter,
+			     struct rnpvf_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int orig_node = dev_to_node(dev);
+	int numa_node = NUMA_NO_NODE;
+	int size;
+
+	BUG_ON(rx_ring == NULL);
+
+	size = sizeof(struct rnpvf_rx_buffer) * rx_ring->count;
+
+	if (rx_ring->q_vector)
+		numa_node = rx_ring->q_vector->numa_node;
+
+	rx_ring->rx_buffer_info = vzalloc_node(size, numa_node);
+	if (!rx_ring->rx_buffer_info)
+		rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
+		goto alloc_buffer;
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union rnp_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	set_dev_node(dev, numa_node);
+	rx_ring->desc = dma_alloc_coherent(&adapter->pdev->dev,
+					   rx_ring->size, &rx_ring->dma,
+					   GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!rx_ring->desc) {
+		vfree(rx_ring->rx_buffer_info);
+		rx_ring->rx_buffer_info = NULL;
+		goto alloc_failed;
+	}
+
+	memset(rx_ring->desc, 0, rx_ring->size);
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	DPRINTK(IFUP, INFO,
+		"%d RxRing:%d, vector:%d ItemCounts:%d "
+		"desc:%p(0x%llx) node:%d\n",
+		rx_ring->queue_index, rx_ring->rnpvf_queue_idx,
+		rx_ring->q_vector->v_idx, rx_ring->count, rx_ring->desc,
+		rx_ring->dma, numa_node);
+
+	return 0;
+alloc_failed:
+	rnpvf_err(
+		"%s [SetupRxResources] ERROR: #%d RxRing:%d, vector:%d ItemCounts:%d\n",
+		rx_ring->netdev->name, rx_ring->queue_index,
+		rx_ring->rnpvf_queue_idx, rx_ring->q_vector->v_idx,
+		rx_ring->count);
+	vfree(rx_ring->rx_buffer_info);
+alloc_buffer:
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev,
+		"Unable to allocate memory for the Rx descriptor ring\n");
+
+	return -ENOMEM;
+}
+
+/**
+ * rnpvf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not).  It is the
+ * caller's duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int rnpvf_setup_all_rx_resources(struct rnpvf_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		BUG_ON(adapter->rx_ring[i] == NULL);
+
+		err = rnpvf_setup_rx_resources(adapter,
+					       adapter->rx_ring[i]);
+		if (!err)
+			continue;
+		hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n",
+		       i);
+		goto err_setup_rx;
+	}
+
+	return 0;
+
+err_setup_rx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		rnpvf_free_rx_resources(adapter, adapter->rx_ring[i]);
+	return err;
+}
+
+/**
+ * rnpvf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void rnpvf_free_rx_resources(struct rnpvf_adapter *adapter,
+			     struct rnpvf_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	rnpvf_clean_rx_ring(rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * rnpvf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void rnpvf_free_all_rx_resources(struct rnpvf_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rnpvf_free_rx_resources(adapter, adapter->rx_ring[i]);
+}
+
+/**
+ * rnpvf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int rnpvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	if (new_mtu > hw->mtu) {
+		dev_info(&adapter->pdev->dev,
+			 "requested MTU exceeds the PF limit of %d\n",
+			 hw->mtu);
+		return -EINVAL;
+	} else {
+		hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+		       netdev->mtu, new_mtu);
+		/* must set new MTU before calling down or up */
+		netdev->mtu = new_mtu;
+	}
+
+	if (netif_running(netdev))
+		rnpvf_reinit_locked(adapter);
+
+	return 0;
+}
+
+/**
+ * rnpvf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+int rnpvf_open(struct net_device *netdev)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+	int err;
+
+	DPRINTK(IFUP, INFO, "ifup\n");
+
+	/* A previous failure to open the device because of a lack of
+	 * available MSIX vector resources may have reset the number
+	 * of msix vectors variable to zero.  The only way to recover
+	 * is to unload/reload the driver and hope that the system has
+	 * been able to recover some MSIX vector resources.
+	 */
+	if (!adapter->num_msix_vectors)
+		return -ENOMEM;
+
+	/* disallow open during test */
+	if (test_bit(__RNPVF_TESTING, &adapter->state))
+		return -EBUSY;
+
+	if (hw->adapter_stopped) {
+		rnpvf_reset(adapter);
+		/* if adapter is still stopped then PF isn't up and
+		 * the vf can't start. */
+		if (hw->adapter_stopped) {
+			err = RNP_ERR_MBX;
+			dev_err(&hw->pdev->dev,
+				"%s(%s):error: Unable to start - perhaps the PF Driver isn't "
+				"up yet\n",
+				adapter->name, netdev->name);
+			goto err_setup_reset;
+		}
+	}
+
+	netif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = rnpvf_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = rnpvf_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	rnpvf_configure(adapter);
+
+	/* clear any pending interrupts, may auto mask */
+	err = rnpvf_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+	if (err)
+		goto err_set_queues;
+
+	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+	if (err)
+		goto err_set_queues;
+
+	rnpvf_up_complete(adapter);
+
+	return 0;
+
+err_set_queues:
+	rnpvf_free_irq(adapter);
+err_req_irq:
+
+err_setup_rx:
+	rnpvf_free_all_rx_resources(adapter);
+err_setup_tx:
+	rnpvf_free_all_tx_resources(adapter);
+
+err_setup_reset:
+
+	return err;
+}
+
+/**
+ * rnpvf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+int rnpvf_close(struct net_device *netdev)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+
+	DPRINTK(IFDOWN, INFO, "ifdown\n");
+
+	rnpvf_down(adapter);
+	rnpvf_free_irq(adapter);
+
+	rnpvf_free_all_tx_resources(adapter);
+	rnpvf_free_all_rx_resources(adapter);
+
+	return 0;
+}
+
+void rnpvf_tx_ctxtdesc(struct rnpvf_ring *tx_ring, u16 mss_seg_len,
+		       u8 l4_hdr_len, u8 tunnel_hdr_len, int ignore_vlan,
+		       u32 type_tucmd, bool crc_pad)
+{
+	struct rnp_tx_ctx_desc *context_desc;
+	u16 i = tx_ring->next_to_use;
+	struct rnpvf_adapter *adapter = RING2ADAPT(tx_ring);
+	struct rnpvf_hw *hw = &adapter->hw;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u8 vfnum = VFNUM(mbx, hw->vfnum);
+
+	context_desc = RNPVF_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= RNP_TXD_CTX_CTRL_DESC;
+
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_TX_PADDING) {
+		if (!crc_pad)
+			type_tucmd |=
+				RNP_TXD_MTI_CRC_PAD_CTRL;
+	}
+
+	context_desc->mss_len = cpu_to_le16(mss_seg_len);
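+	/* bit 7 appears to flag the vfnum field as valid (assumption) */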
+	context_desc->vfnum = 0x80 | vfnum;
+	context_desc->l4_hdr_len = l4_hdr_len;
+
+	if (ignore_vlan)
+		context_desc->vf_veb_flags |= VF_IGNORE_VLAN;
+
+	context_desc->tunnel_hdr_len = tunnel_hdr_len;
+	context_desc->resv_cmd = cpu_to_le32(type_tucmd);
+	context_desc->res = 0;
+	buf_dump_line("ctx  ", __LINE__, context_desc,
+		      sizeof(*context_desc));
+}
+
+static int rnpvf_tso(struct rnpvf_ring *tx_ring,
+		     struct rnpvf_tx_buffer *first, u8 *hdr_len)
+{
+	struct sk_buff *skb = first->skb;
+	struct net_device *netdev = tx_ring->netdev;
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+	u32 paylen, l4_offset;
+	int err;
+	u8 *inner_mac;
+	u16 gso_segs, gso_size;
+	u16 gso_need_pad;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
+	if (!skb_is_gso(skb))
+		return 0;
+
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;
+
+	inner_mac = skb->data;
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	/* initialize outer IP header fields */
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = 0x0000;
+	} else {
+		ip.v6->payload_len = 0;
+	}
+	if (skb_shinfo(skb)->gso_type &
+	    (SKB_GSO_GRE |
+	     SKB_GSO_GRE_CSUM |
+	     SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) {
+		inner_mac = skb_inner_mac_header(skb);
+		first->tunnel_hdr_len = inner_mac - skb->data;
+
+		if (skb_shinfo(skb)->gso_type &
+		    (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) {
+			first->cmd_flags |= RNP_TXD_TUNNEL_VXLAN;
+			l4.udp->check = 0;
+		} else {
+			first->cmd_flags |= RNP_TXD_TUNNEL_NVGRE;
+		}
+		dbg("set outer l4.udp to 0\n");
+
+		/* reset pointers to inner headers */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+	}
+	if (ip.v4->version == 4) {
+		/* IP header will have to cancel out any data that
+		 * is not a part of the outer IP header
+		 */
+		ip.v4->check = 0x0000;
+
+	} else {
+		ip.v6->payload_len = 0;
+		/* set ipv6 type */
+		first->cmd_flags |= (RNP_TXD_FLAG_IPv6);
+	}
+
+	/* determine offset of inner transport header */
+	l4_offset = l4.hdr - skb->data;
+
+	paylen = skb->len - l4_offset;
+	dbg("before l4 checksum is %x\n", l4.tcp->check);
+
+	if (skb->csum_offset == offsetof(struct tcphdr, check)) {
+		dbg("tcp before l4 checksum is %x\n", l4.tcp->check);
+		first->cmd_flags |= RNP_TXD_L4_TYPE_TCP;
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+		dbg("tcp l4 checksum is %x\n", l4.tcp->check);
+		l4.tcp->psh = 0;
+	} else {
+		dbg("paylen is %x\n", paylen);
+		first->cmd_flags |= RNP_TXD_L4_TYPE_UDP;
+		/* compute length of segmentation header */
+		dbg("udp before l4 checksum is %x\n", l4.udp->check);
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+		dbg("udp l4 checksum is %x\n", l4.udp->check);
+	}
+
+	dbg("l4 checksum is %x\n", l4.tcp->check);
+
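+	/* mac_ip_len packs the L3 header length in the low bits and the
+	 * L2 header length above bit 9, mirroring rnpvf_tx_csum()
+	 */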
+	first->mac_ip_len = l4.hdr - ip.hdr;
+	first->mac_ip_len |= (ip.hdr - inner_mac) << 9;
+
+	/* compute header lengths */
+	/* pull values out of skb_shinfo */
+	gso_size = skb_shinfo(skb)->gso_size;
+	gso_segs = skb_shinfo(skb)->gso_segs;
+
+	/* when the TX padding flag is set, check whether the last GSO
+	 * segment falls below the 60-byte minimum and needs padding
+	 */
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_TX_PADDING) {
+		gso_need_pad = (first->skb->len - *hdr_len) % gso_size;
+		if (gso_need_pad) {
+			if ((gso_need_pad + *hdr_len) <= 60) {
+				gso_need_pad =
+					60 - (gso_need_pad + *hdr_len);
+				first->gso_need_padding = !!gso_need_pad;
+			}
+		}
+	}
+
+	/* update gso size and bytecount with header size */
+	/* to fix tx status */
+	first->gso_segs = gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
+	first->mss_len_vf_num |= (gso_size | ((l4.tcp->doff * 4) << 24));
+	first->cmd_flags |= RNP_TXD_FLAG_TSO | RNP_TXD_IP_CSUM |
+			    RNP_TXD_L4_CSUM;
+	first->ctx_flag = true;
+	return 1;
+}
+
+static int rnpvf_tx_csum(struct rnpvf_ring *tx_ring,
+			 struct rnpvf_tx_buffer *first)
+{
+	struct sk_buff *skb = first->skb;
+	u8 l4_proto = 0;
+	u8 ip_len = 0;
+	u8 mac_len = 0;
+	u8 *inner_mac = skb->data;
+	u8 *exthdr;
+	__be16 frag_off;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		return 0;
+	}
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	inner_mac = skb->data;
+
+	/* outer protocol */
+	if (skb->encapsulation) {
+		/* define outer network header type */
+		if (ip.v4->version == 4) {
+			l4_proto = ip.v4->protocol;
+		} else {
+			exthdr = ip.hdr + sizeof(*ip.v6);
+			l4_proto = ip.v6->nexthdr;
+			if (l4.hdr != exthdr)
+				ipv6_skip_exthdr(skb, exthdr - skb->data,
+						 &l4_proto, &frag_off);
+		}
+
+		/* define outer transport */
+		switch (l4_proto) {
+		case IPPROTO_UDP:
+			l4.udp->check = 0;
+			first->cmd_flags |= RNP_TXD_TUNNEL_VXLAN;
+
+			break;
+		case IPPROTO_GRE:
+
+			first->cmd_flags |= RNP_TXD_TUNNEL_NVGRE;
+			/* There was a long-standing issue in GRE where GSO
+			 * was not setting the outer transport header unless
+			 * a GRE checksum was requested. This was fixed in
+			 * the 4.6 version of the kernel.  In the 4.7 kernel
+			 * support for GRE over IPv6 was added to GSO.  So we
+			 * can assume this workaround for all IPv4 headers
+			 * without impacting later versions of the GRE.
+			 */
+			if (ip.v4->version == 4)
+				l4.hdr = ip.hdr + (ip.v4->ihl * 4);
+			break;
+		default:
+			skb_checksum_help(skb);
+			return -1;
+		}
+
+		/* switch IP header pointer from outer to inner header */
+		ip.hdr = skb_inner_network_header(skb);
+		l4.hdr = skb_inner_transport_header(skb);
+
+		inner_mac = skb_inner_mac_header(skb);
+		first->tunnel_hdr_len = inner_mac - skb->data;
+		first->ctx_flag = true;
+		dbg("tunnel length is %d\n", first->tunnel_hdr_len);
+	}
+
+	mac_len = (ip.hdr - inner_mac);
+	dbg("inner checksum needed %d", skb_checksum_start_offset(skb));
+	dbg("skb->encapsulation %d\n", skb->encapsulation);
+	ip_len = (l4.hdr - ip.hdr);
+	if (ip.v4->version == 4) {
+		l4_proto = ip.v4->protocol;
+	} else {
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data,
+					 &l4_proto, &frag_off);
+		first->cmd_flags |= RNP_TXD_FLAG_IPv6;
+	}
+	/* Enable L4 checksum offloads */
+	switch (l4_proto) {
+	case IPPROTO_TCP:
+		first->cmd_flags |= RNP_TXD_L4_TYPE_TCP | RNP_TXD_L4_CSUM;
+		break;
+	case IPPROTO_SCTP:
+		first->cmd_flags |= RNP_TXD_L4_TYPE_SCTP | RNP_TXD_L4_CSUM;
+		break;
+	case IPPROTO_UDP:
+		first->cmd_flags |= RNP_TXD_L4_TYPE_UDP | RNP_TXD_L4_CSUM;
+		break;
+	default:
+		skb_checksum_help(skb);
+		return 0;
+	}
+	if ((tx_ring->ring_flags & RNPVF_RING_NO_TUNNEL_SUPPORT) &&
+	    (first->ctx_flag)) {
+		/* if not support tunnel */
+		first->cmd_flags &= (~RNP_TXD_TUNNEL_MASK);
+		mac_len += first->tunnel_hdr_len;
+		first->tunnel_hdr_len = 0;
+		first->ctx_flag = false;
+	}
+
+	dbg("mac length is %d\n", mac_len);
+	dbg("ip length is %d\n", ip_len);
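+	/* same packing as the TSO path: L2 length above bit 9, L3 length
+	 * in the low bits
+	 */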
+	first->mac_ip_len = (mac_len << 9) | ip_len;
+	return 0;
+}
+
+static void rnpvf_tx_map(struct rnpvf_ring *tx_ring,
+			 struct rnpvf_tx_buffer *first, const u8 hdr_len)
+{
+	struct sk_buff *skb = first->skb;
+	struct rnpvf_tx_buffer *tx_buffer;
+	struct rnp_tx_desc *tx_desc;
+	skb_frag_t *frag;
+	dma_addr_t dma;
+	unsigned int data_len, size;
+	u16 vlan = first->vlan;
+	u16 cmd = first->cmd_flags;
+	u16 i = tx_ring->next_to_use;
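+	/* the VF number rides in the upper bits of the packet address;
+	 * presumably the hw uses it to attribute the buffer to this VF
+	 */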
+	u64 fun_id = ((u64)(tx_ring->vfnum) << (32 + 24));
+
+	tx_desc = RNPVF_TX_DESC(tx_ring, i);
+	tx_desc->blen = cpu_to_le16(skb->len - hdr_len); /* maybe no-use */
+	tx_desc->vlan = cpu_to_le16(vlan);
+	tx_desc->cmd = cpu_to_le16(cmd);
+	tx_desc->mac_ip_len = first->mac_ip_len;
+
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+	tx_buffer = first;
+
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		/* 1st desc */
+		tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+
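+		/* buffers larger than the per-descriptor limit are split
+		 * across several descriptors
+		 */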
+		while (unlikely(size > RNPVF_MAX_DATA_PER_TXD)) {
+			tx_desc->cmd = cpu_to_le16(cmd);
+			tx_desc->blen =
+				cpu_to_le16(RNPVF_MAX_DATA_PER_TXD);
+			buf_dump_line("tx0  ", __LINE__, tx_desc,
+				      sizeof(*tx_desc));
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = RNPVF_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+
+			dma += RNPVF_MAX_DATA_PER_TXD;
+			size -= RNPVF_MAX_DATA_PER_TXD;
+
+			tx_desc->pkt_addr = cpu_to_le64(dma | fun_id);
+		}
+
+		buf_dump_line("tx1  ", __LINE__, tx_desc,
+			      sizeof(*tx_desc));
+		if (likely(!data_len))
+			break;
+		tx_desc->cmd = cpu_to_le16(cmd);
+		tx_desc->blen = cpu_to_le16(size);
+		buf_dump_line("tx2  ", __LINE__, tx_desc,
+			      sizeof(*tx_desc));
+
+		/* ==== frag== */
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = RNPVF_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+		tx_desc->cmd = RNP_TXD_CMD_RS;
+		tx_desc->mac_ip_len = 0;
+
+		size = skb_frag_size(frag);
+
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+	}
+
+	/* write last descriptor with RS and EOP bits */
+	tx_desc->cmd = cpu_to_le16(cmd | RNP_TXD_CMD_EOP | RNP_TXD_CMD_RS);
+	tx_desc->blen = cpu_to_le16(size);
+	buf_dump_line("tx3  ", __LINE__, tx_desc, sizeof(*tx_desc));
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	/*
+	 * Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	buf_dump_line("tx4  ", __LINE__, tx_desc, sizeof(*tx_desc));
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	/* notify HW of packet */
+	rnpvf_wr_reg(tx_ring->tail, i);
+
+	return;
+dma_error:
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		rnpvf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
+
+	tx_ring->next_to_use = i;
+}
+
+static int __rnpvf_maybe_stop_tx(struct rnpvf_ring *tx_ring, int size)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+	dbg("stop subqueue\n");
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in case another CPU has just
+	 * made room available.
+	 */
+	if (likely(rnpvf_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++adapter->restart_queue;
+	return 0;
+}
+
+void rnpvf_maybe_tx_ctxtdesc(struct rnpvf_ring *tx_ring,
+			     struct rnpvf_tx_buffer *first,
+			     int ignore_vlan, u32 type_tucmd)
+{
+	if (first->ctx_flag) {
+		rnpvf_tx_ctxtdesc(tx_ring, first->mss_len,
+				  first->l4_hdr_len, first->tunnel_hdr_len,
+				  ignore_vlan, type_tucmd,
+				  first->gso_need_padding);
+	}
+}
+
+static int rnpvf_maybe_stop_tx(struct rnpvf_ring *tx_ring, int size)
+{
+	if (likely(RNPVF_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __rnpvf_maybe_stop_tx(tx_ring, size);
+}
+
+static int rnpvf_check_spoof_mac(struct sk_buff *skb,
+				 struct net_device *netdev,
+				 struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	struct rnpvf_hw_stats_own *hw_stats = &adapter->hw_stats;
+	int ret;
+	u8 *data = skb->data;
+
+	/* if the PF has not enabled MAC spoof checking, do nothing */
+	if (!(hw->pf_feature & PF_MAC_SPOOF))
+		return 0;
+
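+	/* the bytes at data + addr_len are the Ethernet source MAC;
+	 * frames not sourced from the VF's own address are dropped
+	 */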
+	if (0 == memcmp(data + netdev->addr_len, netdev->dev_addr,
+				netdev->addr_len)) {
+		ret = 0;
+	} else {
+		hw_stats->spoof_dropped++;
+		ret = 1;
+	}
+
+	return ret;
+}
+
+static void rnpvf_force_src_mac(struct sk_buff *skb,
+				struct net_device *netdev)
+{
+	u8 *data = skb->data;
+	bool ret = false;
+	struct netdev_hw_addr *ha;
+
+	/* force all src mac to myself */
+	if (is_multicast_ether_addr(data)) {
+		if (0 == memcmp(data + netdev->addr_len, netdev->dev_addr,
+				netdev->addr_len)) {
+			ret = true;
+			goto DONE;
+		}
+		netdev_for_each_uc_addr(ha, netdev) {
+			if (0 == memcmp(data + netdev->addr_len, ha->addr,
+					netdev->addr_len)) {
+				ret = true;
+				goto DONE;
+			}
+		}
+		/* if not src mac, force to src mac */
+		if (!ret)
+			memcpy(data + netdev->addr_len, netdev->dev_addr,
+			       netdev->addr_len);
+	}
+DONE:
+	return;
+}
+
+netdev_tx_t rnpvf_xmit_frame_ring(struct sk_buff *skb,
+				  struct rnpvf_adapter *adapter,
+				  struct rnpvf_ring *tx_ring,
+				  bool tx_padding)
+{
+	struct rnpvf_tx_buffer *first;
+	int tso;
+	u16 cmd = RNP_TXD_CMD_RS;
+	u16 vlan = 0;
+	unsigned short f;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+	__be16 protocol = skb->protocol;
+	u8 hdr_len = 0;
+	int ignore_vlan = 0;
+
+	dbg("=== begin ====\n");
+
+	rnpvf_skb_dump(skb, true);
+
+	dbg("skb:%p, skb->len:%d  headlen:%d, data_len:%d, tx_ring->next_to_use:%d "
+	    "count:%d\n",
+	    skb, skb->len, skb_headlen(skb), skb->data_len,
+	    tx_ring->next_to_use, tx_ring->count);
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/RNPVF_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/RNPVF_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+		skb_frag_t *frag_temp = &skb_shinfo(skb)->frags[f];
+		count += TXD_USE_COUNT(skb_frag_size(frag_temp));
+		dbg(" #%d frag: size:%d\n", f,
+		    skb_shinfo(skb)->frags[f].size);
+	}
+
+	if (rnpvf_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+	dbg("xx %p\n", tx_ring->tx_buffer_info);
+
+	/* force the source MAC of transmitted frames to this netdev's MAC */
+	if (!(tx_ring->ring_flags & RNPVF_RING_VEB_MULTI_FIX))
+		rnpvf_force_src_mac(skb, tx_ring->netdev);
+	if (rnpvf_check_spoof_mac(skb, tx_ring->netdev, adapter)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	first->bytecount = skb->len;
+	first->gso_segs = 1;
+	first->mss_len_vf_num = 0;
+	first->inner_vlan_tunnel_len = 0;
+
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_TX_PADDING) {
+		first->ctx_flag = true;
+		first->gso_need_padding = tx_padding;
+	}
+
+	/* if we have a HW VLAN tag being added default to the HW one */
+
+	if (adapter->flags & RNPVF_FLAG_PF_SET_VLAN) {
+		vlan |= adapter->vf_vlan;
+		cmd |= RNP_TXD_VLAN_VALID | RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+
+	} else {
+		if (skb_vlan_tag_present(skb)) {
+			if (skb->vlan_proto != htons(ETH_P_8021Q)) {
+				/* veb only use ctags */
+				vlan |= skb_vlan_tag_get(skb);
+				cmd |= RNP_TXD_SVLAN_TYPE |
+				       RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+			} else {
+				vlan |= skb_vlan_tag_get(skb);
+				cmd |= RNP_TXD_VLAN_VALID |
+				       RNP_TXD_VLAN_CTRL_INSERT_VLAN;
+			}
+			tx_ring->tx_stats.vlan_add++;
+		} else if (protocol == htons(ETH_P_8021Q)) {
+			struct vlan_hdr *vhdr, _vhdr;
+			vhdr = skb_header_pointer(skb, ETH_HLEN,
+						  sizeof(_vhdr), &_vhdr);
+			if (!vhdr)
+				goto out_drop;
+
+			protocol = vhdr->h_vlan_encapsulated_proto;
+			vlan = ntohs(vhdr->h_vlan_TCI);
+			cmd |= RNP_TXD_VLAN_VALID | RNP_TXD_VLAN_CTRL_NOP;
+			ignore_vlan = 1;
+		}
+	}
+
+	/* record initial flags and protocol */
+	first->cmd_flags = cmd;
+	first->vlan = vlan;
+	first->protocol = protocol;
+	/* default len must not be 0 (hw requirement) */
+	first->mac_ip_len = 20;
+	first->tunnel_hdr_len = 0;
+
+	tso = rnpvf_tso(tx_ring, first, &hdr_len);
+	if (tso < 0) {
+		goto out_drop;
+	} else if (!tso) {
+		rnpvf_tx_csum(tx_ring, first);
+	}
+	/* the VF should always send a context descriptor carrying vf_num */
+	first->ctx_flag = true;
+	/* add control desc */
+	rnpvf_maybe_tx_ctxtdesc(tx_ring, first, ignore_vlan, 0);
+
+	rnpvf_tx_map(tx_ring, first, hdr_len);
+
+	rnpvf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	dbg("=== end ====\n\n\n\n");
+	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+
+	return NETDEV_TX_OK;
+}
+
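+/* Short SCTP frames are not padded in software by rnpvf_xmit_frame();
+ * they are instead flagged so the context descriptor's padding/CRC
+ * control handles them, presumably to keep the SCTP CRC32c intact.
+ */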
+static bool check_sctp_no_padding(struct sk_buff *skb)
+{
+	bool no_padding = false;
+	u8 l4_proto = 0;
+	u8 *exthdr;
+	__be16 frag_off;
+	union {
+		struct iphdr *v4;
+		struct ipv6hdr *v6;
+		unsigned char *hdr;
+	} ip;
+	union {
+		struct tcphdr *tcp;
+		struct udphdr *udp;
+		unsigned char *hdr;
+	} l4;
+
+	ip.hdr = skb_network_header(skb);
+	l4.hdr = skb_transport_header(skb);
+
+	if (ip.v4->version == 4) {
+		l4_proto = ip.v4->protocol;
+	} else {
+		exthdr = ip.hdr + sizeof(*ip.v6);
+		l4_proto = ip.v6->nexthdr;
+		if (l4.hdr != exthdr)
+			ipv6_skip_exthdr(skb, exthdr - skb->data,
+					 &l4_proto, &frag_off);
+	}
+	switch (l4_proto) {
+	case IPPROTO_SCTP:
+		no_padding = true;
+		break;
+	default:
+
+		break;
+	}
+	return no_padding;
+}
+
+static int rnpvf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_ring *tx_ring;
+	bool tx_padding = false;
+
+	if (!netif_carrier_ok(netdev)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	if (adapter->priv_flags & RNPVF_PRIV_FLAG_TX_PADDING) {
+		if (skb->len < 60) {
+			if (!check_sctp_no_padding(skb)) {
+				if (skb_put_padto(skb, 60))
+					return NETDEV_TX_OK;
+
+			} else {
+				tx_padding = true;
+			}
+		}
+
+	} else {
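+		/* pad very short frames to 17 bytes; this looks like a
+		 * minimum length required by the hw (assumption)
+		 */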
+		if (skb_put_padto(skb, 17))
+			return NETDEV_TX_OK;
+	}
+
+	tx_ring = adapter->tx_ring[skb->queue_mapping];
+	dbg("xmi:queue_mapping:%d ring:%p\n", skb->queue_mapping, tx_ring);
+	return rnpvf_xmit_frame_ring(skb, adapter, tx_ring, tx_padding);
+}
+
+/**
+ * rnpvf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int rnpvf_set_mac(struct net_device *netdev, void *p)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	struct rnpvf_hw *hw = &adapter->hw;
+	struct sockaddr *addr = p;
+	s32 ret_val;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	ret_val = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+	if (0 != ret_val) {
+		/* set mac failed */
+		dev_err(&adapter->pdev->dev, "pf not allowed reset mac\n");
+		return -EADDRNOTAVAIL;
+
+	} else {
+		eth_hw_addr_set(netdev, addr->sa_data);
+		memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+		rnpvf_configure_veb(adapter);
+	}
+
+	return 0;
+}
+
+void remove_mbx_irq(struct rnpvf_adapter *adapter)
+{
+	u32 msgbuf[2];
+	struct rnpvf_hw *hw = &adapter->hw;
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	msgbuf[0] = RNP_PF_REMOVE;
+	adapter->hw.mbx.ops.write_posted(hw, msgbuf, 1, false);
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
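+	/* give the PF time to consume the REMOVE message before the
+	 * mailbox vector is torn down (assumed reason for the delay)
+	 */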
+	mdelay(10);
+
+	/* mbx */
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		adapter->hw.mbx.ops.configure(
+			&adapter->hw, adapter->msix_entries[0].entry,
+			false);
+		free_irq(adapter->msix_entries[0].vector, adapter);
+	}
+}
+
+static void rnp_get_link_status(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	u32 msgbuf[3];
+	s32 ret_val = -1;
+
+	spin_lock_bh(&adapter->mbx_lock);
+	set_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	msgbuf[0] = RNP_PF_GET_LINK;
+	adapter->hw.mbx.ops.write_posted(hw, msgbuf, 1, false);
+	mdelay(2);
+	ret_val = adapter->hw.mbx.ops.read_posted(hw, msgbuf, 2, false);
+	if (ret_val == 0) {
+		if (msgbuf[1] & RNP_PF_LINK_UP) {
+			hw->link = true;
+			hw->speed = msgbuf[1] & 0xffff;
+
+		} else {
+			hw->link = false;
+			hw->speed = 0;
+		}
+	} else {
+		dev_err(&adapter->pdev->dev,
+			"mbx GET_LINK failed!\n");
+	}
+	clear_bit(__RNPVF_MBX_POLLING, &adapter->state);
+	spin_unlock_bh(&adapter->mbx_lock);
+}
+
+int register_mbx_irq(struct rnpvf_adapter *adapter)
+{
+	struct rnpvf_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	/* for mbx:vector0 */
+	if (adapter->flags & RNPVF_FLAG_MSIX_ENABLED) {
+		err = request_irq(adapter->msix_entries[0].vector,
+				  rnpvf_msix_other, 0, netdev->name,
+				  adapter);
+		if (err) {
+			dev_err(&adapter->pdev->dev,
+				"request_irq for msix_other failed: %d\n",
+				err);
+			goto err_mbx;
+		}
+		hw->mbx.ops.configure(hw, adapter->msix_entries[0].entry,
+				      true);
+	}
+
+	rnp_get_link_status(adapter);
+err_mbx:
+	return err;
+}
+
+static int rnpvf_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct rnpvf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev)) {
+		rtnl_lock();
+		rnpvf_down(adapter);
+		rnpvf_free_irq(adapter);
+		rnpvf_free_all_tx_resources(adapter);
+		rnpvf_free_all_rx_resources(adapter);
+		rtnl_unlock();
+	}
+
+	remove_mbx_irq(adapter);
+	rnpvf_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+#endif
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int rnpvf_resume(struct pci_dev *pdev)
+{
+	struct rnpvf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/*
+	 * pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pcim_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	rtnl_lock();
+	err = rnpvf_init_interrupt_scheme(adapter);
+	rtnl_unlock();
+	register_mbx_irq(adapter);
+
+	if (err) {
+		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
+		return err;
+	}
+
+	rnpvf_reset(adapter);
+
+	if (netif_running(netdev)) {
+		err = rnpvf_open(netdev);
+		if (err)
+			return err;
+	}
+
+	netif_device_attach(netdev);
+
+	return err;
+}
+
+#endif /* CONFIG_PM */
+static void rnpvf_shutdown(struct pci_dev *pdev)
+{
+	rnpvf_suspend(pdev, PMSG_SUSPEND);
+}
+
+static void rnpvf_get_stats64(struct net_device *netdev,
+			      struct rtnl_link_stats64 *stats)
+{
+	struct rnpvf_adapter *adapter = netdev_priv(netdev);
+	int i;
+	u64 ring_csum_err = 0;
+	u64 ring_csum_good = 0;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct rnpvf_ring *ring = adapter->rx_ring[i];
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(
+					&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+				ring_csum_err += ring->rx_stats.csum_err;
+				ring_csum_good += ring->rx_stats.csum_good;
+			} while (u64_stats_fetch_retry(&ring->syncp,
+						       start));
+			stats->rx_packets += packets;
+			stats->rx_bytes += bytes;
+		}
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct rnpvf_ring *ring = adapter->tx_ring[i];
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(
+					&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry(&ring->syncp,
+						       start));
+			stats->tx_packets += packets;
+			stats->tx_bytes += bytes;
+		}
+	}
+	rcu_read_unlock();
+	/* following stats updated by rnp_watchdog_task() */
+	stats->multicast = netdev->stats.multicast;
+	stats->rx_errors = netdev->stats.rx_errors;
+	stats->rx_length_errors = netdev->stats.rx_length_errors;
+	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+}
+
+#define RNP_MAX_TUNNEL_HDR_LEN 80
+#define RNP_MAX_MAC_HDR_LEN 127
+#define RNP_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t rnpvf_features_check(struct sk_buff *skb,
+					      struct net_device *dev,
+					      netdev_features_t features)
+{
+	unsigned int network_hdr_len, mac_hdr_len;
+
+	/* Make certain the headers can be described by a context descriptor */
+	mac_hdr_len = skb_network_header(skb) - skb->data;
+	if (unlikely(mac_hdr_len > RNP_MAX_MAC_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC |
+				    NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_TSO |
+				    NETIF_F_TSO6);
+
+	network_hdr_len =
+		skb_checksum_start(skb) - skb_network_header(skb);
+	if (unlikely(network_hdr_len > RNP_MAX_NETWORK_HDR_LEN))
+		return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC |
+				    NETIF_F_TSO | NETIF_F_TSO6);
+
+	/* We can only support IPV4 TSO in tunnels if we can mangle the
+	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 */
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+		features &= ~NETIF_F_TSO;
+
+	return features;
+}
+
+static const struct net_device_ops rnpvf_netdev_ops = {
+	.ndo_open = rnpvf_open,
+	.ndo_stop = rnpvf_close,
+	.ndo_start_xmit = rnpvf_xmit_frame,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_get_stats64 = rnpvf_get_stats64,
+	.ndo_set_rx_mode = rnpvf_set_rx_mode,
+	.ndo_set_mac_address = rnpvf_set_mac,
+	.ndo_change_mtu = rnpvf_change_mtu,
+	.ndo_vlan_rx_add_vid = rnpvf_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = rnpvf_vlan_rx_kill_vid,
+	.ndo_features_check = rnpvf_features_check,
+	.ndo_set_features = rnpvf_set_features,
+	.ndo_fix_features = rnpvf_fix_features,
+};
+
+void rnpvf_assign_netdev_ops(struct net_device *dev)
+{
+	/* different hw variants may assign different ops here */
+	dev->netdev_ops = &rnpvf_netdev_ops;
+	rnpvf_set_ethtool_ops(dev);
+	dev->watchdog_timeo = 5 * HZ;
+}
+
+static u8 rnpvf_vfnum(struct rnpvf_hw *hw)
+{
+	u16 vf_num = -1;
+#if CONFIG_BAR4_PFVFNUM
+	int ring, v;
+	u16 func = 0;
+
+	func = ((hw->pdev->devfn & 0x1) ? 1 : 0);
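+	/* Walk the ring registers two at a time; the first entry that
+	 * does not read back the PCI vendor id is taken to mark this
+	 * VF's rings, and its index yields the VF number
+	 * (bit 7 = vf-active, bit 6 = PF index).
+	 */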
+	for (ring = 0; ring < 128; ring += 2) {
+		v = rd32(hw, 0x8010 + 0x100 * ring);
+		if ((v & 0xFFFF) == hw->pdev->vendor) {
+			continue;
+		} else {
+			vf_num = (1 << 7) /* vf-active */ |
+				 (func << 6) /* pf */ |
+				 (ring / 2) /* vfnum */;
+			break;
+		}
+	}
+	return vf_num;
+#else
+	u32 pfvfnum_reg;
+
+	pfvfnum_reg =
+		(VF_NUM_REG_N10 & (pci_resource_len(hw->pdev, 0) - 1));
+	vf_num = readl(hw->hw_addr_bar0 + pfvfnum_reg);
+#define VF_NUM_MASK_TEMP (0xff0)
+#define VF_NUM_OFF (4)
+	return ((vf_num & VF_NUM_MASK_TEMP) >> VF_NUM_OFF);
+#endif
+}
+
+static inline unsigned long rnpvf_tso_features(struct rnpvf_hw *hw)
+{
+	unsigned long features = 0;
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TSO)
+		features |= NETIF_F_TSO;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TSO)
+		features |= NETIF_F_TSO6;
+	features |= NETIF_F_GSO_PARTIAL;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TX_UDP_TUNNEL)
+		features |= RNPVF_GSO_PARTIAL_FEATURES;
+
+	return features;
+}
+
+static int rnpvf_add_adpater(struct pci_dev *pdev,
+			     const struct rnpvf_info *ii,
+			     struct rnpvf_adapter **padapter)
+{
+	int err = 0;
+	struct rnpvf_adapter *adapter = NULL;
+	struct net_device *netdev;
+	struct rnpvf_hw *hw;
+	unsigned int queues = MAX_TX_QUEUES;
+	static int pf0_cards_found;
+	static int pf1_cards_found;
+
+	pr_info("==== add adapter queues:%d ====\n", queues);
+
+	netdev = alloc_etherdev_mq(sizeof(struct rnpvf_adapter), queues);
+	if (!netdev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	/* setup some status */
+#ifdef FIX_VF_BUG
+	adapter->status |= GET_VFNUM_FROM_BAR0;
+#endif
+
+	if (padapter)
+		*padapter = adapter;
+	pci_set_drvdata(pdev, adapter);
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+	hw->pdev = pdev;
+	hw->board_type = ii->board_type;
+	adapter->msg_enable =
+		netif_msg_init(debug, NETIF_MSG_DRV
+#ifdef MSG_PROBE_ENABLE
+					      | NETIF_MSG_PROBE
+#endif
+#ifdef MSG_IFUP_ENABLE
+					      | NETIF_MSG_IFUP
+#endif
+#ifdef MSG_IFDOWN_ENABLE
+					      | NETIF_MSG_IFDOWN
+#endif
+		);
+
+	switch (ii->mac) {
+	case rnp_mac_2port_10G:
+		hw->mode = MODE_NIC_MODE_2PORT_10G;
+		break;
+	case rnp_mac_2port_40G:
+		hw->mode = MODE_NIC_MODE_2PORT_40G;
+		break;
+	case rnp_mac_4port_10G:
+		hw->mode = MODE_NIC_MODE_4PORT_10G;
+		break;
+	case rnp_mac_8port_10G:
+		hw->mode = MODE_NIC_MODE_8PORT_10G;
+		break;
+	default:
+		break;
+	}
+
+	switch (hw->board_type) {
+	case rnp_board_n10:
+#define RNP_N10_BAR 4
+		hw->hw_addr = pcim_iomap(pdev, RNP_N10_BAR, 0);
+		if (!hw->hw_addr) {
+			err = -EIO;
+			goto err_ioremap;
+		}
+		dev_info(&pdev->dev, "[bar%d]:%p %llx len=%d MB\n",
+			 RNP_N10_BAR, hw->hw_addr,
+			 (unsigned long long)pci_resource_start(
+				 pdev, RNP_N10_BAR),
+			 (int)pci_resource_len(pdev, RNP_N10_BAR) / 1024 /
+				 1024);
+#if !CONFIG_BAR4_PFVFNUM
+		hw->hw_addr_bar0 = pcim_iomap(pdev, 0, 0);
+		if (!hw->hw_addr_bar0) {
+			err = -EIO;
+			goto err_ioremap;
+		}
+#endif
+
+		hw->vfnum = rnpvf_vfnum(hw);
+		dev_info(&adapter->pdev->dev, "hw->vfnum is %x\n",
+			 hw->vfnum);
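+		/* the ring MSI-X table is assumed to start at a fixed
+		 * 0xa0000 offset within BAR4
+		 */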
+		hw->ring_msix_base = hw->hw_addr + 0xa0000;
+
+		if (hw->vfnum & 0x40) {
+#ifdef FIX_VF_BUG
+			/* in this mode offset hw_addr */
+			hw->ring_msix_base += 0x200;
+			hw->hw_addr += 0x100000;
+#endif
+			adapter->port = adapter->bd_number =
+				pf1_cards_found++;
+			if (pf1_cards_found == 1000)
+				pf1_cards_found = 0;
+		} else {
+			adapter->port = adapter->bd_number =
+				pf0_cards_found++;
+			if (pf0_cards_found == 1000)
+				pf0_cards_found = 0;
+		}
+		snprintf(adapter->name, sizeof(netdev->name), "%s%d%d",
+			 rnpvf_driver_name, (hw->vfnum & 0x40) >> 6,
+			 adapter->bd_number);
+		/* n10 only support msix */
+		adapter->irq_mode = irq_mode_msix;
+		break;
+	}
+
+	pr_info("%s %s: vfnum:0x%x\n", adapter->name, pci_name(pdev),
+		hw->vfnum);
+
+	rnpvf_assign_netdev_ops(netdev);
+	strncpy(netdev->name, adapter->name, sizeof(netdev->name) - 1);
+
+	/* Setup hw api */
+	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+	hw->mac.type = ii->mac;
+
+	ii->get_invariants(hw);
+
+	memcpy(&hw->mbx.ops, &rnpvf_mbx_ops,
+	       sizeof(struct rnp_mbx_operations));
+
+	/* setup the private structure */
+	err = rnpvf_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	/* The HW MAC address was set and/or determined in sw_init */
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		pr_err("invalid MAC address\n");
+		err = -EIO;
+		goto err_sw_init;
+	}
+	/* MTU range: 68 - 9710 */
+	netdev->min_mtu = hw->min_length;
+	netdev->max_mtu = hw->max_length - (ETH_HLEN + 2 * ETH_FCS_LEN);
+
+	netdev->mtu = hw->mtu;
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_SG)
+		netdev->features |= NETIF_F_SG;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TSO)
+		netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_RX_HASH)
+		netdev->features |= NETIF_F_RXHASH;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_RX_CHECKSUM) {
+		netdev->features |= NETIF_F_RXCSUM;
+		adapter->flags |= RNPVF_FLAG_RX_CHKSUM_ENABLED;
+	}
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TX_CHECKSUM) {
+		netdev->features |= NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
+	}
+	if (hw->feature_flags & RNPVF_NET_FEATURE_USO) {
+		netdev->features |= NETIF_F_GSO_UDP_L4;
+	}
+
+	netdev->features |= NETIF_F_HIGHDMA;
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_TX_UDP_TUNNEL) {
+		netdev->gso_partial_features = RNPVF_GSO_PARTIAL_FEATURES;
+		netdev->features |= NETIF_F_GSO_PARTIAL |
+				    RNPVF_GSO_PARTIAL_FEATURES;
+	}
+
+	netdev->hw_features |= netdev->features;
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_VLAN_FILTER)
+		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+	if (hw->pf_feature & PF_NCSI_EN)
+		hw->feature_flags &= (~RNPVF_NET_FEATURE_VLAN_OFFLOAD);
+	if (hw->feature_flags & RNPVF_NET_FEATURE_VLAN_OFFLOAD) {
+		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
+				       NETIF_F_HW_VLAN_CTAG_TX;
+	}
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
+				       NETIF_F_HW_VLAN_STAG_TX;
+	}
+
+	netdev->hw_features |= NETIF_F_RXALL;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_RX_NTUPLE_FILTER)
+		netdev->hw_features |= NETIF_F_NTUPLE;
+	if (hw->feature_flags & RNPVF_NET_FEATURE_RX_FCS)
+		netdev->hw_features |= NETIF_F_RXFCS;
+
+	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+	netdev->hw_enc_features |= netdev->vlan_features;
+	netdev->mpls_features |= NETIF_F_HW_CSUM;
+
+	/* some fixed feature control by pf */
+	if (hw->pf_feature & PF_FEATURE_VLAN_FILTER)
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	if (hw->pf_feature & PF_NCSI_EN)
+		hw->feature_flags &= (~RNPVF_NET_FEATURE_VLAN_OFFLOAD);
+
+	if (hw->feature_flags & RNPVF_NET_FEATURE_VLAN_OFFLOAD) {
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_CTAG_TX;
+	}
+	if (hw->feature_flags & RNPVF_NET_FEATURE_STAG_OFFLOAD) {
+		netdev->features |= NETIF_F_HW_VLAN_STAG_RX |
+				    NETIF_F_HW_VLAN_STAG_TX;
+	}
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+	timer_setup(&adapter->watchdog_timer, rnpvf_watchdog, 0);
+	INIT_WORK(&adapter->watchdog_task, rnpvf_watchdog_task);
+
+	err = rnpvf_init_interrupt_scheme(adapter);
+	if (err)
+		goto err_sw_init;
+
+	err = register_mbx_irq(adapter);
+	if (err)
+		goto err_register;
+
+	if (fix_eth_name) {
+		strncpy(netdev->name, adapter->name,
+			sizeof(netdev->name) - 1);
+	} else {
+		strscpy(netdev->name, "eth%d", sizeof(netdev->name));
+	}
+	err = register_netdev(netdev);
+	if (err) {
+		rnpvf_err("register_netdev failed!\n");
+		dev_err(&pdev->dev,
+			"%s %s: vfnum:0x%x. register_netdev failed!\n",
+			adapter->name, pci_name(pdev), hw->vfnum);
+		goto err_register;
+	}
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	netif_carrier_off(netdev);
+	rnpvf_sysfs_init(netdev);
+
+	/* print the MAC address */
+	hw_dbg(hw, "%pM\n", netdev->dev_addr);
+
+	hw_dbg(hw, "Mucse(R) n10 Virtual Function\n");
+
+	return 0;
+err_register:
+	remove_mbx_irq(adapter);
+	rnpvf_clear_interrupt_scheme(adapter);
+err_sw_init:
+err_ioremap:
+	free_netdev(netdev);
+
+	dev_err(&pdev->dev, "%s failed. err:%d\n", __func__, err);
+	return err;
+}
+
+static int rnpvf_rm_adpater(struct rnpvf_adapter *adapter)
+{
+	struct net_device *netdev;
+
+	if (!adapter)
+		return -EINVAL;
+
+	rnpvf_info("= remove adapter:%s =\n", adapter->name);
+	netdev = adapter->netdev;
+
+	if (netdev) {
+		netif_carrier_off(netdev);
+		rnpvf_sysfs_exit(netdev);
+	}
+
+	set_bit(__RNPVF_REMOVE, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+	cancel_work_sync(&adapter->watchdog_task);
+
+	if (netdev) {
+		if (netdev->reg_state == NETREG_REGISTERED)
+			unregister_netdev(netdev);
+	}
+
+	remove_mbx_irq(adapter);
+	rnpvf_clear_interrupt_scheme(adapter);
+	rnpvf_reset_interrupt_capability(adapter);
+
+	free_netdev(netdev);
+
+	rnpvf_info("remove %s complete\n", adapter->name);
+
+	return 0;
+}
+
+/**
+ * rnpvf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in rnpvf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * rnpvf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int rnpvf_probe(struct pci_dev *pdev,
+		       const struct pci_device_id *ent)
+{
+	struct rnpvf_adapter *adapter = NULL;
+	const struct rnpvf_info *ii = rnpvf_info_tbl[ent->driver_data];
+	int err;
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(56)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56))) {
+		pci_using_hi_dma = 1;
+	} else {
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev,
+					"No usable DMA "
+					"configuration, aborting\n");
+				goto err_dma;
+			}
+		}
+		pci_using_hi_dma = 0;
+	}
+
+	err = pci_request_mem_regions(pdev, rnpvf_driver_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_selected_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+
+	err = rnpvf_add_adpater(pdev, ii, &adapter);
+	if (err) {
+		dev_err(&pdev->dev, "%s: failed to add adapter, err %d\n",
+			__func__, err);
+		goto err_regions;
+	}
+
+	return 0;
+
+err_regions:
+	pci_release_mem_regions(pdev);
+err_dma:
+err_pci_reg:
+	return err;
+}
+
+/**
+ * rnpvf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * rnpvf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  The could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void rnpvf_remove(struct pci_dev *pdev)
+{
+	struct rnpvf_adapter *adapter = pci_get_drvdata(pdev);
+
+	rnpvf_rm_adpater(adapter);
+	pci_release_mem_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * rnpvf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t rnpvf_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct rnpvf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		rnpvf_down(adapter);
+
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * rnpvf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the rnpvf_resume routine.
+ */
+static pci_ers_result_t rnpvf_io_slot_reset(struct pci_dev *pdev)
+{
+	struct rnpvf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+	if (pci_enable_device_mem(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+
+	rnpvf_reset(adapter);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * rnpvf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the rnpvf_resume routine.
+ */
+static void rnpvf_io_resume(struct pci_dev *pdev)
+{
+	struct rnpvf_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
+
+	if (netif_running(netdev))
+		rnpvf_up(adapter);
+
+	netif_device_attach(netdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static const struct pci_error_handlers rnpvf_err_handler = {
+	.error_detected = rnpvf_io_error_detected,
+	.slot_reset = rnpvf_io_slot_reset,
+	.resume = rnpvf_io_resume,
+};
+
+static struct pci_driver rnpvf_driver = {
+	.name = rnpvf_driver_name,
+	.id_table = rnpvf_pci_tbl,
+	.probe = rnpvf_probe,
+	.remove = rnpvf_remove,
+#ifdef CONFIG_PM
+	/* Power Management Hooks */
+	.suspend = rnpvf_suspend,
+	.resume = rnpvf_resume,
+#endif
+	.shutdown = rnpvf_shutdown,
+	.err_handler = &rnpvf_err_handler,
+};
+
+/**
+ * rnpvf_init_module - Driver Registration Routine
+ *
+ * rnpvf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init rnpvf_init_module(void)
+{
+	int ret;
+
+	pr_info("%s - version %s\n", rnpvf_driver_string,
+		rnpvf_driver_version);
+	pr_info("%s\n", rnpvf_copyright);
+
+	ret = pci_register_driver(&rnpvf_driver);
+	return ret;
+}
+
+module_init(rnpvf_init_module);
+
+/**
+ * rnpvf_exit_module - Driver Exit Cleanup Routine
+ *
+ * rnpvf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit rnpvf_exit_module(void)
+{
+	pci_unregister_driver(&rnpvf_driver);
+}
+
+module_exit(rnpvf_exit_module);
diff --git a/drivers/net/ethernet/mucse/rnpvf/sysfs.c b/drivers/net/ethernet/mucse/rnpvf/sysfs.c
new file mode 100644
index 0000000000000..90a59af0de5da
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/sysfs.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include 
+#include 
+#include 
+
+#include "rnpvf.h"
+
+#define to_net_device(n) container_of(n, struct net_device, dev)
+
+int rnpvf_sysfs_init(struct net_device *ndev)
+{
+	return 0;
+}
+
+void rnpvf_sysfs_exit(struct net_device *ndev)
+{
+}
diff --git a/drivers/net/ethernet/mucse/rnpvf/vf.c b/drivers/net/ethernet/mucse/rnpvf/vf.c
new file mode 100644
index 0000000000000..c332a19db84d7
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/vf.c
@@ -0,0 +1,849 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#include "vf.h"
+#include "rnpvf.h"
+
+static int rnpvf_reset_pf(struct rnpvf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = RNP_VF_RESET_PF;
+
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 2, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 2, false);
+
+	return ret_val;
+}
+
+static int rnpvf_get_mtu(struct rnpvf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = RNP_VF_GET_MTU;
+
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 2, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 2, false);
+
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+
+	/* if the PF nacked the request, report failure */
+	if (!ret_val &&
+	    (msgbuf[0] == (RNP_VF_SET_MTU | RNP_VT_MSGTYPE_NACK)))
+		return -1;
+	hw->mtu = msgbuf[1];
+
+	return ret_val;
+}
+
+static int rnpvf_set_mtu(struct rnpvf_hw *hw, int mtu)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = RNP_VF_SET_MTU;
+	msgbuf[1] = mtu;
+
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 2, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 2, false);
+
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+
+	/* if the PF nacked the request, report failure */
+	if (!ret_val &&
+	    (msgbuf[0] == (RNP_VF_SET_MTU | RNP_VT_MSGTYPE_NACK)))
+		return -1;
+
+	return ret_val;
+}
+
+static int rnpvf_read_eth_reg(struct rnpvf_hw *hw, int reg, u32 *value)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	int err;
+
+	msgbuf[0] = RNP_VF_REG_RD;
+	msgbuf[1] = reg;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2, false);
+	if (err)
+		goto mbx_err;
+
+	err = mbx->ops.read_posted(hw, msgbuf, 2, false);
+	if (err)
+		goto mbx_err;
+
+	/* remove extra bits from the message */
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+	msgbuf[0] &= ~(0xFF << RNP_VT_MSGINFO_SHIFT);
+
+	if (msgbuf[0] != (RNP_VF_REG_RD | RNP_VT_MSGTYPE_ACK))
+		err = RNP_ERR_INVALID_ARGUMENT;
+
+	*value = msgbuf[1];
+
+mbx_err:
+	return err;
+}
+
+/**
+ *  rnpvf_start_hw_vf - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware by clearing the adapter_stopped flag so the rest
+ *  of the driver is allowed to program the device
+ **/
+static s32 rnpvf_start_hw_vf(struct rnpvf_hw *hw)
+{
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = false;
+
+	return 0;
+}
+
+/**
+ *  rnpvf_init_hw_vf - virtual function hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by resetting the hardware and then starting
+ *  the hardware
+ **/
+static s32 rnpvf_init_hw_vf(struct rnpvf_hw *hw)
+{
+	s32 status;
+
+	status = hw->mac.ops.start_hw(hw);
+
+	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+	return status;
+}
+
+/**
+ *  rnpvf_reset_hw_vf - Performs hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks and
+ *  clears all interrupts.
+ **/
+static s32 rnpvf_reset_hw_vf(struct rnpvf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	struct rnpvf_adapter *adapter = hw->back;
+	s32 ret_val = RNP_ERR_INVALID_MAC_ADDR;
+	u32 msgbuf[RNP_VF_PERMADDR_MSG_LEN];
+	u8 *addr = (u8 *)(&msgbuf[1]);
+	u32 vlan;
+	int try_cnt = 10;
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	hw->mac.ops.stop_adapter(hw);
+
+	/* reset the api version */
+	hw->api_version = 0;
+
+	/* mailbox timeout can now become active */
+	mbx->timeout = RNP_VF_MBX_INIT_TIMEOUT;
+
+	while (try_cnt--) {
+		msgbuf[0] = RNP_VF_RESET;
+		mbx->ops.write_posted(hw, msgbuf, 1, false);
+		/* ack write back maybe too fast */
+		mdelay(20);
+
+		/* set our "perm_addr" based on info provided by PF */
+		/* also set up the mc_filter_type which is piggy backed
+		 * on the mac address in word 3
+		 */
+		ret_val = mbx->ops.read_posted(
+			hw, msgbuf, RNP_VF_PERMADDR_MSG_LEN, false);
+		if (ret_val == 0)
+			break;
+	}
+	if (ret_val)
+		return ret_val;
+
+	/* New versions of the PF may NACK the reset return message
+	 * to indicate that no MAC address has yet been assigned for
+	 * the VF.
+	 */
+	if (msgbuf[0] != (RNP_VF_RESET | RNP_VT_MSGTYPE_ACK) &&
+	    msgbuf[0] != (RNP_VF_RESET | RNP_VT_MSGTYPE_NACK))
+		return RNP_ERR_INVALID_MAC_ADDR;
+	/* we get mac address from mailbox */
+
+	memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
+	hw->mac.mc_filter_type = msgbuf[RNP_VF_MC_TYPE_WORD] & 0xff;
+
+	/* ft padding */
+	if ((msgbuf[RNP_VF_MC_TYPE_WORD] >> 8) & 0xff)
+		adapter->priv_flags |= RNPVF_PRIV_FLAG_FT_PADDING;
+	else
+		adapter->priv_flags = 0;
+	/* fc mode */
+	hw->fc.current_mode = (msgbuf[RNP_VF_MC_TYPE_WORD] >> 16) & 0xff;
+
+	/* phy status */
+	hw->phy_type = (msgbuf[RNP_VF_PHY_TYPE_WORD] & 0xffff);
+
+	hw->dma_version = hw->mac.dma_version =
+		msgbuf[RNP_VF_DMA_VERSION_WORD];
+
+	/* vlan status */
+	vlan = msgbuf[RNP_VF_VLAN_WORD];
+	if (vlan & 0xffff) {
+		adapter->vf_vlan = vlan & 0xffff;
+		adapter->flags |= RNPVF_FLAG_PF_SET_VLAN;
+	}
+	hw->ops.set_veb_vlan(hw, vlan, VFNUM(mbx, hw->vfnum));
+	hw->fw_version = msgbuf[RNP_VF_FW_VERSION_WORD];
+
+	if (msgbuf[RNP_VF_LINK_STATUS_WORD] & RNP_PF_LINK_UP) {
+		hw->link = true;
+		hw->speed = msgbuf[RNP_VF_LINK_STATUS_WORD] & 0xffff;
+	} else {
+		hw->link = false;
+		hw->speed = 0;
+	}
+
+	hw->usecstocount = msgbuf[RNP_VF_AXI_MHZ];
+
+	DPRINTK(PROBE, INFO, "dma_version:%x vlan %d\n",
+		hw->mac.dma_version, adapter->vf_vlan);
+	DPRINTK(PROBE, INFO, "axi:%x\n", hw->usecstocount);
+	DPRINTK(PROBE, INFO, "firmware:%x\n", hw->fw_version);
+	DPRINTK(PROBE, INFO, "link speed:%x\n", hw->speed);
+	DPRINTK(PROBE, INFO, "link status:%s\n",
+		hw->link ? "up" : "down");
+	hw->pf_feature = msgbuf[RNP_VF_FEATURE];
+
+	return 0;
+}
+
+/**
+ *  rnpvf_stop_hw_vf - Generic stop Tx/Rx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within rnpvf_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+static s32 rnpvf_stop_hw_vf(struct rnpvf_hw *hw)
+{
+	struct rnpvf_adapter *adapter = hw->back;
+	struct rnpvf_ring *ring;
+	u16 i;
+
+	/*
+	 * Set the adapter_stopped flag so other driver functions stop touching
+	 * the hardware
+	 */
+	hw->adapter_stopped = true;
+
+	/* Disable the receive unit by stopping each queue */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		ring = adapter->rx_ring[i];
+		ring_wr32(ring, RNP_DMA_RX_START, 0);
+	}
+
+	/* The transmit queues are not touched here. */
+
+	return 0;
+}
+
+/**
+ *  rnpvf_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts the 12 bits, from a multicast address, to determine which
+ *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ *  incoming rx multicast addresses, to determine the bit-vector to check in
+ *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
+ *  by the MO field of the MCSTCTRL. The MO field is set during initialization
+ *  to mc_filter_type.
+ **/
+static s32 rnpvf_mta_vector(struct rnpvf_hw *hw, u8 *mc_addr)
+{
+	u32 vector = 0;
+
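+	/* mc_filter_type selects which address bits feed the hash: types
+	 * 0-3 use the last two address bytes, types 4-7 the first two.
+	 */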
+	switch (hw->mac.mc_filter_type) {
+	case 0: /* use bits [47:36] of the address */
+		vector = ((mc_addr[4] << 8) | (((u16)mc_addr[5])));
+		break;
+	case 1: /* use bits [46:35] of the address */
+		vector = ((mc_addr[4] << 7) | (((u16)mc_addr[5]) >> 1));
+		break;
+	case 2: /* use bits [45:34] of the address */
+		vector = ((mc_addr[4] << 6) | (((u16)mc_addr[5]) >> 2));
+		break;
+	case 3: /* use bits [43:32] of the address */
+		vector = ((mc_addr[4]) << 4 | (((u16)mc_addr[5]) >> 4));
+		break;
+	case 4: /* use the first two address bytes, shifted right by 4 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 4);
+		break;
+	case 5: /* use the first two address bytes, shifted right by 3 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 3);
+		break;
+	case 6: /* use the first two address bytes, shifted right by 2 */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		vector = (vector >> 2);
+		break;
+	case 7: /* use the first two address bytes unshifted */
+		vector = ((mc_addr[0] << 8) | (((u16)mc_addr[1])));
+		break;
+	default: /* Invalid mc_filter_type */
+		break;
+	}
+
+	/* vector can only be 12-bits or boundary will be exceeded */
+	vector &= 0xFFF;
+	return vector;
+}
+
+/**
+ *  rnpvf_get_mac_addr_vf - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *  @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 rnpvf_get_mac_addr_vf(struct rnpvf_hw *hw, u8 *mac_addr)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val = 0;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	/* Query the PF over the mailbox; the reply carries the VF MAC
+	 * address in the words following the message type.
+	 */
+	msgbuf[0] |= RNP_VF_SET_MACVLAN;
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 1, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, false);
+
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+
+	if (!ret_val)
+		if (msgbuf[0] ==
+		    (RNP_VF_GET_MACVLAN | RNP_VT_MSGTYPE_NACK))
+			ret_val = -ENOMEM;
+
+	memcpy(mac_addr, msg_addr, ETH_ALEN);
+
+	return ret_val;
+}
+
+/**
+ *  rnpvf_get_queues_vf - Read queue configuration from the PF
+ *  @hw: pointer to the HW structure
+ **/
+static s32 rnpvf_get_queues_vf(struct rnpvf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = 0;
+	u32 msgbuf[7];
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] |= RNP_VF_GET_QUEUE;
+
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 1, false);
+
+	mdelay(10);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 7, false);
+
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+
+	if (!ret_val)
+		if (msgbuf[0] == (RNP_VF_GET_QUEUE | RNP_VT_MSGTYPE_NACK))
+			ret_val = -ENOMEM;
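+
+	/* Reply layout: word 1 = max tx queues, word 2 = max rx queues,
+	 * word 5 = ring base, word 6 = ring depths (tx in the high 16 bits,
+	 * rx in the low 16 bits).
+	 */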
+#define MSG_TX_NUM_WORD 1
+#define MSG_RX_NUM_WORD 2
+#define MSG_RING_BASE_WORD 5
+#define MSG_RING_DEPTH 6
+
+	hw->queue_ring_base = msgbuf[MSG_RING_BASE_WORD];
+	hw->mac.max_tx_queues = msgbuf[MSG_TX_NUM_WORD];
+	hw->mac.max_rx_queues = msgbuf[MSG_RX_NUM_WORD];
+	hw->tx_items_count = 0xffff & (msgbuf[MSG_RING_DEPTH] >> 16);
+	hw->rx_items_count = 0xffff & (msgbuf[MSG_RING_DEPTH] >> 0);
+
+	return ret_val;
+}
+
+static s32 rnpvf_set_uc_addr_vf(struct rnpvf_hw *hw, u32 index, u8 *addr)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val = 0;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	/*
+	 * If index is one then this is the start of a new list and needs
+	 * indication to the PF so it can do its own list management.
+	 * If it is zero then that tells the PF to just clear all of
+	 * this VF's macvlans and there is no new list.
+	 */
+	msgbuf[0] |= index << RNP_VT_MSGINFO_SHIFT;
+	msgbuf[0] |= RNP_VF_SET_MACVLAN;
+	if (addr)
+		memcpy(msg_addr, addr, ETH_ALEN);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, false);
+
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+
+	if (!ret_val)
+		if (msgbuf[0] ==
+		    (RNP_VF_SET_MACVLAN | RNP_VT_MSGTYPE_NACK))
+			ret_val = -ENOMEM;
+	return ret_val;
+}
+
+/**
+ *  rnpvf_set_rar_vf - set device MAC address
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: Unused in this implementation
+ **/
+static s32 rnpvf_set_rar_vf(struct rnpvf_hw *hw, u32 index, u8 *addr,
+			    u32 vmdq)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[3];
+	u8 *msg_addr = (u8 *)(&msgbuf[1]);
+	s32 ret_val;
+
+	memset(msgbuf, 0, sizeof(msgbuf));
+	msgbuf[0] = RNP_VF_SET_MAC_ADDR;
+	memcpy(msg_addr, addr, ETH_ALEN);
+	ret_val = mbx->ops.write_posted(hw, msgbuf, 3, false);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, msgbuf, 3, false);
+
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+
+	/* if nacked the address was rejected, use "perm_addr" */
+	if (!ret_val &&
+	    (msgbuf[0] == (RNP_VF_SET_MAC_ADDR | RNP_VT_MSGTYPE_NACK))) {
+		rnpvf_get_mac_addr_vf(hw, hw->mac.addr);
+		return -1;
+	}
+
+	return ret_val;
+}
+
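+/* Write a mailbox message and consume the PF acknowledgement; the reply
+ * payload itself is discarded.
+ */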
+static void rnpvf_write_msg_read_ack(struct rnpvf_hw *hw, u32 *msg,
+				     u16 size)
+{
+	u32 retmsg[RNP_VFMAILBOX_SIZE];
+	s32 retval;
+	struct rnp_mbx_info *mbx = &hw->mbx;
+
+	retval = mbx->ops.write_posted(hw, msg, size, false);
+	if (!retval)
+		mbx->ops.read_posted(hw, retmsg, size, false);
+}
+
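+/* Return the current multicast address and advance *mc_addr_ptr to the
+ * next entry in the netdev list (NULL once the end is reached).
+ */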
+u8 *rnpvf_addr_list_itr(struct rnpvf_hw __maybe_unused *hw,
+			u8 **mc_addr_ptr)
+{
+	struct netdev_hw_addr *mc_ptr;
+	u8 *addr = *mc_addr_ptr;
+
+	mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]);
+	if (mc_ptr->list.next) {
+		struct netdev_hw_addr *ha;
+
+		ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr,
+				list);
+		*mc_addr_ptr = ha->addr;
+	} else {
+		*mc_addr_ptr = NULL;
+	}
+
+	return addr;
+}
+
+/**
+ *  rnpvf_update_mc_addr_list_vf - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @netdev: pointer to net device structure
+ *
+ *  Updates the Multicast Table Array.
+ **/
+static s32 rnpvf_update_mc_addr_list_vf(struct rnpvf_hw *hw,
+					struct net_device *netdev)
+{
+	struct netdev_hw_addr *ha;
+	u32 msgbuf[RNP_VFMAILBOX_SIZE];
+	u16 *vector_list = (u16 *)&msgbuf[1];
+	u32 cnt, i;
+	int addr_count = 0;
+	u8 *addr_list = NULL;
+
+	/* Each entry in the list uses 1 16 bit word.  We have 30
+	 * 16 bit words available in our HW msg buffer (minus 1 for the
+	 * msg type).  That's 30 hash values if we pack 'em right.  If
+	 * there are more than 30 MC addresses to add then punt the
+	 * extras for now and then add code to handle more than 30 later.
+	 * It would be unusual for a server to request that many multi-cast
+	 * addresses except for in large enterprise network environments.
+	 */
+
+	cnt = netdev_mc_count(netdev);
+	if (cnt > 30)
+		cnt = 30;
+	msgbuf[0] = RNP_VF_SET_MULTICAST;
+	msgbuf[0] |= cnt << RNP_VT_MSGINFO_SHIFT;
+
+	/* never walk more than the clamped count or vector_list overflows */
+	addr_count = cnt;
+	if (addr_count) {
+		ha = list_first_entry(&netdev->mc.list,
+				      struct netdev_hw_addr, list);
+		addr_list = ha->addr;
+	}
+	for (i = 0; i < addr_count; i++) {
+		vector_list[i] = rnpvf_mta_vector(
+			hw, rnpvf_addr_list_itr(hw, &addr_list));
+	}
+
+	rnpvf_write_msg_read_ack(hw, msgbuf, RNP_VFMAILBOX_SIZE);
+
+	return 0;
+}
+
+/**
+ *  rnpvf_set_vfta_vf - Set/Unset vlan filter table address
+ *  @hw: pointer to the HW structure
+ *  @vlan: 12 bit VLAN ID
+ *  @vind: unused by VF drivers
+ *  @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 rnpvf_set_vfta_vf(struct rnpvf_hw *hw, u32 vlan, u32 vind,
+			     bool vlan_on)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 err;
+
+	msgbuf[0] = RNP_VF_SET_VLAN;
+	msgbuf[1] = vlan;
+	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+	msgbuf[0] |= vlan_on << RNP_VT_MSGINFO_SHIFT;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2, false);
+	if (err) {
+		pr_err("vlan write_posted failed\n");
+		goto mbx_err;
+	}
+
+	err = mbx->ops.read_posted(hw, msgbuf, 2, false);
+	if (err) {
+		pr_err("vlan read_posted failed\n");
+		goto mbx_err;
+	}
+
+	/* remove extra bits from the message */
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+	msgbuf[0] &= ~(0xFF << RNP_VT_MSGINFO_SHIFT);
+
+	if (msgbuf[0] != (RNP_VF_SET_VLAN | RNP_VT_MSGTYPE_ACK))
+		err = RNP_ERR_INVALID_ARGUMENT;
+
+mbx_err:
+	return err;
+}
+
+static s32 rnpvf_set_vlan_strip(struct rnpvf_hw *hw, bool vlan_on)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	struct rnpvf_adapter *adapter = (struct rnpvf_adapter *)hw->back;
+	u32 msgbuf[4];
+	s32 err;
+	int i;
+
+	if (adapter->num_rx_queues > 2) {
+		err = -EINVAL;
+		goto mbx_err;
+	}
+
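+	/* msgbuf[1]: bit 31 = strip on/off, low bits = rx queue count; the
+	 * following words carry each rx ring's queue index.
+	 */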
+	msgbuf[0] = RNP_VF_SET_VLAN_STRIP;
+	msgbuf[1] = ((u32)vlan_on << 31) | adapter->num_rx_queues;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		msgbuf[2 + i] = adapter->rx_ring[i]->rnpvf_queue_idx;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2 + adapter->num_rx_queues,
+				    false);
+	if (err)
+		goto mbx_err;
+
+	err = mbx->ops.read_posted(hw, msgbuf, 1, false);
+	if (err)
+		goto mbx_err;
+
+	/* remove extra bits from the message */
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+	msgbuf[0] &= ~(0xFF << RNP_VT_MSGINFO_SHIFT);
+
+	if (msgbuf[0] != (RNP_VF_SET_VLAN_STRIP | RNP_VT_MSGTYPE_ACK))
+		err = RNP_ERR_INVALID_ARGUMENT;
+
+mbx_err:
+	return err;
+}
+
+/**
+ *  rnpvf_setup_mac_link_vf - Setup MAC link settings
+ *  @hw: pointer to hardware structure
+ *  @speed: Unused in this implementation
+ *  @autoneg: Unused in this implementation
+ *  @autoneg_wait_to_complete: Unused in this implementation
+ *
+ *  Do nothing and return success.  VF drivers are not allowed to change
+ *  global settings.  Maintained for driver compatibility.
+ **/
+static s32 rnpvf_setup_mac_link_vf(struct rnpvf_hw *hw,
+				   rnp_link_speed speed, bool autoneg,
+				   bool autoneg_wait_to_complete)
+{
+	return 0;
+}
+
+/**
+ *  rnpvf_check_mac_link_vf - Get link/speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: true if link is up, false otherwise
+ *  @autoneg_wait_to_complete: Unused in this implementation
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+static s32 rnpvf_check_mac_link_vf(struct rnpvf_hw *hw,
+				   rnp_link_speed *speed, bool *link_up,
+				   bool autoneg_wait_to_complete)
+{
+	*speed = hw->speed;
+	*link_up = hw->link;
+
+	return 0;
+}
+
+/**
+ *  rnpvf_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ **/
+void rnpvf_rlpml_set_vf(struct rnpvf_hw *hw, u16 max_size)
+{
+	u32 msgbuf[2];
+
+	msgbuf[0] = RNP_VF_SET_LPE;
+	msgbuf[1] = max_size;
+	rnpvf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ *  rnpvf_negotiate_api_version - Negotiate supported API version
+ *  @hw: pointer to the HW structure
+ *  @api: integer containing requested API version
+ **/
+int rnpvf_negotiate_api_version(struct rnpvf_hw *hw, int api)
+{
+	return 0;
+}
+
+int rnpvf_get_queues(struct rnpvf_hw *hw, unsigned int *num_tcs,
+		     unsigned int *default_tc)
+{
+	return -1;
+}
+
+void rnpvf_set_veb_mac_n10(struct rnpvf_hw *hw, u8 *mac, u32 vfnum,
+			   u32 ring)
+{
+	int port;
+	u32 maclow, machi;
+
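+	/* machi holds the high 16 bits of the MAC, maclow the low 32 bits;
+	 * program the VEB MAC and VF ring tables of all four ports.
+	 */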
+	maclow = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
+	machi = (mac[0] << 8) | mac[1];
+	for (port = 0; port < 4; port++) {
+		wr32(hw, RNP_DMA_PORT_VBE_MAC_LO_TBL_N10(port, vfnum),
+		     maclow);
+		wr32(hw, RNP_DMA_PORT_VBE_MAC_HI_TBL_N10(port, vfnum),
+		     machi);
+
+		wr32(hw, RNP_DMA_PORT_VEB_VF_RING_TBL_N10(port, vfnum),
+		     ring);
+	}
+}
+
+void rnpvf_set_vlan_n10(struct rnpvf_hw *hw, u16 vid, u32 vf_num)
+{
+	int port;
+
+	for (port = 0; port < 4; port++)
+		wr32(hw, RNP_DMA_PORT_VEB_VID_TBL_N10(port, vf_num), vid);
+}
+
+int rnpvf_set_promisc_mode(struct rnpvf_hw *hw, bool promisc)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 err;
+
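+	/* msgbuf[1] carries the requested state: 1 asks the PF to enable
+	 * promiscuous mode for this VF, 0 to disable it.
+	 */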
+	msgbuf[0] = RNP_VF_SET_PROMISCE;
+	if (promisc)
+		msgbuf[1] = 1;
+	else
+		msgbuf[1] = 0;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2, false);
+	if (err) {
+		pr_err("promisc write_posted failed\n");
+		goto mbx_err;
+	}
+
+	err = mbx->ops.read_posted(hw, msgbuf, 2, false);
+	if (err) {
+		pr_err("promisc read_posted failed\n");
+		goto mbx_err;
+	}
+
+	/* remove extra bits from the message */
+	msgbuf[0] &= ~RNP_VT_MSGTYPE_CTS;
+	msgbuf[0] &= ~(0xFF << RNP_VT_MSGINFO_SHIFT);
+
+	if (msgbuf[0] != (RNP_VF_SET_PROMISCE | RNP_VT_MSGTYPE_ACK)) {
+		err = RNP_ERR_INVALID_ARGUMENT;
+		pr_err("set promisc failed\n");
+	}
+
+mbx_err:
+	return err;
+}
+
+static const struct rnpvf_hw_operations rnpvf_hw_ops_n10 = {
+	.set_veb_mac = rnpvf_set_veb_mac_n10,
+	.set_veb_vlan = rnpvf_set_vlan_n10,
+};
+
+static s32 rnpvf_get_invariants_n10(struct rnpvf_hw *hw)
+{
+	struct rnp_mbx_info *mbx = &hw->mbx;
+#ifdef FIX_MAC_PADDIN
+	struct rnpvf_adapter *adapter = (struct rnpvf_adapter *)hw->back;
+#endif
+
+	hw->feature_flags |=
+		RNPVF_NET_FEATURE_SG | RNPVF_NET_FEATURE_TX_CHECKSUM |
+		RNPVF_NET_FEATURE_RX_CHECKSUM | RNPVF_NET_FEATURE_TSO |
+		RNPVF_NET_FEATURE_TX_UDP_TUNNEL |
+		RNPVF_NET_FEATURE_VLAN_OFFLOAD | RNPVF_NET_FEATURE_RX_HASH;
+
+	/* mbx setup */
+	mbx->pf2vf_mbox_vec_base = 0xa5000;
+	mbx->vf2pf_mbox_vec_base = 0xa5100;
+	mbx->cpu2vf_mbox_vec_base = 0xa5200;
+	mbx->cpu2pf_mbox_vec = 0xa5300;
+	mbx->pf_vf_shm_base = 0xa6000;
+	mbx->cpu_vf_shm_base = 0xa8000;
+	mbx->vf2cpu_mbox_ctrl_base = 0xa9000;
+	mbx->cpu_vf_mbox_mask_lo_base = 0xa9200;
+	mbx->cpu_vf_mbox_mask_hi_base = 0xa9300;
+	mbx->mbx_mem_size = 64;
+
+	mbx->vf2pf_mbox_ctrl_base = 0xa7000;
+	mbx->pf2vf_mbox_ctrl_base = 0xa7100;
+	mbx->pf_vf_mbox_mask_lo = 0xa7200;
+	mbx->pf_vf_mbox_mask_hi = 0xa7300;
+
+	mbx->cpu_pf_shm_base = 0xaa000;
+	mbx->pf2cpu_mbox_ctrl = 0xaa100;
+	mbx->pf2cpu_mbox_mask = 0xaa300;
+
+	mbx->vf_num_mask = 0x3f;
+
+	hw->min_length = RNPVF_MIN_MTU;
+	hw->max_length = RNPVF_N10_MAX_JUMBO_FRAME_SIZE;
+
+#ifdef FIX_MAC_PADDIN
+	adapter->priv_flags |= RNPVF_PRIV_FLAG_TX_PADDING;
+#endif
+
+	memcpy(&hw->ops, &rnpvf_hw_ops_n10, sizeof(hw->ops));
+
+	return 0;
+}
+
+static const struct rnp_mac_operations rnpvf_mac_ops = {
+	.init_hw = rnpvf_init_hw_vf,
+	.reset_hw = rnpvf_reset_hw_vf,
+	.start_hw = rnpvf_start_hw_vf,
+	.get_mac_addr = rnpvf_get_mac_addr_vf,
+	.get_queues = rnpvf_get_queues_vf,
+	.stop_adapter = rnpvf_stop_hw_vf,
+	.setup_link = rnpvf_setup_mac_link_vf,
+	.check_link = rnpvf_check_mac_link_vf,
+	.set_rar = rnpvf_set_rar_vf,
+	.update_mc_addr_list = rnpvf_update_mc_addr_list_vf,
+	.set_uc_addr = rnpvf_set_uc_addr_vf,
+	.set_vfta = rnpvf_set_vfta_vf,
+	.set_vlan_strip = rnpvf_set_vlan_strip,
+	.read_eth_reg = rnpvf_read_eth_reg,
+	.get_mtu = rnpvf_get_mtu,
+	.set_mtu = rnpvf_set_mtu,
+	.req_reset_pf = rnpvf_reset_pf,
+	.set_promisc_mode = rnpvf_set_promisc_mode,
+};
+
+const struct rnpvf_info rnp_n10_vf_info = {
+	.mac = rnp_mac_2port_40G,
+	.mac_ops = &rnpvf_mac_ops,
+	.board_type = rnp_board_n10,
+	.get_invariants = &rnpvf_get_invariants_n10,
+};
diff --git a/drivers/net/ethernet/mucse/rnpvf/vf.h b/drivers/net/ethernet/mucse/rnpvf/vf.h
new file mode 100644
index 0000000000000..ca84f37e862e7
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpvf/vf.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 - 2024 Mucse Corporation. */
+
+#ifndef __RNP_VF_H__
+#define __RNP_VF_H__
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct rnpvf_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8 *(*rnp_mc_addr_itr)(struct rnpvf_hw *hw, u8 **mc_addr_ptr,
+			       u32 *vmdq);
+struct rnp_mac_operations {
+	s32 (*init_hw)(struct rnpvf_hw *);
+	s32 (*reset_hw)(struct rnpvf_hw *);
+	s32 (*start_hw)(struct rnpvf_hw *);
+	s32 (*clear_hw_cntrs)(struct rnpvf_hw *);
+	enum rnp_media_type (*get_media_type)(struct rnpvf_hw *);
+	u32 (*get_supported_physical_layer)(struct rnpvf_hw *);
+	s32 (*get_mac_addr)(struct rnpvf_hw *, u8 *);
+	s32 (*get_queues)(struct rnpvf_hw *);
+	s32 (*stop_adapter)(struct rnpvf_hw *);
+	s32 (*get_bus_info)(struct rnpvf_hw *);
+	int (*read_eth_reg)(struct rnpvf_hw *, int, u32 *);
+	int (*get_mtu)(struct rnpvf_hw *);
+	int (*set_mtu)(struct rnpvf_hw *, int);
+	int (*req_reset_pf)(struct rnpvf_hw *);
+	/* Link */
+	s32 (*setup_link)(struct rnpvf_hw *, rnp_link_speed, bool, bool);
+	s32 (*check_link)(struct rnpvf_hw *, rnp_link_speed *, bool *,
+			  bool);
+	s32 (*get_link_capabilities)(struct rnpvf_hw *, rnp_link_speed *,
+				     bool *);
+	/* RAR, Multicast, VLAN */
+	s32 (*set_rar)(struct rnpvf_hw *, u32, u8 *, u32);
+	s32 (*set_uc_addr)(struct rnpvf_hw *, u32, u8 *);
+	s32 (*init_rx_addrs)(struct rnpvf_hw *);
+	s32 (*update_mc_addr_list)(struct rnpvf_hw *, struct net_device *);
+	s32 (*enable_mc)(struct rnpvf_hw *);
+	s32 (*disable_mc)(struct rnpvf_hw *);
+	s32 (*clear_vfta)(struct rnpvf_hw *);
+	s32 (*set_vfta)(struct rnpvf_hw *, u32, u32, bool);
+	s32 (*set_vlan_strip)(struct rnpvf_hw *, bool);
+	s32 (*set_promisc_mode)(struct rnpvf_hw *, bool);
+};
+
+enum rnp_mac_type {
+	rnp_mac_unknown = 0,
+	rnp_mac_2port_10G,
+	rnp_mac_2port_40G,
+	rnp_mac_4port_10G,
+	rnp_mac_8port_10G,
+	rnp_num_macs
+};
+
+enum rnp_board_type {
+	rnp_board_n10,
+};
+
+struct rnp_mac_info {
+	struct rnp_mac_operations ops;
+	u8 addr[6];
+	u8 perm_addr[6];
+	enum rnp_mac_type type;
+	s32 mc_filter_type;
+	u32 dma_version;
+	bool get_link_status;
+	u32 max_tx_queues;
+	u32 max_rx_queues;
+	u32 max_msix_vectors;
+};
+
+#define RNP_MAX_TRAFFIC_CLASS 4
+enum rnp_fc_mode {
+	rnp_fc_none = 0,
+	rnp_fc_rx_pause,
+	rnp_fc_tx_pause,
+	rnp_fc_full,
+	rnp_fc_default
+};
+
+struct rnp_fc_info {
+	u32 high_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
+	u32 low_water[RNP_MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
+	u16 pause_time; /* Flow Control Pause timer */
+	bool send_xon; /* Flow control send XON */
+	bool strict_ieee; /* Strict IEEE mode */
+	bool disable_fc_autoneg; /* Do not autonegotiate FC */
+	bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+	enum rnp_fc_mode current_mode; /* FC mode in effect */
+	enum rnp_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct rnp_mbx_operations {
+	s32 (*init_params)(struct rnpvf_hw *hw);
+	s32 (*read)(struct rnpvf_hw *, u32 *, u16, bool);
+	s32 (*write)(struct rnpvf_hw *, u32 *, u16, bool);
+	s32 (*read_posted)(struct rnpvf_hw *, u32 *, u16, bool);
+	s32 (*write_posted)(struct rnpvf_hw *, u32 *, u16, bool);
+	s32 (*check_for_msg)(struct rnpvf_hw *, bool);
+	s32 (*check_for_ack)(struct rnpvf_hw *, bool);
+	s32 (*check_for_rst)(struct rnpvf_hw *, bool);
+	s32 (*configure)(struct rnpvf_hw *hw, int nr_vec, bool enable);
+};
+
+struct rnpvf_hw_operations {
+	void (*set_veb_mac)(struct rnpvf_hw *hw, u8 *, u32, u32);
+	void (*set_veb_vlan)(struct rnpvf_hw *hw, u16, u32);
+};
+
+struct rnp_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct rnp_mbx_info {
+	struct rnp_mbx_operations ops;
+	struct rnp_mbx_stats stats;
+	u32 timeout;
+	u32 udelay;
+	u32 v2p_mailbox;
+	u16 size;
+	u16 pf_req;
+	u16 pf_ack;
+	u16 cpu_req;
+	u16 cpu_ack;
+	u32 vf_num_mask;
+	int mbx_size;
+	int mbx_mem_size;
+	/* cm3 <-> pf mbx */
+	u32 cpu_pf_shm_base;
+	u32 pf2cpu_mbox_ctrl;
+	u32 pf2cpu_mbox_mask;
+	u32 cpu_pf_mbox_mask;
+	u32 cpu2pf_mbox_vec;
+	/* cm3 <-> vf mbx */
+	u32 cpu_vf_shm_base;
+	u32 cpu2vf_mbox_vec_base;
+	u32 cpu_vf_mbox_mask_lo_base;
+	u32 cpu_vf_mbox_mask_hi_base;
+	/* pf <--> vf mbx */
+	u32 pf_vf_shm_base;
+	u32 vf2cpu_mbox_ctrl_base;
+	u32 pf2vf_mbox_ctrl_base;
+	u32 pf_vf_mbox_mask_lo;
+	u32 pf_vf_mbox_mask_hi;
+	u32 pf2vf_mbox_vec_base;
+	u32 vf2pf_mbox_vec_base;
+	u32 vf2pf_mbox_ctrl_base;
+};
+
+struct rnpvf_hw_stats_own {
+	u64 vlan_add_cnt;
+	u64 vlan_strip_cnt;
+	u64 csum_err;
+	u64 csum_good;
+	u64 spoof_dropped;
+};
+
+struct rnpvf_hw_stats {
+	u64 base_vfgprc;
+	u64 base_vfgptc;
+	u64 base_vfgorc;
+	u64 base_vfgotc;
+	u64 base_vfmprc;
+	u64 last_vfgprc;
+	u64 last_vfgptc;
+	u64 last_vfgorc;
+	u64 last_vfgotc;
+	u64 last_vfmprc;
+	u64 vfgprc;
+	u64 vfgptc;
+	u64 vfgorc;
+	u64 vfgotc;
+	u64 vfmprc;
+	u64 saved_reset_vfgprc;
+	u64 saved_reset_vfgptc;
+	u64 saved_reset_vfgorc;
+	u64 saved_reset_vfgotc;
+	u64 saved_reset_vfmprc;
+};
+
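+/* Board description used at probe time: MAC/board type, the MAC ops
+ * table and the routine that fills in board invariants (feature flags,
+ * mailbox layout, MTU limits).
+ */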
+struct rnpvf_info {
+	enum rnp_mac_type mac;
+	enum rnp_board_type board_type;
+	const struct rnp_mac_operations *mac_ops;
+	s32 (*get_invariants)(struct rnpvf_hw *);
+};
+
+void rnpvf_rlpml_set_vf(struct rnpvf_hw *hw, u16 max_size);
+#endif /* __RNP_VF_H__ */