diff --git a/target/linux/qualcommax/patches-6.6/0054-v6.8-arm64-dts-qcom-ipq6018-use-CPUFreq-NVMEM.patch b/target/linux/qualcommax/patches-6.6/0054-v6.8-arm64-dts-qcom-ipq6018-use-CPUFreq-NVMEM.patch index 6198e24f38b8ee..fa73672c346707 100644 --- a/target/linux/qualcommax/patches-6.6/0054-v6.8-arm64-dts-qcom-ipq6018-use-CPUFreq-NVMEM.patch +++ b/target/linux/qualcommax/patches-6.6/0054-v6.8-arm64-dts-qcom-ipq6018-use-CPUFreq-NVMEM.patch @@ -46,28 +46,28 @@ Signed-off-by: Bjorn Andersson opp-1320000000 { opp-hz = /bits/ 64 <1320000000>; opp-microvolt = <862500>; -+ opp-supported-hw = <0x3>; ++ opp-supported-hw = <0xf>; clock-latency-ns = <200000>; }; opp-1440000000 { opp-hz = /bits/ 64 <1440000000>; opp-microvolt = <925000>; -+ opp-supported-hw = <0x3>; ++ opp-supported-hw = <0xf>; clock-latency-ns = <200000>; }; opp-1608000000 { opp-hz = /bits/ 64 <1608000000>; opp-microvolt = <987500>; -+ opp-supported-hw = <0x1>; ++ opp-supported-hw = <0xf>; clock-latency-ns = <200000>; }; opp-1800000000 { opp-hz = /bits/ 64 <1800000000>; opp-microvolt = <1062500>; -+ opp-supported-hw = <0x1>; ++ opp-supported-hw = <0xf>; clock-latency-ns = <200000>; }; }; diff --git a/target/linux/qualcommax/patches-6.6/0060-v6.9-clk-qcom-gcc-ipq6018-add-qdss_at-clock-needed-for-wi.patch b/target/linux/qualcommax/patches-6.6/0060-v6.9-clk-qcom-gcc-ipq6018-add-qdss_at-clock-needed-for-wi.patch index 5f0af1352a8cdc..f798fffb28b4a7 100644 --- a/target/linux/qualcommax/patches-6.6/0060-v6.9-clk-qcom-gcc-ipq6018-add-qdss_at-clock-needed-for-wi.patch +++ b/target/linux/qualcommax/patches-6.6/0060-v6.9-clk-qcom-gcc-ipq6018-add-qdss_at-clock-needed-for-wi.patch @@ -31,7 +31,7 @@ Signed-off-by: Bjorn Andersson + .parent_hws = (const struct clk_hw *[]){ + &qdss_at_clk_src.clkr.hw }, + .num_parents = 1, -+ .flags = CLK_SET_RATE_PARENT, ++ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + .ops = &clk_branch2_ops, + }, + }, diff --git a/target/linux/qualcommax/patches-6.6/0910-arm64-dts-qcom-ipq6018-change-voltage-to-perf-levels.patch b/target/linux/qualcommax/patches-6.6/0910-arm64-dts-qcom-ipq6018-change-voltage-to-perf-levels.patch index 25fa3136709bb6..bcf812bdcc5f51 100644 --- a/target/linux/qualcommax/patches-6.6/0910-arm64-dts-qcom-ipq6018-change-voltage-to-perf-levels.patch +++ b/target/linux/qualcommax/patches-6.6/0910-arm64-dts-qcom-ipq6018-change-voltage-to-perf-levels.patch @@ -36,7 +36,7 @@ Signed-off-by: Mantas Pucka opp-hz = /bits/ 64 <1320000000>; - opp-microvolt = <862500>; + opp-microvolt = <3>; - opp-supported-hw = <0x3>; + opp-supported-hw = <0xf>; clock-latency-ns = <200000>; }; @@ -44,7 +44,7 @@ Signed-off-by: Mantas Pucka opp-hz = /bits/ 64 <1440000000>; - opp-microvolt = <925000>; + opp-microvolt = <4>; - opp-supported-hw = <0x3>; + opp-supported-hw = <0xf>; clock-latency-ns = <200000>; }; @@ -52,7 +52,7 @@ Signed-off-by: Mantas Pucka opp-hz = /bits/ 64 <1608000000>; - opp-microvolt = <987500>; + opp-microvolt = <5>; - opp-supported-hw = <0x1>; + opp-supported-hw = <0xf>; clock-latency-ns = <200000>; }; @@ -60,6 +60,6 @@ Signed-off-by: Mantas Pucka opp-hz = /bits/ 64 <1800000000>; - opp-microvolt = <1062500>; + opp-microvolt = <6>; - opp-supported-hw = <0x1>; + opp-supported-hw = <0xf>; clock-latency-ns = <200000>; }; diff --git a/target/linux/qualcommax/patches-6.6/1002-arm64-dts-qcom-ipq6018-add-blsp1_i2c6-node.patch b/target/linux/qualcommax/patches-6.6/1002-arm64-dts-qcom-ipq6018-add-blsp1_i2c6-node.patch new file mode 100644 index 00000000000000..45197366dd0198 --- /dev/null +++ 
b/target/linux/qualcommax/patches-6.6/1002-arm64-dts-qcom-ipq6018-add-blsp1_i2c6-node.patch @@ -0,0 +1,33 @@ +From ba4cdc72744a217ba1d0d345e074ff65d7ec8c37 Mon Sep 17 00:00:00 2001 +From: JiaY-shi +Date: Tue, 6 Jun 2023 19:35:55 +0800 +Subject: [PATCH 155/155] arm64: dts: qcom: ipq6018: add blsp1_i2c6 node + +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -626,6 +626,21 @@ + status = "disabled"; + }; + ++ blsp1_i2c6: i2c@78ba000 { ++ compatible = "qcom,i2c-qup-v2.2.1"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x078ba000 0x0 0x600>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ clock-frequency = <400000>; ++ dmas = <&blsp_dma 22>, <&blsp_dma 23>; ++ dma-names = "tx", "rx"; ++ status = "disabled"; ++ }; ++ + qpic_bam: dma-controller@7984000 { + compatible = "qcom,bam-v1.7.0"; + reg = <0x0 0x07984000 0x0 0x1a000>; diff --git a/target/linux/qualcommax/patches-6.6/1003-arm64-dts-qcom-ipq6018-repair-reserved-memory-missin.patch b/target/linux/qualcommax/patches-6.6/1003-arm64-dts-qcom-ipq6018-repair-reserved-memory-missin.patch new file mode 100644 index 00000000000000..d502277a7b6f60 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/1003-arm64-dts-qcom-ipq6018-repair-reserved-memory-missin.patch @@ -0,0 +1,35 @@ +From 0ad5a9666e0eca72fc4546ed384a40b1430ddd8b Mon Sep 17 00:00:00 2001 +From: JiaY-shi +Date: Mon, 12 Jun 2023 15:06:01 +0800 +Subject: [PATCH] arm64: dts: qcom: ipq6018: repair reserved-memory missing + nodes + +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 31 +++++++++++++++++++++++++++ + 1 file changed, 31 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -223,6 +223,22 @@ + reg = <0x0 0x4ab00000 0x0 0x5500000>; + no-map; + }; ++ ++ nss_region: nss@40000000 { ++ no-map; ++ reg = <0x0 0x40000000 0x0 0x01000000>; ++ }; ++ ++ q6_etr_region: q6_etr_dump@1 { ++ no-map; ++ reg = <0x0 0x50000000 0x0 0x00100000>; ++ }; ++ ++ m3_dump_region: m3_dump@50100000 { ++ no-map; ++ reg = <0x0 0x50100000 0x0 0x00100000>; ++ }; ++ + }; + + smem { diff --git a/target/linux/qualcommax/patches-6.6/1004-arm64-dts-qcom-ipq6018-Add-missing-fixed-clocks.patch b/target/linux/qualcommax/patches-6.6/1004-arm64-dts-qcom-ipq6018-Add-missing-fixed-clocks.patch new file mode 100644 index 00000000000000..4f96b319599bd8 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/1004-arm64-dts-qcom-ipq6018-Add-missing-fixed-clocks.patch @@ -0,0 +1,40 @@ +From 48d8e82ed977f07211f827834d6ee6e6fe3336d8 Mon Sep 17 00:00:00 2001 +From: Alexandru Gagniuc +Date: Sat, 27 Aug 2022 17:33:37 -0500 +Subject: [PATCH 1004/1010] arm64: dts: qcom: ipq6018: Add missing fixed-clocks + +Signed-off-by: Alexandru Gagniuc +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 24 ++++++++++++++++++++++-- + 1 file changed, 22 insertions(+), 2 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -28,6 +28,12 @@ + clock-frequency = <24000000>; + #clock-cells = <0>; + }; ++ ++ usb3phy_0_cc_pipe_clk: usb3phy-0-cc-pipe-clk { ++ compatible = "fixed-clock"; ++ clock-frequency = <125000000>; ++ #clock-cells = <0>; ++ }; + }; + + cpus: cpus { +@@ -433,8 +439,12 @@ + gcc: gcc@1800000 { + compatible = "qcom,gcc-ipq6018"; + reg = <0x0 0x01800000 0x0 0x80000>; +- clocks = <&xo>, <&sleep_clk>; +- 
clock-names = "xo", "sleep_clk"; ++ clocks = <&xo>, ++ <&sleep_clk>, ++ <&usb3phy_0_cc_pipe_clk>; ++ clock-names = "xo", ++ "sleep_clk", ++ "usb3phy_0_cc_pipe_clk"; + #clock-cells = <1>; + #reset-cells = <1>; + }; diff --git a/target/linux/qualcommax/patches-6.6/1005-clk-qcom-ipq6018-add-missing-clocks.patch b/target/linux/qualcommax/patches-6.6/1005-clk-qcom-ipq6018-add-missing-clocks.patch new file mode 100644 index 00000000000000..1f210fc24c31ce --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/1005-clk-qcom-ipq6018-add-missing-clocks.patch @@ -0,0 +1,642 @@ +From 86c704b73063eb61d1dc7aaf0d110a88b09c6134 Mon Sep 17 00:00:00 2001 +From: anusha +Date: Wed, 19 Aug 2020 17:59:22 +0530 +Subject: [PATCH 1001/1010] clk: qcom: ipq6018: add missing clocks + +A large number of clocks are missing for the IPQ6018. Add those clocks +from the downstream QCA 5.4 kernel. + +Signed-off-by: anusha +Signed-off-by: Alexandru Gagniuc +--- + drivers/clk/qcom/gcc-ipq6018.c | 587 +++++++++++++++++++++++++++++++++ + 1 file changed, 587 insertions(+) + +--- a/drivers/clk/qcom/gcc-ipq6018.c ++++ b/drivers/clk/qcom/gcc-ipq6018.c +@@ -212,6 +212,19 @@ static struct clk_rcg2 pcnoc_bfdcd_clk_s + }, + }; + ++static struct clk_fixed_factor pcnoc_clk_src = { ++ .mult = 1, ++ .div = 1, ++ .hw.init = &(struct clk_init_data){ ++ .name = "pcnoc_clk_src", ++ .parent_hws = (const struct clk_hw *[]){ ++ &pcnoc_bfdcd_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .ops = &clk_fixed_factor_ops, ++ .flags = CLK_SET_RATE_PARENT, ++ }, ++}; ++ + static struct clk_alpha_pll gpll2_main = { + .offset = 0x4a000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], +@@ -490,6 +503,19 @@ static struct clk_rcg2 snoc_nssnoc_bfdcd + }, + }; + ++static struct clk_fixed_factor snoc_nssnoc_clk_src = { ++ .mult = 1, ++ .div = 1, ++ .hw.init = &(struct clk_init_data){ ++ .name = "snoc_nssnoc_clk_src", ++ .parent_hws = (const struct clk_hw *[]){ ++ &snoc_nssnoc_bfdcd_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .ops = &clk_fixed_factor_ops, ++ .flags = CLK_SET_RATE_PARENT, ++ }, ++}; ++ + static const struct freq_tbl ftbl_apss_ahb_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + F(25000000, P_GPLL0_DIV2, 16, 0, 0), +@@ -1890,6 +1916,19 @@ static struct clk_rcg2 system_noc_bfdcd_ + }, + }; + ++static struct clk_fixed_factor system_noc_clk_src = { ++ .mult = 1, ++ .div = 1, ++ .hw.init = &(struct clk_init_data){ ++ .name = "system_noc_clk_src", ++ .parent_hws = (const struct clk_hw *[]){ ++ &system_noc_bfdcd_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .ops = &clk_fixed_factor_ops, ++ .flags = CLK_SET_RATE_PARENT, ++ }, ++}; ++ + static const struct freq_tbl ftbl_ubi32_mem_noc_bfdcd_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + F(307670000, P_BIAS_PLL_NSS_NOC, 1.5, 0, 0), +@@ -1924,6 +1963,19 @@ static struct clk_rcg2 ubi32_mem_noc_bfd + }, + }; + ++static struct clk_fixed_factor ubi32_mem_noc_clk_src = { ++ .mult = 1, ++ .div = 1, ++ .hw.init = &(struct clk_init_data){ ++ .name = "ubi32_mem_noc_clk_src", ++ .parent_hws = (const struct clk_hw *[]){ ++ &ubi32_mem_noc_bfdcd_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .ops = &clk_fixed_factor_ops, ++ .flags = CLK_SET_RATE_PARENT, ++ }, ++}; ++ + static struct clk_branch gcc_apss_axi_clk = { + .halt_reg = 0x46020, + .halt_check = BRANCH_HALT_VOTED, +@@ -2681,6 +2733,454 @@ static struct clk_rcg2 lpass_q6_axim_clk + }, + }; + ++static struct clk_branch gcc_wcss_axi_m_clk = { ++ .halt_reg = 0x5903C, ++ .clkr = { ++ .enable_reg = 0x5903C, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ 
.name = "gcc_wcss_axi_m_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &system_noc_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_sys_noc_wcss_ahb_clk = { ++ .halt_reg = 0x26034, ++ .clkr = { ++ .enable_reg = 0x26034, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_sys_noc_wcss_ahb_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &wcss_ahb_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_q6_axim_clk = { ++ .halt_reg = 0x5913C, ++ .clkr = { ++ .enable_reg = 0x5913C, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_q6_axim_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &q6_axi_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_q6ss_atbm_clk = { ++ .halt_reg = 0x59144, ++ .clkr = { ++ .enable_reg = 0x59144, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_q6ss_atbm_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_at_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_q6ss_pclkdbg_clk = { ++ .halt_reg = 0x59140, ++ .clkr = { ++ .enable_reg = 0x59140, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_q6ss_pclkdbg_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_dap_sync_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_q6_tsctr_1to2_clk = { ++ .halt_reg = 0x59148, ++ .clkr = { ++ .enable_reg = 0x59148, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_q6_tsctr_1to2_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_tsctr_div2_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_core_tbu_clk = { ++ .halt_reg = 0x12028, ++ .clkr = { ++ .enable_reg = 0xb00c, ++ .enable_mask = BIT(7), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_core_tbu_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &system_noc_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_q6_tbu_clk = { ++ .halt_reg = 0x1202C, ++ .clkr = { ++ .enable_reg = 0xb00c, ++ .enable_mask = BIT(8), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_q6_tbu_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &q6_axi_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_q6_axim2_clk = { ++ .halt_reg = 0x59150, ++ .clkr = { ++ .enable_reg = 0x59150, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_q6_axim2_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &q6_axi_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_q6_ahb_clk = { ++ 
.halt_reg = 0x59138, ++ .clkr = { ++ .enable_reg = 0x59138, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_q6_ahb_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &wcss_ahb_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_q6_ahb_s_clk = { ++ .halt_reg = 0x5914C, ++ .clkr = { ++ .enable_reg = 0x5914C, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_q6_ahb_s_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &wcss_ahb_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_dbg_ifc_apb_clk = { ++ .halt_reg = 0x59040, ++ .clkr = { ++ .enable_reg = 0x59040, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_dbg_ifc_apb_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_dap_sync_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_dbg_ifc_atb_clk = { ++ .halt_reg = 0x59044, ++ .clkr = { ++ .enable_reg = 0x59044, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_dbg_ifc_atb_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_at_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_dbg_ifc_nts_clk = { ++ .halt_reg = 0x59048, ++ .clkr = { ++ .enable_reg = 0x59048, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_dbg_ifc_nts_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_tsctr_div2_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_dbg_ifc_dapbus_clk = { ++ .halt_reg = 0x5905C, ++ .clkr = { ++ .enable_reg = 0x5905C, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_dbg_ifc_dapbus_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_dap_sync_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_dbg_ifc_apb_bdg_clk = { ++ .halt_reg = 0x59050, ++ .clkr = { ++ .enable_reg = 0x59050, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_dbg_ifc_apb_bdg_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_dap_sync_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_dbg_ifc_atb_bdg_clk = { ++ .halt_reg = 0x59054, ++ .clkr = { ++ .enable_reg = 0x59054, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_dbg_ifc_atb_bdg_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_at_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_dbg_ifc_nts_bdg_clk = { ++ .halt_reg = 0x59058, ++ .clkr = { ++ .enable_reg = 0x59058, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_dbg_ifc_nts_bdg_clk", ++ .parent_hws = 
(const struct clk_hw *[]){ ++ &qdss_tsctr_div2_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_dbg_ifc_dapbus_bdg_clk = { ++ .halt_reg = 0x59060, ++ .clkr = { ++ .enable_reg = 0x59060, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_dbg_ifc_dapbus_bdg_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_dap_sync_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_nssnoc_atb_clk = { ++ .halt_reg = 0x6818C, ++ .clkr = { ++ .enable_reg = 0x6818C, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_nssnoc_atb_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_at_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_ecahb_clk = { ++ .halt_reg = 0x59038, ++ .clkr = { ++ .enable_reg = 0x59038, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_ecahb_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &wcss_ahb_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_acmt_clk = { ++ .halt_reg = 0x59064, ++ .clkr = { ++ .enable_reg = 0x59064, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_acmt_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &wcss_ahb_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_wcss_ahb_s_clk = { ++ .halt_reg = 0x59034, ++ .clkr = { ++ .enable_reg = 0x59034, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_wcss_ahb_s_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &wcss_ahb_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++struct clk_branch gcc_rbcpr_wcss_ahb_clk = { ++ .halt_reg = 0x3A008, ++ .clkr = { ++ .enable_reg = 0x3A008, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_rbcpr_wcss_ahb_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &pcnoc_clk_src.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++struct clk_branch gcc_mem_noc_q6_axi_clk = { ++ .halt_reg = 0x1D038, ++ .clkr = { ++ .enable_reg = 0x1D038, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_mem_noc_q6_axi_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &q6_axi_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_sys_noc_qdss_stm_axi_clk = { ++ .halt_reg = 0x26024, ++ .clkr = { ++ .enable_reg = 0x26024, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_sys_noc_qdss_stm_axi_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_stm_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_qdss_stm_clk = { ++ .halt_reg = 
0x29044, ++ .clkr = { ++ .enable_reg = 0x29044, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_qdss_stm_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_stm_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_qdss_traceclkin_clk = { ++ .halt_reg = 0x29060, ++ .clkr = { ++ .enable_reg = 0x29060, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_qdss_traceclkin_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &qdss_traceclkin_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ + static struct freq_tbl ftbl_rbcpr_wcss_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0, 16, 0, 0), +@@ -2700,6 +3200,23 @@ static struct clk_rcg2 rbcpr_wcss_clk_sr + }, + }; + ++struct clk_branch gcc_rbcpr_wcss_clk = { ++ .halt_reg = 0x3A004, ++ .clkr = { ++ .enable_reg = 0x3A004, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_rbcpr_wcss_clk", ++ .parent_hws = (const struct clk_hw *[]){ ++ &rbcpr_wcss_clk_src.clkr.hw }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++ + static struct clk_branch gcc_lpass_core_axim_clk = { + .halt_reg = 0x1F028, + .clkr = { +@@ -4213,6 +4730,9 @@ static struct clk_hw *gcc_ipq6018_hws[] + &gpll6_out_main_div2.hw, + &qdss_dap_sync_clk_src.hw, + &qdss_tsctr_div2_clk_src.hw, ++ &pcnoc_clk_src.hw, ++ &snoc_nssnoc_clk_src.hw, ++ &ubi32_mem_noc_clk_src.hw, + }; + + static struct clk_regmap *gcc_ipq6018_clks[] = { +@@ -4418,9 +4938,35 @@ static struct clk_regmap *gcc_ipq6018_cl + [PCIE0_RCHNG_CLK_SRC] = &pcie0_rchng_clk_src.clkr, + [GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr, + [PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr, ++ [GCC_WCSS_AXI_M_CLK] = &gcc_wcss_axi_m_clk.clkr, ++ [GCC_SYS_NOC_WCSS_AHB_CLK] = &gcc_sys_noc_wcss_ahb_clk.clkr, + [WCSS_AHB_CLK_SRC] = &wcss_ahb_clk_src.clkr, ++ [GCC_Q6_AXIM_CLK] = &gcc_q6_axim_clk.clkr, + [Q6_AXI_CLK_SRC] = &q6_axi_clk_src.clkr, ++ [GCC_Q6SS_ATBM_CLK] = &gcc_q6ss_atbm_clk.clkr, ++ [GCC_Q6SS_PCLKDBG_CLK] = &gcc_q6ss_pclkdbg_clk.clkr, ++ [GCC_Q6_TSCTR_1TO2_CLK] = &gcc_q6_tsctr_1to2_clk.clkr, ++ [GCC_WCSS_CORE_TBU_CLK] = &gcc_wcss_core_tbu_clk.clkr, ++ [GCC_WCSS_Q6_TBU_CLK] = &gcc_wcss_q6_tbu_clk.clkr, ++ [GCC_Q6_AXIM2_CLK] = &gcc_q6_axim2_clk.clkr, ++ [GCC_Q6_AHB_CLK] = &gcc_q6_ahb_clk.clkr, ++ [GCC_Q6_AHB_S_CLK] = &gcc_q6_ahb_s_clk.clkr, ++ [GCC_WCSS_DBG_IFC_APB_CLK] = &gcc_wcss_dbg_ifc_apb_clk.clkr, ++ [GCC_WCSS_DBG_IFC_ATB_CLK] = &gcc_wcss_dbg_ifc_atb_clk.clkr, ++ [GCC_WCSS_DBG_IFC_NTS_CLK] = &gcc_wcss_dbg_ifc_nts_clk.clkr, ++ [GCC_WCSS_DBG_IFC_DAPBUS_CLK] = &gcc_wcss_dbg_ifc_dapbus_clk.clkr, ++ [GCC_WCSS_DBG_IFC_APB_BDG_CLK] = &gcc_wcss_dbg_ifc_apb_bdg_clk.clkr, ++ [GCC_WCSS_DBG_IFC_ATB_BDG_CLK] = &gcc_wcss_dbg_ifc_atb_bdg_clk.clkr, ++ [GCC_WCSS_DBG_IFC_NTS_BDG_CLK] = &gcc_wcss_dbg_ifc_nts_bdg_clk.clkr, ++ [GCC_WCSS_DBG_IFC_DAPBUS_BDG_CLK] = &gcc_wcss_dbg_ifc_dapbus_bdg_clk.clkr, ++ [GCC_NSSNOC_ATB_CLK] = &gcc_nssnoc_atb_clk.clkr, ++ [GCC_WCSS_ECAHB_CLK] = &gcc_wcss_ecahb_clk.clkr, ++ [GCC_WCSS_ACMT_CLK] = &gcc_wcss_acmt_clk.clkr, ++ [GCC_WCSS_AHB_S_CLK] = &gcc_wcss_ahb_s_clk.clkr, ++ [GCC_RBCPR_WCSS_CLK] = &gcc_rbcpr_wcss_clk.clkr, + [RBCPR_WCSS_CLK_SRC] = &rbcpr_wcss_clk_src.clkr, ++ [GCC_RBCPR_WCSS_AHB_CLK] = 
&gcc_rbcpr_wcss_ahb_clk.clkr, ++ [GCC_MEM_NOC_Q6_AXI_CLK] = &gcc_mem_noc_q6_axi_clk.clkr, + [GCC_LPASS_CORE_AXIM_CLK] = &gcc_lpass_core_axim_clk.clkr, + [LPASS_CORE_AXIM_CLK_SRC] = &lpass_core_axim_clk_src.clkr, + [GCC_LPASS_SNOC_CFG_CLK] = &gcc_lpass_snoc_cfg_clk.clkr, +@@ -4436,6 +4982,9 @@ static struct clk_regmap *gcc_ipq6018_cl + [GCC_MEM_NOC_UBI32_CLK] = &gcc_mem_noc_ubi32_clk.clkr, + [GCC_MEM_NOC_LPASS_CLK] = &gcc_mem_noc_lpass_clk.clkr, + [GCC_SNOC_LPASS_CFG_CLK] = &gcc_snoc_lpass_cfg_clk.clkr, ++ [GCC_SYS_NOC_QDSS_STM_AXI_CLK] = &gcc_sys_noc_qdss_stm_axi_clk.clkr, ++ [GCC_QDSS_STM_CLK] = &gcc_qdss_stm_clk.clkr, ++ [GCC_QDSS_TRACECLKIN_CLK] = &gcc_qdss_traceclkin_clk.clkr, + [QDSS_STM_CLK_SRC] = &qdss_stm_clk_src.clkr, + [QDSS_TRACECLKIN_CLK_SRC] = &qdss_traceclkin_clk_src.clkr, + }; +@@ -4617,6 +5166,10 @@ static const struct qcom_cc_desc gcc_ipq + static int gcc_ipq6018_probe(struct platform_device *pdev) + { + struct regmap *regmap; ++ struct device *dev = &pdev->dev; ++ ++ clk_register_fixed_rate(dev, "pcie20_phy0_pipe_clk", NULL, 0, ++ 250000000); + + regmap = qcom_cc_map(pdev, &gcc_ipq6018_desc); + if (IS_ERR(regmap)) diff --git a/target/linux/qualcommax/patches-6.6/1006-ipq6018-rproc-Add-non-secure-Q6-bringup-sequence.patch b/target/linux/qualcommax/patches-6.6/1006-ipq6018-rproc-Add-non-secure-Q6-bringup-sequence.patch new file mode 100644 index 00000000000000..93d54740d13631 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/1006-ipq6018-rproc-Add-non-secure-Q6-bringup-sequence.patch @@ -0,0 +1,435 @@ +From 4ebe7624c51ff95ec13f5dda9cca7c0abe59cd0e Mon Sep 17 00:00:00 2001 +From: Manikanta Mylavarapu +Date: Fri, 31 Dec 2021 17:13:59 +0530 +Subject: [PATCH 1006/1010] ipq6018: rproc: Add non secure Q6 bringup sequence + +This patch adds Q6 bring up sequence support. 
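+
+For reviewers, a condensed sketch of the SSCAON handshake this sequence
+adds to q6v5_wcss_reset() (register and bit names are the ones used in
+the hunks below; the ordering follows this downstream-derived patch, not
+a public datasheet):
+
+	/* Set SSCAON_CONFIG[0] first, as the downstream sequence does */
+	val = readl(wcss->rmb_base + SSCAON_CONFIG);
+	val |= BIT(0);
+	writel(val, wcss->rmb_base + SSCAON_CONFIG);
+	mdelay(1);
+
+	/* Enable the SSCAON bus (CFG[15]), clear the bus mux field
+	 * (CFG[18:16]) and the HM retention bit (CFG[1]) */
+	val = readl(wcss->rmb_base + SSCAON_CONFIG);
+	val &= ~(SSCAON_BUS_MUX_MASK | WCSS_HM_RET);
+	val |= SSCAON_BUS_EN;
+	writel(val, wcss->rmb_base + SSCAON_CONFIG);
+	mdelay(1);
+
+After the core is brought out of reset, boot completion is confirmed by
+polling SSCAON_STATUS until its low 16 bits read 0x10.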
+ +Change-Id: I28eee991168034cc240d863e736ed9c766ec4f33 +Signed-off-by: Manikanta Mylavarapu +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 223 ++++++++++++++++++++++++-- + 2 files changed, 227 insertions(+), 16 deletions(-) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -27,6 +27,7 @@ + + /* Q6SS Register Offsets */ + #define Q6SS_RESET_REG 0x014 ++#define Q6SS_DBG_CFG 0x018 + #define Q6SS_GFMUX_CTL_REG 0x020 + #define Q6SS_PWR_CTL_REG 0x030 + #define Q6SS_MEM_PWR_CTL 0x0B0 +@@ -68,6 +69,7 @@ + #define HALT_CHECK_MAX_LOOPS 200 + #define Q6SS_XO_CBCR GENMASK(5, 3) + #define Q6SS_SLEEP_CBCR GENMASK(5, 2) ++#define Q6SS_TIMEOUT_US 1000 + + /* Q6SS config/status registers */ + #define TCSR_GLOBAL_CFG0 0x0 +@@ -78,6 +80,7 @@ + #define Q6SS_RST_EVB 0x10 + + #define BHS_EN_REST_ACK BIT(0) ++#define WCSS_HM_RET BIT(1) + #define SSCAON_ENABLE BIT(13) + #define SSCAON_BUS_EN BIT(15) + #define SSCAON_BUS_MUX_MASK GENMASK(18, 16) +@@ -120,6 +123,11 @@ struct q6v5_wcss { + struct clk *qdsp6ss_core_gfmux; + struct clk *lcc_bcr_sleep; + struct clk *prng_clk; ++ struct clk *gcc_sys_noc_wcss_ahb_clk; ++ struct clk *gcc_q6ss_atbm_clk; ++ struct clk *gcc_q6ss_pclkdbg_clk; ++ struct clk *gcc_q6_tsctr_1to2_clk; ++ + struct clk *qdss_clk; + struct regulator *cx_supply; + struct qcom_sysmon *sysmon; +@@ -165,12 +173,77 @@ struct wcss_data { + bool need_auto_boot; + }; + ++static const struct wcss_data wcss_ipq6018_res_init; ++ ++static int ipq6018_clks_prepare_enable(struct q6v5_wcss *wcss) ++{ ++ int ret; ++ ++ ret = clk_prepare_enable(wcss->gcc_sys_noc_wcss_ahb_clk); ++ if (ret) ++ return ret; ++ ++ ret = clk_prepare_enable(wcss->gcc_q6ss_atbm_clk); ++ if (ret) ++ return ret; ++ ++ ret = clk_prepare_enable(wcss->gcc_q6ss_pclkdbg_clk); ++ if (ret) ++ return ret; ++ ++ ret = clk_prepare_enable(wcss->gcc_q6_tsctr_1to2_clk); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static void ipq6018_clks_prepare_disable(struct q6v5_wcss *wcss) ++{ ++ clk_disable_unprepare(wcss->gcc_sys_noc_wcss_ahb_clk); ++ clk_disable_unprepare(wcss->gcc_q6ss_atbm_clk); ++ clk_disable_unprepare(wcss->gcc_q6ss_pclkdbg_clk); ++ clk_disable_unprepare(wcss->gcc_q6_tsctr_1to2_clk); ++} ++ + static int q6v5_wcss_reset(struct q6v5_wcss *wcss) + { ++ const struct wcss_data *desc; + int ret; + u32 val; + int i; + ++ desc = device_get_match_data(wcss->dev); ++ if (desc == &wcss_ipq6018_res_init) { ++ if (desc->aon_reset_required) { ++ /* Deassert wcss aon reset */ ++ ret = reset_control_deassert(wcss->wcss_aon_reset); ++ if (ret) { ++ dev_err(wcss->dev, "wcss_aon_reset failed\n"); ++ return ret; ++ } ++ mdelay(1); ++ } ++ ++ ret = ipq6018_clks_prepare_enable(wcss); ++ if (ret) { ++ dev_err(wcss->dev, "failed to enable clock\n"); ++ return ret; ++ } ++ } ++ ++ val = readl(wcss->rmb_base + SSCAON_CONFIG); ++ val |= BIT(0); ++ writel(val, wcss->rmb_base + SSCAON_CONFIG); ++ mdelay(1); ++ ++ /*set CFG[18:15]=1* and clear CFG[1]=0*/ ++ val = readl(wcss->rmb_base + SSCAON_CONFIG); ++ val &= ~(SSCAON_BUS_MUX_MASK | WCSS_HM_RET); ++ val |= SSCAON_BUS_EN; ++ writel(val, wcss->rmb_base + SSCAON_CONFIG); ++ mdelay(1); ++ + /* Assert resets, stop core */ + val = readl(wcss->reg_base + Q6SS_RESET_REG); + val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; +@@ -196,7 +269,19 @@ static int q6v5_wcss_reset(struct q6v5_w + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + udelay(1); + ++ if (desc == &wcss_ipq6018_res_init) { ++ /* 10 - Wait till BHS Reset is done */ ++ ret = 
readl_poll_timeout(wcss->reg_base + Q6SS_BHS_STATUS, ++ val, (val & BHS_EN_REST_ACK), 1000, ++ Q6SS_TIMEOUT_US * 50); ++ if (ret) { ++ dev_err(wcss->dev, "BHS_STATUS not ON (rc:%d) val:0x%X\n", ret, val); ++ return ret; ++ } ++ } ++ + /* Put LDO in bypass mode */ ++ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val |= Q6SS_LDO_BYP; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + +@@ -206,6 +291,7 @@ static int q6v5_wcss_reset(struct q6v5_w + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* Deassert memory peripheral sleep and L2 memory standby */ ++ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + +@@ -220,7 +306,10 @@ static int q6v5_wcss_reset(struct q6v5_w + * array to turn on. + */ + val |= readl(wcss->reg_base + Q6SS_MEM_PWR_CTL); +- udelay(1); ++ if (desc == &wcss_ipq6018_res_init) ++ mdelay(10); ++ else ++ udelay(1); + } + /* Remove word line clamp */ + val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); +@@ -228,6 +317,7 @@ static int q6v5_wcss_reset(struct q6v5_w + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* Remove IO clamp */ ++ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val &= ~Q6SS_CLAMP_IO; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + +@@ -246,6 +336,16 @@ static int q6v5_wcss_reset(struct q6v5_w + val &= ~Q6SS_STOP_CORE; + writel(val, wcss->reg_base + Q6SS_RESET_REG); + ++ /* Wait for SSCAON_STATUS */ ++ val = readl(wcss->rmb_base + SSCAON_STATUS); ++ ret = readl_poll_timeout(wcss->rmb_base + SSCAON_STATUS, ++ val, (val & 0xffff) == 0x10, 1000, ++ Q6SS_TIMEOUT_US * 1000); ++ if (ret) { ++ dev_err(wcss->dev, " Boot Error, SSCAON=0x%08X\n", val); ++ return ret; ++ } ++ + return 0; + } + +@@ -380,7 +480,7 @@ static int q6v5_wcss_qcs404_power_on(str + /* Read CLKOFF bit to go low indicating CLK is enabled */ + ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR, + val, !(val & BIT(31)), 1, +- HALT_CHECK_MAX_LOOPS); ++ Q6SS_TIMEOUT_US); + if (ret) { + dev_err(wcss->dev, + "xo cbcr enabling timed out (rc:%d)\n", ret); +@@ -533,13 +633,18 @@ static void q6v5_wcss_halt_axi_port(stru + unsigned long timeout; + unsigned int val; + int ret; ++ const struct wcss_data *desc = device_get_match_data(wcss->dev); + +- /* Check if we're already idle */ +- ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); +- if (!ret && val) +- return; ++ if (desc != &wcss_ipq6018_res_init) { ++ /* Check if we're already idle */ ++ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); ++ if (!ret && val) ++ return; ++ } + + /* Assert halt request */ ++ regmap_read(halt_map, offset + AXI_HALTREQ_REG, &val); ++ val |= BIT(0); + regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); + + /* Wait for halt */ +@@ -552,12 +657,14 @@ static void q6v5_wcss_halt_axi_port(stru + msleep(1); + } + +- ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); +- if (ret || !val) +- dev_err(wcss->dev, "port failed halt\n"); ++ if (desc != &wcss_ipq6018_res_init) { ++ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); ++ if (ret || !val) ++ dev_err(wcss->dev, "port failed halt\n"); + +- /* Clear halt request (port will remain halted until reset) */ +- regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); ++ /* Clear halt request (port will remain halted until reset) */ ++ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); ++ } + } + + static int q6v5_qcs404_wcss_shutdown(struct q6v5_wcss *wcss) +@@ -626,6 +733,7 @@ static int q6v5_qcs404_wcss_shutdown(str + + static int 
q6v5_wcss_powerdown(struct q6v5_wcss *wcss) + { ++ const struct wcss_data *desc = device_get_match_data(wcss->dev); + int ret; + u32 val; + +@@ -643,21 +751,26 @@ static int q6v5_wcss_powerdown(struct q6 + writel(val, wcss->rmb_base + SSCAON_CONFIG); + + /* 4 - SSCAON_CONFIG 1 */ ++ val = readl(wcss->rmb_base + SSCAON_CONFIG); + val |= BIT(1); + writel(val, wcss->rmb_base + SSCAON_CONFIG); + + /* 5 - wait for SSCAON_STATUS */ + ret = readl_poll_timeout(wcss->rmb_base + SSCAON_STATUS, + val, (val & 0xffff) == 0x400, 1000, +- HALT_CHECK_MAX_LOOPS); ++ Q6SS_TIMEOUT_US * 10); + if (ret) { + dev_err(wcss->dev, + "can't get SSCAON_STATUS rc:%d)\n", ret); + return ret; + } + ++ mdelay(2); ++ + /* 6 - De-assert WCSS_AON reset */ + reset_control_assert(wcss->wcss_aon_reset); ++ if (desc == &wcss_ipq6018_res_init) ++ mdelay(1); + + /* 7 - Disable WCSSAON_CONFIG 13 */ + val = readl(wcss->rmb_base + SSCAON_CONFIG); +@@ -667,6 +780,13 @@ static int q6v5_wcss_powerdown(struct q6 + /* 8 - De-assert WCSS/Q6 HALTREQ */ + reset_control_assert(wcss->wcss_reset); + ++ if (desc == &wcss_ipq6018_res_init) { ++ /* Clear halt request (port will remain halted until reset) */ ++ regmap_read(wcss->halt_map, wcss->halt_wcss + AXI_HALTREQ_REG, &val); ++ val &= ~0x1; ++ regmap_write(wcss->halt_map, wcss->halt_wcss + AXI_HALTREQ_REG, val); ++ } ++ + return 0; + } + +@@ -675,6 +795,12 @@ static int q6v5_q6_powerdown(struct q6v5 + int ret; + u32 val; + int i; ++ const struct wcss_data *desc = device_get_match_data(wcss->dev); ++ ++ if (desc == &wcss_ipq6018_res_init) { ++ /* To retain power domain after q6 powerdown */ ++ writel(0x1, wcss->reg_base + Q6SS_DBG_CFG); ++ } + + /* 1 - Halt Q6 bus interface */ + q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_q6); +@@ -690,14 +816,17 @@ static int q6v5_q6_powerdown(struct q6v5 + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* 4 - Clamp WL */ ++ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val |= QDSS_BHS_ON; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* 5 - Clear Erase standby */ ++ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val &= ~Q6SS_L2DATA_STBY_N; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* 6 - Clear Sleep RTN */ ++ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val &= ~Q6SS_SLP_RET_N; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + +@@ -715,6 +844,7 @@ static int q6v5_q6_powerdown(struct q6v5 + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + + /* 9 - Turn off BHS */ ++ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG); + val &= ~Q6SS_BHS_ON; + writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG); + udelay(1); +@@ -722,7 +852,7 @@ static int q6v5_q6_powerdown(struct q6v5 + /* 10 - Wait till BHS Reset is done */ + ret = readl_poll_timeout(wcss->reg_base + Q6SS_BHS_STATUS, + val, !(val & BHS_EN_REST_ACK), 1000, +- HALT_CHECK_MAX_LOOPS); ++ Q6SS_TIMEOUT_US * 10); + if (ret) { + dev_err(wcss->dev, "BHS_STATUS not OFF (rc:%d)\n", ret); + return ret; +@@ -730,9 +860,23 @@ static int q6v5_q6_powerdown(struct q6v5 + + /* 11 - Assert WCSS reset */ + reset_control_assert(wcss->wcss_reset); ++ if (desc == &wcss_ipq6018_res_init) ++ mdelay(1); + + /* 12 - Assert Q6 reset */ + reset_control_assert(wcss->wcss_q6_reset); ++ if (desc == &wcss_ipq6018_res_init) { ++ mdelay(2); ++ ++ /* Clear halt request (port will remain halted until reset) */ ++ regmap_read(wcss->halt_map, wcss->halt_q6 + AXI_HALTREQ_REG, &val); ++ val &= ~0x1; ++ regmap_write(wcss->halt_map, wcss->halt_q6 + AXI_HALTREQ_REG, val); ++ mdelay(1); ++ ++ /* Disable 
clocks*/ ++ ipq6018_clks_prepare_disable(wcss); ++ } + + return 0; + } +@@ -975,6 +1119,57 @@ static int q6v5_alloc_memory_region(stru + return 0; + } + ++static int ipq6018_init_clock(struct q6v5_wcss *wcss) ++{ ++ int ret; ++ ++ wcss->prng_clk = devm_clk_get(wcss->dev, "prng"); ++ if (IS_ERR(wcss->prng_clk)) { ++ ret = PTR_ERR(wcss->prng_clk); ++ if (ret != -EPROBE_DEFER) ++ dev_err(wcss->dev, "Failed to get prng clock\n"); ++ return ret; ++ } ++ ++ wcss->gcc_sys_noc_wcss_ahb_clk = ++ devm_clk_get(wcss->dev, "gcc_sys_noc_wcss_ahb_clk"); ++ if (IS_ERR(wcss->gcc_sys_noc_wcss_ahb_clk)) { ++ ret = PTR_ERR(wcss->gcc_sys_noc_wcss_ahb_clk); ++ if (ret != -EPROBE_DEFER) ++ dev_err(wcss->dev, "Failed to get sys_noc_wcss_ahb clock\n"); ++ return ret; ++ } ++ ++ wcss->gcc_q6ss_atbm_clk = ++ devm_clk_get(wcss->dev, "gcc_q6ss_atbm_clk"); ++ if (IS_ERR(wcss->gcc_q6ss_atbm_clk)) { ++ ret = PTR_ERR(wcss->gcc_q6ss_atbm_clk); ++ if (ret != -EPROBE_DEFER) ++ dev_err(wcss->dev, "Failed to get q6ss_atbm clock\n"); ++ return ret; ++ } ++ ++ wcss->gcc_q6ss_pclkdbg_clk = ++ devm_clk_get(wcss->dev, "gcc_q6ss_pclkdbg_clk"); ++ if (IS_ERR(wcss->gcc_q6ss_pclkdbg_clk)) { ++ ret = PTR_ERR(wcss->gcc_q6ss_pclkdbg_clk); ++ if (ret != -EPROBE_DEFER) ++ dev_err(wcss->dev, "Failed to get q6ss_pclkdbg clock\n"); ++ return ret; ++ } ++ ++ wcss->gcc_q6_tsctr_1to2_clk = ++ devm_clk_get(wcss->dev, "gcc_q6_tsctr_1to2_clk"); ++ if (IS_ERR(wcss->gcc_q6_tsctr_1to2_clk)) { ++ ret = PTR_ERR(wcss->gcc_q6_tsctr_1to2_clk); ++ if (ret != -EPROBE_DEFER) ++ dev_err(wcss->dev, "Failed to get q6_tsctr_1to2 clock\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ + static int ipq_init_clock(struct q6v5_wcss *wcss) + { + int ret; +@@ -1203,7 +1398,7 @@ static const struct wcss_data wcss_ipq80 + }; + + static const struct wcss_data wcss_ipq6018_res_init = { +- .init_clock = ipq_init_clock, ++ .init_clock = ipq6018_init_clock, + .q6_firmware_name = "IPQ6018/q6_fw.mdt", + .m3_firmware_name = "IPQ6018/m3_fw.mdt", + .crash_reason_smem = WCSS_CRASH_REASON, diff --git a/target/linux/qualcommax/patches-6.6/1007-Simplify-ipq6018-rproc-Add-non-secure-Q6-bringup-seq.patch b/target/linux/qualcommax/patches-6.6/1007-Simplify-ipq6018-rproc-Add-non-secure-Q6-bringup-seq.patch new file mode 100644 index 00000000000000..33f721ad9cdcde --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/1007-Simplify-ipq6018-rproc-Add-non-secure-Q6-bringup-seq.patch @@ -0,0 +1,55 @@ +From 737b22c05f9e994ce0ac050b987f85223d04b99f Mon Sep 17 00:00:00 2001 +From: Alexandru Gagniuc +Date: Fri, 2 Sep 2022 18:31:11 -0500 +Subject: [PATCH 1007/1009] Simplify "ipq6018: rproc: Add non secure Q6 bringup + sequence" + +--- + drivers/remoteproc/qcom_q6v5_wcss.c | 25 +++++++++---------------- + 1 file changed, 9 insertions(+), 16 deletions(-) + +--- a/drivers/remoteproc/qcom_q6v5_wcss.c ++++ b/drivers/remoteproc/qcom_q6v5_wcss.c +@@ -633,18 +633,13 @@ static void q6v5_wcss_halt_axi_port(stru + unsigned long timeout; + unsigned int val; + int ret; +- const struct wcss_data *desc = device_get_match_data(wcss->dev); + +- if (desc != &wcss_ipq6018_res_init) { +- /* Check if we're already idle */ +- ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); +- if (!ret && val) +- return; +- } ++ /* Check if we're already idle */ ++ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); ++ if (!ret && val) ++ return; + + /* Assert halt request */ +- regmap_read(halt_map, offset + AXI_HALTREQ_REG, &val); +- val |= BIT(0); + regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); + + /* Wait for 
halt */ +@@ -657,14 +652,12 @@ static void q6v5_wcss_halt_axi_port(stru + msleep(1); + } + +- if (desc != &wcss_ipq6018_res_init) { +- ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); +- if (ret || !val) +- dev_err(wcss->dev, "port failed halt\n"); ++ ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); ++ if (ret || !val) ++ dev_err(wcss->dev, "port failed halt\n"); + +- /* Clear halt request (port will remain halted until reset) */ +- regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); +- } ++ /* Clear halt request (port will remain halted until reset) */ ++ regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); + } + + static int q6v5_qcs404_wcss_shutdown(struct q6v5_wcss *wcss) diff --git a/target/linux/qualcommax/patches-6.6/1008-pwm-driver-for-qualcomm-ipq6018-pwm-block.patch b/target/linux/qualcommax/patches-6.6/1008-pwm-driver-for-qualcomm-ipq6018-pwm-block.patch new file mode 100644 index 00000000000000..f9a1afc6b021fd --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/1008-pwm-driver-for-qualcomm-ipq6018-pwm-block.patch @@ -0,0 +1,601 @@ +From patchwork Thu Oct 5 16:05:47 2023 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 8bit +X-Patchwork-Submitter: Devi Priya +X-Patchwork-Id: 13410404 +Return-Path: +X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on + aws-us-west-2-korg-lkml-1.web.codeaurora.org +Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) + by smtp.lore.kernel.org (Postfix) with ESMTP id D89C1E92716 + for ; + Thu, 5 Oct 2023 16:21:19 +0000 (UTC) +Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand + id S242346AbjJEQUu (ORCPT + ); + Thu, 5 Oct 2023 12:20:50 -0400 +Received: from lindbergh.monkeyblade.net ([23.128.96.19]:34274 "EHLO + lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org + with ESMTP id S242441AbjJEQR7 (ORCPT + ); + Thu, 5 Oct 2023 12:17:59 -0400 +Received: from mx0a-0031df01.pphosted.com (mx0a-0031df01.pphosted.com + [205.220.168.131]) + by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 654563F020; + Thu, 5 Oct 2023 09:06:56 -0700 (PDT) +Received: from pps.filterd (m0279862.ppops.net [127.0.0.1]) + by mx0a-0031df01.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id + 395Aarqd017488; + Thu, 5 Oct 2023 16:06:23 GMT +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quicinc.com; + h=from : to : cc : + subject : date : message-id : in-reply-to : references : mime-version : + content-type : content-transfer-encoding; s=qcppdkim1; + bh=pL305QZpgz9DE4v+JRgsjWEqf1lM32BSKRkIofAZtYI=; + b=N/VkHpLPyHQX0FtgqwJTY18MM5NIRAxm/+ejcJgF+GzogJXQJVrX/JAaY+GrGMI/jBWB + fXAGI3rifkl9eKUkW2WiW2CM3NLpeKa1XcRfGYC3FvWNeVEKpAdNUnneWq5jII/7rjwr + LOEF9iGjSkqgE38uQGz0bcm+TCePCLBym1xS29C8u1B7Xx0M74w+Du98muz8yAqjQbLM + xbUkhQ5rGl34cLkYMUaT8Zuu4Je14xfsUL+dVCk2/TppUvaqZz3mzOdGiwKGz9AWdnJ2 + 1+/sxswdw/5WhuALaDoCncbTHD0BtxYj3SYmNtE0+NHQ4IJ6rpa04qfytuU3+2V8h0xw FQ== +Received: from nalasppmta04.qualcomm.com (Global_NAT1.qualcomm.com + [129.46.96.20]) + by mx0a-0031df01.pphosted.com (PPS) with ESMTPS id 3th8e1u8ty-1 + (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 + verify=NOT); + Thu, 05 Oct 2023 16:06:22 +0000 +Received: from nalasex01a.na.qualcomm.com (nalasex01a.na.qualcomm.com + [10.47.209.196]) + by NALASPPMTA04.qualcomm.com (8.17.1.5/8.17.1.5) with ESMTPS id + 395G6Lmf025392 + (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 + verify=NOT); + Thu, 5 Oct 2023 16:06:21 GMT +Received: from hu-devipriy-blr.qualcomm.com (10.80.80.8) by + 
nalasex01a.na.qualcomm.com (10.47.209.196) with Microsoft SMTP Server + (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id + 15.2.1118.36; Thu, 5 Oct 2023 09:06:16 -0700 +From: Devi Priya +To: , , + , , , + , , + , , + , , + , , + , +CC: , , + +Subject: [PATCH V15 1/4] pwm: driver for qualcomm ipq6018 pwm block +Date: Thu, 5 Oct 2023 21:35:47 +0530 +Message-ID: <20231005160550.2423075-2-quic_devipriy@quicinc.com> +X-Mailer: git-send-email 2.34.1 +In-Reply-To: <20231005160550.2423075-1-quic_devipriy@quicinc.com> +References: <20231005160550.2423075-1-quic_devipriy@quicinc.com> +MIME-Version: 1.0 +X-Originating-IP: [10.80.80.8] +X-ClientProxiedBy: nasanex01a.na.qualcomm.com (10.52.223.231) To + nalasex01a.na.qualcomm.com (10.47.209.196) +X-QCInternal: smtphost +X-Proofpoint-Virus-Version: vendor=nai engine=6200 definitions=5800 + signatures=585085 +X-Proofpoint-GUID: dUK910BXNf0cPwSxJTAoChM7COrWyzeE +X-Proofpoint-ORIG-GUID: dUK910BXNf0cPwSxJTAoChM7COrWyzeE +X-Proofpoint-Virus-Version: vendor=baseguard + engine=ICAP:2.0.267,Aquarius:18.0.980,Hydra:6.0.619,FMLib:17.11.176.26 + definitions=2023-10-05_11,2023-10-05_01,2023-05-22_02 +X-Proofpoint-Spam-Details: rule=outbound_notspam policy=outbound score=0 + phishscore=0 suspectscore=0 + adultscore=0 priorityscore=1501 mlxscore=0 lowpriorityscore=0 spamscore=0 + bulkscore=0 mlxlogscore=999 malwarescore=0 impostorscore=0 clxscore=1015 + classifier=spam adjust=0 reason=mlx scancount=1 engine=8.12.0-2309180000 + definitions=main-2310050126 +Precedence: bulk +List-ID: +X-Mailing-List: linux-arm-msm@vger.kernel.org + +Driver for the PWM block in Qualcomm IPQ6018 line of SoCs. Based on +driver from downstream Codeaurora kernel tree. Removed support for older +(V1) variants because I have no access to that hardware. + +Tested on IPQ6010 based hardware. 
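+
+For illustration, a worked divider search assuming a 100 MHz functional
+clock (the actual rate comes from assigned-clock-rates, so this number
+is only an example) and a requested period of 1 ms:
+
+  period_rate = period_ns * rate = 10^6 * 10^8 = 10^14
+  initial pre_div = period_rate / (NSEC_PER_SEC * (IPQ_PWM_MAX_DIV + 1))
+                  = 10^14 / (10^9 * 65536) = 1
+  pwm_div at pre_div = 1: 10^14 / (10^9 * 2) - 1 = 49999, remainder 0
+
+A remainder of 0 is an exact match, so the search stops at pre_div = 1,
+pwm_div = 49999, an effective divider of (1 + 1) * (49999 + 1) = 100000,
+which yields exactly the requested 1 ms period.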
+ +Co-developed-by: Baruch Siach +Signed-off-by: Baruch Siach +Signed-off-by: Devi Priya +--- +V15: + No change + +V14: + No change + +V13: + No change + +V12: + + Fix the below clang warning for overflow check reported by kernel test robot + drivers/pwm/pwm-ipq.c:122:11: warning: result of comparison of constant 16000000000 + with expression of type 'unsigned long' is always false [-Wtautological-constant-out-of-range-compare] +>> if (rate > 16ULL * GIGA) + +v11: + +Address comment from Uwe Kleine-König: + + Drop redundant registers field comments + + Fix period limit check in .apply + + Clarify the comment explaining skip of pre_div > pwm_div values + + Add explicit check for clock rate within limit + + Add comment explaining the selection of initial pre_div + + Use pwm_div division with remainder instead of separate diff calculation + + Round up duty_cycle calculation in .get_state + +v10: + + Restore round up in pwm_div calculation; otherwise diff is always <= + 0, so only bingo match works + + Don't overwrite min_diff on every loop iteration + +v9: + +Address comment from Uwe Kleine-König: + + Use period_ns*rate in dividers calculation for better accuracy + + Round down pre_div and pwm_div + + Add a comment explaining why pwm_div can't underflow + + Add a comment explaining why pre_div > pwm_div end the search loop + + Drop 'CFG_' from register macros + + Rename to_ipq_pwm_chip() to ipq_pwm_from_chip() + + Change bare 'unsigned' to 'unsigned int' + + Clarify the comment on separate REG1 write for enable/disable + Round up the period value in .get_state + + Use direct readl/writel so no need to check for regmap errors + +v7: + + Change 'offset' to 'reg' for the tcsr offset (Rob) + + Drop clock name; there is only one clock (Bjorn) + + Simplify probe failure code path (Bjorn) + +v6: + +Address Uwe Kleine-König review comments: + + Drop IPQ_PWM_MAX_DEVICES + + Rely on assigned-clock-rates; drop IPQ_PWM_CLK_SRC_FREQ + + Simplify register offset calculation + + Calculate duty cycle more precisely + + Refuse to set inverted polarity + + Drop redundant IPQ_PWM_REG1_ENABLE bit clear + + Remove x1000 factor in pwm_div calculation, use rate directly, and round up + + Choose initial pre_div such that pwm_div < IPQ_PWM_MAX_DIV + + Ensure pre_div <= pwm_div + + Rename close_ to best_ + + Explain in comment why effective_div doesn't overflow + + Limit pwm_div to IPQ_PWM_MAX_DIV - 1 to allow 100% duty cycle + + Disable clock only after pwmchip_remove() + + const pwm_ops + +Other changes: + + Add missing linux/bitfield.h header include (kernel test robot) + + Adjust code for PWM device node under TCSR (Rob Herring) + +v5: + + Use &tcsr_q6 syscon to access registers (Bjorn Andersson) + + Address Uwe Kleine-König review comments: + + Implement .get_state() + + Add IPQ_PWM_ prefix to local macros + + Use GENMASK/BIT/FIELD_PREP for register fields access + + Make type of config_div_and_duty() parameters consistent + + Derive IPQ_PWM_MIN_PERIOD_NS from IPQ_PWM_CLK_SRC_FREQ + + Integrate enable/disable into config_div_and_duty() to save register read, + and reduce frequency glitch on update + + Use min() instead of min_t() + + Fix comment format + + Use dev_err_probe() to indicate probe step failure + + Add missing clk_disable_unprepare() in .remove + + Don't set .owner + +v4: + + Use div64_u64() to fix link for 32-bit targets ((kernel test robot + , Uwe Kleine-König) + +v3: + + s/qcom,pwm-ipq6018/qcom,ipq6018-pwm/ (Rob Herring) + + Fix integer overflow on 32-bit targets (kernel test robot ) + +v2: + 
+Address Uwe Kleine-König review comments: + + Fix period calculation when out of range + + Don't set period larger than requested + + Remove PWM disable on configuration change + + Implement .apply instead of non-atomic .config/.enable/.disable + + Don't modify PWM on .request/.free + + Check pwm_div underflow + + Fix various code and comment formatting issues + +Other changes: + + Use u64 divisor safe division + + Remove now empty .request/.free + + drivers/pwm/Kconfig | 12 ++ + drivers/pwm/Makefile | 1 + + drivers/pwm/pwm-ipq.c | 282 ++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 295 insertions(+) + create mode 100644 drivers/pwm/pwm-ipq.c + +--- a/drivers/pwm/Kconfig ++++ b/drivers/pwm/Kconfig +@@ -282,6 +282,18 @@ config PWM_INTEL_LGM + To compile this driver as a module, choose M here: the module + will be called pwm-intel-lgm. + ++config PWM_IPQ ++ tristate "IPQ PWM support" ++ depends on ARCH_QCOM || COMPILE_TEST ++ depends on HAVE_CLK && HAS_IOMEM ++ help ++ Generic PWM framework driver for IPQ PWM block which supports ++ 4 pwm channels. Each of the these channels can be configured ++ independent of each other. ++ ++ To compile this driver as a module, choose M here: the module ++ will be called pwm-ipq. ++ + config PWM_IQS620A + tristate "Azoteq IQS620A PWM support" + depends on MFD_IQS62X || COMPILE_TEST +--- a/drivers/pwm/Makefile ++++ b/drivers/pwm/Makefile +@@ -24,6 +24,7 @@ obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o + obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o + obj-$(CONFIG_PWM_IMX_TPM) += pwm-imx-tpm.o + obj-$(CONFIG_PWM_INTEL_LGM) += pwm-intel-lgm.o ++obj-$(CONFIG_PWM_IPQ) += pwm-ipq.o + obj-$(CONFIG_PWM_IQS620A) += pwm-iqs620a.o + obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o + obj-$(CONFIG_PWM_KEEMBAY) += pwm-keembay.o +--- /dev/null ++++ b/drivers/pwm/pwm-ipq.c +@@ -0,0 +1,282 @@ ++// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 ++/* ++ * Copyright (c) 2016-2017, 2020 The Linux Foundation. All rights reserved. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* The frequency range supported is 1 Hz to clock rate */ ++#define IPQ_PWM_MAX_PERIOD_NS ((u64)NSEC_PER_SEC) ++ ++/* ++ * The max value specified for each field is based on the number of bits ++ * in the pwm control register for that field ++ */ ++#define IPQ_PWM_MAX_DIV 0xFFFF ++ ++/* ++ * Two 32-bit registers for each PWM: REG0, and REG1. ++ * Base offset for PWM #i is at 8 * #i. ++ */ ++#define IPQ_PWM_REG0 0 ++#define IPQ_PWM_REG0_PWM_DIV GENMASK(15, 0) ++#define IPQ_PWM_REG0_HI_DURATION GENMASK(31, 16) ++ ++#define IPQ_PWM_REG1 4 ++#define IPQ_PWM_REG1_PRE_DIV GENMASK(15, 0) ++/* ++ * Enable bit is set to enable output toggling in pwm device. ++ * Update bit is set to reflect the changed divider and high duration ++ * values in register. 
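++ * The code below therefore programs REG0 and the REG1 divider field
++ * first, and only sets the update (and, when enabling, the enable)
++ * bit in a second write to REG1, so that the new divider and duty
++ * values take effect together.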
++ */ ++#define IPQ_PWM_REG1_UPDATE BIT(30) ++#define IPQ_PWM_REG1_ENABLE BIT(31) ++ ++struct ipq_pwm_chip { ++ struct pwm_chip chip; ++ struct clk *clk; ++ void __iomem *mem; ++}; ++ ++static struct ipq_pwm_chip *ipq_pwm_from_chip(struct pwm_chip *chip) ++{ ++ return container_of(chip, struct ipq_pwm_chip, chip); ++} ++ ++static unsigned int ipq_pwm_reg_read(struct pwm_device *pwm, unsigned int reg) ++{ ++ struct ipq_pwm_chip *ipq_chip = ipq_pwm_from_chip(pwm->chip); ++ unsigned int off = 8 * pwm->hwpwm + reg; ++ ++ return readl(ipq_chip->mem + off); ++} ++ ++static void ipq_pwm_reg_write(struct pwm_device *pwm, unsigned int reg, ++ unsigned int val) ++{ ++ struct ipq_pwm_chip *ipq_chip = ipq_pwm_from_chip(pwm->chip); ++ unsigned int off = 8 * pwm->hwpwm + reg; ++ ++ writel(val, ipq_chip->mem + off); ++} ++ ++static void config_div_and_duty(struct pwm_device *pwm, unsigned int pre_div, ++ unsigned int pwm_div, unsigned long rate, u64 duty_ns, ++ bool enable) ++{ ++ unsigned long hi_dur; ++ unsigned long val = 0; ++ ++ /* ++ * high duration = pwm duty * (pwm div + 1) ++ * pwm duty = duty_ns / period_ns ++ */ ++ hi_dur = div64_u64(duty_ns * rate, (pre_div + 1) * NSEC_PER_SEC); ++ ++ val = FIELD_PREP(IPQ_PWM_REG0_HI_DURATION, hi_dur) | ++ FIELD_PREP(IPQ_PWM_REG0_PWM_DIV, pwm_div); ++ ipq_pwm_reg_write(pwm, IPQ_PWM_REG0, val); ++ ++ val = FIELD_PREP(IPQ_PWM_REG1_PRE_DIV, pre_div); ++ ipq_pwm_reg_write(pwm, IPQ_PWM_REG1, val); ++ ++ /* PWM enable toggle needs a separate write to REG1 */ ++ val |= IPQ_PWM_REG1_UPDATE; ++ if (enable) ++ val |= IPQ_PWM_REG1_ENABLE; ++ ipq_pwm_reg_write(pwm, IPQ_PWM_REG1, val); ++} ++ ++static int ipq_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, ++ const struct pwm_state *state) ++{ ++ struct ipq_pwm_chip *ipq_chip = ipq_pwm_from_chip(chip); ++ unsigned int pre_div, pwm_div, best_pre_div, best_pwm_div; ++ unsigned long rate = clk_get_rate(ipq_chip->clk); ++ u64 period_ns, duty_ns, period_rate; ++ u64 min_diff; ++ ++ if (state->polarity != PWM_POLARITY_NORMAL) ++ return -EINVAL; ++ ++ if (state->period < DIV64_U64_ROUND_UP(NSEC_PER_SEC, rate)) ++ return -ERANGE; ++ ++ period_ns = min(state->period, IPQ_PWM_MAX_PERIOD_NS); ++ duty_ns = min(state->duty_cycle, period_ns); ++ ++ /* ++ * period_ns is 1G or less. As long as rate is less than 16 GHz, ++ * period_rate does not overflow. Make that explicit. ++ */ ++ if ((unsigned long long)rate > 16ULL * GIGA) ++ return -EINVAL; ++ period_rate = period_ns * rate; ++ best_pre_div = IPQ_PWM_MAX_DIV; ++ best_pwm_div = IPQ_PWM_MAX_DIV; ++ /* ++ * We don't need to consider pre_div values smaller than ++ * ++ * period_rate ++ * pre_div_min := ------------------------------------ ++ * NSEC_PER_SEC * (IPQ_PWM_MAX_DIV + 1) ++ * ++ * because pre_div = pre_div_min results in a better ++ * approximation. ++ */ ++ pre_div = div64_u64(period_rate, ++ (u64)NSEC_PER_SEC * (IPQ_PWM_MAX_DIV + 1)); ++ min_diff = period_rate; ++ ++ for (; pre_div <= IPQ_PWM_MAX_DIV; pre_div++) { ++ u64 remainder; ++ ++ pwm_div = div64_u64_rem(period_rate, ++ (u64)NSEC_PER_SEC * (pre_div + 1), &remainder); ++ /* pwm_div is unsigned; the check below catches underflow */ ++ pwm_div--; ++ ++ /* ++ * Swapping values for pre_div and pwm_div produces the same ++ * period length. So we can skip all settings with pre_div > ++ * pwm_div which results in bigger constraints for selecting ++ * the duty_cycle than with the two values swapped. 
++ */ ++ if (pre_div > pwm_div) ++ break; ++ ++ /* ++ * Make sure we can do 100% duty cycle where ++ * hi_dur == pwm_div + 1 ++ */ ++ if (pwm_div > IPQ_PWM_MAX_DIV - 1) ++ continue; ++ ++ if (remainder < min_diff) { ++ best_pre_div = pre_div; ++ best_pwm_div = pwm_div; ++ min_diff = remainder; ++ ++ if (min_diff == 0) /* bingo */ ++ break; ++ } ++ } ++ ++ /* config divider values for the closest possible frequency */ ++ config_div_and_duty(pwm, best_pre_div, best_pwm_div, ++ rate, duty_ns, state->enabled); ++ ++ return 0; ++} ++ ++static int ipq_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, ++ struct pwm_state *state) ++{ ++ struct ipq_pwm_chip *ipq_chip = ipq_pwm_from_chip(chip); ++ unsigned long rate = clk_get_rate(ipq_chip->clk); ++ unsigned int pre_div, pwm_div, hi_dur; ++ u64 effective_div, hi_div; ++ u32 reg0, reg1; ++ ++ reg0 = ipq_pwm_reg_read(pwm, IPQ_PWM_REG0); ++ reg1 = ipq_pwm_reg_read(pwm, IPQ_PWM_REG1); ++ ++ state->polarity = PWM_POLARITY_NORMAL; ++ state->enabled = reg1 & IPQ_PWM_REG1_ENABLE; ++ ++ pwm_div = FIELD_GET(IPQ_PWM_REG0_PWM_DIV, reg0); ++ hi_dur = FIELD_GET(IPQ_PWM_REG0_HI_DURATION, reg0); ++ pre_div = FIELD_GET(IPQ_PWM_REG1_PRE_DIV, reg1); ++ ++ /* No overflow here, both pre_div and pwm_div <= 0xffff */ ++ effective_div = (u64)(pre_div + 1) * (pwm_div + 1); ++ state->period = DIV64_U64_ROUND_UP(effective_div * NSEC_PER_SEC, rate); ++ ++ hi_div = hi_dur * (pre_div + 1); ++ state->duty_cycle = DIV64_U64_ROUND_UP(hi_div * NSEC_PER_SEC, rate); ++ ++ return 0; ++} ++ ++static const struct pwm_ops ipq_pwm_ops = { ++ .apply = ipq_pwm_apply, ++ .get_state = ipq_pwm_get_state, ++ .owner = THIS_MODULE, ++}; ++ ++static int ipq_pwm_probe(struct platform_device *pdev) ++{ ++ struct ipq_pwm_chip *pwm; ++ struct device *dev = &pdev->dev; ++ int ret; ++ ++ pwm = devm_kzalloc(dev, sizeof(*pwm), GFP_KERNEL); ++ if (!pwm) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, pwm); ++ ++ pwm->mem = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(pwm->mem)) ++ return dev_err_probe(dev, PTR_ERR(pwm->mem), ++ "regs map failed"); ++ ++ pwm->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(pwm->clk)) ++ return dev_err_probe(dev, PTR_ERR(pwm->clk), ++ "failed to get clock"); ++ ++ ret = clk_prepare_enable(pwm->clk); ++ if (ret) ++ return dev_err_probe(dev, ret, "clock enable failed"); ++ ++ pwm->chip.dev = dev; ++ pwm->chip.ops = &ipq_pwm_ops; ++ pwm->chip.npwm = 4; ++ ++ ret = pwmchip_add(&pwm->chip); ++ if (ret < 0) { ++ dev_err_probe(dev, ret, "pwmchip_add() failed\n"); ++ clk_disable_unprepare(pwm->clk); ++ } ++ ++ return ret; ++} ++ ++static int ipq_pwm_remove(struct platform_device *pdev) ++{ ++ struct ipq_pwm_chip *pwm = platform_get_drvdata(pdev); ++ ++ pwmchip_remove(&pwm->chip); ++ clk_disable_unprepare(pwm->clk); ++ ++ return 0; ++} ++ ++static const struct of_device_id pwm_ipq_dt_match[] = { ++ { .compatible = "qcom,ipq6018-pwm", }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, pwm_ipq_dt_match); ++ ++static struct platform_driver ipq_pwm_driver = { ++ .driver = { ++ .name = "ipq-pwm", ++ .of_match_table = pwm_ipq_dt_match, ++ }, ++ .probe = ipq_pwm_probe, ++ .remove = ipq_pwm_remove, ++}; ++ ++module_platform_driver(ipq_pwm_driver); ++ ++MODULE_LICENSE("Dual BSD/GPL"); diff --git a/target/linux/qualcommax/patches-6.6/1009-arm64-dts-qcom-ipq6018-add-pwm-node.patch b/target/linux/qualcommax/patches-6.6/1009-arm64-dts-qcom-ipq6018-add-pwm-node.patch new file mode 100644 index 00000000000000..a54aeedb183c8c --- /dev/null +++ 
b/target/linux/qualcommax/patches-6.6/1009-arm64-dts-qcom-ipq6018-add-pwm-node.patch @@ -0,0 +1,187 @@ +From patchwork Thu Oct 5 16:05:50 2023 +Content-Type: text/plain; charset="utf-8" +MIME-Version: 1.0 +Content-Transfer-Encoding: 8bit +X-Patchwork-Submitter: Devi Priya +X-Patchwork-Id: 13410402 +Return-Path: +X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on + aws-us-west-2-korg-lkml-1.web.codeaurora.org +Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) + by smtp.lore.kernel.org (Postfix) with ESMTP id 0A952E71D4F + for ; + Thu, 5 Oct 2023 16:21:13 +0000 (UTC) +Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand + id S242309AbjJEQUs (ORCPT + ); + Thu, 5 Oct 2023 12:20:48 -0400 +Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36106 "EHLO + lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org + with ESMTP id S242451AbjJEQSB (ORCPT + ); + Thu, 5 Oct 2023 12:18:01 -0400 +Received: from mx0b-0031df01.pphosted.com (mx0b-0031df01.pphosted.com + [205.220.180.131]) + by lindbergh.monkeyblade.net (Postfix) with ESMTPS id C6E8D3F025; + Thu, 5 Oct 2023 09:06:56 -0700 (PDT) +Received: from pps.filterd (m0279869.ppops.net [127.0.0.1]) + by mx0a-0031df01.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id + 395CsRur015464; + Thu, 5 Oct 2023 16:06:41 GMT +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quicinc.com; + h=from : to : cc : + subject : date : message-id : in-reply-to : references : mime-version : + content-type : content-transfer-encoding; s=qcppdkim1; + bh=NSi6brrY0QDAZ3HzHxPaf2hgz27vKeKX8LZRRHNWQWc=; + b=bhnUe3rjnlfJlfGglxvqihGmTNQCt2GnP9fbZBkIsYd0LKp5VGtAlA0IYUcwsiTLb7RK + zTJfR/Llub7KabA70G7qxopCKO0GgBFRq3Xhug1Nnrpr08qcHCrAOSuS16i8viiv5tE/ + Lrn3/Dj59DKIWaBWzrTmI5pK0eLkK0nAPYsTjv03eE8JFtw3M0lHrMQdjuwtpNE7ivjl + CCkahl9YDirevdy+EdeBg17dBw4R3t/qo+3tDzom1COe5knM5DIJI79fxqY1ueFFOM6D + D5mVEsufaOcEQFeLv3y7PtEsPTmWdyN2PCiU8GpFFz6KH8iJLyhzHc6o5pItDDupV1EG uQ== +Received: from nalasppmta04.qualcomm.com (Global_NAT1.qualcomm.com + [129.46.96.20]) + by mx0a-0031df01.pphosted.com (PPS) with ESMTPS id 3thn059cyw-1 + (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 + verify=NOT); + Thu, 05 Oct 2023 16:06:41 +0000 +Received: from nalasex01a.na.qualcomm.com (nalasex01a.na.qualcomm.com + [10.47.209.196]) + by NALASPPMTA04.qualcomm.com (8.17.1.5/8.17.1.5) with ESMTPS id + 395G6dfE025540 + (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 + verify=NOT); + Thu, 5 Oct 2023 16:06:40 GMT +Received: from hu-devipriy-blr.qualcomm.com (10.80.80.8) by + nalasex01a.na.qualcomm.com (10.47.209.196) with Microsoft SMTP Server + (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id + 15.2.1118.36; Thu, 5 Oct 2023 09:06:34 -0700 +From: Devi Priya +To: , , + , , , + , , + , , + , , + , , + , +CC: , , + +Subject: [PATCH V15 4/4] arm64: dts: qcom: ipq6018: add pwm node +Date: Thu, 5 Oct 2023 21:35:50 +0530 +Message-ID: <20231005160550.2423075-5-quic_devipriy@quicinc.com> +X-Mailer: git-send-email 2.34.1 +In-Reply-To: <20231005160550.2423075-1-quic_devipriy@quicinc.com> +References: <20231005160550.2423075-1-quic_devipriy@quicinc.com> +MIME-Version: 1.0 +X-Originating-IP: [10.80.80.8] +X-ClientProxiedBy: nasanex01a.na.qualcomm.com (10.52.223.231) To + nalasex01a.na.qualcomm.com (10.47.209.196) +X-QCInternal: smtphost +X-Proofpoint-Virus-Version: vendor=nai engine=6200 definitions=5800 + signatures=585085 +X-Proofpoint-ORIG-GUID: pbCgYn8v-0OpdxaoHvIbH76mglvWkJsO +X-Proofpoint-GUID: pbCgYn8v-0OpdxaoHvIbH76mglvWkJsO 
+X-Proofpoint-Virus-Version: vendor=baseguard + engine=ICAP:2.0.267,Aquarius:18.0.980,Hydra:6.0.619,FMLib:17.11.176.26 + definitions=2023-10-05_11,2023-10-05_01,2023-05-22_02 +X-Proofpoint-Spam-Details: rule=outbound_notspam policy=outbound score=0 + adultscore=0 suspectscore=0 + impostorscore=0 bulkscore=0 spamscore=0 mlxlogscore=999 priorityscore=1501 + mlxscore=0 malwarescore=0 lowpriorityscore=0 phishscore=0 clxscore=1015 + classifier=spam adjust=0 reason=mlx scancount=1 engine=8.12.0-2309180000 + definitions=main-2310050125 +Precedence: bulk +List-ID: +X-Mailing-List: linux-arm-msm@vger.kernel.org + +Describe the PWM block on IPQ6018. + +The PWM is in the TCSR area. Make &tcsr "simple-mfd" compatible, and add +&pwm as child of &tcsr. + +Add also ipq6018 specific compatible string. + +Reviewed-by: Krzysztof Kozlowski +Co-developed-by: Baruch Siach +Signed-off-by: Baruch Siach +Signed-off-by: Devi Priya +--- +v15: + + Fixed the indentation of pwm node + +v14: + + Moved ranges just after reg as suggested by Krzysztof + + Picked up the R-b tag + +v13: + + No change + +v12: + + No change + +v11: + + No change + +v10: + + No change + +v9: + + Add 'ranges' property (Rob) + +v8: + + Add size cell to 'reg' (Rob) + +v7: + + Use 'reg' instead of 'offset' (Rob) + + Add qcom,tcsr-ipq6018 (Rob) + + Drop clock-names (Bjorn) + +v6: + + Make the PWM node child of TCSR (Rob Herring) + + Add assigned-clocks/assigned-clock-rates (Uwe Kleine-König) + +v5: Use qcom,pwm-regs for TCSR phandle instead of direct regs + +v3: s/qcom,pwm-ipq6018/qcom,ipq6018-pwm/ (Rob Herring) + + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 15 ++++++++++++++- + 1 file changed, 14 insertions(+), 1 deletion(-) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -456,8 +456,21 @@ + }; + + tcsr: syscon@1937000 { +- compatible = "qcom,tcsr-ipq6018", "syscon"; ++ compatible = "qcom,tcsr-ipq6018", "syscon", "simple-mfd"; + reg = <0x0 0x01937000 0x0 0x21000>; ++ ranges = <0x0 0x0 0x01937000 0x21000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ pwm: pwm@a010 { ++ compatible = "qcom,ipq6018-pwm"; ++ reg = <0xa010 0x20>; ++ clocks = <&gcc GCC_ADSS_PWM_CLK>; ++ assigned-clocks = <&gcc GCC_ADSS_PWM_CLK>; ++ assigned-clock-rates = <100000000>; ++ #pwm-cells = <2>; ++ status = "disabled"; ++ }; + }; + + usb2: usb@70f8800 { diff --git a/target/linux/qualcommax/patches-6.6/1010-clk-qcom-gcc-ipq6018-rework-nss_port5-clock-to-mul.patch b/target/linux/qualcommax/patches-6.6/1010-clk-qcom-gcc-ipq6018-rework-nss_port5-clock-to-mul.patch new file mode 100644 index 00000000000000..5c1c03e3494805 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/1010-clk-qcom-gcc-ipq6018-rework-nss_port5-clock-to-mul.patch @@ -0,0 +1,66 @@ +From 76b1d5178910e6714f14c1f6c17390277d227afd Mon Sep 17 00:00:00 2001 +From: JiaY-shi +Date: Thu, 17 Aug 2023 20:46:54 +0800 +Subject: [PATCH] clk: qcom: gcc-ipq6018: rework nss_port5 clock to multiple + conf + +--- + drivers/clk/qcom/gcc-ipq6018.c | 34 ++++++++++++++++++++++++++-------- + 1 file changed, 26 insertions(+), 8 deletions(-) + +--- a/drivers/clk/qcom/gcc-ipq6018.c ++++ b/drivers/clk/qcom/gcc-ipq6018.c +@@ -537,13 +537,22 @@ static struct clk_rcg2 apss_ahb_clk_src + }, + }; + ++ ++static const struct freq_conf ftbl_nss_port5_rx_clk_src_25[] = { ++ C(P_UNIPHY1_RX, 12.5, 0, 0), ++ C(P_UNIPHY0_RX, 5, 0, 0), ++}; ++ ++static const struct freq_conf ftbl_nss_port5_rx_clk_src_125[] = { ++ C(P_UNIPHY1_RX, 2.5, 0, 0), ++ C(P_UNIPHY0_RX, 1, 0, 0), ++}; ++ + static const struct 
freq_tbl ftbl_nss_port5_rx_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), +- F(25000000, P_UNIPHY1_RX, 12.5, 0, 0), +- F(25000000, P_UNIPHY0_RX, 5, 0, 0), ++ FM(25000000, ftbl_nss_port5_rx_clk_src_25), + F(78125000, P_UNIPHY1_RX, 4, 0, 0), +- F(125000000, P_UNIPHY1_RX, 2.5, 0, 0), +- F(125000000, P_UNIPHY0_RX, 1, 0, 0), ++ FM(125000000, ftbl_nss_port5_rx_clk_src_125), + F(156250000, P_UNIPHY1_RX, 2, 0, 0), + F(312500000, P_UNIPHY1_RX, 1, 0, 0), + { } +@@ -584,13 +593,22 @@ static struct clk_rcg2 nss_port5_rx_clk_ + }, + }; + ++ ++static struct freq_conf ftbl_nss_port5_tx_clk_src_25[] = { ++ C(P_UNIPHY1_TX, 12.5, 0, 0), ++ C(P_UNIPHY0_TX, 5, 0, 0), ++}; ++ ++static struct freq_conf ftbl_nss_port5_tx_clk_src_125[] = { ++ C(P_UNIPHY1_TX, 2.5, 0, 0), ++ C(P_UNIPHY0_TX, 1, 0, 0), ++}; ++ + static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = { + F(24000000, P_XO, 1, 0, 0), +- F(25000000, P_UNIPHY1_TX, 12.5, 0, 0), +- F(25000000, P_UNIPHY0_TX, 5, 0, 0), ++ FM(25000000, ftbl_nss_port5_tx_clk_src_25), + F(78125000, P_UNIPHY1_TX, 4, 0, 0), +- F(125000000, P_UNIPHY1_TX, 2.5, 0, 0), +- F(125000000, P_UNIPHY0_TX, 1, 0, 0), ++ FM(125000000, ftbl_nss_port5_tx_clk_src_125), + F(156250000, P_UNIPHY1_TX, 2, 0, 0), + F(312500000, P_UNIPHY1_TX, 1, 0, 0), + { } diff --git a/target/linux/qualcommax/patches-6.6/1011-arm64-dts-qcom-ipq6018-assign-some-clock-to-wifi.patch b/target/linux/qualcommax/patches-6.6/1011-arm64-dts-qcom-ipq6018-assign-some-clock-to-wifi.patch new file mode 100644 index 00000000000000..fd7216c68c2ad7 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/1011-arm64-dts-qcom-ipq6018-assign-some-clock-to-wifi.patch @@ -0,0 +1,40 @@ +From 71f30e25d21ae4981ecef6653a4ba7dfeb80db7b Mon Sep 17 00:00:00 2001 +From: JiaY-shi +Date: Tue, 23 Jan 2024 11:04:57 +0200 +Subject: [PATCH] arm64: dts: qcom: ipq6018: assign some clock to wifi remoteproc + +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 15 ++++++++------- + 1 file changed, 9 insertions(+), 8 deletions(-) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -983,8 +983,26 @@ + "wcss_reset", + "wcss_q6_reset"; + +- clocks = <&gcc GCC_PRNG_AHB_CLK>, <&gcc GCC_QDSS_AT_CLK>; +- clock-names = "prng", "qdss" ; ++ clocks = <&gcc GCC_PRNG_AHB_CLK>, ++ <&gcc GCC_QDSS_AT_CLK>, ++ <&gcc GCC_SYS_NOC_WCSS_AHB_CLK>, ++ <&gcc GCC_Q6SS_ATBM_CLK>, ++ <&gcc GCC_Q6SS_PCLKDBG_CLK>, ++ <&gcc GCC_Q6_TSCTR_1TO2_CLK>; ++ clock-names = "prng", ++ "qdss", ++ "gcc_sys_noc_wcss_ahb_clk", ++ "gcc_q6ss_atbm_clk", ++ "gcc_q6ss_pclkdbg_clk", ++ "gcc_q6_tsctr_1to2_clk"; ++ assigned-clocks = <&gcc GCC_SYS_NOC_WCSS_AHB_CLK>, ++ <&gcc GCC_Q6SS_PCLKDBG_CLK>, ++ <&gcc GCC_Q6_TSCTR_1TO2_CLK>, ++ <&gcc GCC_Q6SS_ATBM_CLK>; ++ assigned-clock-rates = <133333333>, ++ <600000000>, ++ <600000000>, ++ <240000000>; + + qcom,halt-regs = <&tcsr 0x18000 0x1b000 0xe000>; + diff --git a/target/linux/qualcommax/patches-6.6/1012-arm64-dts-qcom-ipq6018-Add-QUP5-SPI-node.patch b/target/linux/qualcommax/patches-6.6/1012-arm64-dts-qcom-ipq6018-Add-QUP5-SPI-node.patch new file mode 100644 index 00000000000000..1398c046364b11 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/1012-arm64-dts-qcom-ipq6018-Add-QUP5-SPI-node.patch @@ -0,0 +1,36 @@ +From dc5423c92edffac4d3b59aa5fd0dcbdfdcba0bf3 Mon Sep 17 00:00:00 2001 +From: Chukun Pan +Date: Sat, 11 Nov 2023 15:50:03 +0800 +Subject: [PATCH] arm64: dts: qcom: ipq6018: Add QUP5 SPI node + +Add node to support the QUP5 SPI controller inside of IPQ6018. +Some routers use this bus to connect SPI TPM chips. 
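+
+A board DTS can then enable the controller and describe a TPM on chip
+select 0 roughly as below (a minimal sketch; the pinctrl label and the
+SLB9670 part are illustrative assumptions, not taken from this patch):
+
+	&blsp1_spi5 {
+		pinctrl-0 = <&spi_5_pins>;
+		pinctrl-names = "default";
+		status = "okay";
+
+		tpm@0 {
+			compatible = "infineon,slb9670";
+			reg = <0>;
+			spi-max-frequency = <20000000>;
+		};
+	};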
+ +Signed-off-by: Chukun Pan +--- + arch/arm64/boot/dts/qcom/ipq6018.dtsi | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi ++++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi +@@ -635,6 +635,20 @@ + status = "disabled"; + }; + ++ blsp1_spi5: spi@78b9000 { ++ compatible = "qcom,spi-qup-v2.2.1"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x078b9000 0x0 0x600>; ++ interrupts = ; ++ clocks = <&gcc GCC_BLSP1_QUP5_SPI_APPS_CLK>, ++ <&gcc GCC_BLSP1_AHB_CLK>; ++ clock-names = "core", "iface"; ++ dmas = <&blsp_dma 20>, <&blsp_dma 21>; ++ dma-names = "tx", "rx"; ++ status = "disabled"; ++ }; ++ + blsp1_i2c2: i2c@78b6000 { + compatible = "qcom,i2c-qup-v2.2.1"; + #address-cells = <1>; diff --git a/target/linux/qualcommax/patches-6.6/2170-clk-qcom-ipq8074-Support-added-for-necessary-clocks-and-reset.patch b/target/linux/qualcommax/patches-6.6/2170-clk-qcom-ipq8074-Support-added-for-necessary-clocks-and-reset.patch new file mode 100644 index 00000000000000..76cc8caac9a412 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2170-clk-qcom-ipq8074-Support-added-for-necessary-clocks-and-reset.patch @@ -0,0 +1,311 @@ +From 6504bc9edeb1a2a54d813f4bb5d0267e7bf827f9 Mon Sep 17 00:00:00 2001 +From: Praveenkumar I +Date: Thu, 6 Feb 2020 17:35:42 +0530 +Subject: [PATCH 4/8] clk: ipq8074: Support added for necessary clocks and + reset + +Change-Id: I21a76a44185f766e9b6dcba274392ea8e599718b +Signed-off-by: Praveenkumar I +Signed-off-by: Rajkumar Ayyasamy +--- + drivers/clk/qcom/gcc-ipq8074.c | 238 ++++++++++++++++++- + include/dt-bindings/clock/qcom,gcc-ipq8074.h | 35 ++- + 2 files changed, 258 insertions(+), 15 deletions(-) + +--- a/drivers/clk/qcom/gcc-ipq8074.c ++++ b/drivers/clk/qcom/gcc-ipq8074.c +@@ -48,6 +48,22 @@ enum { + P_UNIPHY2_TX, + }; + ++static const char * const gcc_xo_gpll4_gpll0_gpll6_gpll0_div2[] = { ++ "xo", ++ "gpll4", ++ "gpll0", ++ "gpll6", ++ "gpll0_out_main_div2", ++}; ++ ++static const struct parent_map gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map[] = { ++ { P_XO, 0 }, ++ { P_GPLL4, 1 }, ++ { P_GPLL0, 2 }, ++ { P_GPLL6, 3 }, ++ { P_GPLL0_DIV2, 4 }, ++}; ++ + static struct clk_alpha_pll gpll0_main = { + .offset = 0x21000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT], +@@ -629,6 +645,12 @@ static const struct freq_tbl ftbl_pcie_a + { } + }; + ++struct freq_tbl ftbl_pcie_rchng_clk_src[] = { ++ F(19200000, P_XO, 1, 0, 0), ++ F(100000000, P_GPLL0, 8, 0, 0), ++ { } ++}; ++ + static struct clk_rcg2 pcie0_axi_clk_src = { + .cmd_rcgr = 0x75054, + .freq_tbl = ftbl_pcie_axi_clk_src, +@@ -2029,6 +2051,78 @@ static struct clk_rcg2 gp3_clk_src = { + }, + }; + ++struct freq_tbl ftbl_qdss_tsctr_clk_src[] = { ++ F(160000000, P_GPLL0_DIV2, 2.5, 0, 0), ++ F(320000000, P_GPLL0, 2.5, 0, 0), ++ F(600000000, P_GPLL6, 2, 0, 0), ++ { } ++}; ++ ++struct clk_rcg2 qdss_tsctr_clk_src = { ++ .cmd_rcgr = 0x29064, ++ .freq_tbl = ftbl_qdss_tsctr_clk_src, ++ .hid_width = 5, ++ .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map, ++ .clkr.hw.init = &(struct clk_init_data){ ++ .name = "qdss_tsctr_clk_src", ++ .parent_names = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2, ++ .num_parents = 5, ++ .ops = &clk_rcg2_ops, ++ }, ++}; ++ ++static struct clk_fixed_factor qdss_dap_sync_clk_src = { ++ .mult = 1, ++ .div = 4, ++ .hw.init = &(struct clk_init_data){ ++ .name = "qdss_dap_sync_clk_src", ++ .parent_names = (const char *[]){ ++ "qdss_tsctr_clk_src" ++ }, ++ .num_parents = 1, ++ .ops = &clk_fixed_factor_ops, ++ }, ++}; ++ ++struct freq_tbl ftbl_qdss_at_clk_src[] 
= { ++ F(66670000, P_GPLL0_DIV2, 6, 0, 0), ++ F(240000000, P_GPLL6, 6, 0, 0), ++ { } ++}; ++ ++struct clk_rcg2 qdss_at_clk_src = { ++ .cmd_rcgr = 0x2900c, ++ .freq_tbl = ftbl_qdss_at_clk_src, ++ .hid_width = 5, ++ .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map, ++ .clkr.hw.init = &(struct clk_init_data){ ++ .name = "qdss_at_clk_src", ++ .parent_names = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2, ++ .num_parents = 5, ++ .ops = &clk_rcg2_ops, ++ }, ++}; ++ ++ ++struct freq_tbl ftbl_adss_pwm_clk_src[] = { ++ F(19200000, P_XO, 1, 0, 0), ++ F(200000000, P_GPLL0, 4, 0, 0), ++ { } ++}; ++ ++struct clk_rcg2 adss_pwm_clk_src = { ++ .cmd_rcgr = 0x1c008, ++ .freq_tbl = ftbl_adss_pwm_clk_src, ++ .hid_width = 5, ++ .parent_map = gcc_xo_gpll0_map, ++ .clkr.hw.init = &(struct clk_init_data){ ++ .name = "adss_pwm_clk_src", ++ .parent_data = gcc_xo_gpll0, ++ .num_parents = 2, ++ .ops = &clk_rcg2_ops, ++ }, ++}; ++ + static struct clk_branch gcc_blsp1_ahb_clk = { + .halt_reg = 0x01008, + .clkr = { +@@ -4224,13 +4318,7 @@ static struct clk_branch gcc_gp3_clk = { + }, + }; + +-static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = { +- F(19200000, P_XO, 1, 0, 0), +- F(100000000, P_GPLL0, 8, 0, 0), +- { } +-}; +- +-static struct clk_rcg2 pcie0_rchng_clk_src = { ++struct clk_rcg2 pcie0_rchng_clk_src = { + .cmd_rcgr = 0x75070, + .freq_tbl = ftbl_pcie_rchng_clk_src, + .hid_width = 5, +@@ -4322,6 +4410,114 @@ static const struct alpha_pll_config nss + .alpha_en_mask = BIT(24), + }; + ++static struct clk_branch gcc_snoc_bus_timeout2_ahb_clk = { ++ .halt_reg = 0x4700c, ++ .halt_bit = 31, ++ .clkr = { ++ .enable_reg = 0x4700c, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_snoc_bus_timeout2_ahb_clk", ++ .parent_names = (const char *[]){ ++ "usb0_master_clk_src" ++ }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_snoc_bus_timeout3_ahb_clk = { ++ .halt_reg = 0x47014, ++ .halt_bit = 31, ++ .clkr = { ++ .enable_reg = 0x47014, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_snoc_bus_timeout3_ahb_clk", ++ .parent_names = (const char *[]){ ++ "usb1_master_clk_src" ++ }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_dcc_clk = { ++ .halt_reg = 0x77004, ++ .halt_bit = 31, ++ .clkr = { ++ .enable_reg = 0x77004, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_dcc_clk", ++ .parent_names = (const char *[]){ ++ "pcnoc_clk_src" ++ }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_qdss_at_clk = { ++ .halt_reg = 0x29024, ++ .halt_bit = 31, ++ .clkr = { ++ .enable_reg = 0x29024, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_qdss_at_clk", ++ .parent_names = (const char *[]){ ++ "qdss_at_clk_src" ++ }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct clk_branch gcc_qdss_dap_clk = { ++ .halt_reg = 0x29084, ++ .halt_bit = 31, ++ .clkr = { ++ .enable_reg = 0x29084, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_qdss_dap_clk", ++ .parent_names = (const char *[]){ ++ "qdss_dap_sync_clk_src" ++ }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ ++static struct 
clk_branch gcc_adss_pwm_clk = { ++ .halt_reg = 0x1c020, ++ .halt_bit = 31, ++ .clkr = { ++ .enable_reg = 0x1c020, ++ .enable_mask = BIT(0), ++ .hw.init = &(struct clk_init_data){ ++ .name = "gcc_adss_pwm_clk", ++ .parent_names = (const char *[]){ ++ "adss_pwm_clk_src" ++ }, ++ .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT, ++ .ops = &clk_branch2_ops, ++ }, ++ }, ++}; ++ + static struct clk_hw *gcc_ipq8074_hws[] = { + &gpll0_out_main_div2.hw, + &gpll6_out_main_div2.hw, +@@ -4330,6 +4526,7 @@ static struct clk_hw *gcc_ipq8074_hws[] + &gcc_xo_div4_clk_src.hw, + &nss_noc_clk_src.hw, + &nss_ppe_cdiv_clk_src.hw, ++ &qdss_dap_sync_clk_src.hw, + }; + + static struct clk_regmap *gcc_ipq8074_clks[] = { +@@ -4561,6 +4758,15 @@ static struct clk_regmap *gcc_ipq8074_cl + [GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr, + [GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr, + [GCC_CRYPTO_PPE_CLK] = &gcc_crypto_ppe_clk.clkr, ++ [GCC_SNOC_BUS_TIMEOUT2_AHB_CLK] = &gcc_snoc_bus_timeout2_ahb_clk.clkr, ++ [GCC_SNOC_BUS_TIMEOUT3_AHB_CLK] = &gcc_snoc_bus_timeout3_ahb_clk.clkr, ++ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr, ++ [QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr, ++ [QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr, ++ [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr, ++ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr, ++ [ADSS_PWM_CLK_SRC] = &adss_pwm_clk_src.clkr, ++ [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr, + }; + + static const struct qcom_reset_map gcc_ipq8074_resets[] = { +--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h ++++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h +@@ -230,10 +230,19 @@ + #define GCC_GP1_CLK 221 + #define GCC_GP2_CLK 222 + #define GCC_GP3_CLK 223 +-#define GCC_PCIE0_AXI_S_BRIDGE_CLK 224 +-#define GCC_PCIE0_RCHNG_CLK_SRC 225 +-#define GCC_PCIE0_RCHNG_CLK 226 +-#define GCC_CRYPTO_PPE_CLK 227 ++#define GCC_CRYPTO_PPE_CLK 224 ++#define GCC_PCIE0_RCHNG_CLK_SRC 225 ++#define GCC_PCIE0_RCHNG_CLK 226 ++#define GCC_PCIE0_AXI_S_BRIDGE_CLK 227 ++#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK 228 ++#define GCC_SNOC_BUS_TIMEOUT3_AHB_CLK 229 ++#define GCC_DCC_CLK 230 ++#define ADSS_PWM_CLK_SRC 231 ++#define GCC_ADSS_PWM_CLK 232 ++#define QDSS_TSCTR_CLK_SRC 233 ++#define QDSS_AT_CLK_SRC 234 ++#define GCC_QDSS_AT_CLK 235 ++#define GCC_QDSS_DAP_CLK 236 + + #define GCC_BLSP1_BCR 0 + #define GCC_BLSP1_QUP1_BCR 1 diff --git a/target/linux/qualcommax/patches-6.6/2171-1-clk-qcom-ipq8074-Fix-gcc_snoc_bus_timeout_ahb_clk-offset.patch b/target/linux/qualcommax/patches-6.6/2171-1-clk-qcom-ipq8074-Fix-gcc_snoc_bus_timeout_ahb_clk-offset.patch new file mode 100644 index 00000000000000..b1393fc9ad43c2 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2171-1-clk-qcom-ipq8074-Fix-gcc_snoc_bus_timeout_ahb_clk-offset.patch @@ -0,0 +1,44 @@ +From 462aa0c53397ec5bf78e3e7f68aa8a3ca300f4ba Mon Sep 17 00:00:00 2001 +From: Selvam Sathappan Periakaruppan +Date: Tue, 24 Mar 2020 19:09:38 +0530 +Subject: [PATCH 5/8] clk: qcom: ipq8074: Fix gcc_snoc_bus_timeout_ahb_clk + offset + +By default, the ipq8074 V2 clks are provided in the gcc driver. +Updating the gcc_snoc_bus_timeout_ahb_clk offsets also as needed +in ipq8074 V2. 
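+
+For clarity, the resulting register moves (taken from the hunk below;
+both branches shift up by 8 bytes):
+
+	gcc_snoc_bus_timeout2_ahb_clk: halt/enable 0x4700c -> 0x47014
+	gcc_snoc_bus_timeout3_ahb_clk: halt/enable 0x47014 -> 0x4701C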
+
+Change-Id: I5a6e98d002f5c3354a804e55dd9ebb1f83f7f974
+Signed-off-by: Selvam Sathappan Periakaruppan 
+---
+ drivers/clk/qcom/gcc-ipq8074.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/clk/qcom/gcc-ipq8074.c
++++ b/drivers/clk/qcom/gcc-ipq8074.c
+@@ -4411,10 +4411,10 @@ static const struct alpha_pll_config nss
+ };
+ 
+ static struct clk_branch gcc_snoc_bus_timeout2_ahb_clk = {
+-	.halt_reg = 0x4700c,
++	.halt_reg = 0x47014,
+ 	.halt_bit = 31,
+ 	.clkr = {
+-		.enable_reg = 0x4700c,
++		.enable_reg = 0x47014,
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_snoc_bus_timeout2_ahb_clk",
+@@ -4429,10 +4429,10 @@ static struct clk_branch gcc_snoc_bus_ti
+ };
+ 
+ static struct clk_branch gcc_snoc_bus_timeout3_ahb_clk = {
+-	.halt_reg = 0x47014,
++	.halt_reg = 0x4701C,
+ 	.halt_bit = 31,
+ 	.clkr = {
+-		.enable_reg = 0x47014,
++		.enable_reg = 0x4701C,
+ 		.enable_mask = BIT(0),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_snoc_bus_timeout3_ahb_clk",
diff --git a/target/linux/qualcommax/patches-6.6/2171-2-clk-qcom-ipq8074-Fix-gcc_blsp1_ahb_clk-properties.patch b/target/linux/qualcommax/patches-6.6/2171-2-clk-qcom-ipq8074-Fix-gcc_blsp1_ahb_clk-properties.patch
new file mode 100644
index 00000000000000..a7abddd5fddf18
--- /dev/null
+++ b/target/linux/qualcommax/patches-6.6/2171-2-clk-qcom-ipq8074-Fix-gcc_blsp1_ahb_clk-properties.patch
@@ -0,0 +1,41 @@
+From 52315bec6ed633b6a71f28b746029602f8bd70b9 Mon Sep 17 00:00:00 2001
+From: Balaji Prakash J 
+Date: Wed, 22 Apr 2020 20:35:30 +0530
+Subject: [PATCH] clk: ipq8074: fix gcc_blsp1_ahb_clk properties
+
+Voting-enabled clocks cannot be enabled through their own CBCR
+register, so update the gcc_blsp1_ahb_clk enable register and
+mask to the corresponding enable bit in APCS_CLOCK_BRANCH_ENA_VOTE.
+
+Also, the voting-controlled clocks are shared among multiple
+components such as APSS, RPM, NSS and TZ. So, dropping the
+APSS vote does not turn the clock off if it is still
+voted for by another component. Add the flag
+BRANCH_HALT_VOTED in order to skip checking the clock
+disable status.
+
+This change is based on the following commits:
+1. 246b4fb3af9bd65d8af794aac2f0e7b1ed9cc2dd
+2.
c8374157d5ae91d3b3e0d513d62808a798b32d3a + +Signed-off-by: Balaji Prakash J +Change-Id: I505cb560b31ad27a02c165fbe13bb33a2fc7d230 +--- + drivers/clk/qcom/gcc-ipq8074.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/drivers/clk/qcom/gcc-ipq8074.c ++++ b/drivers/clk/qcom/gcc-ipq8074.c +@@ -2125,9 +2125,10 @@ struct clk_rcg2 adss_pwm_clk_src = { + + static struct clk_branch gcc_blsp1_ahb_clk = { + .halt_reg = 0x01008, ++ .halt_check = BRANCH_HALT_VOTED, + .clkr = { +- .enable_reg = 0x01008, +- .enable_mask = BIT(0), ++ .enable_reg = 0x0b004, ++ .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_blsp1_ahb_clk", + .parent_hws = (const struct clk_hw *[]){ diff --git a/target/linux/qualcommax/patches-6.6/2600-1-qca-nss-ecm-support-CORE.patch b/target/linux/qualcommax/patches-6.6/2600-1-qca-nss-ecm-support-CORE.patch new file mode 100644 index 00000000000000..8aed01612b599b --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2600-1-qca-nss-ecm-support-CORE.patch @@ -0,0 +1,785 @@ +--- a/include/linux/if_bridge.h ++++ b/include/linux/if_bridge.h +@@ -71,6 +71,9 @@ void brioctl_set(int (*hook)(struct net + void __user *uarg)); + int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, + struct ifreq *ifr, void __user *uarg); ++extern void br_dev_update_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *nlstats); ++extern bool br_is_hairpin_enabled(struct net_device *dev); + + #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) + int br_multicast_list_adjacent(struct net_device *dev, +@@ -213,4 +216,42 @@ static inline clock_t br_get_ageing_time + } + #endif + ++/* QCA NSS ECM support - Start */ ++extern struct net_device *br_port_dev_get(struct net_device *dev, ++ unsigned char *addr, ++ struct sk_buff *skb, ++ unsigned int cookie); ++extern void br_refresh_fdb_entry(struct net_device *dev, const char *addr); ++extern void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid); ++extern struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev, ++ const char *addr, ++ __u16 vid); ++extern void br_fdb_update_register_notify(struct notifier_block *nb); ++extern void br_fdb_update_unregister_notify(struct notifier_block *nb); ++ ++typedef struct net_bridge_port *br_port_dev_get_hook_t(struct net_device *dev, ++ struct sk_buff *skb, ++ unsigned char *addr, ++ unsigned int cookie); ++extern br_port_dev_get_hook_t __rcu *br_port_dev_get_hook; ++ ++#define BR_FDB_EVENT_ADD 0x01 ++#define BR_FDB_EVENT_DEL 0x02 ++ ++struct br_fdb_event { ++ struct net_device *dev; ++ unsigned char addr[6]; ++ unsigned char is_local; ++ struct net_bridge *br; ++ struct net_device *orig_dev; ++}; ++extern void br_fdb_register_notify(struct notifier_block *nb); ++extern void br_fdb_unregister_notify(struct notifier_block *nb); ++ ++typedef struct net_bridge_port *br_get_dst_hook_t( ++ const struct net_bridge_port *src, ++ struct sk_buff **skb); ++extern br_get_dst_hook_t __rcu *br_get_dst_hook; ++/* QCA NSS ECM support - End */ ++ + #endif +--- a/include/linux/if_vlan.h ++++ b/include/linux/if_vlan.h +@@ -235,7 +235,28 @@ extern void vlan_vids_del_by_dev(struct + + extern bool vlan_uses_dev(const struct net_device *dev); + ++/* QCA NSS ECM support - Start */ ++extern void __vlan_dev_update_accel_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *stats); ++extern u16 vlan_dev_get_egress_prio(struct net_device *dev, u32 skb_prio); ++extern struct net_device *vlan_dev_next_dev(const struct 
net_device *dev); ++/* QCA NSS ECM support - End */ ++ + #else ++/* QCA NSS ECM support - Start */ ++static inline void __vlan_dev_update_accel_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *stats) ++{ ++ ++} ++ ++static inline u16 vlan_dev_get_egress_prio(struct net_device *dev, ++ u32 skb_prio) ++{ ++ return 0; ++} ++/* QCA NSS ECM support - End */ ++ + static inline struct net_device * + __vlan_find_dev_deep_rcu(struct net_device *real_dev, + __be16 vlan_proto, u16 vlan_id) +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -2936,6 +2936,10 @@ enum netdev_cmd { + NETDEV_OFFLOAD_XSTATS_REPORT_USED, + NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, + NETDEV_XDP_FEAT_CHANGE, ++ /* QCA NSS ECM Support - Start */ ++ NETDEV_BR_JOIN, ++ NETDEV_BR_LEAVE, ++ /* QCA NSS ECM Support - End */ + }; + const char *netdev_cmd_to_name(enum netdev_cmd cmd); + +--- a/include/net/addrconf.h ++++ b/include/net/addrconf.h +@@ -514,4 +514,9 @@ int if6_proc_init(void); + void if6_proc_exit(void); + #endif + ++/* QCA NSS ECM support - Start */ ++struct net_device *ipv6_dev_find_and_hold(struct net *net, struct in6_addr *addr, ++ int strict); ++/* QCA NSS ECM support - End */ ++ + #endif +--- a/include/net/ip6_route.h ++++ b/include/net/ip6_route.h +@@ -207,6 +207,11 @@ void rt6_multipath_rebalance(struct fib6 + void rt6_uncached_list_add(struct rt6_info *rt); + void rt6_uncached_list_del(struct rt6_info *rt); + ++/* QCA NSS ECM support - Start */ ++int rt6_register_notifier(struct notifier_block *nb); ++int rt6_unregister_notifier(struct notifier_block *nb); ++/* QCA NSS ECM support - End */ ++ + static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb) + { + const struct dst_entry *dst = skb_dst(skb); +--- a/include/net/neighbour.h ++++ b/include/net/neighbour.h +@@ -600,4 +600,15 @@ static inline void neigh_update_is_route + *notify = 1; + } + } ++ ++/* QCA NSS ECM support - Start */ ++struct neigh_mac_update { ++ unsigned char old_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; ++ unsigned char update_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; ++}; ++ ++extern void neigh_mac_update_register_notify(struct notifier_block *nb); ++extern void neigh_mac_update_unregister_notify(struct notifier_block *nb); ++/* QCA NSS ECM support - End */ ++ + #endif +--- a/include/net/route.h ++++ b/include/net/route.h +@@ -237,6 +237,11 @@ struct rtable *rt_dst_alloc(struct net_d + unsigned int flags, u16 type, bool noxfrm); + struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt); + ++/* QCA NSS ECM support - Start */ ++int ip_rt_register_notifier(struct notifier_block *nb); ++int ip_rt_unregister_notifier(struct notifier_block *nb); ++/* QCA NSS ECM support - End */ ++ + struct in_ifaddr; + void fib_add_ifaddr(struct in_ifaddr *); + void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *); +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -2266,4 +2266,9 @@ void br_do_suppress_nd(struct sk_buff *s + u16 vid, struct net_bridge_port *p, struct nd_msg *msg); + struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m); + bool br_is_neigh_suppress_enabled(const struct net_bridge_port *p, u16 vid); ++ ++/* QCA NSS ECM support - Start */ ++#define __br_get(__hook, __default, __args ...) \ ++ (__hook ? 
(__hook(__args)) : (__default)) ++/* QCA NSS ECM support - End */ + #endif +--- a/net/8021q/vlan_core.c ++++ b/net/8021q/vlan_core.c +@@ -555,4 +555,52 @@ static int __init vlan_offload_init(void + return 0; + } + ++/* QCA NSS ECM support - Start */ ++/* Update the VLAN device with statistics from network offload engines */ ++void __vlan_dev_update_accel_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *nlstats) ++{ ++ struct vlan_pcpu_stats *stats; ++ ++ if (!is_vlan_dev(dev)) ++ return; ++ ++ stats = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, 0); ++ ++ u64_stats_update_begin(&stats->syncp); ++ u64_stats_add(&stats->rx_packets, nlstats->rx_packets); ++ u64_stats_add(&stats->rx_bytes, nlstats->rx_bytes); ++ u64_stats_add(&stats->tx_packets, nlstats->tx_packets); ++ u64_stats_add(&stats->tx_bytes, nlstats->tx_bytes); ++ u64_stats_update_end(&stats->syncp); ++} ++EXPORT_SYMBOL(__vlan_dev_update_accel_stats); ++ ++/* Lookup the 802.1p egress_map table and return the 802.1p value */ ++u16 vlan_dev_get_egress_prio(struct net_device *dev, u32 skb_prio) ++{ ++ struct vlan_priority_tci_mapping *mp; ++ ++ mp = vlan_dev_priv(dev)->egress_priority_map[(skb_prio & 0xf)]; ++ while (mp) { ++ if (mp->priority == skb_prio) { ++ /* This should already be shifted ++ * to mask correctly with the ++ * VLAN's TCI ++ */ ++ return mp->vlan_qos; ++ } ++ mp = mp->next; ++ } ++ return 0; ++} ++EXPORT_SYMBOL(vlan_dev_get_egress_prio); ++ ++struct net_device *vlan_dev_next_dev(const struct net_device *dev) ++{ ++ return vlan_dev_priv(dev)->real_dev; ++} ++EXPORT_SYMBOL(vlan_dev_next_dev); ++/* QCA NSS ECM support - End */ ++ + fs_initcall(vlan_offload_init); +--- a/net/bridge/br_fdb.c ++++ b/net/bridge/br_fdb.c +@@ -33,6 +33,35 @@ static const struct rhashtable_params br + + static struct kmem_cache *br_fdb_cache __read_mostly; + ++/* QCA NSS ECM support - Start */ ++ATOMIC_NOTIFIER_HEAD(br_fdb_notifier_list); ++ATOMIC_NOTIFIER_HEAD(br_fdb_update_notifier_list); ++ ++void br_fdb_register_notify(struct notifier_block *nb) ++{ ++ atomic_notifier_chain_register(&br_fdb_notifier_list, nb); ++} ++EXPORT_SYMBOL_GPL(br_fdb_register_notify); ++ ++void br_fdb_unregister_notify(struct notifier_block *nb) ++{ ++ atomic_notifier_chain_unregister(&br_fdb_notifier_list, nb); ++} ++EXPORT_SYMBOL_GPL(br_fdb_unregister_notify); ++ ++void br_fdb_update_register_notify(struct notifier_block *nb) ++{ ++ atomic_notifier_chain_register(&br_fdb_update_notifier_list, nb); ++} ++EXPORT_SYMBOL_GPL(br_fdb_update_register_notify); ++ ++void br_fdb_update_unregister_notify(struct notifier_block *nb) ++{ ++ atomic_notifier_chain_unregister(&br_fdb_update_notifier_list, nb); ++} ++EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify); ++/* QCA NSS ECM support - End */ ++ + int __init br_fdb_init(void) + { + br_fdb_cache = kmem_cache_create("bridge_fdb_cache", +@@ -192,7 +221,26 @@ static void fdb_notify(struct net_bridge + struct sk_buff *skb; + int err = -ENOBUFS; + +- if (swdev_notify) ++ /* QCA NSS ECM support - Start */ ++ if (fdb->dst) { ++ int event; ++ struct br_fdb_event fdb_event; ++ ++ if (type == RTM_NEWNEIGH) ++ event = BR_FDB_EVENT_ADD; ++ else ++ event = BR_FDB_EVENT_DEL; ++ ++ fdb_event.dev = fdb->dst->dev; ++ ether_addr_copy(fdb_event.addr, fdb->key.addr.addr); ++ fdb_event.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags); ++ atomic_notifier_call_chain(&br_fdb_notifier_list, ++ event, ++ (void *)&fdb_event); ++ } ++ /* QCA NSS ECM support - End */ ++ ++ if (swdev_notify) + br_switchdev_fdb_notify(br, fdb, type); + + skb = 
nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC); +@@ -527,6 +575,7 @@ void br_fdb_cleanup(struct work_struct * + unsigned long delay = hold_time(br); + unsigned long work_delay = delay; + unsigned long now = jiffies; ++ u8 mac_addr[6]; /* QCA NSS ECM support */ + + /* this part is tricky, in order to avoid blocking learning and + * consequently forwarding, we rely on rcu to delete objects with +@@ -553,8 +602,15 @@ void br_fdb_cleanup(struct work_struct * + work_delay = min(work_delay, this_timer - now); + } else { + spin_lock_bh(&br->hash_lock); +- if (!hlist_unhashed(&f->fdb_node)) ++ if (!hlist_unhashed(&f->fdb_node)) { ++ ether_addr_copy(mac_addr, f->key.addr.addr); + fdb_delete(br, f, true); ++ /* QCA NSS ECM support - Start */ ++ atomic_notifier_call_chain( ++ &br_fdb_update_notifier_list, 0, ++ (void *)mac_addr); ++ /* QCA NSS ECM support - End */ ++ } + spin_unlock_bh(&br->hash_lock); + } + } +@@ -891,6 +947,12 @@ void br_fdb_update(struct net_bridge *br + */ + if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags))) + clear_bit(BR_FDB_LOCKED, &fdb->flags); ++ ++ /* QCA NSS ECM support - Start */ ++ atomic_notifier_call_chain( ++ &br_fdb_update_notifier_list, ++ 0, (void *)addr); ++ /* QCA NSS ECM support - End */ + } + + if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) +@@ -1508,3 +1570,62 @@ void br_fdb_clear_offload(const struct n + spin_unlock_bh(&p->br->hash_lock); + } + EXPORT_SYMBOL_GPL(br_fdb_clear_offload); ++ ++/* QCA NSS ECM support - Start */ ++/* Refresh FDB entries for bridge packets being forwarded by offload engines */ ++void br_refresh_fdb_entry(struct net_device *dev, const char *addr) ++{ ++ struct net_bridge_port *p = br_port_get_rcu(dev); ++ ++ if (!p || p->state == BR_STATE_DISABLED) ++ return; ++ ++ if (!is_valid_ether_addr(addr)) { ++ pr_info("bridge: Attempt to refresh with invalid ether address %pM\n", ++ addr); ++ return; ++ } ++ ++ rcu_read_lock(); ++ br_fdb_update(p->br, p, addr, 0, true); ++ rcu_read_unlock(); ++} ++EXPORT_SYMBOL_GPL(br_refresh_fdb_entry); ++ ++/* Update timestamp of FDB entries for bridge packets being forwarded by offload engines */ ++void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid) ++{ ++ struct net_bridge_fdb_entry *fdb; ++ struct net_bridge_port *p = br_port_get_rcu(dev); ++ ++ if (!p || p->state == BR_STATE_DISABLED) ++ return; ++ ++ rcu_read_lock(); ++ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid); ++ if (likely(fdb)) { ++ fdb->updated = jiffies; ++ } ++ rcu_read_unlock(); ++} ++EXPORT_SYMBOL_GPL(br_fdb_entry_refresh); ++ ++/* Look up the MAC address in the device's bridge fdb table */ ++struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev, ++ const char *addr, __u16 vid) ++{ ++ struct net_bridge_port *p = br_port_get_rcu(dev); ++ struct net_bridge_fdb_entry *fdb; ++ ++ if (!p || p->state == BR_STATE_DISABLED) ++ return NULL; ++ ++ rcu_read_lock(); ++ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid); ++ rcu_read_unlock(); ++ ++ return fdb; ++} ++EXPORT_SYMBOL_GPL(br_fdb_has_entry); ++/* QCA NSS ECM support - End */ ++ +--- a/net/bridge/br_if.c ++++ b/net/bridge/br_if.c +@@ -26,6 +26,12 @@ + + #include "br_private.h" + ++/* QCA NSS ECM support - Start */ ++/* Hook for external forwarding logic */ ++br_port_dev_get_hook_t __rcu *br_port_dev_get_hook __read_mostly; ++EXPORT_SYMBOL_GPL(br_port_dev_get_hook); ++/* QCA NSS ECM support - End */ ++ + /* + * Determine initial path cost based on speed. 
+ * using recommendations from 802.1d standard +@@ -697,6 +703,8 @@ int br_add_if(struct net_bridge *br, str + + kobject_uevent(&p->kobj, KOBJ_ADD); + ++ call_netdevice_notifiers(NETDEV_BR_JOIN, dev); /* QCA NSS ECM support */ ++ + return 0; + + err6: +@@ -732,6 +740,8 @@ int br_del_if(struct net_bridge *br, str + if (!p || p->br != br) + return -EINVAL; + ++ call_netdevice_notifiers(NETDEV_BR_LEAVE, dev); /* QCA NSS ECM support */ ++ + /* Since more than one interface can be attached to a bridge, + * there still maybe an alternate path for netconsole to use; + * therefore there is no reason for a NETDEV_RELEASE event. +@@ -775,3 +785,96 @@ bool br_port_flag_is_set(const struct ne + return p->flags & flag; + } + EXPORT_SYMBOL_GPL(br_port_flag_is_set); ++ ++/* QCA NSS ECM support - Start */ ++/* API to know if hairpin feature is enabled/disabled on this bridge port */ ++bool br_is_hairpin_enabled(struct net_device *dev) ++{ ++ struct net_bridge_port *port = br_port_get_check_rcu(dev); ++ ++ if (likely(port)) ++ return port->flags & BR_HAIRPIN_MODE; ++ return false; ++} ++EXPORT_SYMBOL_GPL(br_is_hairpin_enabled); ++ ++/* br_port_dev_get() ++ * If a skb is provided, and the br_port_dev_get_hook_t hook exists, ++ * use that to try and determine the egress port for that skb. ++ * If not, or no egress port could be determined, use the given addr ++ * to identify the port to which it is reachable, ++ * returing a reference to the net device associated with that port. ++ * ++ * NOTE: Return NULL if given dev is not a bridge or the mac has no ++ * associated port. ++ */ ++struct net_device *br_port_dev_get(struct net_device *dev, unsigned char *addr, ++ struct sk_buff *skb, ++ unsigned int cookie) ++{ ++ struct net_bridge_fdb_entry *fdbe; ++ struct net_bridge *br; ++ struct net_device *netdev = NULL; ++ ++ /* Is this a bridge? */ ++ if (!(dev->priv_flags & IFF_EBRIDGE)) ++ return NULL; ++ ++ rcu_read_lock(); ++ ++ /* If the hook exists and the skb isn't NULL, try and get the port */ ++ if (skb) { ++ br_port_dev_get_hook_t *port_dev_get_hook; ++ ++ port_dev_get_hook = rcu_dereference(br_port_dev_get_hook); ++ if (port_dev_get_hook) { ++ struct net_bridge_port *pdst = ++ __br_get(port_dev_get_hook, NULL, dev, skb, ++ addr, cookie); ++ if (pdst) { ++ dev_hold(pdst->dev); ++ netdev = pdst->dev; ++ goto out; ++ } ++ } ++ } ++ ++ /* Either there is no hook, or can't ++ * determine the port to use - fall back to using FDB ++ */ ++ ++ br = netdev_priv(dev); ++ ++ /* Lookup the fdb entry and get reference to the port dev */ ++ fdbe = br_fdb_find_rcu(br, addr, 0); ++ if (fdbe && fdbe->dst) { ++ netdev = fdbe->dst->dev; /* port device */ ++ dev_hold(netdev); ++ } ++out: ++ rcu_read_unlock(); ++ return netdev; ++} ++EXPORT_SYMBOL_GPL(br_port_dev_get); ++ ++/* Update bridge statistics for bridge packets processed by offload engines */ ++void br_dev_update_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *nlstats) ++{ ++ struct pcpu_sw_netstats *tstats; ++ ++ /* Is this a bridge? 
*/ ++ if (!(dev->priv_flags & IFF_EBRIDGE)) ++ return; ++ ++ tstats = this_cpu_ptr(dev->tstats); ++ ++ u64_stats_update_begin(&tstats->syncp); ++ u64_stats_add(&tstats->rx_packets, nlstats->rx_packets); ++ u64_stats_add(&tstats->rx_bytes, nlstats->rx_bytes); ++ u64_stats_add(&tstats->tx_packets, nlstats->tx_packets); ++ u64_stats_add(&tstats->tx_bytes, nlstats->tx_bytes); ++ u64_stats_update_end(&tstats->syncp); ++} ++EXPORT_SYMBOL_GPL(br_dev_update_stats); ++/* QCA NSS ECM support - End */ +--- a/net/core/neighbour.c ++++ b/net/core/neighbour.c +@@ -1275,6 +1275,22 @@ static void neigh_update_hhs(struct neig + } + } + ++/* QCA NSS ECM support - Start */ ++ATOMIC_NOTIFIER_HEAD(neigh_mac_update_notifier_list); ++ ++void neigh_mac_update_register_notify(struct notifier_block *nb) ++{ ++ atomic_notifier_chain_register(&neigh_mac_update_notifier_list, nb); ++} ++EXPORT_SYMBOL_GPL(neigh_mac_update_register_notify); ++ ++void neigh_mac_update_unregister_notify(struct notifier_block *nb) ++{ ++ atomic_notifier_chain_unregister(&neigh_mac_update_notifier_list, nb); ++} ++EXPORT_SYMBOL_GPL(neigh_mac_update_unregister_notify); ++/* QCA NSS ECM support - End */ ++ + /* Generic update routine. + -- lladdr is new lladdr or NULL, if it is not supplied. + -- new is new state. +@@ -1303,6 +1319,7 @@ static int __neigh_update(struct neighbo + struct net_device *dev; + int err, notify = 0; + u8 old; ++ struct neigh_mac_update nmu; /* QCA NSS ECM support */ + + trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid); + +@@ -1317,7 +1334,10 @@ static int __neigh_update(struct neighbo + new = old; + goto out; + } +- if (!(flags & NEIGH_UPDATE_F_ADMIN) && ++ ++ memset(&nmu, 0, sizeof(struct neigh_mac_update)); /* QCA NSS ECM support */ ++ ++ if (!(flags & NEIGH_UPDATE_F_ADMIN) && + (old & (NUD_NOARP | NUD_PERMANENT))) + goto out; + +@@ -1354,7 +1374,12 @@ static int __neigh_update(struct neighbo + - compare new & old + - if they are different, check override flag + */ +- if ((old & NUD_VALID) && ++ /* QCA NSS ECM update - Start */ ++ memcpy(nmu.old_mac, neigh->ha, dev->addr_len); ++ memcpy(nmu.update_mac, lladdr, dev->addr_len); ++ /* QCA NSS ECM update - End */ ++ ++ if ((old & NUD_VALID) && + !memcmp(lladdr, neigh->ha, dev->addr_len)) + lladdr = neigh->ha; + } else { +@@ -1476,8 +1501,11 @@ out: + neigh_update_gc_list(neigh); + if (managed_update) + neigh_update_managed_list(neigh); +- if (notify) ++ if (notify) { + neigh_update_notify(neigh, nlmsg_pid); ++ atomic_notifier_call_chain(&neigh_mac_update_notifier_list, 0, ++ (struct neigh_mac_update *)&nmu); /* QCA NSS ECM support */ ++ } + trace_neigh_update_done(neigh, err); + return err; + } +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -1211,6 +1211,9 @@ static bool fib_valid_key_len(u32 key, u + static void fib_remove_alias(struct trie *t, struct key_vector *tp, + struct key_vector *l, struct fib_alias *old); + ++/* Define route change notification chain. */ ++static BLOCKING_NOTIFIER_HEAD(iproute_chain); /* QCA NSS ECM support */ ++ + /* Caller must hold RTNL. 
*/ + int fib_table_insert(struct net *net, struct fib_table *tb, + struct fib_config *cfg, struct netlink_ext_ack *extack) +@@ -1404,6 +1407,9 @@ int fib_table_insert(struct net *net, st + rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id, + &cfg->fc_nlinfo, nlflags); + succeeded: ++ blocking_notifier_call_chain(&iproute_chain, ++ RTM_NEWROUTE, fi); ++ + return 0; + + out_remove_new_fa: +@@ -1775,6 +1781,9 @@ int fib_table_delete(struct net *net, st + if (fa_to_delete->fa_state & FA_S_ACCESSED) + rt_cache_flush(cfg->fc_nlinfo.nl_net); + ++ blocking_notifier_call_chain(&iproute_chain, ++ RTM_DELROUTE, fa_to_delete->fa_info); ++ + fib_release_info(fa_to_delete->fa_info); + alias_free_mem_rcu(fa_to_delete); + return 0; +@@ -2407,6 +2416,20 @@ void __init fib_trie_init(void) + 0, SLAB_PANIC | SLAB_ACCOUNT, NULL); + } + ++/* QCA NSS ECM support - Start */ ++int ip_rt_register_notifier(struct notifier_block *nb) ++{ ++ return blocking_notifier_chain_register(&iproute_chain, nb); ++} ++EXPORT_SYMBOL(ip_rt_register_notifier); ++ ++int ip_rt_unregister_notifier(struct notifier_block *nb) ++{ ++ return blocking_notifier_chain_unregister(&iproute_chain, nb); ++} ++EXPORT_SYMBOL(ip_rt_unregister_notifier); ++/* QCA NSS ECM support - End */ ++ + struct fib_table *fib_trie_table(u32 id, struct fib_table *alias) + { + struct fib_table *tb; +--- a/net/ipv6/ndisc.c ++++ b/net/ipv6/ndisc.c +@@ -666,6 +666,7 @@ void ndisc_send_ns(struct net_device *de + if (skb) + ndisc_send_skb(skb, daddr, saddr); + } ++EXPORT_SYMBOL(ndisc_send_ns); + + void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr, + const struct in6_addr *daddr) +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -3853,6 +3853,9 @@ out_free: + return ERR_PTR(err); + } + ++/* Define route change notification chain. 
*/ ++ATOMIC_NOTIFIER_HEAD(ip6route_chain); /* QCA NSS ECM support */ ++ + int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, + struct netlink_ext_ack *extack) + { +@@ -3864,6 +3867,10 @@ int ip6_route_add(struct fib6_config *cf + return PTR_ERR(rt); + + err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack); ++ if (!err) ++ atomic_notifier_call_chain(&ip6route_chain, ++ RTM_NEWROUTE, rt); ++ + fib6_info_release(rt); + + return err; +@@ -3885,6 +3892,9 @@ static int __ip6_del_rt(struct fib6_info + err = fib6_del(rt, info); + spin_unlock_bh(&table->tb6_lock); + ++ if (!err) ++ atomic_notifier_call_chain(&ip6route_chain, ++ RTM_DELROUTE, rt); + out: + fib6_info_release(rt); + return err; +@@ -6336,6 +6346,20 @@ static int ip6_route_dev_notify(struct n + return NOTIFY_OK; + } + ++/* QCA NSS ECM support - Start */ ++int rt6_register_notifier(struct notifier_block *nb) ++{ ++ return atomic_notifier_chain_register(&ip6route_chain, nb); ++} ++EXPORT_SYMBOL(rt6_register_notifier); ++ ++int rt6_unregister_notifier(struct notifier_block *nb) ++{ ++ return atomic_notifier_chain_unregister(&ip6route_chain, nb); ++} ++EXPORT_SYMBOL(rt6_unregister_notifier); ++/* QCA NSS ECM support - End */ ++ + /* + * /proc + */ +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1673,6 +1673,7 @@ const char *netdev_cmd_to_name(enum netd + N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE) + N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA) + N(XDP_FEAT_CHANGE) ++ N(BR_JOIN) N(BR_LEAVE) + } + #undef N + return "UNKNOWN_NETDEV_EVENT"; +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -1002,6 +1002,7 @@ void inet6_ifa_finish_destroy(struct ine + + kfree_rcu(ifp, rcu); + } ++EXPORT_SYMBOL(inet6_ifa_finish_destroy); + + static void + ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) +@@ -2068,6 +2069,36 @@ struct inet6_ifaddr *ipv6_get_ifaddr(str + + return result; + } ++EXPORT_SYMBOL(ipv6_get_ifaddr); ++ ++/* ipv6_dev_find_and_hold() ++ * Find (and hold) net device that has the given address. ++ * Or NULL on failure. 
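++ * The caller is responsible for releasing the returned reference with
++ * dev_put() once it is done with the device.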
++ */ ++struct net_device *ipv6_dev_find_and_hold(struct net *net, struct in6_addr *addr, ++ int strict) ++{ ++ struct inet6_ifaddr *ifp; ++ struct net_device *dev; ++ ++ ifp = ipv6_get_ifaddr(net, addr, NULL, strict); ++ if (!ifp) ++ return NULL; ++ ++ if (!ifp->idev) { ++ in6_ifa_put(ifp); ++ return NULL; ++ } ++ ++ dev = ifp->idev->dev; ++ if (dev) ++ dev_hold(dev); ++ ++ in6_ifa_put(ifp); ++ ++ return dev; ++} ++EXPORT_SYMBOL(ipv6_dev_find_and_hold); + + /* Gets referenced address, destroys ifaddr */ + +--- a/include/net/vxlan.h ++++ b/include/net/vxlan.h +@@ -440,6 +440,15 @@ static inline __be32 vxlan_compute_rco(u + return vni_field; + } + ++/* ++ * vxlan_get_vni() ++ * Returns the vni corresponding to tunnel ++ */ ++static inline u32 vxlan_get_vni(struct vxlan_dev *vxlan_tun) ++{ ++ return be32_to_cpu(vxlan_tun->cfg.vni); ++} ++ + static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs) + { + return vs->sock->sk->sk_family; diff --git a/target/linux/qualcommax/patches-6.6/2600-2-qca-nss-ecm-support-PPPOE-offload.patch b/target/linux/qualcommax/patches-6.6/2600-2-qca-nss-ecm-support-PPPOE-offload.patch new file mode 100644 index 00000000000000..699330689c9539 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2600-2-qca-nss-ecm-support-PPPOE-offload.patch @@ -0,0 +1,588 @@ +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -48,6 +48,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -254,6 +255,25 @@ struct ppp_net { + #define seq_before(a, b) ((s32)((a) - (b)) < 0) + #define seq_after(a, b) ((s32)((a) - (b)) > 0) + ++ ++/* ++ * Registration/Unregistration methods ++ * for PPP channel connect and disconnect event notifications. ++ */ ++RAW_NOTIFIER_HEAD(ppp_channel_connection_notifier_list); ++ ++void ppp_channel_connection_register_notify(struct notifier_block *nb) ++{ ++ raw_notifier_chain_register(&ppp_channel_connection_notifier_list, nb); ++} ++EXPORT_SYMBOL_GPL(ppp_channel_connection_register_notify); ++ ++void ppp_channel_connection_unregister_notify(struct notifier_block *nb) ++{ ++ raw_notifier_chain_unregister(&ppp_channel_connection_notifier_list, nb); ++} ++EXPORT_SYMBOL_GPL(ppp_channel_connection_unregister_notify); ++ + /* Prototypes. */ + static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, + struct file *file, unsigned int cmd, unsigned long arg); +@@ -3453,7 +3473,10 @@ ppp_connect_channel(struct channel *pch, + struct ppp_net *pn; + int ret = -ENXIO; + int hdrlen; ++ int ppp_proto; ++ int version; + ++ int notify = 0; + pn = ppp_pernet(pch->chan_net); + + mutex_lock(&pn->all_ppp_mutex); +@@ -3485,13 +3508,40 @@ ppp_connect_channel(struct channel *pch, + ++ppp->n_channels; + pch->ppp = ppp; + refcount_inc(&ppp->file.refcnt); ++ ++ /* Set the netdev priv flag if the prototype ++ * is L2TP or PPTP. 
Return success in all cases ++ */ ++ if (!pch->chan) ++ goto out2; ++ ++ ppp_proto = ppp_channel_get_protocol(pch->chan); ++ if (ppp_proto == PX_PROTO_PPTP) { ++ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_PPTP; ++ } else if (ppp_proto == PX_PROTO_OL2TP) { ++ version = ppp_channel_get_proto_version(pch->chan); ++ if (version == 2) ++ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV2; ++ else if (version == 3) ++ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV3; ++ } ++ notify = 1; ++ ++ out2: + ppp_unlock(ppp); + ret = 0; +- + outl: + write_unlock_bh(&pch->upl); + out: + mutex_unlock(&pn->all_ppp_mutex); ++ ++ if (notify && ppp && ppp->dev) { ++ dev_hold(ppp->dev); ++ raw_notifier_call_chain(&ppp_channel_connection_notifier_list, ++ PPP_CHANNEL_CONNECT, ppp->dev); ++ dev_put(ppp->dev); ++ } ++ + return ret; + } + +@@ -3509,6 +3559,13 @@ ppp_disconnect_channel(struct channel *p + pch->ppp = NULL; + write_unlock_bh(&pch->upl); + if (ppp) { ++ if (ppp->dev) { ++ dev_hold(ppp->dev); ++ raw_notifier_call_chain(&ppp_channel_connection_notifier_list, ++ PPP_CHANNEL_DISCONNECT, ppp->dev); ++ dev_put(ppp->dev); ++ } ++ + /* remove it from the ppp unit's list */ + ppp_lock(ppp); + list_del(&pch->clist); +@@ -3588,6 +3645,222 @@ static void *unit_find(struct idr *p, in + return idr_find(p, n); + } + ++/* Updates the PPP interface statistics. */ ++void ppp_update_stats(struct net_device *dev, unsigned long rx_packets, ++ unsigned long rx_bytes, unsigned long tx_packets, ++ unsigned long tx_bytes, unsigned long rx_errors, ++ unsigned long tx_errors, unsigned long rx_dropped, ++ unsigned long tx_dropped) ++{ ++ struct ppp *ppp; ++ ++ if (!dev) ++ return; ++ ++ if (dev->type != ARPHRD_PPP) ++ return; ++ ++ ppp = netdev_priv(dev); ++ ++ ppp_xmit_lock(ppp); ++ ppp->stats64.tx_packets += tx_packets; ++ ppp->stats64.tx_bytes += tx_bytes; ++ ppp->dev->stats.tx_errors += tx_errors; ++ ppp->dev->stats.tx_dropped += tx_dropped; ++ if (tx_packets) ++ ppp->last_xmit = jiffies; ++ ppp_xmit_unlock(ppp); ++ ++ ppp_recv_lock(ppp); ++ ppp->stats64.rx_packets += rx_packets; ++ ppp->stats64.rx_bytes += rx_bytes; ++ ppp->dev->stats.rx_errors += rx_errors; ++ ppp->dev->stats.rx_dropped += rx_dropped; ++ if (rx_packets) ++ ppp->last_recv = jiffies; ++ ppp_recv_unlock(ppp); ++} ++ ++/* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if ++ * the device is not PPP. ++ */ ++int ppp_is_multilink(struct net_device *dev) ++{ ++ struct ppp *ppp; ++ unsigned int flags; ++ ++ if (!dev) ++ return -1; ++ ++ if (dev->type != ARPHRD_PPP) ++ return -1; ++ ++ ppp = netdev_priv(dev); ++ ppp_lock(ppp); ++ flags = ppp->flags; ++ ppp_unlock(ppp); ++ ++ if (flags & SC_MULTILINK) ++ return 1; ++ ++ return 0; ++} ++EXPORT_SYMBOL(ppp_is_multilink); ++ ++/* ppp_channel_get_protocol() ++ * Call this to obtain the underlying protocol of the PPP channel, ++ * e.g. PX_PROTO_OE ++ * ++ * NOTE: Some channels do not use PX sockets so the protocol value may be very ++ * different for them. ++ * NOTE: -1 indicates failure. ++ * NOTE: Once you know the channel protocol you may then either cast 'chan' to ++ * its sub-class or use the channel protocol specific API's as provided by that ++ * channel sub type. 
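++ * NOTE: For example, PX_PROTO_OE channels expose the channel specific
++ * API pppoe_channel_addressing_get().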
++ */ ++int ppp_channel_get_protocol(struct ppp_channel *chan) ++{ ++ if (!chan->ops->get_channel_protocol) ++ return -1; ++ ++ return chan->ops->get_channel_protocol(chan); ++} ++EXPORT_SYMBOL(ppp_channel_get_protocol); ++ ++/* ppp_channel_get_proto_version() ++ * Call this to get channel protocol version ++ */ ++int ppp_channel_get_proto_version(struct ppp_channel *chan) ++{ ++ if (!chan->ops->get_channel_protocol_ver) ++ return -1; ++ ++ return chan->ops->get_channel_protocol_ver(chan); ++} ++EXPORT_SYMBOL(ppp_channel_get_proto_version); ++ ++/* ppp_channel_hold() ++ * Call this to hold a channel. ++ * ++ * Returns true on success or false if the hold could not happen. ++ * ++ * NOTE: chan must be protected against destruction during this call - ++ * either by correct locking etc. or because you already have an implicit ++ * or explicit hold to the channel already and this is an additional hold. ++ */ ++bool ppp_channel_hold(struct ppp_channel *chan) ++{ ++ if (!chan->ops->hold) ++ return false; ++ ++ chan->ops->hold(chan); ++ return true; ++} ++EXPORT_SYMBOL(ppp_channel_hold); ++ ++/* ppp_channel_release() ++ * Call this to release a hold you have upon a channel ++ */ ++void ppp_channel_release(struct ppp_channel *chan) ++{ ++ chan->ops->release(chan); ++} ++EXPORT_SYMBOL(ppp_channel_release); ++ ++/* Check if ppp xmit lock is on hold */ ++bool ppp_is_xmit_locked(struct net_device *dev) ++{ ++ struct ppp *ppp; ++ ++ if (!dev) ++ return false; ++ ++ if (dev->type != ARPHRD_PPP) ++ return false; ++ ++ ppp = netdev_priv(dev); ++ if (!ppp) ++ return false; ++ ++ if (spin_is_locked(&(ppp)->wlock)) ++ return true; ++ ++ return false; ++} ++EXPORT_SYMBOL(ppp_is_xmit_locked); ++ ++/* ppp_hold_channels() ++ * Returns the PPP channels of the PPP device, storing each one into ++ * channels[]. ++ * ++ * channels[] has chan_sz elements. ++ * This function returns the number of channels stored, up to chan_sz. ++ * It will return < 0 if the device is not PPP. ++ * ++ * You MUST release the channels using ppp_release_channels(). 
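++ *
++ * Typical usage (sketch):
++ *
++ *	struct ppp_channel *chans[8];
++ *	int n = ppp_hold_channels(dev, chans, ARRAY_SIZE(chans));
++ *
++ *	if (n > 0)
++ *		ppp_release_channels(chans, n);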
++ */ ++int ppp_hold_channels(struct net_device *dev, struct ppp_channel *channels[], ++ unsigned int chan_sz) ++{ ++ struct ppp *ppp; ++ int c; ++ struct channel *pch; ++ ++ if (!dev) ++ return -1; ++ ++ if (dev->type != ARPHRD_PPP) ++ return -1; ++ ++ ppp = netdev_priv(dev); ++ ++ c = 0; ++ ppp_lock(ppp); ++ list_for_each_entry(pch, &ppp->channels, clist) { ++ struct ppp_channel *chan; ++ ++ if (!pch->chan) { ++ /* Channel is going / gone away */ ++ continue; ++ } ++ ++ if (c == chan_sz) { ++ /* No space to record channel */ ++ ppp_unlock(ppp); ++ return c; ++ } ++ ++ /* Hold the channel, if supported */ ++ chan = pch->chan; ++ if (!chan->ops->hold) ++ continue; ++ ++ chan->ops->hold(chan); ++ ++ /* Record the channel */ ++ channels[c++] = chan; ++ } ++ ppp_unlock(ppp); ++ return c; ++} ++EXPORT_SYMBOL(ppp_hold_channels); ++ ++/* ppp_release_channels() ++ * Releases channels ++ */ ++void ppp_release_channels(struct ppp_channel *channels[], unsigned int chan_sz) ++{ ++ unsigned int c; ++ ++ for (c = 0; c < chan_sz; ++c) { ++ struct ppp_channel *chan; ++ ++ chan = channels[c]; ++ chan->ops->release(chan); ++ } ++} ++EXPORT_SYMBOL(ppp_release_channels); ++ + /* Module/initialization stuff */ + + module_init(ppp_init); +@@ -3604,6 +3877,7 @@ EXPORT_SYMBOL(ppp_input_error); + EXPORT_SYMBOL(ppp_output_wakeup); + EXPORT_SYMBOL(ppp_register_compressor); + EXPORT_SYMBOL(ppp_unregister_compressor); ++EXPORT_SYMBOL(ppp_update_stats); + MODULE_LICENSE("GPL"); + MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); + MODULE_ALIAS_RTNL_LINK("ppp"); +--- a/drivers/net/ppp/pppoe.c ++++ b/drivers/net/ppp/pppoe.c +@@ -62,6 +62,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -87,7 +88,7 @@ + static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); + + static const struct proto_ops pppoe_ops; +-static const struct ppp_channel_ops pppoe_chan_ops; ++static const struct pppoe_channel_ops pppoe_chan_ops; + + /* per-net private data for this module */ + static unsigned int pppoe_net_id __read_mostly; +@@ -692,7 +693,7 @@ static int pppoe_connect(struct socket * + + po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2; + po->chan.private = sk; +- po->chan.ops = &pppoe_chan_ops; ++ po->chan.ops = (struct ppp_channel_ops *)&pppoe_chan_ops; + + error = ppp_register_net_channel(dev_net(dev), &po->chan); + if (error) { +@@ -995,9 +996,80 @@ static int pppoe_fill_forward_path(struc + return 0; + } + +-static const struct ppp_channel_ops pppoe_chan_ops = { +- .start_xmit = pppoe_xmit, +- .fill_forward_path = pppoe_fill_forward_path, ++/************************************************************************ ++ * ++ * function called by generic PPP driver to hold channel ++ * ++ ***********************************************************************/ ++static void pppoe_hold_chan(struct ppp_channel *chan) ++{ ++ struct sock *sk = (struct sock *)chan->private; ++ ++ sock_hold(sk); ++} ++ ++/************************************************************************ ++ * ++ * function called by generic PPP driver to release channel ++ * ++ ***********************************************************************/ ++static void pppoe_release_chan(struct ppp_channel *chan) ++{ ++ struct sock *sk = (struct sock *)chan->private; ++ ++ sock_put(sk); ++} ++ ++/************************************************************************ ++ * ++ * function called to get the channel protocol type ++ * ++ ***********************************************************************/ ++static int 
pppoe_get_channel_protocol(struct ppp_channel *chan) ++{ ++ return PX_PROTO_OE; ++} ++ ++/************************************************************************ ++ * ++ * function called to get the PPPoE channel addressing ++ * NOTE: This function returns a HOLD to the netdevice ++ * ++ ***********************************************************************/ ++static int pppoe_get_addressing(struct ppp_channel *chan, ++ struct pppoe_opt *addressing) ++{ ++ struct sock *sk = (struct sock *)chan->private; ++ struct pppox_sock *po = pppox_sk(sk); ++ int err = 0; ++ ++ *addressing = po->proto.pppoe; ++ if (!addressing->dev) ++ return -ENODEV; ++ ++ dev_hold(addressing->dev); ++ return err; ++} ++ ++/* pppoe_channel_addressing_get() ++ * Return PPPoE channel specific addressing information. ++ */ ++int pppoe_channel_addressing_get(struct ppp_channel *chan, ++ struct pppoe_opt *addressing) ++{ ++ return pppoe_get_addressing(chan, addressing); ++} ++EXPORT_SYMBOL(pppoe_channel_addressing_get); ++ ++static const struct pppoe_channel_ops pppoe_chan_ops = { ++ /* PPPoE specific channel ops */ ++ .get_addressing = pppoe_get_addressing, ++ /* General ppp channel ops */ ++ .ops.start_xmit = pppoe_xmit, ++ .ops.get_channel_protocol = pppoe_get_channel_protocol, ++ .ops.hold = pppoe_hold_chan, ++ .ops.release = pppoe_release_chan, ++ .ops.fill_forward_path = pppoe_fill_forward_path, + }; + + static int pppoe_recvmsg(struct socket *sock, struct msghdr *m, +--- a/include/linux/if_pppox.h ++++ b/include/linux/if_pppox.h +@@ -91,4 +91,17 @@ enum { + PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/ + }; + ++/* ++ * PPPoE Channel specific operations ++ */ ++struct pppoe_channel_ops { ++ /* Must be first - general to all PPP channels */ ++ struct ppp_channel_ops ops; ++ int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *); ++}; ++ ++/* Return PPPoE channel specific addressing information */ ++extern int pppoe_channel_addressing_get(struct ppp_channel *chan, ++ struct pppoe_opt *addressing); ++ + #endif /* !(__LINUX_IF_PPPOX_H) */ +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1762,6 +1762,24 @@ enum netdev_priv_flags { + IFF_NO_IP_ALIGN = BIT_ULL(34), + }; + ++ ++/** ++ * enum netdev_priv_flags_ext - &struct net_device priv_flags_ext ++ * ++ * These flags are used to check for device type and can be ++ * set and used by the drivers ++ * ++ */ ++enum netdev_priv_flags_ext { ++ IFF_EXT_TUN_TAP = 1<<0, ++ IFF_EXT_PPP_L2TPV2 = 1<<1, ++ IFF_EXT_PPP_L2TPV3 = 1<<2, ++ IFF_EXT_PPP_PPTP = 1<<3, ++ IFF_EXT_GRE_V4_TAP = 1<<4, ++ IFF_EXT_GRE_V6_TAP = 1<<5, ++ IFF_EXT_IFB = 1<<6, ++}; ++ + #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN + #define IFF_EBRIDGE IFF_EBRIDGE + #define IFF_BONDING IFF_BONDING +@@ -2128,6 +2146,7 @@ struct net_device { + xdp_features_t xdp_features; + unsigned long long priv_flags; + const struct net_device_ops *netdev_ops; ++ unsigned int priv_flags_ext; + const struct xdp_metadata_ops *xdp_metadata_ops; + int ifindex; + unsigned short gflags; +--- a/include/linux/ppp_channel.h ++++ b/include/linux/ppp_channel.h +@@ -19,6 +19,10 @@ + #include + #include + #include ++#include ++ ++#define PPP_CHANNEL_DISCONNECT 0 ++#define PPP_CHANNEL_CONNECT 1 + + struct net_device_path; + struct net_device_path_ctx; +@@ -30,9 +34,19 @@ struct ppp_channel_ops { + int (*start_xmit)(struct ppp_channel *, struct sk_buff *); + /* Handle an ioctl call that has come in via /dev/ppp. 
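A hedged sketch of combining the generic channel ops with the PPPoE-specific accessor introduced above; the helper name is invented, and the dev_put() obligation follows from the NOTE on pppoe_get_addressing():

/* Hypothetical helper: map a generic PPP channel back to the Ethernet
 * device carrying the PPPoE session. Not part of the patch. */
static struct net_device *example_pppoe_lower_dev(struct ppp_channel *chan)
{
	struct pppoe_opt addressing;

	if (ppp_channel_get_protocol(chan) != PX_PROTO_OE)
		return NULL;

	if (pppoe_channel_addressing_get(chan, &addressing))
		return NULL;

	/* pppoe_channel_addressing_get() took a hold on addressing.dev;
	 * the caller now owns it and must dev_put() it when done. */
	return addressing.dev;
}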
*/ + int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long); ++ /* Get channel protocol type, one of PX_PROTO_XYZ or specific to ++ * the channel subtype ++ */ ++ int (*get_channel_protocol)(struct ppp_channel *); ++ /* Get channel protocol version */ ++ int (*get_channel_protocol_ver)(struct ppp_channel *); ++ /* Hold the channel from being destroyed */ ++ void (*hold)(struct ppp_channel *); ++ /* Release hold on the channel */ ++ void (*release)(struct ppp_channel *); + int (*fill_forward_path)(struct net_device_path_ctx *, +- struct net_device_path *, +- const struct ppp_channel *); ++ struct net_device_path *, ++ const struct ppp_channel *); + }; + + struct ppp_channel { +@@ -76,6 +90,51 @@ extern int ppp_unit_number(struct ppp_ch + /* Get the device name associated with a channel, or NULL if none */ + extern char *ppp_dev_name(struct ppp_channel *); + ++/* Call this to obtain the underlying protocol of the PPP channel, ++ * e.g. PX_PROTO_OE ++ */ ++extern int ppp_channel_get_protocol(struct ppp_channel *); ++ ++/* Call this get protocol version */ ++extern int ppp_channel_get_proto_version(struct ppp_channel *); ++ ++/* Call this to hold a channel */ ++extern bool ppp_channel_hold(struct ppp_channel *); ++ ++/* Call this to release a hold you have upon a channel */ ++extern void ppp_channel_release(struct ppp_channel *); ++ ++/* Release hold on PPP channels */ ++extern void ppp_release_channels(struct ppp_channel *channels[], ++ unsigned int chan_sz); ++ ++/* Hold PPP channels for the PPP device */ ++extern int ppp_hold_channels(struct net_device *dev, ++ struct ppp_channel *channels[], ++ unsigned int chan_sz); ++ ++/* Test if ppp xmit lock is locked */ ++extern bool ppp_is_xmit_locked(struct net_device *dev); ++ ++/* Test if the ppp device is a multi-link ppp device */ ++extern int ppp_is_multilink(struct net_device *dev); ++ ++/* Register the PPP channel connect notifier */ ++extern void ppp_channel_connection_register_notify(struct notifier_block *nb); ++ ++/* Unregister the PPP channel connect notifier */ ++extern void ppp_channel_connection_unregister_notify(struct notifier_block *nb); ++ ++/* Update statistics of the PPP net_device by incrementing related ++ * statistics field value with corresponding parameter ++ */ ++extern void ppp_update_stats(struct net_device *dev, unsigned long rx_packets, ++ unsigned long rx_bytes, unsigned long tx_packets, ++ unsigned long tx_bytes, unsigned long rx_errors, ++ unsigned long tx_errors, unsigned long rx_dropped, ++ unsigned long tx_dropped); ++ ++ + /* + * SMP locking notes: + * The channel code must ensure that when it calls ppp_unregister_channel, diff --git a/target/linux/qualcommax/patches-6.6/2600-3-qca-nss-ecm-support-net-bonding.patch b/target/linux/qualcommax/patches-6.6/2600-3-qca-nss-ecm-support-net-bonding.patch new file mode 100644 index 00000000000000..3ef4a189b55c4d --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2600-3-qca-nss-ecm-support-net-bonding.patch @@ -0,0 +1,81 @@ +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -254,6 +254,8 @@ static const struct flow_dissector_key f + }, + }; + ++static unsigned long bond_id_mask = 0xFFFFFFF0; /* QCA NSS ECM bonding support */ ++ + static struct flow_dissector flow_keys_bonding __read_mostly; + + /*-------------------------- Forward declarations ---------------------------*/ +@@ -4418,6 +4420,24 @@ static int bond_get_lowest_level_rcu(str + } + #endif + ++/* QCA NSS ECM bonding support */ ++int bond_get_id(struct net_device 
*bond_dev) ++{ ++ struct bonding *bond; ++ int bond_id = 0; ++ ++ if (!((bond_dev->priv_flags & IFF_BONDING) && ++ (bond_dev->flags & IFF_MASTER))) ++ return -EINVAL; ++ ++ bond = netdev_priv(bond_dev); ++ bond_id = bond->id; ++ ++ return bond_id; ++} ++EXPORT_SYMBOL(bond_get_id); ++/* QCA NSS ECM bonding support */ ++ + static void bond_get_stats(struct net_device *bond_dev, + struct rtnl_link_stats64 *stats) + { +@@ -5873,6 +5893,11 @@ static void bond_destructor(struct net_d + destroy_workqueue(bond->wq); + + free_percpu(bond->rr_tx_counter); ++ ++ /* QCA NSS ECM bonding support */ ++ if (bond->id != (~0U)) ++ clear_bit(bond->id, &bond_id_mask); ++ /* QCA NSS ECM bonding support */ + } + + void bond_setup(struct net_device *bond_dev) +@@ -6421,6 +6446,14 @@ int bond_create(struct net *net, const c + + bond_work_init_all(bond); + ++ /* QCA NSS ECM bonding support - Start */ ++ bond->id = ~0U; ++ if (bond_id_mask != (~0UL)) { ++ bond->id = (u32)ffz(bond_id_mask); ++ set_bit(bond->id, &bond_id_mask); ++ } ++ /* QCA NSS ECM bonding support - End */ ++ + out: + rtnl_unlock(); + return res; +--- a/include/net/bonding.h ++++ b/include/net/bonding.h +@@ -261,6 +261,7 @@ struct bonding { + spinlock_t ipsec_lock; + #endif /* CONFIG_XFRM_OFFLOAD */ + struct bpf_prog *xdp_prog; ++ u32 id; /* QCA NSS ECM bonding support */ + }; + + #define bond_slave_get_rcu(dev) \ +@@ -652,6 +653,7 @@ struct bond_net { + + int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); + netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); ++int bond_get_id(struct net_device *bond_dev); /* QCA NSS ECM bonding support */ + int bond_create(struct net *net, const char *name); + int bond_create_sysfs(struct bond_net *net); + void bond_destroy_sysfs(struct bond_net *net); diff --git a/target/linux/qualcommax/patches-6.6/2600-4-qca-nss-ecm-support-net-bonding-over-LAG-interface.patch b/target/linux/qualcommax/patches-6.6/2600-4-qca-nss-ecm-support-net-bonding-over-LAG-interface.patch new file mode 100644 index 00000000000000..f5c3cbfec6ed39 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2600-4-qca-nss-ecm-support-net-bonding-over-LAG-interface.patch @@ -0,0 +1,672 @@ +--- a/drivers/net/bonding/bond_3ad.c ++++ b/drivers/net/bonding/bond_3ad.c +@@ -115,7 +115,40 @@ static void ad_marker_info_received(stru + static void ad_marker_response_received(struct bond_marker *marker, + struct port *port); + static void ad_update_actor_keys(struct port *port, bool reset); ++/* QCA NSS ECM bonding support - Start */ ++struct bond_cb __rcu *bond_cb; + ++int bond_register_cb(struct bond_cb *cb) ++{ ++ struct bond_cb *lag_cb; ++ ++ lag_cb = kzalloc(sizeof(*lag_cb), GFP_ATOMIC | __GFP_NOWARN); ++ if (!lag_cb) { ++ return -1; ++ } ++ ++ memcpy((void *)lag_cb, (void *)cb, sizeof(*cb)); ++ ++ rcu_read_lock(); ++ rcu_assign_pointer(bond_cb, lag_cb); ++ rcu_read_unlock(); ++ return 0; ++} ++EXPORT_SYMBOL(bond_register_cb); ++ ++void bond_unregister_cb(void) ++{ ++ struct bond_cb *lag_cb_main; ++ ++ rcu_read_lock(); ++ lag_cb_main = rcu_dereference(bond_cb); ++ rcu_assign_pointer(bond_cb, NULL); ++ rcu_read_unlock(); ++ ++ kfree(lag_cb_main); ++} ++EXPORT_SYMBOL(bond_unregister_cb); ++/* QCA NSS ECM bonding support - End */ + + /* ================= api to bonding and kernel code ================== */ + +@@ -430,7 +463,6 @@ static u16 __ad_timer_to_ticks(u16 timer + return retval; + } + +- + /* ================= ad_rx_machine helper functions 
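For context, a minimal sketch of the consumer side of bond_register_cb()/bond_unregister_cb(); the example_* names and the module boilerplate are assumptions for illustration:

#include <linux/module.h>
#include <net/bonding.h>

/* Minimal subscriber to the LAG link notifications (illustrative). */
static void example_bond_link_up(struct net_device *slave)
{
	pr_info("%s: link up, re-push offloaded LAG flows\n", slave->name);
}

static void example_bond_link_down(struct net_device *slave)
{
	pr_info("%s: link down, flush offloaded LAG flows\n", slave->name);
}

static struct bond_cb example_lag_cb = {
	.bond_cb_link_up = example_bond_link_up,
	.bond_cb_link_down = example_bond_link_down,
};

static int __init example_lag_init(void)
{
	/* bond_register_cb() copies the structure, so a static instance
	 * is fine; it returns -1 on allocation failure. */
	return bond_register_cb(&example_lag_cb);
}

static void __exit example_lag_exit(void)
{
	bond_unregister_cb();
}

module_init(example_lag_init);
module_exit(example_lag_exit);
MODULE_LICENSE("GPL");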
================== */
+ 
+ /**
+@@ -1073,7 +1105,30 @@ static void ad_mux_machine(struct port *
+ 			ad_disable_collecting_distributing(port,
+ 							   update_slave_arr);
+ 			port->ntt = true;
+-			break;
++
++			/* QCA NSS ECM bonding support - Start */
++			/* Send a notification about the change in state of
++			 * this port. We only want to handle the case where
++			 * the port moves from AD_MUX_COLLECTING_DISTRIBUTING
++			 * -> AD_MUX_ATTACHED.
++			 */
++			if (bond_slave_is_up(port->slave) &&
++			    (last_state == AD_MUX_COLLECTING_DISTRIBUTING)) {
++				struct bond_cb *lag_cb_main;
++
++				rcu_read_lock();
++				lag_cb_main = rcu_dereference(bond_cb);
++				if (lag_cb_main &&
++				    lag_cb_main->bond_cb_link_down) {
++					struct net_device *dev;
++
++					dev = port->slave->dev;
++					lag_cb_main->bond_cb_link_down(dev);
++				}
++				rcu_read_unlock();
++			}
++
++			break; /* QCA NSS ECM bonding support - End */
+ 		case AD_MUX_COLLECTING_DISTRIBUTING:
+ 			port->actor_oper_port_state |= LACP_STATE_COLLECTING;
+ 			port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
+@@ -1917,13 +1972,24 @@ static void ad_enable_collecting_distrib
+ 					      bool *update_slave_arr)
+ {
+ 	if (port->aggregator->is_active) {
+-		slave_dbg(port->slave->bond->dev, port->slave->dev,
++		struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */
++		slave_dbg(port->slave->bond->dev, port->slave->dev,
+ 			  "Enabling port %d (LAG %d)\n",
+ 			  port->actor_port_number,
+ 			  port->aggregator->aggregator_identifier);
+ 		__enable_port(port);
+ 		/* Slave array needs update */
+ 		*update_slave_arr = true;
++
++		/* QCA NSS ECM bonding support - Start */
++		rcu_read_lock();
++		lag_cb_main = rcu_dereference(bond_cb);
++
++		if (lag_cb_main && lag_cb_main->bond_cb_link_up)
++			lag_cb_main->bond_cb_link_up(port->slave->dev);
++
++		rcu_read_unlock();
++		/* QCA NSS ECM bonding support - End */
+ 	}
+ }
+ 
+@@ -2683,6 +2749,104 @@ int bond_3ad_get_active_agg_info(struct
+ 	return ret;
+ }
+ 
++/* QCA NSS ECM bonding support - Start */
++/* bond_3ad_get_tx_dev - Calculate egress interface for a given packet,
++ * for a LAG that is configured in 802.3AD mode
++ * @skb: pointer to skb to be egressed
++ * @src_mac: pointer to source L2 address
++ * @dst_mac: pointer to destination L2 address
++ * @src: pointer to source L3 address
++ * @dst: pointer to destination L3 address
++ * @protocol: L3 protocol id from L2 header
++ * @bond_dev: pointer to bond master device
++ *
++ * If @skb is NULL, bond_xmit_hash is used to calculate the hash using L2/L3
++ * addresses.
++ * ++ * Returns: Either valid slave device, or NULL otherwise ++ */ ++struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, u8 *src_mac, ++ u8 *dst_mac, void *src, ++ void *dst, u16 protocol, ++ struct net_device *bond_dev, ++ __be16 *layer4hdr) ++{ ++ struct bonding *bond = netdev_priv(bond_dev); ++ struct aggregator *agg; ++ struct ad_info ad_info; ++ struct list_head *iter; ++ struct slave *slave; ++ struct slave *first_ok_slave = NULL; ++ u32 hash = 0; ++ int slaves_in_agg; ++ int slave_agg_no = 0; ++ int agg_id; ++ ++ if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { ++ pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n", ++ bond_dev->name); ++ return NULL; ++ } ++ ++ slaves_in_agg = ad_info.ports; ++ agg_id = ad_info.aggregator_id; ++ ++ if (slaves_in_agg == 0) { ++ pr_debug("%s: Error: active aggregator is empty\n", ++ bond_dev->name); ++ return NULL; ++ } ++ ++ if (skb) { ++ hash = bond_xmit_hash(bond, skb); ++ slave_agg_no = hash % slaves_in_agg; ++ } else { ++ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 && ++ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 && ++ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) { ++ pr_debug("%s: Error: Unsupported hash policy for 802.3AD fast path\n", ++ bond_dev->name); ++ return NULL; ++ } ++ ++ hash = bond_xmit_hash_without_skb(src_mac, dst_mac, ++ src, dst, protocol, ++ bond_dev, layer4hdr); ++ slave_agg_no = hash % slaves_in_agg; ++ } ++ ++ bond_for_each_slave_rcu(bond, slave, iter) { ++ agg = SLAVE_AD_INFO(slave)->port.aggregator; ++ if (!agg || agg->aggregator_identifier != agg_id) ++ continue; ++ ++ if (slave_agg_no >= 0) { ++ if (!first_ok_slave && bond_slave_can_tx(slave)) ++ first_ok_slave = slave; ++ slave_agg_no--; ++ continue; ++ } ++ ++ if (bond_slave_can_tx(slave)) ++ return slave->dev; ++ } ++ ++ if (slave_agg_no >= 0) { ++ pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n", ++ bond_dev->name, agg_id); ++ return NULL; ++ } ++ ++ /* we couldn't find any suitable slave after the agg_no, so use the ++ * first suitable found, if found. 
++ */ ++ if (first_ok_slave) ++ return first_ok_slave->dev; ++ ++ return NULL; ++} ++/* QCA NSS ECM bonding support - End */ ++ + int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, + struct slave *slave) + { +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -1190,6 +1190,23 @@ void bond_change_active_slave(struct bon + if (BOND_MODE(bond) == BOND_MODE_8023AD) + bond_3ad_handle_link_change(new_active, BOND_LINK_UP); + ++ /* QCA NSS ECM bonding support - Start */ ++ if (bond->params.mode == BOND_MODE_XOR) { ++ struct bond_cb *lag_cb_main; ++ ++ rcu_read_lock(); ++ lag_cb_main = rcu_dereference(bond_cb); ++ if (lag_cb_main && ++ lag_cb_main->bond_cb_link_up) { ++ struct net_device *dev; ++ ++ dev = new_active->dev; ++ lag_cb_main->bond_cb_link_up(dev); ++ } ++ rcu_read_unlock(); ++ } ++ /* QCA NSS ECM bonding support - End */ ++ + if (bond_is_lb(bond)) + bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP); + } else { +@@ -1834,7 +1851,8 @@ int bond_enslave(struct net_device *bond + const struct net_device_ops *slave_ops = slave_dev->netdev_ops; + struct slave *new_slave = NULL, *prev_slave; + struct sockaddr_storage ss; +- int link_reporting; ++ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */ ++ int link_reporting; + int res = 0, i; + + if (slave_dev->flags & IFF_MASTER && +@@ -2279,6 +2297,15 @@ int bond_enslave(struct net_device *bond + bond_is_active_slave(new_slave) ? "an active" : "a backup", + new_slave->link != BOND_LINK_DOWN ? "an up" : "a down"); + ++ /* QCA NSS ECM bonding support - Start */ ++ rcu_read_lock(); ++ lag_cb_main = rcu_dereference(bond_cb); ++ if (lag_cb_main && lag_cb_main->bond_cb_enslave) ++ lag_cb_main->bond_cb_enslave(slave_dev); ++ ++ rcu_read_unlock(); ++ /* QCA NSS ECM bonding support - End */ ++ + /* enslave is successful */ + bond_queue_slave_event(new_slave); + return 0; +@@ -2344,6 +2371,15 @@ err_undo_flags: + } + } + ++ /* QCA NSS ECM bonding support - Start */ ++ rcu_read_lock(); ++ lag_cb_main = rcu_dereference(bond_cb); ++ if (lag_cb_main && lag_cb_main->bond_cb_enslave) ++ lag_cb_main->bond_cb_enslave(slave_dev); ++ ++ rcu_read_unlock(); ++ /* QCA NSS ECM bonding support - End */ ++ + return res; + } + +@@ -2366,7 +2402,8 @@ static int __bond_release_one(struct net + struct slave *slave, *oldcurrent; + struct sockaddr_storage ss; + int old_flags = bond_dev->flags; +- netdev_features_t old_features = bond_dev->features; ++ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */ ++ netdev_features_t old_features = bond_dev->features; + + /* slave is not a slave or master is not master of this slave */ + if (!(slave_dev->flags & IFF_SLAVE) || +@@ -2387,6 +2424,15 @@ static int __bond_release_one(struct net + + bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW); + ++ /* QCA NSS ECM bonding support - Start */ ++ rcu_read_lock(); ++ lag_cb_main = rcu_dereference(bond_cb); ++ if (lag_cb_main && lag_cb_main->bond_cb_release) ++ lag_cb_main->bond_cb_release(slave_dev); ++ ++ rcu_read_unlock(); ++ /* QCA NSS ECM bonding support - End */ ++ + bond_sysfs_slave_del(slave); + + /* recompute stats just before removing the slave */ +@@ -2709,6 +2755,8 @@ static void bond_miimon_commit(struct bo + struct slave *slave, *primary, *active; + bool do_failover = false; + struct list_head *iter; ++ struct net_device *slave_dev = NULL; /* QCA NSS ECM bonding support */ ++ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */ + + ASSERT_RTNL(); + +@@ -2748,6 +2796,12 @@ 
static void bond_miimon_commit(struct bo + bond_set_active_slave(slave); + } + ++ /* QCA NSS ECM bonding support - Start */ ++ if ((bond->params.mode == BOND_MODE_XOR) && ++ (!slave_dev)) ++ slave_dev = slave->dev; ++ /* QCA NSS ECM bonding support - End */ ++ + slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n", + slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, + slave->duplex ? "full" : "half"); +@@ -2796,6 +2850,16 @@ static void bond_miimon_commit(struct bo + unblock_netpoll_tx(); + } + ++ /* QCA NSS ECM bonding support - Start */ ++ rcu_read_lock(); ++ lag_cb_main = rcu_dereference(bond_cb); ++ ++ if (slave_dev && lag_cb_main && lag_cb_main->bond_cb_link_up) ++ lag_cb_main->bond_cb_link_up(slave_dev); ++ ++ rcu_read_unlock(); ++ /* QCA NSS ECM bonding support - End */ ++ + bond_set_carrier(bond); + } + +@@ -4048,8 +4112,219 @@ static inline u32 bond_eth_hash(struct s + return 0; + + ep = (struct ethhdr *)(data + mhoff); +- return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto); ++ return ep->h_dest[5] ^ ep->h_source[5]; /* QCA NSS ECM bonding support */ ++} ++ ++/* QCA NSS ECM bonding support - Start */ ++/* Extract the appropriate headers based on bond's xmit policy */ ++static bool bond_flow_dissect_without_skb(struct bonding *bond, ++ u8 *src_mac, u8 *dst_mac, ++ void *psrc, void *pdst, ++ u16 protocol, __be16 *layer4hdr, ++ struct flow_keys *fk) ++{ ++ u32 *src = NULL; ++ u32 *dst = NULL; ++ ++ fk->ports.ports = 0; ++ src = (uint32_t *)psrc; ++ dst = (uint32_t *)pdst; ++ ++ if (protocol == htons(ETH_P_IP)) { ++ /* V4 addresses and address type*/ ++ fk->addrs.v4addrs.src = src[0]; ++ fk->addrs.v4addrs.dst = dst[0]; ++ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; ++ } else if (protocol == htons(ETH_P_IPV6)) { ++ /* V6 addresses and address type*/ ++ memcpy(&fk->addrs.v6addrs.src, src, sizeof(struct in6_addr)); ++ memcpy(&fk->addrs.v6addrs.dst, dst, sizeof(struct in6_addr)); ++ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; ++ } else { ++ return false; ++ } ++ if ((bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) && ++ (layer4hdr)) ++ fk->ports.ports = *layer4hdr; ++ ++ return true; ++} ++ ++/* bond_xmit_hash_without_skb - Applies load balancing algorithm for a packet, ++ * to calculate hash for a given set of L2/L3 addresses. Does not ++ * calculate egress interface. 
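A hedged example of calling bond_xmit_hash_without_skb(), whose body follows below; the MAC/IP/port values and the helper name are invented for illustration:

/* Illustrative only: flow hash for an IPv4/UDP flow with no skb. */
static u32 example_flow_hash(struct net_device *bond_dev)
{
	u8 smac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 dmac[ETH_ALEN] = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb };
	__be32 sip = htonl(0xc0a80101);			/* 192.168.1.1 */
	__be32 dip = htonl(0xc0a80102);			/* 192.168.1.2 */
	__be16 ports[2] = { htons(12345), htons(53) };	/* src, dst */

	/* layer4hdr points at the start of the transport header, i.e.
	 * the port pair, when the layer3+4 policy is in effect. */
	return bond_xmit_hash_without_skb(smac, dmac, &sip, &dip,
					  htons(ETH_P_IP), bond_dev,
					  ports);
}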
++ */ ++uint32_t bond_xmit_hash_without_skb(u8 *src_mac, u8 *dst_mac, ++ void *psrc, void *pdst, u16 protocol, ++ struct net_device *bond_dev, ++ __be16 *layer4hdr) ++{ ++ struct bonding *bond = netdev_priv(bond_dev); ++ struct flow_keys flow; ++ u32 hash = 0; ++ ++ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || ++ !bond_flow_dissect_without_skb(bond, src_mac, dst_mac, psrc, ++ pdst, protocol, layer4hdr, &flow)) ++ return (dst_mac[5] ^ src_mac[5]); ++ ++ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23) ++ hash = dst_mac[5] ^ src_mac[5]; ++ else if (layer4hdr) ++ hash = (__force u32)flow.ports.ports; ++ ++ hash ^= (__force u32)flow_get_u32_dst(&flow) ^ ++ (__force u32)flow_get_u32_src(&flow); ++ hash ^= (hash >> 16); ++ hash ^= (hash >> 8); ++ ++ return hash; ++} ++ ++/* bond_xor_get_tx_dev - Calculate egress interface for a given packet for a LAG ++ * that is configured in balance-xor mode ++ * @skb: pointer to skb to be egressed ++ * @src_mac: pointer to source L2 address ++ * @dst_mac: pointer to destination L2 address ++ * @src: pointer to source L3 address in network order ++ * @dst: pointer to destination L3 address in network order ++ * @protocol: L3 protocol ++ * @bond_dev: pointer to bond master device ++ * ++ * If @skb is NULL, bond_xmit_hash_without_skb is used to calculate hash using ++ * L2/L3 addresses. ++ * ++ * Returns: Either valid slave device, or NULL otherwise ++ */ ++static struct net_device *bond_xor_get_tx_dev(struct sk_buff *skb, ++ u8 *src_mac, u8 *dst_mac, ++ void *src, void *dst, ++ u16 protocol, ++ struct net_device *bond_dev, ++ __be16 *layer4hdr) ++{ ++ struct bonding *bond = netdev_priv(bond_dev); ++ int slave_cnt = READ_ONCE(bond->slave_cnt); ++ int slave_id = 0, i = 0; ++ u32 hash; ++ struct list_head *iter; ++ struct slave *slave; ++ ++ if (slave_cnt == 0) { ++ pr_debug("%s: Error: No slave is attached to the interface\n", ++ bond_dev->name); ++ return NULL; ++ } ++ ++ if (skb) { ++ hash = bond_xmit_hash(bond, skb); ++ slave_id = hash % slave_cnt; ++ } else { ++ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 && ++ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 && ++ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) { ++ pr_debug("%s: Error: Unsupported hash policy for balance-XOR fast path\n", ++ bond_dev->name); ++ return NULL; ++ } ++ ++ hash = bond_xmit_hash_without_skb(src_mac, dst_mac, src, ++ dst, protocol, bond_dev, ++ layer4hdr); ++ slave_id = hash % slave_cnt; ++ } ++ ++ i = slave_id; ++ ++ /* Here we start from the slave with slave_id */ ++ bond_for_each_slave_rcu(bond, slave, iter) { ++ if (--i < 0) { ++ if (bond_slave_can_tx(slave)) ++ return slave->dev; ++ } ++ } ++ ++ /* Here we start from the first slave up to slave_id */ ++ i = slave_id; ++ bond_for_each_slave_rcu(bond, slave, iter) { ++ if (--i < 0) ++ break; ++ if (bond_slave_can_tx(slave)) ++ return slave->dev; ++ } ++ ++ return NULL; ++} ++ ++/* bond_get_tx_dev - Calculate egress interface for a given packet. 
++ * ++ * Supports 802.3AD and balance-xor modes ++ * ++ * @skb: pointer to skb to be egressed, if valid ++ * @src_mac: pointer to source L2 address ++ * @dst_mac: pointer to destination L2 address ++ * @src: pointer to source L3 address in network order ++ * @dst: pointer to destination L3 address in network order ++ * @protocol: L3 protocol id from L2 header ++ * @bond_dev: pointer to bond master device ++ * ++ * Returns: Either valid slave device, or NULL for un-supported LAG modes ++ */ ++struct net_device *bond_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac, ++ u8 *dst_mac, void *src, ++ void *dst, u16 protocol, ++ struct net_device *bond_dev, ++ __be16 *layer4hdr) ++{ ++ struct bonding *bond; ++ ++ if (!bond_dev) ++ return NULL; ++ ++ if (!((bond_dev->priv_flags & IFF_BONDING) && ++ (bond_dev->flags & IFF_MASTER))) ++ return NULL; ++ ++ bond = netdev_priv(bond_dev); ++ ++ switch (bond->params.mode) { ++ case BOND_MODE_XOR: ++ return bond_xor_get_tx_dev(skb, src_mac, dst_mac, ++ src, dst, protocol, ++ bond_dev, layer4hdr); ++ case BOND_MODE_8023AD: ++ return bond_3ad_get_tx_dev(skb, src_mac, dst_mac, ++ src, dst, protocol, ++ bond_dev, layer4hdr); ++ default: ++ return NULL; ++ } + } ++EXPORT_SYMBOL(bond_get_tx_dev); ++ ++/* In bond_xmit_xor() , we determine the output device by using a pre- ++ * determined xmit_hash_policy(), If the selected device is not enabled, ++ * find the next active slave. ++ */ ++static int bond_xmit_xor(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct bonding *bond = netdev_priv(dev); ++ struct net_device *outdev; ++ ++ outdev = bond_xor_get_tx_dev(skb, NULL, NULL, NULL, ++ NULL, 0, dev, NULL); ++ if (!outdev) ++ goto out; ++ ++ bond_dev_queue_xmit(bond, skb, outdev); ++ goto final; ++out: ++ /* no suitable interface, frame not sent */ ++ dev_kfree_skb(skb); ++final: ++ return NETDEV_TX_OK; ++} ++/* QCA NSS ECM bonding support - End */ + + static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data, + int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34) +@@ -5196,15 +5471,23 @@ static netdev_tx_t bond_3ad_xor_xmit(str + struct net_device *dev) + { + struct bonding *bond = netdev_priv(dev); +- struct bond_up_slave *slaves; +- struct slave *slave; ++ /* QCA NSS ECM bonding support - Start */ ++ struct net_device *outdev = NULL; ++ outdev = bond_3ad_get_tx_dev(skb, NULL, NULL, NULL, ++ NULL, 0, dev, NULL); + +- slaves = rcu_dereference(bond->usable_slaves); +- slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); +- if (likely(slave)) +- return bond_dev_queue_xmit(bond, skb, slave->dev); ++ if (!outdev) ++ goto out; + +- return bond_tx_drop(dev, skb); ++ bond_dev_queue_xmit(bond, skb, outdev); ++ goto final; ++ ++out: ++ dev_kfree_skb(skb); ++ ++final: ++ return NETDEV_TX_OK; ++/* QCA NSS ECM bonding support - End */ + } + + /* in broadcast mode, we send everything to all usable interfaces. 
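A sketch of how an offload engine might use the exported bond_get_tx_dev(); holding rcu_read_lock() across the call and pinning the returned slave are this sketch's assumptions about the required discipline, since the lookup walks the slave list with bond_for_each_slave_rcu():

/* Illustrative: resolve and pin the egress slave for an IPv4 flow. */
static struct net_device *example_pin_tx_slave(struct net_device *bond_dev,
					       u8 *smac, u8 *dmac,
					       __be32 *sip, __be32 *dip,
					       __be16 *l4hdr)
{
	struct net_device *slave;

	rcu_read_lock();
	slave = bond_get_tx_dev(NULL, smac, dmac, sip, dip,
				htons(ETH_P_IP), bond_dev, l4hdr);
	if (slave)
		dev_hold(slave);	/* keep it across offload setup */
	rcu_read_unlock();

	return slave;	/* NULL for unsupported modes or no usable slave */
}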
*/ +@@ -5454,8 +5737,9 @@ static netdev_tx_t __bond_start_xmit(str + return bond_xmit_roundrobin(skb, dev); + case BOND_MODE_ACTIVEBACKUP: + return bond_xmit_activebackup(skb, dev); +- case BOND_MODE_8023AD: + case BOND_MODE_XOR: ++ return bond_xmit_xor(skb, dev); /* QCA NSS ECM bonding support */ ++ case BOND_MODE_8023AD: + return bond_3ad_xor_xmit(skb, dev); + case BOND_MODE_BROADCAST: + return bond_xmit_broadcast(skb, dev); +--- a/include/net/bond_3ad.h ++++ b/include/net/bond_3ad.h +@@ -302,8 +302,15 @@ int bond_3ad_lacpdu_recv(const struct sk + struct slave *slave); + int bond_3ad_set_carrier(struct bonding *bond); + void bond_3ad_update_lacp_rate(struct bonding *bond); ++/* QCA NSS ECM bonding support */ ++struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac, ++ uint8_t *dst_mac, void *src, ++ void *dst, uint16_t protocol, ++ struct net_device *bond_dev, ++ __be16 *layer4hdr); ++/* QCA NSS ECM bonding support */ ++ + void bond_3ad_update_ad_actor_settings(struct bonding *bond); + int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats); + size_t bond_3ad_stats_size(void); + #endif /* _NET_BOND_3AD_H */ +- +--- a/include/net/bonding.h ++++ b/include/net/bonding.h +@@ -83,6 +83,8 @@ + #define bond_for_each_slave(bond, pos, iter) \ + netdev_for_each_lower_private((bond)->dev, pos, iter) + ++extern struct bond_cb __rcu *bond_cb; /* QCA NSS ECM bonding support */ ++ + /* Caller must have rcu_read_lock */ + #define bond_for_each_slave_rcu(bond, pos, iter) \ + netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) +@@ -685,6 +687,12 @@ struct bond_vlan_tag *bond_verify_device + int level); + int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave); + void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay); ++/* QCA NSS ECM bonding support - Start */ ++uint32_t bond_xmit_hash_without_skb(uint8_t *src_mac, uint8_t *dst_mac, ++ void *psrc, void *pdst, uint16_t protocol, ++ struct net_device *bond_dev, ++ __be16 *layer4hdr); ++/* QCA NSS ECM bonding support - End */ + void bond_work_init_all(struct bonding *bond); + + #ifdef CONFIG_PROC_FS +@@ -789,4 +797,18 @@ static inline netdev_tx_t bond_tx_drop(s + return NET_XMIT_DROP; + } + ++/* QCA NSS ECM bonding support - Start */ ++struct bond_cb { ++ void (*bond_cb_link_up)(struct net_device *slave); ++ void (*bond_cb_link_down)(struct net_device *slave); ++ void (*bond_cb_enslave)(struct net_device *slave); ++ void (*bond_cb_release)(struct net_device *slave); ++ void (*bond_cb_delete_by_slave)(struct net_device *slave); ++ void (*bond_cb_delete_by_mac)(uint8_t *mac_addr); ++}; ++ ++extern int bond_register_cb(struct bond_cb *cb); ++extern void bond_unregister_cb(void); ++/* QCA NSS ECM bonding support - End */ ++ + #endif /* _NET_BONDING_H */ diff --git a/target/linux/qualcommax/patches-6.6/2600-5-qca-nss-ecm-support-macvlan.patch b/target/linux/qualcommax/patches-6.6/2600-5-qca-nss-ecm-support-macvlan.patch new file mode 100644 index 00000000000000..29f7e96d791328 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2600-5-qca-nss-ecm-support-macvlan.patch @@ -0,0 +1,96 @@ +--- a/include/linux/if_macvlan.h ++++ b/include/linux/if_macvlan.h +@@ -15,6 +15,13 @@ struct macvlan_port; + #define MACVLAN_MC_FILTER_BITS 8 + #define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) + ++/* QCA NSS ECM Support - Start */ ++/* ++ * Callback for updating interface statistics for macvlan flows offloaded from host CPU. 
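A sketch of the offload-engine side of this callback, going through the macvlan_offload_stats_update() wrapper that the if_macvlan.h hunk below adds; the helper name and counters are illustrative:

/* Push flow counters accumulated in an accelerator back to a macvlan. */
static void example_sync_macvlan_stats(struct net_device *dev,
				       u64 rx_pkts, u64 rx_bytes)
{
	struct rtnl_link_stats64 delta = {
		.rx_packets = rx_pkts,
		.rx_bytes = rx_bytes,
	};

	if (!netif_is_macvlan(dev))
		return;

	/* false: this delta carries no multicast receive traffic */
	macvlan_offload_stats_update(dev, &delta, false);
}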
++ */ ++typedef void (*macvlan_offload_stats_update_cb_t)(struct net_device *dev, struct rtnl_link_stats64 *stats, bool update_mcast_rx_stats); ++/* QCA NSS ECM Support - End */ ++ + struct macvlan_dev { + struct net_device *dev; + struct list_head list; +@@ -35,6 +42,7 @@ struct macvlan_dev { + #ifdef CONFIG_NET_POLL_CONTROLLER + struct netpoll *netpoll; + #endif ++ macvlan_offload_stats_update_cb_t offload_stats_update; /* QCA NSS ECM support */ + }; + + static inline void macvlan_count_rx(const struct macvlan_dev *vlan, +@@ -107,4 +115,26 @@ static inline int macvlan_release_l2fw_o + macvlan->accel_priv = NULL; + return dev_uc_add(macvlan->lowerdev, dev->dev_addr); + } ++ ++/* QCA NSS ECM Support - Start */ ++#if IS_ENABLED(CONFIG_MACVLAN) ++static inline void ++macvlan_offload_stats_update(struct net_device *dev, ++ struct rtnl_link_stats64 *stats, ++ bool update_mcast_rx_stats) ++{ ++ struct macvlan_dev *macvlan = netdev_priv(dev); ++ ++ macvlan->offload_stats_update(dev, stats, update_mcast_rx_stats); ++} ++ ++static inline enum ++macvlan_mode macvlan_get_mode(struct net_device *dev) ++{ ++ struct macvlan_dev *macvlan = netdev_priv(dev); ++ ++ return macvlan->mode; ++} ++#endif ++/* QCA NSS ECM Support - End */ + #endif /* _LINUX_IF_MACVLAN_H */ +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -960,6 +960,34 @@ static void macvlan_uninit(struct net_de + macvlan_port_destroy(port->dev); + } + ++/* QCA NSS ECM Support - Start */ ++/* Update macvlan statistics processed by offload engines */ ++static void macvlan_dev_update_stats(struct net_device *dev, ++ struct rtnl_link_stats64 *offl_stats, ++ bool update_mcast_rx_stats) ++{ ++ struct vlan_pcpu_stats *stats; ++ struct macvlan_dev *macvlan; ++ ++ /* Is this a macvlan? */ ++ if (!netif_is_macvlan(dev)) ++ return; ++ ++ macvlan = netdev_priv(dev); ++ stats = this_cpu_ptr(macvlan->pcpu_stats); ++ u64_stats_update_begin(&stats->syncp); ++ u64_stats_add(&stats->rx_packets, offl_stats->rx_packets); ++ u64_stats_add(&stats->rx_bytes, offl_stats->rx_bytes); ++ u64_stats_add(&stats->tx_packets, offl_stats->tx_packets); ++ u64_stats_add(&stats->tx_bytes, offl_stats->tx_bytes); ++ /* Update multicast statistics */ ++ if (unlikely(update_mcast_rx_stats)) { ++ u64_stats_add(&stats->rx_multicast, offl_stats->rx_packets); ++ } ++ u64_stats_update_end(&stats->syncp); ++} ++/* QCA NSS ECM Support - End */ ++ + static void macvlan_dev_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) + { +@@ -1506,6 +1534,7 @@ int macvlan_common_newlink(struct net *s + vlan->dev = dev; + vlan->port = port; + vlan->set_features = MACVLAN_FEATURES; ++ vlan->offload_stats_update = macvlan_dev_update_stats; /* QCA NSS ECM Support */ + + vlan->mode = MACVLAN_MODE_VEPA; + if (data && data[IFLA_MACVLAN_MODE]) diff --git a/target/linux/qualcommax/patches-6.6/2600-6-qca-nss-ecm-support-netfilter-DSCPREMARK.patch b/target/linux/qualcommax/patches-6.6/2600-6-qca-nss-ecm-support-netfilter-DSCPREMARK.patch new file mode 100644 index 00000000000000..9589b599bd060e --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2600-6-qca-nss-ecm-support-netfilter-DSCPREMARK.patch @@ -0,0 +1,69 @@ +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -174,6 +174,13 @@ config NF_CONNTRACK_TIMEOUT + + If unsure, say `N'. + ++config NF_CONNTRACK_DSCPREMARK_EXT ++ bool 'Connection tracking extension for dscp remark target' ++ depends on NETFILTER_ADVANCED ++ help ++ This option enables support for connection tracking extension ++ for dscp remark. 
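struct nf_ct_dscpremark_ext itself is added elsewhere in this series and is not shown here; below is a hedged sketch of attaching and finding the extension with the stock conntrack extension API, assuming the usual nf_ct_ext_find()/nf_ct_ext_add() semantics:

#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
/* Find the DSCP-remark extension on a conntrack entry, attaching it on
 * first use. The extension's field layout is defined elsewhere in this
 * patch series. */
static struct nf_ct_dscpremark_ext *example_dscpremark_get(struct nf_conn *ct)
{
	struct nf_ct_dscpremark_ext *ext;

	ext = nf_ct_ext_find(ct, NF_CT_EXT_DSCPREMARK);
	if (ext)
		return ext;

	/* Adding is only valid before the entry is confirmed. */
	return nf_ct_ext_add(ct, NF_CT_EXT_DSCPREMARK, GFP_ATOMIC);
}
#endif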
++ + config NF_CONNTRACK_TIMESTAMP + bool 'Connection tracking timestamping' + depends on NETFILTER_ADVANCED +--- a/include/net/netfilter/nf_conntrack_extend.h ++++ b/include/net/netfilter/nf_conntrack_extend.h +@@ -31,6 +31,10 @@ enum nf_ct_ext_id { + #if IS_ENABLED(CONFIG_NET_ACT_CT) + NF_CT_EXT_ACT_CT, + #endif ++#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT ++ NF_CT_EXT_DSCPREMARK, /* QCA NSS ECM support */ ++#endif ++ + NF_CT_EXT_NUM, + }; + +--- a/net/netfilter/nf_conntrack_extend.c ++++ b/net/netfilter/nf_conntrack_extend.c +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + #include + + #define NF_CT_EXT_PREALLOC 128u /* conntrack events are on by default */ +@@ -54,6 +55,9 @@ static const u8 nf_ct_ext_type_len[NF_CT + #if IS_ENABLED(CONFIG_NET_ACT_CT) + [NF_CT_EXT_ACT_CT] = sizeof(struct nf_conn_act_ct_ext), + #endif ++#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT ++ [NF_CT_EXT_DSCPREMARK] = sizeof(struct nf_ct_dscpremark_ext), ++#endif + }; + + static __always_inline unsigned int total_extension_size(void) +@@ -86,6 +90,9 @@ static __always_inline unsigned int tota + #if IS_ENABLED(CONFIG_NET_ACT_CT) + + sizeof(struct nf_conn_act_ct_ext) + #endif ++#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT ++ + sizeof(struct nf_ct_dscpremark_ext) ++#endif + ; + } + +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -15,6 +15,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_OVS) + nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o + nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o + nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o ++nf_conntrack-$(CONFIG_NF_CONNTRACK_DSCPREMARK_EXT) += nf_conntrack_dscpremark_ext.o + ifeq ($(CONFIG_NF_CONNTRACK),m) + nf_conntrack-$(CONFIG_DEBUG_INFO_BTF_MODULES) += nf_conntrack_bpf.o + else ifeq ($(CONFIG_NF_CONNTRACK),y) diff --git a/target/linux/qualcommax/patches-6.6/2600-7-qca-nss-ecm-add-missing-net-defines.patch b/target/linux/qualcommax/patches-6.6/2600-7-qca-nss-ecm-add-missing-net-defines.patch new file mode 100644 index 00000000000000..8911bc60112df3 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2600-7-qca-nss-ecm-add-missing-net-defines.patch @@ -0,0 +1,35 @@ +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1777,7 +1777,9 @@ enum netdev_priv_flags_ext { + IFF_EXT_PPP_PPTP = 1<<3, + IFF_EXT_GRE_V4_TAP = 1<<4, + IFF_EXT_GRE_V6_TAP = 1<<5, +- IFF_EXT_IFB = 1<<6, ++ IFF_EXT_IFB = 1<<6, ++ IFF_EXT_MAPT = 1<<7, ++ IFF_EXT_HW_NO_OFFLOAD = 1<<8, + }; + + #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN +--- a/include/uapi/linux/in.h ++++ b/include/uapi/linux/in.h +@@ -63,6 +63,8 @@ enum { + #define IPPROTO_MTP IPPROTO_MTP + IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */ + #define IPPROTO_BEETPH IPPROTO_BEETPH ++ IPPROTO_ETHERIP = 97, /* ETHERIP protocol number */ ++#define IPPROTO_ETHERIP IPPROTO_ETHERIP + IPPROTO_ENCAP = 98, /* Encapsulation Header */ + #define IPPROTO_ENCAP IPPROTO_ENCAP + IPPROTO_PIM = 103, /* Protocol Independent Multicast */ +--- a/tools/include/uapi/linux/in.h ++++ b/tools/include/uapi/linux/in.h +@@ -63,6 +63,8 @@ enum { + #define IPPROTO_MTP IPPROTO_MTP + IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */ + #define IPPROTO_BEETPH IPPROTO_BEETPH ++ IPPROTO_ETHERIP = 97, /* ETHERIP protocol number */ ++#define IPPROTO_ETHERIP IPPROTO_ETHERIP + IPPROTO_ENCAP = 98, /* Encapsulation Header */ + #define IPPROTO_ENCAP IPPROTO_ENCAP + IPPROTO_PIM = 103, /* Protocol Independent Multicast */ diff --git 
a/target/linux/qualcommax/patches-6.6/2600-8-qca-nss-ecm-support-MLO-bonding.patch b/target/linux/qualcommax/patches-6.6/2600-8-qca-nss-ecm-support-MLO-bonding.patch new file mode 100644 index 00000000000000..989862ee862c9a --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2600-8-qca-nss-ecm-support-MLO-bonding.patch @@ -0,0 +1,537 @@ +From 0403eceee14bb6eb4e417102cae830b7509b1554 Mon Sep 17 00:00:00 2001 +From: Shivani Soni +Date: Thu, 17 Nov 2022 00:16:38 +0530 +Subject: [PATCH] arm/arm64: Add support for MLO bonding + +1. Introduced BOND_MODE_MLO to support MLO bonding +2. Transmit handling according to new mode + +Change-Id: Ib272e77cce56ee50b0a13305fac8fae76743c206 +Signed-off-by: Shivani Soni +--- + drivers/net/bonding/bond_main.c | 212 ++++++++++++++++++++++++----- + drivers/net/bonding/bond_options.c | 1 + + include/net/bonding.h | 30 +++- + include/uapi/linux/if_bonding.h | 1 + + 4 files changed, 210 insertions(+), 34 deletions(-) + +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -1436,6 +1436,10 @@ static netdev_features_t bond_fix_featur + return features; + } + ++#define BOND_MLO_VLAN_FEATURES (NETIF_F_SG | \ ++ NETIF_F_FRAGLIST | \ ++ NETIF_F_HIGHDMA | NETIF_F_LRO) ++ + #define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ + NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ + NETIF_F_HIGHDMA | NETIF_F_LRO) +@@ -1466,13 +1470,25 @@ static void bond_compute_features(struct + + if (!bond_has_slaves(bond)) + goto done; ++ ++ /* ++ * Use features specific to bond MLO ++ */ ++ if (BOND_MODE(bond) == BOND_MODE_MLO) { ++ vlan_features = BOND_MLO_VLAN_FEATURES; ++ } ++ + vlan_features &= NETIF_F_ALL_FOR_ALL; + mpls_features &= NETIF_F_ALL_FOR_ALL; + + bond_for_each_slave(bond, slave, iter) { +- vlan_features = netdev_increment_features(vlan_features, +- slave->dev->vlan_features, BOND_VLAN_FEATURES); +- ++ if (BOND_MODE(bond) == BOND_MODE_MLO) { ++ vlan_features = netdev_increment_features(vlan_features, ++ slave->dev->vlan_features, BOND_MLO_VLAN_FEATURES); ++ } else { ++ vlan_features = netdev_increment_features(vlan_features, ++ slave->dev->vlan_features, BOND_VLAN_FEATURES); ++ } + enc_features = netdev_increment_features(enc_features, + slave->dev->hw_enc_features, + BOND_ENC_FEATURES); +@@ -1617,6 +1633,16 @@ static rx_handler_result_t bond_handle_f + bond->dev->addr_len); + } + ++ /* ++ * Set the PACKET_HOST for MLO mode as ++ * MLO bond netdevice needs to support routing ++ */ ++ if (BOND_MODE(bond) == BOND_MODE_MLO) { ++ if (ether_addr_equal(bond->dev->dev_addr, eth_hdr(skb)->h_dest)) { ++ skb->pkt_type = PACKET_HOST; ++ } ++ } ++ + return ret; + } + +@@ -1862,6 +1888,8 @@ int bond_enslave(struct net_device *bond + return -EPERM; + } + ++ ASSERT_RTNL(); ++ + if (!bond->params.use_carrier && + slave_dev->ethtool_ops->get_link == NULL && + slave_ops->ndo_eth_ioctl == NULL) { +@@ -1975,13 +2003,17 @@ int bond_enslave(struct net_device *bond + call_netdevice_notifiers(NETDEV_JOIN, slave_dev); + + /* If this is the first slave, then we need to set the master's hardware +- * address to be the same as the slave's. ++ * address to be the same as the slave's except for BOND_MODE_MLO. ++ * For BOND_MODE_MLO, master's mac address is MLD address which should ++ * not be same as slave's address. 
+ */ +- if (!bond_has_slaves(bond) && +- bond->dev->addr_assign_type == NET_ADDR_RANDOM) { +- res = bond_set_dev_addr(bond->dev, slave_dev); +- if (res) +- goto err_undo_flags; ++ if (BOND_MODE(bond) != BOND_MODE_MLO) { ++ if (!bond_has_slaves(bond) && ++ bond->dev->addr_assign_type == NET_ADDR_RANDOM) { ++ res = bond_set_dev_addr(bond->dev, slave_dev); ++ if (res) ++ goto err_undo_flags; ++ } + } + + new_slave = bond_alloc_slave(bond, slave_dev); +@@ -2010,18 +2042,21 @@ int bond_enslave(struct net_device *bond + bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr, + slave_dev->addr_len); + +- if (!bond->params.fail_over_mac || +- BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { +- /* Set slave to master's mac address. The application already +- * set the master's mac address to that of the first slave +- */ +- memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); +- ss.ss_family = slave_dev->type; +- res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, +- extack); +- if (res) { +- slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res); +- goto err_restore_mtu; ++ /* Set slave to master's mac address except for BOND_MODE_MLO ++ * as for MLO mode master's mac address is not same as slave's mac address. ++ * The application already set the master's mac address to that of the first slave ++ */ ++ if (BOND_MODE(bond) != BOND_MODE_MLO) { ++ if (!bond->params.fail_over_mac || ++ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { ++ memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len); ++ ss.ss_family = slave_dev->type; ++ res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, ++ extack); ++ if (res) { ++ slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res); ++ goto err_restore_mtu; ++ } + } + } + +@@ -2382,6 +2417,7 @@ err_undo_flags: + + return res; + } ++EXPORT_SYMBOL(bond_enslave); + + /* Try to release the slave device from the bond device + * It is legal to access curr_active_slave without a lock because all the function +@@ -2503,13 +2539,23 @@ static int __bond_release_one(struct net + } + + bond_set_carrier(bond); +- if (!bond_has_slaves(bond)) +- eth_hw_addr_random(bond_dev); ++ ++ /* ++ * Avoid changing the mac address of bond netdevice for MLO case, ++ * This will only be supported from wifi driver. ++ */ ++ if (BOND_MODE(bond) != BOND_MODE_MLO) { ++ if (!bond_has_slaves(bond)) ++ eth_hw_addr_random(bond_dev); ++ } + + unblock_netpoll_tx(); + synchronize_rcu(); + bond->slave_cnt--; + ++ /* ++ * TODO: Avoid MAC address change notification for BOND_MODE_MLO ++ */ + if (!bond_has_slaves(bond)) { + call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); + call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); +@@ -2579,6 +2625,7 @@ int bond_release(struct net_device *bond + { + return __bond_release_one(bond_dev, slave_dev, false, false); + } ++EXPORT_SYMBOL(bond_release); + + /* First release a slave and then destroy the bond if no more slaves are left. + * Must be under rtnl_lock when this function is called. +@@ -2600,6 +2647,29 @@ static int bond_release_and_destroy(stru + return ret; + } + ++/* Destroy the bond for BOND_MODE_MLO if no more slaves are left. ++ * Must be under rtnl_lock when this function is called. 
++ */ ++bool bond_destroy_mlo(struct net_device *bond_dev) ++{ ++ struct bonding *bond = netdev_priv(bond_dev); ++ ++ ASSERT_RTNL(); ++ ++ if (!bond_has_slaves(bond)) { ++ bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; ++ netdev_info(bond_dev, "Destroying bond as no slaves are present\n"); ++ bond_remove_proc_entry(bond); ++ unregister_netdevice(bond_dev); ++ return true; ++ } ++ ++ pr_err("%p: Not able to destroy bond netdevice: %s as slaves are present\n", bond_dev, bond_dev->name); ++ ++ return false; ++} ++EXPORT_SYMBOL(bond_destroy_mlo); ++ + static void bond_info_query(struct net_device *bond_dev, struct ifbond *info) + { + struct bonding *bond = netdev_priv(bond_dev); +@@ -4256,6 +4326,24 @@ static struct net_device *bond_xor_get_t + return NULL; + } + ++/* Transmit function for BOND_MODE_MLO. ++ * Get transmit link interface from registered callback. ++ */ ++struct net_device *bond_mlo_get_tx_dev(struct net_device *bond_dev, u8 *dst_mac) ++{ ++ struct net_device *slave_dev = NULL; ++ struct mlo_bond_info *mlo_info = NULL; ++ void *bond_mlo_ctx; ++ ++ mlo_info = bond_get_mlo_priv(bond_dev); ++ if (mlo_info->bond_get_mlo_tx_netdev) { ++ bond_mlo_ctx = bond_get_mlo_ctx(bond_dev); ++ slave_dev = mlo_info->bond_get_mlo_tx_netdev(bond_mlo_ctx, dst_mac); ++ } ++ ++ return slave_dev; ++} ++ + /* bond_get_tx_dev - Calculate egress interface for a given packet. + * + * Supports 802.3AD and balance-xor modes +@@ -4296,6 +4384,9 @@ struct net_device *bond_get_tx_dev(struc + return bond_3ad_get_tx_dev(skb, src_mac, dst_mac, + src, dst, protocol, + bond_dev, layer4hdr); ++ case BOND_MODE_MLO: ++ return bond_mlo_get_tx_dev(bond_dev, dst_mac); ++ + default: + return NULL; + } +@@ -5054,20 +5145,26 @@ static int bond_set_mac_address(struct n + if (!is_valid_ether_addr(ss->__data)) + return -EADDRNOTAVAIL; + +- bond_for_each_slave(bond, slave, iter) { +- slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n", +- __func__, slave); +- res = dev_set_mac_address(slave->dev, addr, NULL); +- if (res) { +- /* TODO: consider downing the slave +- * and retry ? +- * User should expect communications +- * breakage anyway until ARP finish +- * updating, so... +- */ +- slave_dbg(bond_dev, slave->dev, "%s: err %d\n", +- __func__, res); +- goto unwind; ++ /* ++ * Do not allow mac address change for slave netdevice for BOND_MODE_MLO ++ * as master's mac address is not same as slave's mac address. ++ */ ++ if (BOND_MODE(bond) != BOND_MODE_MLO) { ++ bond_for_each_slave(bond, slave, iter) { ++ slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n", ++ __func__, slave); ++ res = dev_set_mac_address(slave->dev, addr, NULL); ++ if (res) { ++ /* TODO: consider downing the slave ++ * and retry ? ++ * User should expect communications ++ * breakage anyway until ARP finish ++ * updating, so... ++ */ ++ slave_dbg(bond_dev, slave->dev, "%s: err %d\n", ++ __func__, res); ++ goto unwind; ++ } + } + } + +@@ -5719,6 +5816,27 @@ static netdev_tx_t bond_tls_device_xmit( + } + #endif + ++/* In bond_xmit_mlo(), we send the packet and bond netdevice ++ * to registered callback for final xmit. 
++ */ ++static netdev_tx_t bond_xmit_mlo(struct sk_buff *skb, struct net_device *bond_dev) ++{ ++ struct bonding *bond = netdev_priv(bond_dev); ++ int slave_cnt, ret; ++ struct mlo_bond_info *mlo_info = bond_get_mlo_priv(bond_dev); ++ ++ slave_cnt = READ_ONCE(bond->slave_cnt); ++ if (unlikely(slave_cnt == 0) || unlikely(!mlo_info->bond_mlo_xmit_netdev)) { ++ bond_tx_drop(bond_dev, skb); ++ } else { ++ ret = mlo_info->bond_mlo_xmit_netdev(skb, bond_dev); ++ if (ret != NET_XMIT_SUCCESS) ++ netdev_err(bond_dev, "Xmit failed with mode %d %p\n", BOND_MODE(bond), skb); ++ } ++ ++ return NETDEV_TX_OK; ++} ++ + static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct bonding *bond = netdev_priv(dev); +@@ -5747,6 +5865,8 @@ static netdev_tx_t __bond_start_xmit(str + return bond_alb_xmit(skb, dev); + case BOND_MODE_TLB: + return bond_tlb_xmit(skb, dev); ++ case BOND_MODE_MLO: ++ return bond_xmit_mlo(skb, dev); + default: + /* Should never happen, mode already checked */ + netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); +@@ -6182,6 +6302,15 @@ static void bond_destructor(struct net_d + if (bond->id != (~0U)) + clear_bit(bond->id, &bond_id_mask); + /* QCA NSS ECM bonding support */ ++ ++ /* ++ * Wifi driver registered callback to destroy wiphy for MLO bond netdevice ++ */ ++ if (bond_is_mlo_device(bond_dev)) { ++ if (bond->mlo_info.bond_mlo_netdev_priv_destructor) { ++ bond->mlo_info.bond_mlo_netdev_priv_destructor(bond_dev); ++ } ++ } + } + + void bond_setup(struct net_device *bond_dev) +@@ -6743,6 +6872,76 @@ out: + return res; + } + ++/* bond_create_mlo() ++ * Create bond netdevice for BOND_MODE_MLO with MLO specific callback and context. ++ */ ++struct net_device *bond_create_mlo(struct net *net, const char *name, struct mlo_bond_info *mlo_info) ++{ ++ struct net_device *bond_dev; ++ struct bonding *bond; ++ int res; ++ ++ ASSERT_RTNL(); ++ ++ bond_dev = alloc_netdev_mq(sizeof(struct bonding), ++ name ? name : "bond%d", NET_NAME_UNKNOWN, ++ bond_setup, tx_queues); ++ if (!bond_dev) { ++ pr_err("%s: eek! can't alloc netdev!\n", name); ++ return NULL; ++ } ++ ++ bond = netdev_priv(bond_dev); ++ ++ dev_net_set(bond_dev, net); ++ bond_dev->rtnl_link_ops = &bond_link_ops; ++ ++ /* ++ * MLO specific initialization. 
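Putting the MLO API together, a sketch of the Wi-Fi driver side: every example_* symbol is an assumption, the "mld%d" name template is illustrative, and bond_create_mlo() asserts RTNL, so the caller must hold it.

/* Assumed Wi-Fi driver callbacks; bodies omitted in this sketch. */
static int example_mlo_xmit(struct sk_buff *skb, struct net_device *bond_dev);
static struct net_device *example_mlo_pick_link(void *ctx, void *dst_mac);
static void example_mlo_destroy(struct net_device *bond_dev);

static struct net_device *example_create_mld(struct net *net,
					     struct wireless_dev *wdev,
					     void *wifi_ctx)
{
	struct mlo_bond_info info = {
		.bond_mlo_xmit_netdev = example_mlo_xmit,
		.bond_get_mlo_tx_netdev = example_mlo_pick_link,
		.bond_mlo_netdev_priv_destructor = example_mlo_destroy,
		.bond_mlo_ctx = wifi_ctx,
		.wdev = wdev,
	};

	/* bond_create_mlo() copies info into the bond's private data;
	 * tear down later with bond_destroy_mlo() once all links are
	 * released. Caller holds RTNL. */
	return bond_create_mlo(net, "mld%d", &info);
}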
++ */
++	bond_dev->ieee80211_ptr = mlo_info->wdev;
++	bond->params.mode = BOND_MODE_MLO;
++	mlo_info->wdev->netdev = bond_dev;
++
++	memcpy((void *)&bond->mlo_info, (void *)mlo_info, sizeof(*mlo_info));
++	eth_hw_addr_random(bond_dev);
++
++	/*
++	 * Disable HW CSUM as the wlan driver doesn't support it
++	 */
++	bond_dev->hw_features &= ~(NETIF_F_HW_CSUM);
++	bond_dev->features &= ~(NETIF_F_HW_CSUM);
++
++	res = register_netdevice(bond_dev);
++	if (res < 0) {
++		free_netdev(bond_dev);
++		return NULL;
++	}
++
++	netif_carrier_off(bond_dev);
++	bond_work_init_all(bond);
++
++	bond->id = ~0U;
++	if (bond_id_mask != (~0UL)) {
++		bond->id = (u32)ffz(bond_id_mask);
++		set_bit(bond->id, &bond_id_mask);
++	}
++
++	return bond_dev;
++}
++EXPORT_SYMBOL(bond_create_mlo);
++
++/* bond_get_mlo_ctx
++ *	Returns the MLO context stored in the netdev priv of the bond netdevice
++ */
++void *bond_get_mlo_ctx(struct net_device *bond_dev)
++{
++	struct mlo_bond_info *mlo_info = bond_get_mlo_priv(bond_dev);
++
++	return mlo_info->bond_mlo_ctx;
++}
++EXPORT_SYMBOL(bond_get_mlo_ctx);
++
+ static int __net_init bond_net_init(struct net *net)
+ {
+ 	struct bond_net *bn = net_generic(net, bond_net_id);
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -94,6 +94,7 @@ static const struct bond_opt_value bond_
+ 	{ "802.3ad", BOND_MODE_8023AD, 0},
+ 	{ "balance-tlb", BOND_MODE_TLB, 0},
+ 	{ "balance-alb", BOND_MODE_ALB, 0},
++	{ "mode mlo", BOND_MODE_MLO, 0},
+ 	{ NULL, -1, 0},
+ };
+ 
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -23,6 +23,7 @@
+ #include
+ #include
+ #include
++#include
+ 
+ #include
+ #include
+@@ -199,6 +200,22 @@ struct bond_up_slave {
+ 	struct slave *arr[];
+ };
+ 
++/**
++ * mlo_bond_info - maintains the members corresponding to Wi-Fi 7 MLO bonding
++ * @bond_mlo_xmit_netdev: Callback function to provide the skb to the wifi driver for xmit
++ * @bond_get_mlo_tx_netdev: Callback function to get the link interface from the wifi driver for transmit
++ * @bond_mlo_ctx: Private member for the wifi driver
++ * @wdev: ieee80211_ptr for the wifi VAP
++ * @bond_mlo_netdev_priv_destructor: Callback function to remove the wiphy instance from the wifi driver
++ */
++struct mlo_bond_info {
++	int (*bond_mlo_xmit_netdev)(struct sk_buff *skb, struct net_device *bond_dev);
++	struct net_device *(*bond_get_mlo_tx_netdev)(void *bond_mlo_ctx, void *dst);
++	void *bond_mlo_ctx;
++	struct wireless_dev *wdev;
++	void (*bond_mlo_netdev_priv_destructor)(struct net_device *bond_dev);
++};
++
+ /*
+  * Link pseudo-state only used internally by monitors
+  */
+@@ -264,6 +281,8 @@ struct bonding {
+ #endif /* CONFIG_XFRM_OFFLOAD */
+ 	struct bpf_prog *xdp_prog;
+ 	u32 id; /* QCA NSS ECM bonding support */
++	/* MLO mode info */
++	struct mlo_bond_info mlo_info;
+ };
+ 
+ #define bond_slave_get_rcu(dev) \
+@@ -280,6 +299,19 @@ struct bond_vlan_tag {
+ 	unsigned short vlan_id;
+ };
+ 
++/**
++ * Returns false if the net_device is not an MLO bond netdevice
++ *
++ */
++static inline bool bond_is_mlo_device(struct net_device *bond_dev)
++{
++	struct bonding *bond = netdev_priv(bond_dev);
++	if (BOND_MODE(bond) == BOND_MODE_MLO)
++		return true;
++
++	return false;
++}
++
+ /*
+  * Returns NULL if the net_device does not belong to any of the bond's slaves
+  *
+@@ -644,6 +676,12 @@ static inline __be32 bond_confirm_addr(s
+ 	return addr;
+ }
+ 
++static inline struct mlo_bond_info *bond_get_mlo_priv(struct net_device *bond_dev)
++{
++	struct bonding *bond = netdev_priv(bond_dev);
++	return &bond->mlo_info;
++}
++
+ struct bond_net {
+ 	struct
net *net; /* Associated network namespace */ + struct list_head dev_list; +@@ -657,15 +695,18 @@ int bond_rcv_validate(const struct sk_bu + netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); + int bond_get_id(struct net_device *bond_dev); /* QCA NSS ECM bonding support */ + int bond_create(struct net *net, const char *name); ++extern struct net_device *bond_create_mlo(struct net *net, const char *name, struct mlo_bond_info *mlo_info); ++extern void *bond_get_mlo_ctx(struct net_device *bond_dev); ++extern bool bond_destroy_mlo(struct net_device *bond_dev); + int bond_create_sysfs(struct bond_net *net); + void bond_destroy_sysfs(struct bond_net *net); + void bond_prepare_sysfs_group(struct bonding *bond); + int bond_sysfs_slave_add(struct slave *slave); + void bond_sysfs_slave_del(struct slave *slave); + void bond_xdp_set_features(struct net_device *bond_dev); +-int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, ++extern int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, + struct netlink_ext_ack *extack); +-int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); ++extern int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); + u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb); + int bond_set_carrier(struct bonding *bond); + void bond_select_active_slave(struct bonding *bond); +--- a/include/uapi/linux/if_bonding.h ++++ b/include/uapi/linux/if_bonding.h +@@ -71,6 +71,7 @@ + #define BOND_MODE_8023AD 4 + #define BOND_MODE_TLB 5 + #define BOND_MODE_ALB 6 /* TLB + RLB (receive load balancing) */ ++#define BOND_MODE_MLO 7 /* MLO (Multi link) mode for Wi-Fi 7 AP links */ + + /* each slave's link has 4 states */ + #define BOND_LINK_UP 0 /* link is up and running */ diff --git a/target/linux/qualcommax/patches-6.6/2601-qca-add-nss-bridge-mgr-support.patch b/target/linux/qualcommax/patches-6.6/2601-qca-add-nss-bridge-mgr-support.patch new file mode 100644 index 00000000000000..1d24c0e8c3faa1 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2601-qca-add-nss-bridge-mgr-support.patch @@ -0,0 +1,94 @@ +From 3c17a0e1112be70071e98d5208da5b55dcec20a6 Mon Sep 17 00:00:00 2001 +From: Simon Casey +Date: Wed, 2 Feb 2022 19:37:29 +0100 +Subject: [PATCH] Update 607-qca-add-add-nss-bridge-mgr-support.patch for kernel 5.15 + +--- + include/linux/if_bridge.h | 4 ++++ + net/bridge/br_fdb.c | 25 +++++++++++++++++++++---- + 2 files changed, 25 insertions(+), 4 deletions(-) + +--- a/include/linux/if_bridge.h ++++ b/include/linux/if_bridge.h +@@ -254,4 +254,8 @@ typedef struct net_bridge_port *br_get_d + extern br_get_dst_hook_t __rcu *br_get_dst_hook; + /* QCA NSS ECM support - End */ + ++/* QCA NSS bridge-mgr support - Start */ ++extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br); ++/* QCA NSS bridge-mgr support - End */ ++ + #endif +--- a/net/bridge/br_fdb.c ++++ b/net/bridge/br_fdb.c +@@ -62,6 +62,15 @@ void br_fdb_update_unregister_notify(str + EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify); + /* QCA NSS ECM support - End */ + ++/* QCA NSS bridge-mgr support - Start */ ++struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br) ++{ ++ dev_hold(br->dev); ++ return br->dev; ++} ++EXPORT_SYMBOL_GPL(br_fdb_bridge_dev_get_and_hold); ++/* QCA NSS bridge-mgr support - End */ ++ + int __init br_fdb_init(void) + { + br_fdb_cache = kmem_cache_create("bridge_fdb_cache", +@@ -575,7 +584,7 @@ void br_fdb_cleanup(struct 
work_struct * + unsigned long delay = hold_time(br); + unsigned long work_delay = delay; + unsigned long now = jiffies; +- u8 mac_addr[6]; /* QCA NSS ECM support */ ++ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */ + + /* this part is tricky, in order to avoid blocking learning and + * consequently forwarding, we rely on rcu to delete objects with +@@ -603,12 +612,13 @@ void br_fdb_cleanup(struct work_struct * + } else { + spin_lock_bh(&br->hash_lock); + if (!hlist_unhashed(&f->fdb_node)) { +- ether_addr_copy(mac_addr, f->key.addr.addr); ++ memset(&fdb_event, 0, sizeof(fdb_event)); ++ ether_addr_copy(fdb_event.addr, f->key.addr.addr); + fdb_delete(br, f, true); + /* QCA NSS ECM support - Start */ + atomic_notifier_call_chain( + &br_fdb_update_notifier_list, 0, +- (void *)mac_addr); ++ (void *)&fdb_event); + /* QCA NSS ECM support - End */ + } + spin_unlock_bh(&br->hash_lock); +@@ -910,6 +920,7 @@ void br_fdb_update(struct net_bridge *br + const unsigned char *addr, u16 vid, unsigned long flags) + { + struct net_bridge_fdb_entry *fdb; ++ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */ + + /* some users want to always flood. */ + if (hold_time(br) == 0) +@@ -935,6 +946,12 @@ void br_fdb_update(struct net_bridge *br + if (unlikely(source != READ_ONCE(fdb->dst) && + !test_bit(BR_FDB_STICKY, &fdb->flags))) { + br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH); ++ /* QCA NSS bridge-mgr support - Start */ ++ ether_addr_copy(fdb_event.addr, addr); ++ fdb_event.br = br; ++ fdb_event.orig_dev = READ_ONCE(fdb->dst->dev); ++ fdb_event.dev = source->dev; ++ /* QCA NSS bridge-mgr support - End */ + WRITE_ONCE(fdb->dst, source); + fdb_modified = true; + /* Take over HW learned entry */ +@@ -951,7 +968,7 @@ void br_fdb_update(struct net_bridge *br + /* QCA NSS ECM support - Start */ + atomic_notifier_call_chain( + &br_fdb_update_notifier_list, +- 0, (void *)addr); ++ 0, (void *)&fdb_event); + /* QCA NSS ECM support - End */ + } + diff --git a/target/linux/qualcommax/patches-6.6/2602-qca-nss-drv-add-qdisc-support.patch b/target/linux/qualcommax/patches-6.6/2602-qca-nss-drv-add-qdisc-support.patch new file mode 100644 index 00000000000000..b1eb55ca05f031 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2602-qca-nss-drv-add-qdisc-support.patch @@ -0,0 +1,44 @@ +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -764,6 +764,7 @@ typedef unsigned char *sk_buff_data_t; + * @offload_fwd_mark: Packet was L2-forwarded in hardware + * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware + * @tc_skip_classify: do not classify packet. 
set by IFB device ++ * @tc_skip_classify_offload: do not classify packet set by offload IFB device + * @tc_at_ingress: used within tc_classify to distinguish in/egress + * @redirected: packet was redirected by packet classifier + * @from_ingress: packet was redirected from the ingress path +@@ -944,6 +945,8 @@ struct sk_buff { + #ifdef CONFIG_NET_XGRESS + __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ + __u8 tc_skip_classify:1; ++ __u8 tc_skip_classify_offload:1; ++ __u16 tc_verd_qca_nss; /* QCA NSS Qdisc Support */ + #endif + __u8 remcsum_offload:1; + __u8 csum_complete_sw:1; +--- a/include/uapi/linux/pkt_cls.h ++++ b/include/uapi/linux/pkt_cls.h +@@ -139,6 +139,7 @@ enum tca_id { + TCA_ID_MPLS, + TCA_ID_CT, + TCA_ID_GATE, ++ TCA_ID_MIRRED_NSS, /* QCA NSS Qdisc IGS Support */ + /* other actions go here */ + __TCA_ID_MAX = 255 + }; +@@ -817,4 +818,14 @@ enum { + TCF_EM_OPND_LT + }; + ++/* QCA NSS Qdisc Support - Start */ ++#define _TC_MAKE32(x) ((x)) ++#define _TC_MAKEMASK1(n) (_TC_MAKE32(1) << _TC_MAKE32(n)) ++ ++#define TC_NCLS _TC_MAKEMASK1(8) ++#define TC_NCLS_NSS _TC_MAKEMASK1(12) ++#define SET_TC_NCLS_NSS(v) ( TC_NCLS_NSS | ((v) & ~TC_NCLS_NSS)) ++#define CLR_TC_NCLS_NSS(v) ( (v) & ~TC_NCLS_NSS) ++/* QCA NSS Qdisc Support - End */ ++ + #endif diff --git a/target/linux/qualcommax/patches-6.6/2603-1-qca-nss-clients-add-qdisc-support.patch b/target/linux/qualcommax/patches-6.6/2603-1-qca-nss-clients-add-qdisc-support.patch new file mode 100644 index 00000000000000..939acfde653ba6 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2603-1-qca-nss-clients-add-qdisc-support.patch @@ -0,0 +1,441 @@ +--- a/include/linux/timer.h ++++ b/include/linux/timer.h +@@ -17,6 +17,7 @@ struct timer_list { + unsigned long expires; + void (*function)(struct timer_list *); + u32 flags; ++ unsigned long cust_data; + + #ifdef CONFIG_LOCKDEP + struct lockdep_map lockdep_map; +--- a/drivers/net/ifb.c ++++ b/drivers/net/ifb.c +@@ -151,6 +151,31 @@ resched: + + } + ++void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats) ++{ ++ struct ifb_dev_private *dp; ++ struct ifb_q_private *txp; ++ ++ if (!dev || !offload_stats) { ++ return; ++ } ++ ++ if (!(dev->priv_flags_ext & IFF_EXT_IFB)) { ++ return; ++ } ++ ++ dp = netdev_priv(dev); ++ txp = dp->tx_private; ++ ++ u64_stats_update_begin(&txp->rx_stats.sync); ++ txp->rx_stats.packets += u64_stats_read(&offload_stats->rx_packets); ++ txp->rx_stats.bytes += u64_stats_read(&offload_stats->rx_bytes); ++ txp->tx_stats.packets += u64_stats_read(&offload_stats->tx_packets); ++ txp->tx_stats.bytes += u64_stats_read(&offload_stats->tx_bytes); ++ u64_stats_update_end(&txp->rx_stats.sync); ++} ++EXPORT_SYMBOL(ifb_update_offload_stats); ++ + static void ifb_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) + { +@@ -326,6 +351,7 @@ static void ifb_setup(struct net_device + dev->flags |= IFF_NOARP; + dev->flags &= ~IFF_MULTICAST; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; ++ dev->priv_flags_ext |= IFF_EXT_IFB; /* Mark the device as an IFB device. 
*/
+ netif_keep_dst(dev);
+ eth_hw_addr_random(dev);
+ dev->needs_free_netdev = true;
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -4686,6 +4686,15 @@ void dev_uc_flush(struct net_device *dev
+ void dev_uc_init(struct net_device *dev);
+ 
+ /**
++ * ifb_update_offload_stats - Update the IFB interface stats
++ * @dev: IFB device to update the stats
++ * @offload_stats: per CPU stats structure
++ *
++ * Allows update of IFB stats when flows are offloaded to an accelerator.
++ **/
++void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats);
++
++/**
+ * __dev_uc_sync - Synchonize device's unicast list
+ * @dev: device to sync
+ * @sync: function to call if address should be added
+@@ -5212,6 +5221,11 @@ static inline bool netif_is_failover_sla
+ return dev->priv_flags & IFF_FAILOVER_SLAVE;
+ }
+ 
++static inline bool netif_is_ifb_dev(const struct net_device *dev)
++{
++ return dev->priv_flags_ext & IFF_EXT_IFB;
++}
++
+ /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
+ static inline void netif_keep_dst(struct net_device *dev)
+ {
+--- a/include/uapi/linux/pkt_sched.h
++++ b/include/uapi/linux/pkt_sched.h
+@@ -1306,4 +1306,248 @@ enum {
+ 
+ #define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+ 
++/* QCA NSS Clients Support - Start */
++enum {
++ TCA_NSS_ACCEL_MODE_NSS_FW,
++ TCA_NSS_ACCEL_MODE_PPE,
++ TCA_NSS_ACCEL_MODE_MAX
++};
++
++/* NSSFIFO section */
++
++enum {
++ TCA_NSSFIFO_UNSPEC,
++ TCA_NSSFIFO_PARMS,
++ __TCA_NSSFIFO_MAX
++};
++
++#define TCA_NSSFIFO_MAX (__TCA_NSSFIFO_MAX - 1)
++
++struct tc_nssfifo_qopt {
++ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
++ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
++};
++
++/* NSSWRED section */
++
++enum {
++ TCA_NSSWRED_UNSPEC,
++ TCA_NSSWRED_PARMS,
++ __TCA_NSSWRED_MAX
++};
++
++#define TCA_NSSWRED_MAX (__TCA_NSSWRED_MAX - 1)
++#define NSSWRED_CLASS_MAX 6
++struct tc_red_alg_parameter {
++ __u32 min; /* qlen_avg < min: pkts are all enqueued */
++ __u32 max; /* qlen_avg > max: pkts are all dropped */
++ __u32 probability;/* Drop probability at qlen_avg = max */
++ __u32 exp_weight_factor;/* exp_weight_factor for calculating qlen_avg */
++};
++
++struct tc_nsswred_traffic_class {
++ __u32 limit; /* Queue length */
++ __u32 weight_mode_value; /* Weight mode value */
++ struct tc_red_alg_parameter rap;/* Parameters for RED alg */
++};
++
++/*
++ * Weight modes for WRED
++ */
++enum tc_nsswred_weight_modes {
++ TC_NSSWRED_WEIGHT_MODE_DSCP = 0,/* Weight mode is DSCP */
++ TC_NSSWRED_WEIGHT_MODES, /* Must be last */
++};
++
++struct tc_nsswred_qopt {
++ __u32 limit; /* Queue length */
++ enum tc_nsswred_weight_modes weight_mode;
++ /* Weight mode */
++ __u32 traffic_classes; /* How many traffic classes: DPs */
++ __u32 def_traffic_class; /* Default traffic if no match: def_DP */
++ __u32 traffic_id; /* The traffic id to be configured: DP */
++ __u32 weight_mode_value; /* Weight mode value */
++ struct tc_red_alg_parameter rap;/* RED algorithm parameters */
++ struct tc_nsswred_traffic_class tntc[NSSWRED_CLASS_MAX];
++ /* Traffic settings for dumping */
++ __u8 ecn; /* Setting ECN bit or dropping */
++ __u8 set_default; /* Sets qdisc to be the default for enqueue */
++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
++};
++
++/* NSSCODEL section */
++
++enum {
++ TCA_NSSCODEL_UNSPEC,
++ TCA_NSSCODEL_PARMS,
++ __TCA_NSSCODEL_MAX
++};
++
++#define TCA_NSSCODEL_MAX (__TCA_NSSCODEL_MAX - 1)
++
++struct tc_nsscodel_qopt {
++ __u32 target; /* Acceptable queueing delay */
++ __u32 limit; /* Max number of packets that can be held in the queue */
++ __u32 interval; /* Monitoring interval */
++ __u32 flows; /* Number of flow buckets */
++ __u32 quantum; /* Weight (in bytes) used for DRR of flow buckets */
++ __u8 ecn; /* 0 - disable ECN, 1 - enable ECN */
++ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
++};
++
++struct tc_nsscodel_xstats {
++ __u32 peak_queue_delay; /* Peak delay experienced by a dequeued packet */
++ __u32 peak_drop_delay; /* Peak delay experienced by a dropped packet */
++};
++
++/* NSSFQ_CODEL section */
++
++struct tc_nssfq_codel_xstats {
++ __u32 new_flow_count; /* Total number of new flows seen */
++ __u32 new_flows_len; /* Current number of new flows */
++ __u32 old_flows_len; /* Current number of old flows */
++ __u32 ecn_mark; /* Number of packets marked with ECN */
++ __u32 drop_overlimit; /* Number of packets dropped due to overlimit */
++ __u32 maxpacket; /* The largest packet seen so far in the queue */
++};
++
++/* NSSTBL section */
++
++enum {
++ TCA_NSSTBL_UNSPEC,
++ TCA_NSSTBL_PARMS,
++ __TCA_NSSTBL_MAX
++};
++
++#define TCA_NSSTBL_MAX (__TCA_NSSTBL_MAX - 1)
++
++struct tc_nsstbl_qopt {
++ __u32 burst; /* Maximum burst size */
++ __u32 rate; /* Limiting rate of TBF */
++ __u32 peakrate; /* Maximum rate at which TBF is allowed to send */
++ __u32 mtu; /* Max size of packet, or minimum burst size */
++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
++};
++
++/* NSSPRIO section */
++
++#define TCA_NSSPRIO_MAX_BANDS 256
++
++enum {
++ TCA_NSSPRIO_UNSPEC,
++ TCA_NSSPRIO_PARMS,
++ __TCA_NSSPRIO_MAX
++};
++
++#define TCA_NSSPRIO_MAX (__TCA_NSSPRIO_MAX - 1)
++
++struct tc_nssprio_qopt {
++ __u32 bands; /* Number of bands */
++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
++};
++
++/* NSSBF section */
++
++enum {
++ TCA_NSSBF_UNSPEC,
++ TCA_NSSBF_CLASS_PARMS,
++ TCA_NSSBF_QDISC_PARMS,
++ __TCA_NSSBF_MAX
++};
++
++#define TCA_NSSBF_MAX (__TCA_NSSBF_MAX - 1)
++
++struct tc_nssbf_class_qopt {
++ __u32 burst; /* Maximum burst size */
++ __u32 rate; /* Allowed bandwidth for this class */
++ __u32 mtu; /* MTU of the associated interface */
++ __u32 quantum; /* Quantum allocation for DRR */
++};
++
++struct tc_nssbf_qopt {
++ __u16 defcls; /* Default class value */
++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
++};
++
++/* NSSWRR section */
++
++enum {
++ TCA_NSSWRR_UNSPEC,
++ TCA_NSSWRR_CLASS_PARMS,
++ TCA_NSSWRR_QDISC_PARMS,
++ __TCA_NSSWRR_MAX
++};
++
++#define TCA_NSSWRR_MAX (__TCA_NSSWRR_MAX - 1)
++
++struct tc_nsswrr_class_qopt {
++ __u32 quantum; /* Weight associated to this class */
++};
++
++struct tc_nsswrr_qopt {
++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
++};
++
++/* NSSWFQ section */
++
++enum {
++ TCA_NSSWFQ_UNSPEC,
++ TCA_NSSWFQ_CLASS_PARMS,
++ TCA_NSSWFQ_QDISC_PARMS,
++ __TCA_NSSWFQ_MAX
++};
++
++#define TCA_NSSWFQ_MAX (__TCA_NSSWFQ_MAX - 1)
++
++struct tc_nsswfq_class_qopt {
++ __u32 quantum; /* Weight associated to this class */
++};
++
++struct tc_nsswfq_qopt {
++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
++};
++
++/* NSSHTB section */
++
++enum {
++ TCA_NSSHTB_UNSPEC,
++ TCA_NSSHTB_CLASS_PARMS,
++ TCA_NSSHTB_QDISC_PARMS,
++ __TCA_NSSHTB_MAX
++};
++
++#define TCA_NSSHTB_MAX 
(__TCA_NSSHTB_MAX - 1) ++ ++struct tc_nsshtb_class_qopt { ++ __u32 burst; /* Allowed burst size */ ++ __u32 rate; /* Allowed bandwidth for this class */ ++ __u32 cburst; /* Maximum burst size */ ++ __u32 crate; /* Maximum bandwidth for this class */ ++ __u32 quantum; /* Quantum allocation for DRR */ ++ __u32 priority; /* Priority value associated with this class */ ++ __u32 overhead; /* Overhead in bytes per packet */ ++}; ++ ++struct tc_nsshtb_qopt { ++ __u32 r2q; /* Rate to quantum ratio */ ++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */ ++}; ++ ++/* NSSBLACKHOLE section */ ++ ++enum { ++ TCA_NSSBLACKHOLE_UNSPEC, ++ TCA_NSSBLACKHOLE_PARMS, ++ __TCA_NSSBLACKHOLE_MAX ++}; ++ ++#define TCA_NSSBLACKHOLE_MAX (__TCA_NSSBLACKHOLE_MAX - 1) ++ ++struct tc_nssblackhole_qopt { ++ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */ ++ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */ ++}; ++/* QCA NSS Clients Support - End */ + #endif +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -314,6 +314,7 @@ struct Qdisc *qdisc_lookup(struct net_de + out: + return q; + } ++EXPORT_SYMBOL(qdisc_lookup); + + struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle) + { +@@ -2389,4 +2390,26 @@ static int __init pktsched_init(void) + return 0; + } + ++/* QCA NSS Qdisc Support - Start */ ++bool tcf_destroy(struct tcf_proto *tp, bool force) ++{ ++ tp->ops->destroy(tp, force, NULL); ++ module_put(tp->ops->owner); ++ kfree_rcu(tp, rcu); ++ ++ return true; ++} ++ ++void tcf_destroy_chain(struct tcf_proto __rcu **fl) ++{ ++ struct tcf_proto *tp; ++ ++ while ((tp = rtnl_dereference(*fl)) != NULL) { ++ RCU_INIT_POINTER(*fl, tp->next); ++ tcf_destroy(tp, true); ++ } ++} ++EXPORT_SYMBOL(tcf_destroy_chain); ++/* QCA NSS Qdisc Support - End */ ++ + subsys_initcall(pktsched_init); +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -1077,6 +1077,7 @@ void qdisc_destroy(struct Qdisc *qdisc) + + __qdisc_destroy(qdisc); + } ++EXPORT_SYMBOL(qdisc_destroy); + + void qdisc_put(struct Qdisc *qdisc) + { +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -94,6 +94,7 @@ struct Qdisc { + #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ + #define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */ + #define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */ ++#define TCQ_F_NSS 0x1000 /* NSS qdisc flag. */ + u32 limit; + const struct Qdisc_ops *ops; + struct qdisc_size_table __rcu *stab; +@@ -751,6 +752,40 @@ static inline bool skb_skip_tc_classify( + return false; + } + ++/* ++ * Set skb classify bit field. ++ */ ++static inline void skb_set_tc_classify_offload(struct sk_buff *skb) ++{ ++#ifdef CONFIG_NET_CLS_ACT ++ skb->tc_skip_classify_offload = 1; ++#endif ++} ++ ++/* ++ * Clear skb classify bit field. ++ */ ++static inline void skb_clear_tc_classify_offload(struct sk_buff *skb) ++{ ++#ifdef CONFIG_NET_CLS_ACT ++ skb->tc_skip_classify_offload = 0; ++#endif ++} ++ ++/* ++ * Skip skb processing if sent from ifb dev. ++ */ ++static inline bool skb_skip_tc_classify_offload(struct sk_buff *skb) ++{ ++#ifdef CONFIG_NET_CLS_ACT ++ if (skb->tc_skip_classify_offload) { ++ skb_clear_tc_classify_offload(skb); ++ return true; ++ } ++#endif ++ return false; ++} ++ + /* Reset all TX qdiscs greater than index of a device. 
*/ + static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) + { +@@ -1323,4 +1358,9 @@ static inline void qdisc_synchronize(con + msleep(1); + } + ++/* QCA NSS Qdisc Support - Start */ ++void qdisc_destroy(struct Qdisc *qdisc); ++void tcf_destroy_chain(struct tcf_proto __rcu **fl); ++/* QCA NSS Qdisc Support - End */ ++ + #endif diff --git a/target/linux/qualcommax/patches-6.6/2603-2-qca-nss-clients-add-l2tp-support.patch b/target/linux/qualcommax/patches-6.6/2603-2-qca-nss-clients-add-l2tp-support.patch new file mode 100644 index 00000000000000..7fa9184df25155 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2603-2-qca-nss-clients-add-l2tp-support.patch @@ -0,0 +1,46 @@ +--- a/net/l2tp/l2tp_core.c ++++ b/net/l2tp/l2tp_core.c +@@ -398,6 +398,31 @@ err_tlock: + } + EXPORT_SYMBOL_GPL(l2tp_session_register); + ++void l2tp_stats_update(struct l2tp_tunnel *tunnel, ++ struct l2tp_session *session, ++ struct l2tp_stats *stats) ++{ ++ atomic_long_add(atomic_long_read(&stats->rx_packets), ++ &tunnel->stats.rx_packets); ++ atomic_long_add(atomic_long_read(&stats->rx_bytes), ++ &tunnel->stats.rx_bytes); ++ atomic_long_add(atomic_long_read(&stats->tx_packets), ++ &tunnel->stats.tx_packets); ++ atomic_long_add(atomic_long_read(&stats->tx_bytes), ++ &tunnel->stats.tx_bytes); ++ ++ atomic_long_add(atomic_long_read(&stats->rx_packets), ++ &session->stats.rx_packets); ++ atomic_long_add(atomic_long_read(&stats->rx_bytes), ++ &session->stats.rx_bytes); ++ atomic_long_add(atomic_long_read(&stats->tx_packets), ++ &session->stats.tx_packets); ++ atomic_long_add(atomic_long_read(&stats->tx_bytes), ++ &session->stats.tx_bytes); ++} ++EXPORT_SYMBOL_GPL(l2tp_stats_update); ++ ++ + /***************************************************************************** + * Receive data handling + *****************************************************************************/ +--- a/net/l2tp/l2tp_core.h ++++ b/net/l2tp/l2tp_core.h +@@ -232,6 +232,9 @@ struct l2tp_session *l2tp_session_get_nt + struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, + const char *ifname); + ++void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session, ++ struct l2tp_stats *stats); ++ + /* Tunnel and session lifetime management. + * Creation of a new instance is a two-step process: create, then register. + * Destruction is triggered using the *_delete functions, and completes asynchronously. 
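Patch 2603-2 above exports l2tp_stats_update() so that an out-of-tree acceleration driver can fold the counters of offloaded flows back into the kernel's per-tunnel and per-session totals. A minimal sketch of such a caller follows; my_sync_l2tp_counters() and its arguments are hypothetical illustrations, and only l2tp_stats_update() plus the atomic_long_t fields of struct l2tp_stats come from the patched tree:

/*
 * Hypothetical consumer of the l2tp_stats_update() export above.
 * Everything except the l2tp_* symbols is an illustrative assumption.
 */
#include <linux/types.h>
#include <linux/atomic.h>
#include "l2tp_core.h"  /* private header; offload drivers carry a copy */

static void my_sync_l2tp_counters(struct l2tp_tunnel *tunnel,
                                  struct l2tp_session *session,
                                  u64 rx_pkts, u64 rx_bytes,
                                  u64 tx_pkts, u64 tx_bytes)
{
        struct l2tp_stats delta = {};

        /* struct l2tp_stats counts in atomic_long_t fields */
        atomic_long_set(&delta.rx_packets, rx_pkts);
        atomic_long_set(&delta.rx_bytes, rx_bytes);
        atomic_long_set(&delta.tx_packets, tx_pkts);
        atomic_long_set(&delta.tx_bytes, tx_bytes);

        /* Adds each delta to both the tunnel and the session counters */
        l2tp_stats_update(tunnel, session, &delta);
}

Passing a zeroed struct l2tp_stats seeded only with the deltas matches the add-only semantics of l2tp_stats_update(), which sums each field into both the tunnel and the session totals.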
diff --git a/target/linux/qualcommax/patches-6.6/2603-3-qca-nss-clients-add-PPTP-support.patch b/target/linux/qualcommax/patches-6.6/2603-3-qca-nss-clients-add-PPTP-support.patch
new file mode 100644
index 00000000000000..06072e2a70aa3b
--- /dev/null
+++ b/target/linux/qualcommax/patches-6.6/2603-3-qca-nss-clients-add-PPTP-support.patch
@@ -0,0 +1,469 @@
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -36,6 +36,7 @@ struct pptp_opt {
+ u32 ack_sent, ack_recv;
+ u32 seq_sent, seq_recv;
+ int ppp_flags;
++ bool pptp_offload_mode;
+ };
+ #include <net/sock.h>
+ 
+@@ -100,8 +101,40 @@ struct pppoe_channel_ops {
+ int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *);
+ };
+ 
++/* PPTP client callback */
++typedef int (*pptp_gre_seq_offload_callback_t)(struct sk_buff *skb,
++ struct net_device *pptp_dev);
++
+ /* Return PPPoE channel specific addressing information */
+ extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
+ struct pppoe_opt *addressing);
+ 
++/* Lookup PPTP session info and return PPTP session using sip, dip and local call id */
++extern int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
++ __be32 daddr, __be32 saddr);
++
++/* Lookup PPTP session info and return PPTP session using dip and peer call id */
++extern int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
++ __be32 peer_ip_addr);
++
++/* Return PPTP session information given the channel */
++extern void pptp_channel_addressing_get(struct pptp_opt *opt,
++ struct ppp_channel *chan);
++
++/* Enable the PPTP session offload flag */
++extern int pptp_session_enable_offload_mode(__be16 peer_call_id,
++ __be32 peer_ip_addr);
++
++/* Disable the PPTP session offload flag */
++extern int pptp_session_disable_offload_mode(__be16 peer_call_id,
++ __be32 peer_ip_addr);
++
++/* Register the PPTP GRE packets sequence number offload callback */
++extern int
++pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
++ pptp_client_cb);
++
++/* Unregister the PPTP GRE packets sequence number offload callback */
++extern void pptp_unregister_gre_seq_offload_callback(void);
++
+ #endif /* !(__LINUX_IF_PPPOX_H) */
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -2973,6 +2973,20 @@ char *ppp_dev_name(struct ppp_channel *c
+ return name;
+ }
+ 
++/* Return the PPP net device index */
++int ppp_dev_index(struct ppp_channel *chan)
++{
++ struct channel *pch = chan->ppp;
++ int ifindex = 0;
++
++ if (pch) {
++ read_lock_bh(&pch->upl);
++ if (pch->ppp && pch->ppp->dev)
++ ifindex = pch->ppp->dev->ifindex;
++ read_unlock_bh(&pch->upl);
++ }
++ return ifindex;
++}
+ 
+ /*
+  * Disconnect a channel from the generic layer.
+@@ -3681,6 +3695,28 @@ void ppp_update_stats(struct net_device
+ ppp_recv_unlock(ppp);
+ }
+ 
++/* Returns true if Compression is enabled on PPP device
++ */
++bool ppp_is_cp_enabled(struct net_device *dev)
++{
++ struct ppp *ppp;
++ bool flag = false;
++
++ if (!dev)
++ return false;
++
++ if (dev->type != ARPHRD_PPP)
++ return false;
++
++ ppp = netdev_priv(dev);
++ ppp_lock(ppp);
++ flag = !!(ppp->xstate & SC_COMP_RUN) || !!(ppp->rstate & SC_DECOMP_RUN);
++ ppp_unlock(ppp);
++
++ return flag;
++}
++EXPORT_SYMBOL(ppp_is_cp_enabled);
++
+ /* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if
+ * the device is not PPP. 
+ */ +@@ -3872,6 +3908,7 @@ EXPORT_SYMBOL(ppp_unregister_channel); + EXPORT_SYMBOL(ppp_channel_index); + EXPORT_SYMBOL(ppp_unit_number); + EXPORT_SYMBOL(ppp_dev_name); ++EXPORT_SYMBOL(ppp_dev_index); + EXPORT_SYMBOL(ppp_input); + EXPORT_SYMBOL(ppp_input_error); + EXPORT_SYMBOL(ppp_output_wakeup); +--- a/include/linux/ppp_channel.h ++++ b/include/linux/ppp_channel.h +@@ -84,6 +84,9 @@ extern void ppp_unregister_channel(struc + /* Get the channel number for a channel */ + extern int ppp_channel_index(struct ppp_channel *); + ++/* Get the device index associated with a channel, or 0, if none */ ++extern int ppp_dev_index(struct ppp_channel *); ++ + /* Get the unit number associated with a channel, or -1 if none */ + extern int ppp_unit_number(struct ppp_channel *); + +@@ -116,6 +119,7 @@ extern int ppp_hold_channels(struct net_ + /* Test if ppp xmit lock is locked */ + extern bool ppp_is_xmit_locked(struct net_device *dev); + ++bool ppp_is_cp_enabled(struct net_device *dev); + /* Test if the ppp device is a multi-link ppp device */ + extern int ppp_is_multilink(struct net_device *dev); + +--- a/drivers/net/ppp/pptp.c ++++ b/drivers/net/ppp/pptp.c +@@ -50,6 +50,8 @@ static struct proto pptp_sk_proto __read + static const struct ppp_channel_ops pptp_chan_ops; + static const struct proto_ops pptp_ops; + ++static pptp_gre_seq_offload_callback_t __rcu pptp_gre_offload_xmit_cb; ++ + static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr) + { + struct pppox_sock *sock; +@@ -91,6 +93,79 @@ static int lookup_chan_dst(u16 call_id, + return i < MAX_CALLID; + } + ++/* Search a pptp session based on local call id, local and remote ip address */ ++static int lookup_session_src(struct pptp_opt *opt, u16 call_id, __be32 daddr, __be32 saddr) ++{ ++ struct pppox_sock *sock; ++ int i = 1; ++ ++ rcu_read_lock(); ++ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) { ++ sock = rcu_dereference(callid_sock[i]); ++ if (!sock) ++ continue; ++ ++ if (sock->proto.pptp.src_addr.call_id == call_id && ++ sock->proto.pptp.dst_addr.sin_addr.s_addr == daddr && ++ sock->proto.pptp.src_addr.sin_addr.s_addr == saddr) { ++ sock_hold(sk_pppox(sock)); ++ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt)); ++ sock_put(sk_pppox(sock)); ++ rcu_read_unlock(); ++ return 0; ++ } ++ } ++ rcu_read_unlock(); ++ return -EINVAL; ++} ++ ++/* Search a pptp session based on peer call id and peer ip address */ ++static int lookup_session_dst(struct pptp_opt *opt, u16 call_id, __be32 d_addr) ++{ ++ struct pppox_sock *sock; ++ int i = 1; ++ ++ rcu_read_lock(); ++ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) { ++ sock = rcu_dereference(callid_sock[i]); ++ if (!sock) ++ continue; ++ ++ if (sock->proto.pptp.dst_addr.call_id == call_id && ++ sock->proto.pptp.dst_addr.sin_addr.s_addr == d_addr) { ++ sock_hold(sk_pppox(sock)); ++ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt)); ++ sock_put(sk_pppox(sock)); ++ rcu_read_unlock(); ++ return 0; ++ } ++ } ++ rcu_read_unlock(); ++ return -EINVAL; ++} ++ ++/* If offload mode set then this function sends all packets to ++ * offload module instead of network stack ++ */ ++static int pptp_client_skb_xmit(struct sk_buff *skb, ++ struct net_device *pptp_dev) ++{ ++ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f; ++ int ret; ++ ++ rcu_read_lock(); ++ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb); ++ ++ if (!pptp_gre_offload_cb_f) { ++ rcu_read_unlock(); ++ return -1; ++ } ++ ++ ret = pptp_gre_offload_cb_f(skb, pptp_dev); ++ rcu_read_unlock(); ++ return ret; 
++} ++ + static int add_chan(struct pppox_sock *sock, + struct pptp_addr *sa) + { +@@ -163,8 +238,11 @@ static int pptp_xmit(struct ppp_channel + + struct rtable *rt; + struct net_device *tdev; ++ struct net_device *pptp_dev; + struct iphdr *iph; + int max_headroom; ++ int pptp_ifindex; ++ int ret; + + if (sk_pppox(po)->sk_state & PPPOX_DEAD) + goto tx_error; +@@ -258,7 +336,32 @@ static int pptp_xmit(struct ppp_channel + ip_select_ident(net, skb, NULL); + ip_send_check(iph); + +- ip_local_out(net, skb->sk, skb); ++ pptp_ifindex = ppp_dev_index(chan); ++ ++ /* set incoming interface as the ppp interface */ ++ if (skb->skb_iif) ++ skb->skb_iif = pptp_ifindex; ++ ++ /* If the PPTP GRE seq number offload module is not enabled yet ++ * then sends all PPTP GRE packets through linux network stack ++ */ ++ if (!opt->pptp_offload_mode) { ++ ip_local_out(net, skb->sk, skb); ++ return 1; ++ } ++ ++ pptp_dev = dev_get_by_index(&init_net, pptp_ifindex); ++ if (!pptp_dev) ++ goto tx_error; ++ ++ /* If PPTP offload module is enabled then forward all PPTP GRE ++ * packets to PPTP GRE offload module ++ */ ++ ret = pptp_client_skb_xmit(skb, pptp_dev); ++ dev_put(pptp_dev); ++ if (ret < 0) ++ goto tx_error; ++ + return 1; + + tx_error: +@@ -314,6 +417,13 @@ static int pptp_rcv_core(struct sock *sk + goto drop; + + payload = skb->data + headersize; ++ ++ /* If offload is enabled, we expect the offload module ++ * to handle PPTP GRE sequence number checks ++ */ ++ if (opt->pptp_offload_mode) ++ goto allow_packet; ++ + /* check for expected sequence number */ + if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) { + if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) && +@@ -371,6 +481,7 @@ static int pptp_rcv(struct sk_buff *skb) + if (po) { + skb_dst_drop(skb); + nf_reset_ct(skb); ++ skb->skb_iif = ppp_dev_index(&po->chan); + return sk_receive_skb(sk_pppox(po), skb, 0); + } + drop: +@@ -473,7 +584,7 @@ static int pptp_connect(struct socket *s + + opt->dst_addr = sp->sa_addr.pptp; + sk->sk_state |= PPPOX_CONNECTED; +- ++ opt->pptp_offload_mode = false; + end: + release_sock(sk); + return error; +@@ -603,9 +714,169 @@ static int pptp_ppp_ioctl(struct ppp_cha + return err; + } + ++/* pptp_channel_addressing_get() ++ * Return PPTP channel specific addressing information. ++ */ ++void pptp_channel_addressing_get(struct pptp_opt *opt, struct ppp_channel *chan) ++{ ++ struct sock *sk; ++ struct pppox_sock *po; ++ ++ if (!opt) ++ return; ++ ++ sk = (struct sock *)chan->private; ++ if (!sk) ++ return; ++ ++ sock_hold(sk); ++ ++ /* This is very unlikely, but check the socket is connected state */ ++ if (unlikely(sock_flag(sk, SOCK_DEAD) || ++ !(sk->sk_state & PPPOX_CONNECTED))) { ++ sock_put(sk); ++ return; ++ } ++ ++ po = pppox_sk(sk); ++ memcpy(opt, &po->proto.pptp, sizeof(struct pptp_opt)); ++ sock_put(sk); ++} ++EXPORT_SYMBOL(pptp_channel_addressing_get); ++ ++/* pptp_session_find() ++ * Search and return a PPTP session info based on peer callid and IP ++ * address. The function accepts the parameters in network byte order. ++ */ ++int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id, ++ __be32 peer_ip_addr) ++{ ++ if (!opt) ++ return -EINVAL; ++ ++ return lookup_session_dst(opt, ntohs(peer_call_id), peer_ip_addr); ++} ++EXPORT_SYMBOL(pptp_session_find); ++ ++/* pptp_session_find_by_src_callid() ++ * Search and return a PPTP session info based on src callid and IP ++ * address. The function accepts the parameters in network byte order. 
++ */ ++int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id, ++ __be32 daddr, __be32 saddr) ++{ ++ if (!opt) ++ return -EINVAL; ++ ++ return lookup_session_src(opt, ntohs(src_call_id), daddr, saddr); ++} ++EXPORT_SYMBOL(pptp_session_find_by_src_callid); ++ ++ /* Function to change the offload mode true/false for a PPTP session */ ++static int pptp_set_offload_mode(bool accel_mode, ++ __be16 peer_call_id, __be32 peer_ip_addr) ++{ ++ struct pppox_sock *sock; ++ int i = 1; ++ ++ rcu_read_lock(); ++ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) { ++ sock = rcu_dereference(callid_sock[i]); ++ if (!sock) ++ continue; ++ ++ if (sock->proto.pptp.dst_addr.call_id == peer_call_id && ++ sock->proto.pptp.dst_addr.sin_addr.s_addr == peer_ip_addr) { ++ sock_hold(sk_pppox(sock)); ++ sock->proto.pptp.pptp_offload_mode = accel_mode; ++ sock_put(sk_pppox(sock)); ++ rcu_read_unlock(); ++ return 0; ++ } ++ } ++ rcu_read_unlock(); ++ return -EINVAL; ++} ++ ++/* Enable the PPTP session offload flag */ ++int pptp_session_enable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr) ++{ ++ return pptp_set_offload_mode(true, peer_call_id, peer_ip_addr); ++} ++EXPORT_SYMBOL(pptp_session_enable_offload_mode); ++ ++/* Disable the PPTP session offload flag */ ++int pptp_session_disable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr) ++{ ++ return pptp_set_offload_mode(false, peer_call_id, peer_ip_addr); ++} ++EXPORT_SYMBOL(pptp_session_disable_offload_mode); ++ ++/* Register the offload callback function on behalf of the module which ++ * will own the sequence and acknowledgment number updates for all ++ * PPTP GRE packets. All PPTP GRE packets are then transmitted to this ++ * module after encapsulation in order to ensure the correct seq/ack ++ * fields are set in the packets before transmission. 
This is required
++ * when PPTP flows are offloaded to acceleration engines, in order to
++ * ensure consistency in sequence and ack numbers between PPTP control
++ * (PPP LCP) and data packets
++ */
++int pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
++ pptp_gre_offload_cb)
++{
++ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f;
++
++ rcu_read_lock();
++ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb);
++
++ if (pptp_gre_offload_cb_f) {
++ rcu_read_unlock();
++ return -1;
++ }
++
++ rcu_assign_pointer(pptp_gre_offload_xmit_cb, pptp_gre_offload_cb);
++ rcu_read_unlock();
++ return 0;
++}
++EXPORT_SYMBOL(pptp_register_gre_seq_offload_callback);
++
++/* Unregister the PPTP GRE packets sequence number offload callback */
++void pptp_unregister_gre_seq_offload_callback(void)
++{
++ rcu_assign_pointer(pptp_gre_offload_xmit_cb, NULL);
++}
++EXPORT_SYMBOL(pptp_unregister_gre_seq_offload_callback);
++
++/* pptp_hold_chan() */
++static void pptp_hold_chan(struct ppp_channel *chan)
++{
++ struct sock *sk = (struct sock *)chan->private;
++
++ sock_hold(sk);
++}
++
++/* pptp_release_chan() */
++static void pptp_release_chan(struct ppp_channel *chan)
++{
++ struct sock *sk = (struct sock *)chan->private;
++
++ sock_put(sk);
++}
++
++/* pptp_get_channel_protocol()
++ * Return the protocol type of the PPTP over PPP protocol
++ */
++static int pptp_get_channel_protocol(struct ppp_channel *chan)
++{
++ return PX_PROTO_PPTP;
++}
++
+ static const struct ppp_channel_ops pptp_chan_ops = {
+ .start_xmit = pptp_xmit,
+ .ioctl = pptp_ppp_ioctl,
++ .get_channel_protocol = pptp_get_channel_protocol,
++ .hold = pptp_hold_chan,
++ .release = pptp_release_chan,
+ };
+ 
+ static struct proto pptp_sk_proto __read_mostly = {
diff --git a/target/linux/qualcommax/patches-6.6/2603-4-qca-nss-clients-add-iptunnel-support.patch b/target/linux/qualcommax/patches-6.6/2603-4-qca-nss-clients-add-iptunnel-support.patch
new file mode 100644
index 00000000000000..c9fc33686480ec
--- /dev/null
+++ b/target/linux/qualcommax/patches-6.6/2603-4-qca-nss-clients-add-iptunnel-support.patch
@@ -0,0 +1,77 @@
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -36,6 +36,7 @@ struct __ip6_tnl_parm {
+ __u8 proto; /* tunnel protocol */
+ __u8 encap_limit; /* encapsulation limit for tunnel */
+ __u8 hop_limit; /* hop limit for tunnel */
++ __u8 draft03; /* FMR using draft03 of map-e - QCA NSS Clients Support */
+ bool collect_md;
+ __be32 flowinfo; /* traffic class and flowlabel for tunnel */
+ __u32 flags; /* tunnel flags */
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -558,4 +558,9 @@ static inline void ip_tunnel_info_opts_s
+ 
+ #endif /* CONFIG_INET */
+ 
++/* QCA NSS Clients Support - Start */
++void ipip6_update_offload_stats(struct net_device *dev, void *ptr);
++void ip6_update_offload_stats(struct net_device *dev, void *ptr);
++/* QCA NSS Clients Support - End */
++
+ #endif /* __NET_IP_TUNNELS_H */
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -2411,6 +2411,26 @@ nla_put_failure:
+ return -EMSGSIZE;
+ }
+ 
++/* QCA NSS Client Support - Start */
++/*
++ * Update offload stats
++ */
++void ip6_update_offload_stats(struct net_device *dev, void *ptr)
++{
++ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
++ const struct pcpu_sw_netstats *offload_stats =
++ (struct pcpu_sw_netstats *)ptr;
++
++ u64_stats_update_begin(&tstats->syncp);
++ u64_stats_add(&tstats->tx_packets, u64_stats_read(&offload_stats->tx_packets));
++ u64_stats_add(&tstats->tx_bytes, u64_stats_read(&offload_stats->tx_bytes));
++ u64_stats_add(&tstats->rx_packets, u64_stats_read(&offload_stats->rx_packets));
++ u64_stats_add(&tstats->rx_bytes, u64_stats_read(&offload_stats->rx_bytes));
++ u64_stats_update_end(&tstats->syncp);
++}
++EXPORT_SYMBOL(ip6_update_offload_stats);
++/* QCA NSS Client Support - End */
++
+ struct net *ip6_tnl_get_link_net(const struct net_device *dev)
+ {
+ struct ip6_tnl *tunnel = netdev_priv(dev);
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1733,6 +1733,23 @@ nla_put_failure:
+ return -EMSGSIZE;
+ }
+ 
++/* QCA NSS Clients Support - Start */
++void ipip6_update_offload_stats(struct net_device *dev, void *ptr)
++{
++ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
++ const struct pcpu_sw_netstats *offload_stats =
++ (struct pcpu_sw_netstats *)ptr;
++
++ u64_stats_update_begin(&tstats->syncp);
++ u64_stats_add(&tstats->tx_packets, u64_stats_read(&offload_stats->tx_packets));
++ u64_stats_add(&tstats->tx_bytes, u64_stats_read(&offload_stats->tx_bytes));
++ u64_stats_add(&tstats->rx_packets, u64_stats_read(&offload_stats->rx_packets));
++ u64_stats_add(&tstats->rx_bytes, u64_stats_read(&offload_stats->rx_bytes));
++ u64_stats_update_end(&tstats->syncp);
++}
++EXPORT_SYMBOL(ipip6_update_offload_stats);
++/* QCA NSS Clients Support - End */
++
+ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
+ [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
+ [IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },
diff --git a/target/linux/qualcommax/patches-6.6/2603-5-qca-nss-clients-add-vxlan-support.patch b/target/linux/qualcommax/patches-6.6/2603-5-qca-nss-clients-add-vxlan-support.patch
new file mode 100644
index 00000000000000..fc475f3f8d2eb0
--- /dev/null
+++ b/target/linux/qualcommax/patches-6.6/2603-5-qca-nss-clients-add-vxlan-support.patch
@@ -0,0 +1,107 @@
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -29,6 +29,20 @@
+ #include <net/vxlan.h>
+ #include <net/nexthop.h>
+ 
++ATOMIC_NOTIFIER_HEAD(vxlan_fdb_notifier_list);
++
++void vxlan_fdb_register_notify(struct notifier_block *nb)
++{
++ atomic_notifier_chain_register(&vxlan_fdb_notifier_list, nb);
++}
++EXPORT_SYMBOL(vxlan_fdb_register_notify);
++
++void vxlan_fdb_unregister_notify(struct notifier_block *nb)
++{
++ atomic_notifier_chain_unregister(&vxlan_fdb_notifier_list, nb);
++}
++EXPORT_SYMBOL(vxlan_fdb_unregister_notify);
++
+ #if IS_ENABLED(CONFIG_IPV6)
+ #include <net/ip6_route.h>
+ #include <net/ip6_checksum.h>
+@@ -260,6 +274,7 @@ static void __vxlan_fdb_notify(struct vx
+ {
+ struct net *net = dev_net(vxlan->dev);
+ struct sk_buff *skb;
++ struct vxlan_fdb_event vfe;
+ int err = -ENOBUFS;
+ 
+ skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
+@@ -275,7 +290,11 @@
+ }
+ 
+ rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+- return;
++ vfe.dev = vxlan->dev;
++ vfe.rdst = rd;
++ ether_addr_copy(vfe.eth_addr, fdb->eth_addr);
++ atomic_notifier_call_chain(&vxlan_fdb_notifier_list, type, (void *)&vfe);
++ return;
+ errout:
+ if (err < 0)
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+@@ -441,6 +460,18 @@ static struct vxlan_fdb *vxlan_find_mac(
+ return f;
+ }
+ 
++/* Find and update age of fdb entry corresponding to MAC. 
*/ ++void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni) ++{ ++ u32 hash_index; ++ ++ hash_index = fdb_head_index(vxlan, mac, vni); ++ spin_lock_bh(&vxlan->hash_lock[hash_index]); ++ vxlan_find_mac(vxlan, mac, vni); ++ spin_unlock_bh(&vxlan->hash_lock[hash_index]); ++} ++EXPORT_SYMBOL(vxlan_fdb_update_mac); ++ + /* caller should hold vxlan->hash_lock */ + static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, + union vxlan_addr *ip, __be16 port, +@@ -2581,6 +2612,9 @@ void vxlan_xmit_one(struct sk_buff *skb, + goto out_unlock; + } + ++ /* Reset the skb_iif to Tunnels interface index */ ++ skb->skb_iif = dev->ifindex; ++ + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); + ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); + err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), +@@ -2652,7 +2686,10 @@ void vxlan_xmit_one(struct sk_buff *skb, + if (err < 0) + goto tx_error; + +- udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, ++ /* Reset the skb_iif to Tunnels interface index */ ++ skb->skb_iif = dev->ifindex; ++ ++ udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev, + &local_ip.sin6.sin6_addr, + &dst->sin6.sin6_addr, tos, ttl, + label, src_port, dst_port, !udp_sum); +--- a/include/net/vxlan.h ++++ b/include/net/vxlan.h +@@ -352,6 +352,19 @@ struct vxlan_dev { + VXLAN_F_VNIFILTER | \ + VXLAN_F_LOCALBYPASS) + ++/* ++ * Application data for fdb notifier event ++ */ ++struct vxlan_fdb_event { ++ struct net_device *dev; ++ struct vxlan_rdst *rdst; ++ u8 eth_addr[ETH_ALEN]; ++}; ++ ++extern void vxlan_fdb_register_notify(struct notifier_block *nb); ++extern void vxlan_fdb_unregister_notify(struct notifier_block *nb); ++extern void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni); ++ + struct net_device *vxlan_dev_create(struct net *net, const char *name, + u8 name_assign_type, struct vxlan_config *conf); + diff --git a/target/linux/qualcommax/patches-6.6/2603-6-qca-nss-clients-add-l2tp-offloading-support.patch b/target/linux/qualcommax/patches-6.6/2603-6-qca-nss-clients-add-l2tp-offloading-support.patch new file mode 100644 index 00000000000000..4032eb3c227134 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2603-6-qca-nss-clients-add-l2tp-offloading-support.patch @@ -0,0 +1,368 @@ +--- a/include/linux/ppp_channel.h ++++ b/include/linux/ppp_channel.h +@@ -61,6 +61,51 @@ struct ppp_channel { + }; + + #ifdef __KERNEL__ ++/* Call this to obtain the underlying protocol of the PPP channel, ++ * e.g. 
PX_PROTO_OE ++ */ ++extern int ppp_channel_get_protocol(struct ppp_channel *); ++ ++/* Call this to hold a channel */ ++extern bool ppp_channel_hold(struct ppp_channel *); ++ ++/* Call this to release a hold you have upon a channel */ ++extern void ppp_channel_release(struct ppp_channel *); ++ ++/* Release hold on PPP channels */ ++extern void ppp_release_channels(struct ppp_channel *channels[], ++ unsigned int chan_sz); ++ ++/* Test if ppp xmit lock is locked */ ++extern bool ppp_is_xmit_locked(struct net_device *dev); ++ ++/* Call this get protocol version */ ++extern int ppp_channel_get_proto_version(struct ppp_channel *); ++ ++/* Get the device index associated with a channel, or 0, if none */ ++extern int ppp_dev_index(struct ppp_channel *); ++ ++/* Hold PPP channels for the PPP device */ ++extern int ppp_hold_channels(struct net_device *dev, ++ struct ppp_channel *channels[], ++ unsigned int chan_sz); ++extern int __ppp_hold_channels(struct net_device *dev, ++ struct ppp_channel *channels[], ++ unsigned int chan_sz); ++ ++/* Test if the ppp device is a multi-link ppp device */ ++extern int ppp_is_multilink(struct net_device *dev); ++extern int __ppp_is_multilink(struct net_device *dev); ++ ++/* Update statistics of the PPP net_device by incrementing related ++ * statistics field value with corresponding parameter ++ */ ++extern void ppp_update_stats(struct net_device *dev, unsigned long rx_packets, ++ unsigned long rx_bytes, unsigned long tx_packets, ++ unsigned long tx_bytes, unsigned long rx_errors, ++ unsigned long tx_errors, unsigned long rx_dropped, ++ unsigned long tx_dropped); ++ + /* Called by the channel when it can send some more data. */ + extern void ppp_output_wakeup(struct ppp_channel *); + +@@ -148,5 +193,17 @@ extern void ppp_update_stats(struct net_ + * that ppp_unregister_channel returns. 
+ */
+ 
++/* QCA NSS Clients Support - Start */
++/* PPP channel connection event types */
++#define PPP_CHANNEL_DISCONNECT 0
++#define PPP_CHANNEL_CONNECT 1
++
++/* Register the PPP channel connect notifier */
++extern void ppp_channel_connection_register_notify(struct notifier_block *nb);
++
++/* Unregister the PPP channel connect notifier */
++extern void ppp_channel_connection_unregister_notify(struct notifier_block *nb);
++/* QCA NSS Clients Support - End */
++
+ #endif /* __KERNEL__ */
+ #endif
+--- a/include/linux/if_pppol2tp.h
++++ b/include/linux/if_pppol2tp.h
+@@ -12,4 +12,30 @@
+ #include <linux/in6.h>
+ #include <uapi/linux/if_pppol2tp.h>
+ 
++/* QCA NSS ECM support - Start */
++/*
++ * Holds L2TP channel info
++ */
++struct pppol2tp_common_addr {
++ int tunnel_version; /* v2 or v3 */
++ __u32 local_tunnel_id, remote_tunnel_id; /* tunnel id */
++ __u32 local_session_id, remote_session_id; /* session id */
++ struct sockaddr_in local_addr, remote_addr; /* ip address and port */
++};
++
++/*
++ * L2TP channel operations
++ */
++struct pppol2tp_channel_ops {
++ struct ppp_channel_ops ops; /* ppp channel ops */
++};
++
++/*
++ * exported function which calls pppol2tp channel's get addressing
++ * function
++ */
++extern int pppol2tp_channel_addressing_get(struct ppp_channel *,
++ struct pppol2tp_common_addr *);
++/* QCA NSS ECM support - End */
++
+ #endif
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -123,9 +123,17 @@ struct pppol2tp_session {
+ };
+ 
+ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
+-
+-static const struct ppp_channel_ops pppol2tp_chan_ops = {
+- .start_xmit = pppol2tp_xmit,
++static int pppol2tp_get_channel_protocol(struct ppp_channel *);
++static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *);
++static void pppol2tp_hold_chan(struct ppp_channel *);
++static void pppol2tp_release_chan(struct ppp_channel *);
++
++static const struct pppol2tp_channel_ops pppol2tp_chan_ops = {
++ .ops.start_xmit = pppol2tp_xmit,
++ .ops.get_channel_protocol = pppol2tp_get_channel_protocol,
++ .ops.get_channel_protocol_ver = pppol2tp_get_channel_protocol_ver,
++ .ops.hold = pppol2tp_hold_chan,
++ .ops.release = pppol2tp_release_chan,
+ };
+ 
+ static const struct proto_ops pppol2tp_ops;
+@@ -373,6 +381,13 @@ static int pppol2tp_xmit(struct ppp_chan
+ skb->data[0] = PPP_ALLSTATIONS;
+ skb->data[1] = PPP_UI;
+ 
++ /* QCA NSS ECM support - start */
++ /* set incoming interface as the ppp interface */
++ if ((skb->protocol == htons(ETH_P_IP)) ||
++ (skb->protocol == htons(ETH_P_IPV6)))
++ skb->skb_iif = ppp_dev_index(chan);
++ /* QCA NSS ECM support - End */
++
+ local_bh_disable();
+ l2tp_xmit_skb(session, skb);
+ local_bh_enable();
+@@ -818,7 +833,7 @@ static int pppol2tp_connect(struct socke
+ po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
+ 
+ po->chan.private = sk;
+- po->chan.ops = &pppol2tp_chan_ops;
++ po->chan.ops = (struct ppp_channel_ops *)&pppol2tp_chan_ops.ops;
+ po->chan.mtu = pppol2tp_tunnel_mtu(tunnel);
+ 
+ error = ppp_register_net_channel(sock_net(sk), &po->chan);
+@@ -1732,6 +1747,109 @@ static void __exit pppol2tp_exit(void)
+ unregister_pernet_device(&pppol2tp_net_ops);
+ }
+ 
++/* QCA NSS ECM support - Start */
++/* pppol2tp_hold_chan() */
++static void pppol2tp_hold_chan(struct ppp_channel *chan)
++{
++ struct sock *sk = (struct sock *)chan->private;
++
++ sock_hold(sk);
++}
++
++/* pppol2tp_release_chan() */
++static void pppol2tp_release_chan(struct ppp_channel *chan)
++{
++ struct sock *sk = (struct sock *)chan->private;
++
++ sock_put(sk);
++}
++
++/* 
pppol2tp_get_channel_protocol() ++ * Return the protocol type of the L2TP over PPP protocol ++ */ ++static int pppol2tp_get_channel_protocol(struct ppp_channel *chan) ++{ ++ return PX_PROTO_OL2TP; ++} ++ ++/* pppol2tp_get_channel_protocol_ver() ++ * Return the protocol version of the L2TP over PPP protocol ++ */ ++static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *chan) ++{ ++ struct sock *sk; ++ struct l2tp_session *session; ++ struct l2tp_tunnel *tunnel; ++ int version = 0; ++ ++ if (chan && chan->private) ++ sk = (struct sock *)chan->private; ++ else ++ return -1; ++ ++ /* Get session and tunnel contexts from the socket */ ++ session = pppol2tp_sock_to_session(sk); ++ if (!session) ++ return -1; ++ ++ tunnel = session->tunnel; ++ if (!tunnel) { ++ sock_put(sk); ++ return -1; ++ } ++ ++ version = tunnel->version; ++ ++ sock_put(sk); ++ ++ return version; ++} ++ ++/* pppol2tp_get_addressing() */ ++static int pppol2tp_get_addressing(struct ppp_channel *chan, ++ struct pppol2tp_common_addr *addr) ++{ ++ struct sock *sk = (struct sock *)chan->private; ++ struct l2tp_session *session; ++ struct l2tp_tunnel *tunnel; ++ struct inet_sock *isk = NULL; ++ int err = -ENXIO; ++ ++ /* Get session and tunnel contexts from the socket */ ++ session = pppol2tp_sock_to_session(sk); ++ if (!session) ++ return err; ++ ++ tunnel = session->tunnel; ++ if (!tunnel) { ++ sock_put(sk); ++ return err; ++ } ++ isk = inet_sk(tunnel->sock); ++ ++ addr->local_tunnel_id = tunnel->tunnel_id; ++ addr->remote_tunnel_id = tunnel->peer_tunnel_id; ++ addr->local_session_id = session->session_id; ++ addr->remote_session_id = session->peer_session_id; ++ ++ addr->local_addr.sin_port = isk->inet_sport; ++ addr->remote_addr.sin_port = isk->inet_dport; ++ addr->local_addr.sin_addr.s_addr = isk->inet_saddr; ++ addr->remote_addr.sin_addr.s_addr = isk->inet_daddr; ++ ++ sock_put(sk); ++ return 0; ++} ++ ++/* pppol2tp_channel_addressing_get() */ ++int pppol2tp_channel_addressing_get(struct ppp_channel *chan, ++ struct pppol2tp_common_addr *addr) ++{ ++ return pppol2tp_get_addressing(chan, addr); ++} ++EXPORT_SYMBOL(pppol2tp_channel_addressing_get); ++/* QCA NSS ECM support - End */ ++ + module_init(pppol2tp_init); + module_exit(pppol2tp_exit); + +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -3743,6 +3743,32 @@ int ppp_is_multilink(struct net_device * + } + EXPORT_SYMBOL(ppp_is_multilink); + ++/* __ppp_is_multilink() ++ * Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 ++ * if the device is not PPP. Caller should acquire ppp_lock before calling ++ * this function ++ */ ++int __ppp_is_multilink(struct net_device *dev) ++{ ++ struct ppp *ppp; ++ unsigned int flags; ++ ++ if (!dev) ++ return -1; ++ ++ if (dev->type != ARPHRD_PPP) ++ return -1; ++ ++ ppp = netdev_priv(dev); ++ flags = ppp->flags; ++ ++ if (flags & SC_MULTILINK) ++ return 1; ++ ++ return 0; ++} ++EXPORT_SYMBOL(__ppp_is_multilink); ++ + /* ppp_channel_get_protocol() + * Call this to obtain the underlying protocol of the PPP channel, + * e.g. PX_PROTO_OE +@@ -3881,6 +3907,59 @@ int ppp_hold_channels(struct net_device + } + EXPORT_SYMBOL(ppp_hold_channels); + ++/* __ppp_hold_channels() ++ * Returns the PPP channels of the PPP device, storing each one into ++ * channels[]. ++ * ++ * channels[] has chan_sz elements. ++ * This function returns the number of channels stored, up to chan_sz. ++ * It will return < 0 if the device is not PPP. ++ * ++ * You MUST release the channels using ppp_release_channels(). 
++ */
++int __ppp_hold_channels(struct net_device *dev, struct ppp_channel *channels[],
++ unsigned int chan_sz)
++{
++ struct ppp *ppp;
++ int c;
++ struct channel *pch;
++
++ if (!dev)
++ return -1;
++
++ if (dev->type != ARPHRD_PPP)
++ return -1;
++
++ ppp = netdev_priv(dev);
++
++ c = 0;
++ list_for_each_entry(pch, &ppp->channels, clist) {
++ struct ppp_channel *chan;
++
++ if (!pch->chan) {
++ /* Channel is going / gone away */
++ continue;
++ }
++
++ if (c == chan_sz) {
++ /* No space to record channel */
++ return c;
++ }
++
++ /* Hold the channel, if supported */
++ chan = pch->chan;
++ if (!chan->ops->hold)
++ continue;
++
++ chan->ops->hold(chan);
++
++ /* Record the channel */
++ channels[c++] = chan;
++ }
++ return c;
++}
++EXPORT_SYMBOL(__ppp_hold_channels);
++
+ /* ppp_release_channels()
+ * Releases channels
+ */
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -235,6 +235,9 @@ struct l2tp_session *l2tp_session_get_by
+ void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
+ struct l2tp_stats *stats);
+ 
++void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
++ struct l2tp_stats *stats);
++
+ /* Tunnel and session lifetime management.
+ * Creation of a new instance is a two-step process: create, then register.
+ * Destruction is triggered using the *_delete functions, and completes asynchronously.
diff --git a/target/linux/qualcommax/patches-6.6/2603-7-qca-nss-clients-iptunnel-lock-this-cpu.patch b/target/linux/qualcommax/patches-6.6/2603-7-qca-nss-clients-iptunnel-lock-this-cpu.patch
new file mode 100644
index 00000000000000..e4ed49ea4c21fb
--- /dev/null
+++ b/target/linux/qualcommax/patches-6.6/2603-7-qca-nss-clients-iptunnel-lock-this-cpu.patch
@@ -0,0 +1,22 @@
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -2417,7 +2417,7 @@ nla_put_failure:
+ */
+ void ip6_update_offload_stats(struct net_device *dev, void *ptr)
+ {
+- struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
++ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+ 
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1736,7 +1736,7 @@ nla_put_failure:
+ /* QCA NSS Clients Support - Start */
+ void ipip6_update_offload_stats(struct net_device *dev, void *ptr)
+ {
+- struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
++ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+ 
diff --git a/target/linux/qualcommax/patches-6.6/2603-8-qca-nss-clients-add-tls-mgr-support.patch b/target/linux/qualcommax/patches-6.6/2603-8-qca-nss-clients-add-tls-mgr-support.patch
new file mode 100644
index 00000000000000..0499e237f6be88
--- /dev/null
+++ b/target/linux/qualcommax/patches-6.6/2603-8-qca-nss-clients-add-tls-mgr-support.patch
@@ -0,0 +1,24 @@
+--- /dev/null
++++ b/include/uapi/linux/tlshdr.h
+@@ -0,0 +1,21 @@
++#ifndef _UAPI_LINUX_TLSHDR_H
++#define _UAPI_LINUX_TLSHDR_H
++
++#include <linux/types.h>
++
++struct tlshdr {
++ __u8 type;
++ __be16 version;
++ __be16 len;
++} __attribute__((packed));
++
++#define TLSHDR_REC_TYPE_CCS 20 /* TLS packet is change cipher specification */
++#define TLSHDR_REC_TYPE_ALERT 21 /* TLS packet is Alert */
++#define TLSHDR_REC_TYPE_HANDSHAKE 22 /* TLS packet is Handshake */
++#define TLSHDR_REC_TYPE_DATA 23 /* TLS packet is Application data */
++
++#define TLSHDR_VERSION_1_1 0x0302 /* TLS Header Version(tls 1.1) */
++#define 
TLSHDR_VERSION_1_2 0x0303 /* TLS Header Version(tls 1.2) */ ++#define TLSHDR_VERSION_1_3 0x0304 /* TLS Header Version(tls 1.3) */ ++ ++#endif /* _UAPI_LINUX_TLSHDR_H */ diff --git a/target/linux/qualcommax/patches-6.6/2604-qca-add-mcs-support.patch b/target/linux/qualcommax/patches-6.6/2604-qca-add-mcs-support.patch new file mode 100644 index 00000000000000..53707f67798f8b --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2604-qca-add-mcs-support.patch @@ -0,0 +1,925 @@ +--- a/include/linux/if_bridge.h ++++ b/include/linux/if_bridge.h +@@ -258,4 +258,17 @@ extern br_get_dst_hook_t __rcu *br_get_d + extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br); + /* QCA NSS bridge-mgr support - End */ + ++/* QCA qca-mcs support - Start */ ++typedef struct net_bridge_port *br_get_dst_hook_t(const struct net_bridge_port *src, ++ struct sk_buff **skb); ++extern br_get_dst_hook_t __rcu *br_get_dst_hook; ++ ++typedef int (br_multicast_handle_hook_t)(const struct net_bridge_port *src, ++ struct sk_buff *skb); ++extern br_multicast_handle_hook_t __rcu *br_multicast_handle_hook; ++ ++typedef void (br_notify_hook_t)(int group, int event, const void *ptr); ++extern br_notify_hook_t __rcu *br_notify_hook; ++/* QCA qca-mcs support - End */ ++ + #endif +--- a/net/bridge/br_fdb.c ++++ b/net/bridge/br_fdb.c +@@ -263,7 +263,8 @@ static void fdb_notify(struct net_bridge + kfree_skb(skb); + goto errout; + } +- rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); ++ __br_notify(RTNLGRP_NEIGH, type, fdb); /* QCA qca-mcs support */ ++ rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); + return; + errout: + rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); +@@ -329,6 +330,7 @@ struct net_bridge_fdb_entry *br_fdb_find + { + return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid); + } ++EXPORT_SYMBOL_GPL(br_fdb_find_rcu); /* QCA qca-mcs support */ + + /* When a static FDB entry is added, the mac address from the entry is + * added to the bridge private HW address list and all required ports +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -906,6 +906,7 @@ void br_manage_promisc(struct net_bridge + int nbp_backup_change(struct net_bridge_port *p, struct net_device *backup_dev); + + /* br_input.c */ ++int br_pass_frame_up(struct sk_buff *skb, bool promisc); /* QCA qca-mcs support */ + int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb); + rx_handler_func_t *br_get_rx_handler(const struct net_device *dev); + +@@ -2271,4 +2272,14 @@ bool br_is_neigh_suppress_enabled(const + #define __br_get(__hook, __default, __args ...) \ + (__hook ? 
(__hook(__args)) : (__default)) + /* QCA NSS ECM support - End */ ++ ++/* QCA qca-mcs support - Start */ ++static inline void __br_notify(int group, int type, const void *data) ++{ ++ br_notify_hook_t *notify_hook = rcu_dereference(br_notify_hook); ++ ++ if (notify_hook) ++ notify_hook(group, type, data); ++} ++/* QCA qca-mcs support - End */ + #endif +--- a/net/bridge/br_netlink.c ++++ b/net/bridge/br_netlink.c +@@ -656,6 +656,7 @@ void br_info_notify(int event, const str + kfree_skb(skb); + goto errout; + } ++ __br_notify(RTNLGRP_LINK, event, port); /* QCA qca-mcs support */ + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); + return; + errout: +--- a/net/bridge/br.c ++++ b/net/bridge/br.c +@@ -472,6 +472,12 @@ static void __exit br_deinit(void) + br_fdb_fini(); + } + ++/* QCA qca-mcs support - Start */ ++/* Hook for bridge event notifications */ ++br_notify_hook_t __rcu *br_notify_hook __read_mostly; ++EXPORT_SYMBOL_GPL(br_notify_hook); ++/* QCA qca-mcs support - End */ ++ + module_init(br_init) + module_exit(br_deinit) + MODULE_LICENSE("GPL"); +--- a/net/bridge/br_device.c ++++ b/net/bridge/br_device.c +@@ -83,6 +83,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff * + if (is_broadcast_ether_addr(dest)) { + br_flood(br, skb, BR_PKT_BROADCAST, false, true, vid); + } else if (is_multicast_ether_addr(dest)) { ++ /* QCA qca-mcs support - Start */ ++ br_multicast_handle_hook_t *multicast_handle_hook = rcu_dereference(br_multicast_handle_hook); ++ if (!__br_get(multicast_handle_hook, true, NULL, skb)) ++ goto out; ++ /* QCA qca-mcs support - End */ ++ + if (unlikely(netpoll_tx_running(dev))) { + br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid); + goto out; +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -23,6 +23,16 @@ + #include "br_private.h" + #include "br_private_tunnel.h" + ++/* QCA qca-mcs support - Start */ ++/* Hook for external Multicast handler */ ++br_multicast_handle_hook_t __rcu *br_multicast_handle_hook __read_mostly; ++EXPORT_SYMBOL_GPL(br_multicast_handle_hook); ++ ++/* Hook for external forwarding logic */ ++br_get_dst_hook_t __rcu *br_get_dst_hook __read_mostly; ++EXPORT_SYMBOL_GPL(br_get_dst_hook); ++/* QCA qca-mcs support - End */ ++ + static int + br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb) + { +@@ -30,7 +40,7 @@ br_netif_receive_skb(struct net *net, st + return netif_receive_skb(skb); + } + +-static int br_pass_frame_up(struct sk_buff *skb, bool promisc) ++int br_pass_frame_up(struct sk_buff *skb, bool promisc) + { + struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; + struct net_bridge *br = netdev_priv(brdev); +@@ -69,6 +79,7 @@ static int br_pass_frame_up(struct sk_bu + dev_net(indev), NULL, skb, indev, NULL, + br_netif_receive_skb); + } ++EXPORT_SYMBOL_GPL(br_pass_frame_up); /* QCA qca-mcs support */ + + /* note: already called with rcu_read_lock */ + int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +@@ -84,6 +95,11 @@ int br_handle_frame_finish(struct net *n + bool promisc; + u16 vid = 0; + u8 state; ++ /* QCA qca-mcs support - Start */ ++ br_multicast_handle_hook_t *multicast_handle_hook; ++ struct net_bridge_port *pdst = NULL; ++ br_get_dst_hook_t *get_dst_hook = rcu_dereference(br_get_dst_hook); ++ /* QCA qca-mcs support - End */ + + if (!p) + goto drop; +@@ -175,6 +191,11 @@ int br_handle_frame_finish(struct net *n + + switch (pkt_type) { + case BR_PKT_MULTICAST: ++ /* QCA qca-mcs support - Start */ ++ multicast_handle_hook = 
rcu_dereference(br_multicast_handle_hook); ++ if (!__br_get(multicast_handle_hook, true, p, skb)) ++ goto out; ++ /* QCA qca-mcs support - End */ + mdst = br_mdb_get(brmctx, skb, vid); + if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && + br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) { +@@ -190,8 +211,15 @@ int br_handle_frame_finish(struct net *n + } + break; + case BR_PKT_UNICAST: +- dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid); +- break; ++ /* QCA qca-mcs support - Start */ ++ pdst = __br_get(get_dst_hook, NULL, p, &skb); ++ if (pdst) { ++ if (!skb) ++ goto out; ++ } else { ++ /* QCA qca-mcs support - End */ ++ dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid); ++ } + default: + break; + } +@@ -206,6 +234,12 @@ int br_handle_frame_finish(struct net *n + dst->used = now; + br_forward(dst->dst, skb, local_rcv, false); + } else { ++ /* QCA qca-mcs support - Start */ ++ if (pdst) { ++ br_forward(pdst, skb, local_rcv, false); ++ goto out; ++ } ++ /* QCA qca-mcs support - End */ + if (!mcast_hit) + br_flood(br, skb, pkt_type, local_rcv, false, vid); + else +--- a/include/linux/mroute.h ++++ b/include/linux/mroute.h +@@ -92,4 +92,44 @@ struct rtmsg; + int ipmr_get_route(struct net *net, struct sk_buff *skb, + __be32 saddr, __be32 daddr, + struct rtmsg *rtm, u32 portid); ++ ++/* QCA ECM qca-mcs support - Start */ ++#define IPMR_MFC_EVENT_UPDATE 1 ++#define IPMR_MFC_EVENT_DELETE 2 ++ ++/* ++ * Callback to registered modules in the event of updates to a multicast group ++ */ ++typedef void (*ipmr_mfc_event_offload_callback_t)(__be32 origin, __be32 group, ++ u32 max_dest_dev, ++ u32 dest_dev_idx[], ++ u8 op); ++ ++/* ++ * Register the callback used to inform offload modules when updates occur to ++ * MFC. The callback is registered by offload modules ++ */ ++extern bool ipmr_register_mfc_event_offload_callback( ++ ipmr_mfc_event_offload_callback_t mfc_offload_cb); ++ ++/* ++ * De-Register the callback used to inform offload modules when updates occur ++ * to MFC ++ */ ++extern void ipmr_unregister_mfc_event_offload_callback(void); ++ ++/* ++ * Find the destination interface list, given a multicast group and source ++ */ ++extern int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group, ++ u32 max_dst_cnt, u32 dest_dev[]); ++ ++/* ++ * Out-of-band multicast statistics update for flows that are offloaded from ++ * Linux ++ */ ++extern int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group, ++ u64 pkts_in, u64 bytes_in, ++ u64 pkts_out, u64 bytes_out); ++/* QCA ECM qca-mcs support - End */ + #endif +--- a/include/linux/mroute6.h ++++ b/include/linux/mroute6.h +@@ -137,4 +137,47 @@ static inline int ip6mr_sk_ioctl(struct + return 1; + } + #endif ++ ++/* QCA qca-mcs support - Start */ ++#define IP6MR_MFC_EVENT_UPDATE 1 ++#define IP6MR_MFC_EVENT_DELETE 2 ++ ++/* ++ * Callback to registered modules in the event of updates to a multicast group ++ */ ++typedef void (*ip6mr_mfc_event_offload_callback_t)(struct in6_addr *origin, ++ struct in6_addr *group, ++ u32 max_dest_dev, ++ u32 dest_dev_idx[], ++ uint8_t op); ++ ++/* ++ * Register the callback used to inform offload modules when updates occur ++ * to MFC. 
The callback is registered by offload modules ++ */ ++extern bool ip6mr_register_mfc_event_offload_callback( ++ ip6mr_mfc_event_offload_callback_t mfc_offload_cb); ++ ++/* ++ * De-Register the callback used to inform offload modules when updates occur ++ * to MFC ++ */ ++extern void ip6mr_unregister_mfc_event_offload_callback(void); ++ ++/* ++ * Find the destination interface list given a multicast group and source ++ */ ++extern int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin, ++ struct in6_addr *group, u32 max_dst_cnt, ++ u32 dest_dev[]); ++ ++/* ++ * Out-of-band multicast statistics update for flows that are offloaded from ++ * Linux ++ */ ++extern int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin, ++ struct in6_addr *group, uint64_t pkts_in, ++ uint64_t bytes_in, uint64_t pkts_out, ++ uint64_t bytes_out); ++/* QCA qca-mcs support - End */ + #endif +--- a/net/ipv4/ipmr.c ++++ b/net/ipv4/ipmr.c +@@ -113,6 +113,15 @@ static void igmpmsg_netlink_event(const + static void mroute_clean_tables(struct mr_table *mrt, int flags); + static void ipmr_expire_process(struct timer_list *t); + ++/* QCA ECM qca-mcs support - Start */ ++/* spinlock for offload */ ++static DEFINE_SPINLOCK(lock); ++ ++static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin, ++ __be32 mcastgrp); ++static ipmr_mfc_event_offload_callback_t __rcu ipmr_mfc_event_offload_callback; ++/* QCA ECM qca-mcs support - End */ ++ + #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES + #define ipmr_for_each_table(mrt, net) \ + list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list, \ +@@ -223,6 +232,228 @@ static int ipmr_rule_fill(struct fib_rul + return 0; + } + ++/* QCA ECM qca-mcs support - Start */ ++/* ipmr_sync_entry_update() ++ * Call the registered offload callback to report an update to a multicast ++ * route entry. 
The callback receives the list of destination interfaces and ++ * the interface count ++ */ ++static void ipmr_sync_entry_update(struct mr_table *mrt, ++ struct mfc_cache *cache) ++{ ++ int vifi, dest_if_count = 0; ++ u32 dest_dev[MAXVIFS]; ++ __be32 origin; ++ __be32 group; ++ ipmr_mfc_event_offload_callback_t offload_update_cb_f; ++ ++ memset(dest_dev, 0, sizeof(dest_dev)); ++ ++ origin = cache->mfc_origin; ++ group = cache->mfc_mcastgrp; ++ ++ spin_lock(&mrt_lock); ++ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) { ++ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) && ++ (cache->_c.mfc_un.res.ttls[vifi] < 255))) { ++ continue; ++ } ++ if (dest_if_count == MAXVIFS) { ++ spin_unlock(&mrt_lock); ++ return; ++ } ++ ++ if (!VIF_EXISTS(mrt, vifi)) { ++ spin_unlock(&mrt_lock); ++ return; ++ } ++ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex; ++ dest_if_count++; ++ } ++ spin_unlock(&mrt_lock); ++ ++ rcu_read_lock(); ++ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback); ++ ++ if (!offload_update_cb_f) { ++ rcu_read_unlock(); ++ return; ++ } ++ ++ offload_update_cb_f(group, origin, dest_if_count, dest_dev, ++ IPMR_MFC_EVENT_UPDATE); ++ rcu_read_unlock(); ++} ++ ++/* ipmr_sync_entry_delete() ++ * Call the registered offload callback to inform of a multicast route entry ++ * delete event ++ */ ++static void ipmr_sync_entry_delete(u32 origin, u32 group) ++{ ++ ipmr_mfc_event_offload_callback_t offload_update_cb_f; ++ ++ rcu_read_lock(); ++ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback); ++ ++ if (!offload_update_cb_f) { ++ rcu_read_unlock(); ++ return; ++ } ++ ++ offload_update_cb_f(group, origin, 0, NULL, IPMR_MFC_EVENT_DELETE); ++ rcu_read_unlock(); ++} ++ ++/* ipmr_register_mfc_event_offload_callback() ++ * Register the IPv4 Multicast update offload callback with IPMR ++ */ ++bool ipmr_register_mfc_event_offload_callback( ++ ipmr_mfc_event_offload_callback_t mfc_offload_cb) ++{ ++ ipmr_mfc_event_offload_callback_t offload_update_cb_f; ++ ++ rcu_read_lock(); ++ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback); ++ ++ if (offload_update_cb_f) { ++ rcu_read_unlock(); ++ return false; ++ } ++ rcu_read_unlock(); ++ ++ spin_lock(&lock); ++ rcu_assign_pointer(ipmr_mfc_event_offload_callback, mfc_offload_cb); ++ spin_unlock(&lock); ++ synchronize_rcu(); ++ return true; ++} ++EXPORT_SYMBOL(ipmr_register_mfc_event_offload_callback); ++ ++/* ipmr_unregister_mfc_event_offload_callback() ++ * De-register the IPv4 Multicast update offload callback with IPMR ++ */ ++void ipmr_unregister_mfc_event_offload_callback(void) ++{ ++ spin_lock(&lock); ++ rcu_assign_pointer(ipmr_mfc_event_offload_callback, NULL); ++ spin_unlock(&lock); ++ synchronize_rcu(); ++} ++EXPORT_SYMBOL(ipmr_unregister_mfc_event_offload_callback); ++ ++/* ipmr_find_mfc_entry() ++ * Returns destination interface list for a particular multicast flow, and ++ * the number of interfaces in the list ++ */ ++int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group, ++ u32 max_dest_cnt, u32 dest_dev[]) ++{ ++ int vifi, dest_if_count = 0; ++ struct mr_table *mrt; ++ struct mfc_cache *cache; ++ ++ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); ++ if (!mrt) ++ return -ENOENT; ++ ++ rcu_read_lock(); ++ cache = ipmr_cache_find(mrt, origin, group); ++ if (!cache) { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++ ++ spin_lock(&mrt_lock); ++ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) { ++ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) && ++ 
(cache->_c.mfc_un.res.ttls[vifi] < 255))) { ++ continue; ++ } ++ ++ /* We have another valid destination interface entry. Check if ++ * the number of the destination interfaces for the route is ++ * exceeding the size of the array given to us ++ */ ++ if (dest_if_count == max_dest_cnt) { ++ spin_unlock(&mrt_lock); ++ rcu_read_unlock(); ++ return -EINVAL; ++ } ++ ++ if (!VIF_EXISTS(mrt, vifi)) { ++ spin_unlock(&mrt_lock); ++ rcu_read_unlock(); ++ return -EINVAL; ++ } ++ ++ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex; ++ dest_if_count++; ++ } ++ spin_unlock(&mrt_lock); ++ rcu_read_unlock(); ++ ++ return dest_if_count; ++} ++EXPORT_SYMBOL(ipmr_find_mfc_entry); ++ ++/* ipmr_mfc_stats_update() ++ * Update the MFC/VIF statistics for offloaded flows ++ */ ++int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group, ++ u64 pkts_in, u64 bytes_in, ++ u64 pkts_out, u64 bytes_out) ++{ ++ int vif, vifi; ++ struct mr_table *mrt; ++ struct mfc_cache *cache; ++ ++ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); ++ if (!mrt) ++ return -ENOENT; ++ ++ rcu_read_lock(); ++ cache = ipmr_cache_find(mrt, origin, group); ++ if (!cache) { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++ ++ vif = cache->_c.mfc_parent; ++ ++ spin_lock(&mrt_lock); ++ if (!VIF_EXISTS(mrt, vif)) { ++ spin_unlock(&mrt_lock); ++ rcu_read_unlock(); ++ return -EINVAL; ++ } ++ ++ mrt->vif_table[vif].pkt_in += pkts_in; ++ mrt->vif_table[vif].bytes_in += bytes_in; ++ cache->_c.mfc_un.res.pkt += pkts_out; ++ cache->_c.mfc_un.res.bytes += bytes_out; ++ ++ for (vifi = cache->_c.mfc_un.res.minvif; ++ vifi < cache->_c.mfc_un.res.maxvif; vifi++) { ++ if ((cache->_c.mfc_un.res.ttls[vifi] > 0) && ++ (cache->_c.mfc_un.res.ttls[vifi] < 255)) { ++ if (!VIF_EXISTS(mrt, vifi)) { ++ spin_unlock(&mrt_lock); ++ rcu_read_unlock(); ++ return -EINVAL; ++ } ++ mrt->vif_table[vifi].pkt_out += pkts_out; ++ mrt->vif_table[vifi].bytes_out += bytes_out; ++ } ++ } ++ spin_unlock(&mrt_lock); ++ rcu_read_unlock(); ++ ++ return 0; ++} ++EXPORT_SYMBOL(ipmr_mfc_stats_update); ++/* QCA ECM qca-mcs support - End */ ++ + static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = { + .family = RTNL_FAMILY_IPMR, + .rule_size = sizeof(struct ipmr_rule), +@@ -1192,6 +1423,11 @@ static int ipmr_mfc_delete(struct mr_tab + mroute_netlink_event(mrt, c, RTM_DELROUTE); + mr_cache_put(&c->_c); + ++ /* QCA ECM qca-mcs support - Start */ ++ /* Inform offload modules of the delete event */ ++ ipmr_sync_entry_delete(c->mfc_origin, c->mfc_mcastgrp); ++ /* QCA ECM qca-mcs support - End */ ++ + return 0; + } + +@@ -1221,6 +1457,12 @@ static int ipmr_mfc_add(struct net *net, + call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c, + mrt->id); + mroute_netlink_event(mrt, c, RTM_NEWROUTE); ++ ++ /* QCA ECM qca-mcs support - Start */ ++ /* Inform offload modules of the update event */ ++ ipmr_sync_entry_update(mrt, c); ++ /* QCA ECM qca-mcs support - End */ ++ + return 0; + } + +@@ -1281,6 +1523,7 @@ static void mroute_clean_tables(struct m + struct net *net = read_pnet(&mrt->net); + struct mr_mfc *c, *tmp; + struct mfc_cache *cache; ++ u32 origin, group; /* QCA ECM qca-mcs support */ + LIST_HEAD(list); + int i; + +@@ -1305,10 +1548,19 @@ static void mroute_clean_tables(struct m + rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params); + list_del_rcu(&c->list); + cache = (struct mfc_cache *)c; ++ /* QCA ECM qca-mcs support - Start */ ++ origin = cache->mfc_origin; ++ group = cache->mfc_mcastgrp; ++ /* QCA ECM qca-mcs support - End */ + 
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache, + mrt->id); + mroute_netlink_event(mrt, cache, RTM_DELROUTE); + mr_cache_put(c); ++ ++ /* QCA ECM qca-mcs support - Start */ ++ /* Inform offload modules of the delete event */ ++ ipmr_sync_entry_delete(origin, group); ++ /* QCA ECM qca-mcs support - End */ + } + } + +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -102,6 +102,17 @@ static int ip6mr_rtm_dumproute(struct sk + static void mroute_clean_tables(struct mr_table *mrt, int flags); + static void ipmr_expire_process(struct timer_list *t); + ++/* QCA qca-mcs support - Start */ ++/* Spinlock for offload */ ++static DEFINE_SPINLOCK(lock); ++ ++static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt, ++ const struct in6_addr *origin, ++ const struct in6_addr *mcastgrp); ++static ip6mr_mfc_event_offload_callback_t __rcu ++ ip6mr_mfc_event_offload_callback; ++/* QCA qca-mcs support - End */ ++ + #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES + #define ip6mr_for_each_table(mrt, net) \ + list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \ +@@ -380,6 +391,227 @@ static struct mr_table_ops ip6mr_mr_tabl + .cmparg_any = &ip6mr_mr_table_ops_cmparg_any, + }; + ++/* QCA qca-mcs support - Start */ ++/* ip6mr_sync_entry_update() ++ * Call the registered offload callback to report an update to a multicast ++ * route entry. The callback receives the list of destination interfaces and ++ * the interface count ++ */ ++static void ip6mr_sync_entry_update(struct mr_table *mrt, ++ struct mfc6_cache *cache) ++{ ++ int vifi, dest_if_count = 0; ++ u32 dest_dev[MAXMIFS]; ++ struct in6_addr mc_origin, mc_group; ++ ip6mr_mfc_event_offload_callback_t offload_update_cb_f; ++ ++ memset(dest_dev, 0, sizeof(dest_dev)); ++ ++ spin_lock(&mrt_lock); ++ ++ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) { ++ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) && ++ (cache->_c.mfc_un.res.ttls[vifi] < 255))) { ++ continue; ++ } ++ ++ if (dest_if_count == MAXMIFS) { ++ spin_unlock(&mrt_lock); ++ return; ++ } ++ ++ if (!VIF_EXISTS(mrt, vifi)) { ++ spin_unlock(&mrt_lock); ++ return; ++ } ++ ++ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex; ++ dest_if_count++; ++ } ++ ++ memcpy(&mc_origin, &cache->mf6c_origin, sizeof(struct in6_addr)); ++ memcpy(&mc_group, &cache->mf6c_mcastgrp, sizeof(struct in6_addr)); ++ spin_unlock(&mrt_lock); ++ ++ rcu_read_lock(); ++ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback); ++ ++ if (!offload_update_cb_f) { ++ rcu_read_unlock(); ++ return; ++ } ++ ++ offload_update_cb_f(&mc_group, &mc_origin, dest_if_count, dest_dev, ++ IP6MR_MFC_EVENT_UPDATE); ++ rcu_read_unlock(); ++} ++ ++/* ip6mr_sync_entry_delete() ++ * Call the registered offload callback to inform of a multicast route entry ++ * delete event ++ */ ++static void ip6mr_sync_entry_delete(struct in6_addr *mc_origin, ++ struct in6_addr *mc_group) ++{ ++ ip6mr_mfc_event_offload_callback_t offload_update_cb_f; ++ ++ rcu_read_lock(); ++ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback); ++ ++ if (!offload_update_cb_f) { ++ rcu_read_unlock(); ++ return; ++ } ++ ++ offload_update_cb_f(mc_group, mc_origin, 0, NULL, ++ IP6MR_MFC_EVENT_DELETE); ++ rcu_read_unlock(); ++} ++ ++/* ip6mr_register_mfc_event_offload_callback() ++ * Register the IPv6 multicast update callback for offload modules ++ */ ++bool ip6mr_register_mfc_event_offload_callback( ++ ip6mr_mfc_event_offload_callback_t mfc_offload_cb) ++{ ++ ip6mr_mfc_event_offload_callback_t offload_update_cb_f; ++ ++ 
rcu_read_lock(); ++ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback); ++ ++ if (offload_update_cb_f) { ++ rcu_read_unlock(); ++ return false; ++ } ++ rcu_read_unlock(); ++ ++ spin_lock(&lock); ++ rcu_assign_pointer(ip6mr_mfc_event_offload_callback, mfc_offload_cb); ++ spin_unlock(&lock); ++ synchronize_rcu(); ++ return true; ++} ++EXPORT_SYMBOL(ip6mr_register_mfc_event_offload_callback); ++ ++/* ip6mr_unregister_mfc_event_offload_callback() ++ * De-register the IPv6 multicast update callback for offload modules ++ */ ++void ip6mr_unregister_mfc_event_offload_callback(void) ++{ ++ spin_lock(&lock); ++ rcu_assign_pointer(ip6mr_mfc_event_offload_callback, NULL); ++ spin_unlock(&lock); ++ synchronize_rcu(); ++} ++EXPORT_SYMBOL(ip6mr_unregister_mfc_event_offload_callback); ++ ++/* ip6mr_find_mfc_entry() ++ * Return the destination interface list for a particular multicast flow, and ++ * the number of interfaces in the list ++ */ ++int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin, ++ struct in6_addr *group, u32 max_dest_cnt, ++ u32 dest_dev[]) ++{ ++ int vifi, dest_if_count = 0; ++ struct mr_table *mrt; ++ struct mfc6_cache *cache; ++ ++ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); ++ if (!mrt) ++ return -ENOENT; ++ ++ spin_lock(&mrt_lock); ++ cache = ip6mr_cache_find(mrt, origin, group); ++ if (!cache) { ++ spin_unlock(&mrt_lock); ++ return -ENOENT; ++ } ++ ++ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) { ++ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) && ++ (cache->_c.mfc_un.res.ttls[vifi] < 255))) { ++ continue; ++ } ++ ++ /* We have another valid destination interface entry. Check if ++ * the number of the destination interfaces for the route is ++ * exceeding the size of the array given to us ++ */ ++ if (dest_if_count == max_dest_cnt) { ++ spin_unlock(&mrt_lock); ++ return -EINVAL; ++ } ++ ++ if (!VIF_EXISTS(mrt, vifi)) { ++ spin_unlock(&mrt_lock); ++ return -EINVAL; ++ } ++ ++ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex; ++ dest_if_count++; ++ } ++ spin_unlock(&mrt_lock); ++ ++ return dest_if_count; ++} ++EXPORT_SYMBOL(ip6mr_find_mfc_entry); ++ ++/* ip6mr_mfc_stats_update() ++ * Update the MFC/VIF statistics for offloaded flows ++ */ ++int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin, ++ struct in6_addr *group, u64 pkts_in, ++ u64 bytes_in, uint64_t pkts_out, ++ u64 bytes_out) ++{ ++ int vif, vifi; ++ struct mr_table *mrt; ++ struct mfc6_cache *cache; ++ ++ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); ++ ++ if (!mrt) ++ return -ENOENT; ++ ++ spin_lock(&mrt_lock); ++ cache = ip6mr_cache_find(mrt, origin, group); ++ if (!cache) { ++ spin_unlock(&mrt_lock); ++ return -ENOENT; ++ } ++ ++ vif = cache->_c.mfc_parent; ++ ++ if (!VIF_EXISTS(mrt, vif)) { ++ spin_unlock(&mrt_lock); ++ return -EINVAL; ++ } ++ ++ mrt->vif_table[vif].pkt_in += pkts_in; ++ mrt->vif_table[vif].bytes_in += bytes_in; ++ cache->_c.mfc_un.res.pkt += pkts_out; ++ cache->_c.mfc_un.res.bytes += bytes_out; ++ ++ for (vifi = cache->_c.mfc_un.res.minvif; ++ vifi < cache->_c.mfc_un.res.maxvif; vifi++) { ++ if ((cache->_c.mfc_un.res.ttls[vifi] > 0) && ++ (cache->_c.mfc_un.res.ttls[vifi] < 255)) { ++ if (!VIF_EXISTS(mrt, vifi)) { ++ spin_unlock(&mrt_lock); ++ return -EINVAL; ++ } ++ mrt->vif_table[vifi].pkt_out += pkts_out; ++ mrt->vif_table[vifi].bytes_out += bytes_out; ++ } ++ } ++ ++ spin_unlock(&mrt_lock); ++ return 0; ++} ++EXPORT_SYMBOL(ip6mr_mfc_stats_update); ++/* QCA qca-mcs support - End */ ++ + static struct mr_table 
*ip6mr_new_table(struct net *net, u32 id) + { + struct mr_table *mrt; +@@ -1221,6 +1453,7 @@ static int ip6mr_mfc_delete(struct mr_ta + int parent) + { + struct mfc6_cache *c; ++ struct in6_addr mc_origin, mc_group; /* QCA qca-mcs support */ + + /* The entries are added/deleted only under RTNL */ + rcu_read_lock(); +@@ -1229,6 +1462,12 @@ static int ip6mr_mfc_delete(struct mr_ta + rcu_read_unlock(); + if (!c) + return -ENOENT; ++ ++ /* QCA qca-mcs support - Start */ ++ memcpy(&mc_origin, &c->mf6c_origin, sizeof(struct in6_addr)); ++ memcpy(&mc_group, &c->mf6c_mcastgrp, sizeof(struct in6_addr)); ++ /* QCA qca-mcs support - End */ ++ + rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params); + list_del_rcu(&c->_c.list); + +@@ -1236,6 +1475,12 @@ static int ip6mr_mfc_delete(struct mr_ta + FIB_EVENT_ENTRY_DEL, c, mrt->id); + mr6_netlink_event(mrt, c, RTM_DELROUTE); + mr_cache_put(&c->_c); ++ ++ /* QCA qca-mcs support - Start */ ++ /* Inform offload modules of the delete event */ ++ ip6mr_sync_entry_delete(&mc_origin, &mc_group); ++ /* QCA qca-mcs support - End */ ++ + return 0; + } + +@@ -1457,6 +1702,12 @@ static int ip6mr_mfc_add(struct net *net + call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, + c, mrt->id); + mr6_netlink_event(mrt, c, RTM_NEWROUTE); ++ ++ /* QCA qca-mcs support - Start */ ++ /* Inform offload modules of the update event */ ++ ip6mr_sync_entry_update(mrt, c); ++ /* QCA qca-mcs support - End */ ++ + return 0; + } + +@@ -1519,6 +1770,10 @@ static int ip6mr_mfc_add(struct net *net + + static void mroute_clean_tables(struct mr_table *mrt, int flags) + { ++ /* QCA qca-mcs support - Start */ ++ struct mfc6_cache *cache; ++ struct in6_addr mc_origin, mc_group; ++ /* QCA qca-mcs support - End */ + struct mr_mfc *c, *tmp; + LIST_HEAD(list); + int i; +@@ -1541,13 +1796,23 @@ static void mroute_clean_tables(struct m + if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) || + (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC))) + continue; ++ /* QCA qca-mcs support - Start */ ++ cache = (struct mfc6_cache *)c; ++ memcpy(&mc_origin, &cache->mf6c_origin, sizeof(struct in6_addr)); ++ memcpy(&mc_group, &cache->mf6c_mcastgrp, sizeof(struct in6_addr)); ++ /* QCA qca-mcs support - End */ + rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); + list_del_rcu(&c->list); + call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), + FIB_EVENT_ENTRY_DEL, +- (struct mfc6_cache *)c, mrt->id); +- mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); ++ cache, mrt->id); ++ mr6_netlink_event(mrt, cache, RTM_DELROUTE); + mr_cache_put(c); ++ ++ /* QCA qca-mcs support - Start */ ++ /* Inform offload modules of the delete event */ ++ ip6mr_sync_entry_delete(&mc_origin, &mc_group); ++ /* QCA qca-mcs support - End */ + } + } + diff --git a/target/linux/qualcommax/patches-6.6/2605-qca-nss-cfi-support.patch b/target/linux/qualcommax/patches-6.6/2605-qca-nss-cfi-support.patch new file mode 100644 index 00000000000000..f6a439ba5306a6 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2605-qca-nss-cfi-support.patch @@ -0,0 +1,111 @@ +--- a/crypto/authenc.c ++++ b/crypto/authenc.c +@@ -415,6 +415,8 @@ static int crypto_authenc_create(struct + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + ++ inst->alg.base.cra_flags |= (auth_base->cra_flags | ++ enc->base.cra_flags) & CRYPTO_ALG_NOSUPP_SG; + inst->alg.base.cra_priority = enc->base.cra_priority * 10 + + auth_base->cra_priority; + inst->alg.base.cra_blocksize = 
enc->base.cra_blocksize;
+--- a/include/linux/crypto.h
++++ b/include/linux/crypto.h
+@@ -86,6 +86,11 @@
+ #define CRYPTO_NOLOAD			0x00008000
+
+ /*
++ * Set this flag if the algorithm does not support SG list transforms
++ */
++#define CRYPTO_ALG_NOSUPP_SG		0x0000c000
++
++/*
+  * The algorithm may allocate memory during request processing, i.e. during
+  * encryption, decryption, or hashing. Users can request an algorithm with this
+  * flag unset if they can't handle memory allocation failures.
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -658,6 +658,7 @@ static int esp_output(struct xfrm_state
+ 	struct ip_esp_hdr *esph;
+ 	struct crypto_aead *aead;
+ 	struct esp_info esp;
++	bool nosupp_sg;
+
+ 	esp.inplace = true;
+
+@@ -669,6 +670,11 @@ static int esp_output(struct xfrm_state
+ 	aead = x->data;
+ 	alen = crypto_aead_authsize(aead);
+
++	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
++	if (nosupp_sg && skb_linearize(skb)) {
++		return -ENOMEM;
++	}
++
+ 	esp.tfclen = 0;
+ 	if (x->tfcpad) {
+ 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+@@ -890,6 +896,7 @@ static int esp_input(struct xfrm_state *
+ 	u8 *iv;
+ 	struct scatterlist *sg;
+ 	int err = -EINVAL;
++	bool nosupp_sg;
+
+ 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
+ 		goto out;
+@@ -897,6 +904,12 @@ static int esp_input(struct xfrm_state *
+ 	if (elen <= 0)
+ 		goto out;
+
++	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
++	if (nosupp_sg && skb_linearize(skb)) {
++		err = -ENOMEM;
++		goto out;
++	}
++
+ 	assoclen = sizeof(struct ip_esp_hdr);
+ 	seqhilen = 0;
+
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -696,6 +696,7 @@ static int esp6_output(struct xfrm_state
+ 	struct ip_esp_hdr *esph;
+ 	struct crypto_aead *aead;
+ 	struct esp_info esp;
++	bool nosupp_sg;
+
+ 	esp.inplace = true;
+
+@@ -707,6 +708,11 @@ static int esp6_output(struct xfrm_state
+ 	aead = x->data;
+ 	alen = crypto_aead_authsize(aead);
+
++	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
++	if (nosupp_sg && skb_linearize(skb)) {
++		return -ENOMEM;
++	}
++
+ 	esp.tfclen = 0;
+ 	if (x->tfcpad) {
+ 		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+@@ -934,6 +940,7 @@ static int esp6_input(struct xfrm_state
+ 	__be32 *seqhi;
+ 	u8 *iv;
+ 	struct scatterlist *sg;
++	bool nosupp_sg;
+
+ 	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
+ 		ret = -EINVAL;
+@@ -945,6 +952,12 @@ static int esp6_input(struct xfrm_state
+ 		goto out;
+ 	}
+
++	nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
++	if (nosupp_sg && skb_linearize(skb)) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
+ 	assoclen = sizeof(struct ip_esp_hdr);
+ 	seqhilen = 0;
+
diff --git a/target/linux/qualcommax/patches-6.6/2606-1-qca-nss-ecm-bridge-Fixes-for-Bridge-VLAN-Filtering.patch b/target/linux/qualcommax/patches-6.6/2606-1-qca-nss-ecm-bridge-Fixes-for-Bridge-VLAN-Filtering.patch
new file mode 100644
index 00000000000000..b4d5f89dd559a6
--- /dev/null
+++ b/target/linux/qualcommax/patches-6.6/2606-1-qca-nss-ecm-bridge-Fixes-for-Bridge-VLAN-Filtering.patch
@@ -0,0 +1,341 @@
+From 7732ede3f72eebb8742e17e61e07e9286c442aec Mon Sep 17 00:00:00 2001
+From: Vishnu Vardhan Bantanahal
+Date: Mon, 15 May 2023 17:56:04 +0530
+Subject: [PATCH 277/281] bridge: Fixes for Bridge VLAN Filtering
+
+1. Fix the function that checks for bridge master status when checking
+whether the Bridge VLAN filter feature is enabled on bridge slave ports.
+2. Disable default PVID for bridges during device registration in
+the system.
+Change-Id: Ibea6559c1b0700a2300b60e20d57b7818e23a8a8 +Signed-off-by: Vishnu Vardhan Bantanahal + +bridge: Fix Bridge VLAN stats update +This patch fixes Bridge VLAN stats update for both bridge master +and bridge slave. +Change-Id: Ia26f4c71e83e27dd83336815cda5c05c8c3f24ff +Signed-off-by: Vishnu Vardhan Bantanahal + +bridge: Add bridge VLAN filter APIs for offload for 6.1 Kernel + +Change-Id: I54e44c26664f86ae024f54605a032713a9a3eee5 +Signed-off-by: Vishnu Vardhan Bantanahal +--- + include/linux/if_bridge.h | 29 +++++- + include/linux/netdevice.h | 2 +- + net/bridge/br.c | 4 + + net/bridge/br_if.c | 11 ++- + net/bridge/br_private.h | 1 + + net/bridge/br_vlan.c | 186 +++++++++++++++++++++++++++++++++++++- + net/core/dev.c | 2 +- + 7 files changed, 227 insertions(+), 8 deletions(-) + +--- a/include/linux/if_bridge.h ++++ b/include/linux/if_bridge.h +@@ -128,6 +128,12 @@ int br_vlan_get_info_rcu(const struct ne + bool br_mst_enabled(const struct net_device *dev); + int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids); + int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state); ++ ++extern struct net_device *br_fdb_find_vid_by_mac(struct net_device *dev, u8 *mac, u16 *vid); ++extern int br_vlan_get_tag_skb(const struct sk_buff *skb, u16 *vid); ++extern int br_dev_is_vlan_filter_enabled(struct net_device *dev); ++extern int br_vlan_update_stats(struct net_device* dev, u32 vid, u64 rx_bytes, u64 rx_packets, u64 tx_bytes, u64 tx_packets); ++extern int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); + #else + static inline bool br_vlan_enabled(const struct net_device *dev) + { +@@ -149,8 +155,27 @@ static inline int br_vlan_get_pvid_rcu(c + return -EINVAL; + } + +-static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, +- struct bridge_vlan_info *p_vinfo) ++static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo) ++{ ++ return -EINVAL; ++} ++ ++static inline struct net_device *br_fdb_find_vid_by_mac(struct net_device *dev, u8 *mac, u16 *vid) ++{ ++ return NULL; ++} ++ ++static inline int br_vlan_get_tag_skb(const struct sk_buff *skb, u16 *vid) ++{ ++ return -EINVAL; ++} ++ ++static inline int br_dev_is_vlan_filter_enabled(const struct net_device *dev) ++{ ++ return -EINVAL; ++} ++ ++static inline int br_vlan_update_stats(struct net_device* dev, u32 vid, u64 rx_bytes, u64 rx_packets, u64 tx_bytes, u64 tx_packets) + { + return -EINVAL; + } +--- a/net/bridge/br.c ++++ b/net/bridge/br.c +@@ -42,6 +42,10 @@ static int br_device_event(struct notifi + return notifier_from_errno(err); + + if (event == NETDEV_REGISTER) { ++#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) ++ br_vlan_disable_default_pvid(netdev_priv(dev)); ++#endif ++ + /* register of bridge completed, add sysfs entries */ + err = br_sysfs_addbr(dev); + if (err) +--- a/net/bridge/br_if.c ++++ b/net/bridge/br_if.c +@@ -800,9 +800,12 @@ struct net_device *br_port_dev_get(struc + struct sk_buff *skb, + unsigned int cookie) + { ++#if !IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) + struct net_bridge_fdb_entry *fdbe; + struct net_bridge *br; ++#endif + struct net_device *netdev = NULL; ++ u16 __maybe_unused vid; + + /* Is this a bridge? 
*/
+	if (!(dev->priv_flags & IFF_EBRIDGE))
+@@ -831,14 +834,20 @@ struct net_device *br_port_dev_get(struc
+	 * determine the port to use - fall back to using FDB
+	 */
+
++#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
++	/* Lookup the fdb entry and get reference to the port dev.
++	 * dev_hold() is done as part of br_fdb_find_vid_by_mac()
++	 */
++	netdev = br_fdb_find_vid_by_mac(dev, addr, &vid);
++#else
+	br = netdev_priv(dev);
+-
+-	/* Lookup the fdb entry and get reference to the port dev */
+	fdbe = br_fdb_find_rcu(br, addr, 0);
+	if (fdbe && fdbe->dst) {
+		netdev = fdbe->dst->dev; /* port device */
+		dev_hold(netdev);
+	}
++#endif
++
+ out:
+	rcu_read_unlock();
+	return netdev;
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -1563,6 +1563,7 @@ void br_vlan_fill_forward_path_pvid(stru
+ int br_vlan_fill_forward_path_mode(struct net_bridge *br,
+				   struct net_bridge_port *dst,
+				   struct net_device_path *path);
++void br_vlan_disable_default_pvid(struct net_bridge *br);
+
+ static inline struct net_bridge_vlan_group *br_vlan_group(
+					const struct net_bridge *br)
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -933,8 +933,190 @@ int br_vlan_get_proto(const struct net_d
+ }
+ EXPORT_SYMBOL_GPL(br_vlan_get_proto);
+
++/*
++ * br_vlan_get_tag_skb()
++ *	Returns the VLAN tag if a valid one is found in the skb.
++ */
++int br_vlan_get_tag_skb(const struct sk_buff *skb, u16 *vid)
++{
++	return br_vlan_get_tag(skb, vid);
++
++}
++EXPORT_SYMBOL_GPL(br_vlan_get_tag_skb);
++
++/*
++ * br_dev_is_vlan_filter_enabled()
++ *	Caller must hold rcu_read_lock().
++ *	Returns 0 when the device (port or bridge device) has a valid bridge
++ *	VLAN filter configuration, and an error otherwise.
++ */
++int br_dev_is_vlan_filter_enabled(struct net_device *dev)
++{
++	struct net_bridge_port *p;
++	struct net_bridge_vlan_group *vg = NULL;
++	struct net_device *master = NULL;
++
++	if (!dev) {
++		return -ENODEV;
++	}
++
++	if (netif_is_bridge_master(dev)) {
++		/*
++		 * It's a bridge device
++		 */
++		if (!br_vlan_enabled(dev)) {
++			return -ENOENT;
++		}
++
++		vg = br_vlan_group(netdev_priv(dev));
++	} else if (dev->priv_flags & IFF_BRIDGE_PORT) {
++		/*
++		 * It's a bridge port
++		 */
++		master = netdev_master_upper_dev_get_rcu(dev);
++		if (!master) {
++			return -EINVAL;
++		}
++
++		if (!br_vlan_enabled(master)) {
++			return -ENOENT;
++		}
++
++		p = br_port_get_rcu(dev);
++		if (p)
++			vg = nbp_vlan_group(p);
++	} else {
++		/*
++		 * Neither a bridge device nor a port
++		 */
++		return -EINVAL;
++	}
++
++	if (vg != NULL && vg->num_vlans) {
++		return 0;
++	}
++
++	return -ENXIO;
++}
++EXPORT_SYMBOL_GPL(br_dev_is_vlan_filter_enabled);
++
++/*
++ * br_fdb_find_vid_by_mac()
++ *	Caller must ensure rcu_read_lock() is taken.
++ *	Returns the port device on a successful lookup, NULL otherwise.
++ *	Looks up the bridge fdb table for the MAC address and finds the
++ *	VLAN id associated with it.
++ *	vid is non-zero on a successful lookup, otherwise 0.
++ *	We dev_hold() the returned device; the caller releases this hold.
++ */ ++struct net_device *br_fdb_find_vid_by_mac(struct net_device *dev, u8 *mac, u16 *vid) ++{ ++ struct net_bridge *br; ++ struct net_bridge_fdb_entry *f; ++ struct net_device *netdev = NULL; ++ ++ if (!mac) { ++ return NULL; ++ } ++ ++ if (!dev || !netif_is_bridge_master(dev)) { ++ return NULL; ++ } ++ ++ br = netdev_priv(dev); ++ if (!br) { ++ return NULL; ++ } ++ ++ hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { ++ if (ether_addr_equal(f->key.addr.addr, mac)) { ++ *vid = f->key.vlan_id; ++ if (f->dst) { ++ netdev = f->dst->dev; ++ dev_hold(netdev); ++ break; ++ } ++ } ++ } ++ return netdev; ++} ++EXPORT_SYMBOL_GPL(br_fdb_find_vid_by_mac); ++ ++/* ++ * br_vlan_update_stats() ++ * Update bridge VLAN filter statistics. ++ */ ++int br_vlan_update_stats(struct net_device *dev, u32 vid, u64 rx_bytes, u64 rx_packets, u64 tx_bytes, u64 tx_packets) ++{ ++ struct net_bridge_port *p; ++ struct net_bridge_vlan *v; ++ struct pcpu_sw_netstats *stats; ++ const struct net_bridge *br; ++ struct net_bridge_vlan_group *vg; ++ struct net_device *brdev; ++ ++ if (!dev) { ++ return -ENODEV; ++ } ++ ++ if (!netif_is_bridge_port(dev) && !netif_is_bridge_master(dev)) { ++ return -EINVAL; ++ } ++ ++ rcu_read_lock(); ++ ++ brdev = dev; ++ if (!netif_is_bridge_master(dev)) { ++ brdev = netdev_master_upper_dev_get_rcu(dev); ++ if (!brdev) { ++ rcu_read_unlock(); ++ return -EPERM; ++ } ++ } ++ ++ br = netdev_priv(brdev); ++ if (!br || !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) { ++ rcu_read_unlock(); ++ return -EINVAL; ++ } ++ ++ p = br_port_get_rcu(dev); ++ if (p) { ++ vg = nbp_vlan_group_rcu(p); ++ } else if (netif_is_bridge_master(dev)) { ++ vg = br_vlan_group(netdev_priv(dev)); ++ } else { ++ rcu_read_unlock(); ++ return -EINVAL; ++ } ++ ++ ++ if (!vg) { ++ rcu_read_unlock(); ++ return -ENXIO; ++ } ++ ++ v = br_vlan_find(vg, vid); ++ if (!v || !br_vlan_should_use(v)) { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++ ++ stats = this_cpu_ptr(v->stats); ++ u64_stats_update_begin(&stats->syncp); ++ u64_stats_add(&stats->rx_bytes, rx_bytes); ++ u64_stats_add(&stats->rx_packets, rx_packets); ++ u64_stats_add(&stats->tx_bytes, tx_bytes); ++ u64_stats_add(&stats->tx_packets, tx_packets); ++ u64_stats_update_end(&stats->syncp); ++ ++ rcu_read_unlock(); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(br_vlan_update_stats); ++ + int __br_vlan_set_proto(struct net_bridge *br, __be16 proto, +- struct netlink_ext_ack *extack) ++ struct netlink_ext_ack *extack) + { + struct switchdev_attr attr = { + .orig_dev = br->dev, +@@ -1068,7 +1250,7 @@ static bool vlan_default_pvid(struct net + return false; + } + +-static void br_vlan_disable_default_pvid(struct net_bridge *br) ++void br_vlan_disable_default_pvid(struct net_bridge *br) + { + struct net_bridge_port *p; + u16 pvid = br->default_pvid; diff --git a/target/linux/qualcommax/patches-6.6/2610-netfilter-nf_conntrack_ecache-Fix-NSS-ECM-BRK-kernel-panic.patch b/target/linux/qualcommax/patches-6.6/2610-netfilter-nf_conntrack_ecache-Fix-NSS-ECM-BRK-kernel-panic.patch new file mode 100644 index 00000000000000..4b9ee21f2f8545 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2610-netfilter-nf_conntrack_ecache-Fix-NSS-ECM-BRK-kernel-panic.patch @@ -0,0 +1,10 @@ +--- a/net/netfilter/nf_conntrack_ecache.c ++++ b/net/netfilter/nf_conntrack_ecache.c +@@ -266,7 +266,6 @@ void nf_conntrack_register_notifier(stru + mutex_lock(&nf_ct_ecache_mutex); + notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, + lockdep_is_held(&nf_ct_ecache_mutex)); +- WARN_ON_ONCE(notify); + 
rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new); + mutex_unlock(&nf_ct_ecache_mutex); + } diff --git a/target/linux/qualcommax/patches-6.6/2611-ipv6-Fix-null-pointer-dereference-in-ipv6-output.patch b/target/linux/qualcommax/patches-6.6/2611-ipv6-Fix-null-pointer-dereference-in-ipv6-output.patch new file mode 100644 index 00000000000000..25f99fc4adef3c --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2611-ipv6-Fix-null-pointer-dereference-in-ipv6-output.patch @@ -0,0 +1,80 @@ +From eee3a7956b943dd3e23a74fbb5bfe89405eb0782 Mon Sep 17 00:00:00 2001 +From: Andrea Righi +Date: Mon, 6 Dec 2021 17:34:47 +0100 +Subject: UBUNTU: SAUCE: ipv6: fix NULL pointer dereference in ip6_output() + +It is possible to trigger a NULL pointer dereference by running the srv6 +net kselftest (tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh): + +[ 249.051216] BUG: kernel NULL pointer dereference, address: 0000000000000378 +[ 249.052331] #PF: supervisor read access in kernel mode +[ 249.053137] #PF: error_code(0x0000) - not-present page +[ 249.053960] PGD 0 P4D 0 +[ 249.054376] Oops: 0000 [#1] PREEMPT SMP NOPTI +[ 249.055083] CPU: 1 PID: 21 Comm: ksoftirqd/1 Tainted: G E 5.16.0-rc4 #2 +[ 249.056328] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014 +[ 249.057632] RIP: 0010:ip6_forward+0x53c/0xab0 +[ 249.058354] Code: 49 c7 44 24 20 00 00 00 00 48 83 e0 fe 48 8b 40 30 48 3d 70 b2 b5 81 0f 85 b5 04 00 00 e8 7c f2 ff ff 41 89 c5 e9 17 01 00 00 <44> 8b 93 78 03 00 00 45 85 d2 0f 85 92 fb ff ff 49 8b 54 24 10 48 +[ 249.061274] RSP: 0018:ffffc900000cbb30 EFLAGS: 00010246 +[ 249.062042] RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff8881051d3400 +[ 249.063141] RDX: ffff888104bda000 RSI: 00000000000002c0 RDI: 0000000000000000 +[ 249.064264] RBP: ffffc900000cbbc8 R08: 0000000000000000 R09: 0000000000000000 +[ 249.065376] R10: 0000000000000040 R11: 0000000000000000 R12: ffff888103409800 +[ 249.066498] R13: ffff8881051d3410 R14: ffff888102725280 R15: ffff888103525000 +[ 249.067619] FS: 0000000000000000(0000) GS:ffff88813bc80000(0000) knlGS:0000000000000000 +[ 249.068881] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ 249.069777] CR2: 0000000000000378 CR3: 0000000104980000 CR4: 0000000000750ee0 +[ 249.070907] PKRU: 55555554 +[ 249.071337] Call Trace: +[ 249.071730] +[ 249.072070] ? debug_smp_processor_id+0x17/0x20 +[ 249.072807] seg6_input_core+0x2bb/0x2d0 +[ 249.073436] ? _raw_spin_unlock_irqrestore+0x29/0x40 +[ 249.074225] seg6_input+0x3b/0x130 +[ 249.074768] lwtunnel_input+0x5e/0xa0 +[ 249.075357] ip_rcv+0x17b/0x190 +[ 249.075867] ? update_load_avg+0x82/0x600 +[ 249.076514] __netif_receive_skb_one_core+0x86/0xa0 +[ 249.077231] __netif_receive_skb+0x15/0x60 +[ 249.077843] process_backlog+0x97/0x160 +[ 249.078389] __napi_poll+0x31/0x170 +[ 249.078912] net_rx_action+0x229/0x270 +[ 249.079506] __do_softirq+0xef/0x2ed +[ 249.080085] run_ksoftirqd+0x37/0x50 +[ 249.080663] smpboot_thread_fn+0x193/0x230 +[ 249.081312] kthread+0x17a/0x1a0 +[ 249.081847] ? smpboot_register_percpu_thread+0xe0/0xe0 +[ 249.082677] ? set_kthread_struct+0x50/0x50 +[ 249.083340] ret_from_fork+0x22/0x30 +[ 249.083926] +[ 249.090295] ---[ end trace 1998d7ba5965a365 ]--- + +It looks like commit 0857d6f8c759 ("ipv6: When forwarding count rx stats +on the orig netdev") tries to determine the right netdev to account the +rx stats, but in this particular case it's failing and the netdev is +NULL. 
+ +Fallback to the previous method of determining the netdev interface (via +skb->dev) to account the rx stats when the orig netdev can't be +determined. + +Fixes: 0857d6f8c759 ("ipv6: When forwarding count rx stats on the orig netdev") +Signed-off-by: Andrea Righi +(cherry picked from https://lore.kernel.org/lkml/20211206163447.991402-1-andrea.righi@canonical.com/T/#u) +Signed-off-by: Andrea Righi +--- + net/ipv6/ip6_output.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -498,6 +498,9 @@ int ip6_forward(struct sk_buff *skb) + u32 mtu; + + idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif)); ++ if (unlikely(!idev)) ++ idev = __in6_dev_get_safely(skb->dev); ++ + if (net->ipv6.devconf_all->forwarding == 0) + goto error; + diff --git a/target/linux/qualcommax/patches-6.6/2613-netfilter_optional_tcp_window_check.patc.patch b/target/linux/qualcommax/patches-6.6/2613-netfilter_optional_tcp_window_check.patc.patch new file mode 100644 index 00000000000000..150853573d0116 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/2613-netfilter_optional_tcp_window_check.patc.patch @@ -0,0 +1,108 @@ +From ed42112c77bfb68594f49e252ace2dd6b8c8e7ff Mon Sep 17 00:00:00 2001 +From: Felix Fietkau +Date: Thu, 16 Mar 2023 17:21:39 +0530 +Subject: [PATCH 063/281] OpenWrt: + 613-netfilter_optional_tcp_window_check.patch + +netfilter: optional tcp window check + +Signed-off-by: Felix Fietkau +Signed-off-by: Christian 'Ansuel' Marangi + +Change-Id: I6f7a23b89062cca58c87554e75ae32b0e2aa2831 +Signed-off-by: Ram Chandra Jangir +--- + include/net/netns/conntrack.h | 1 + + net/netfilter/nf_conntrack_proto_tcp.c | 9 ++++++++- + net/netfilter/nf_conntrack_standalone.c | 10 ++++++++++ + 3 files changed, 19 insertions(+), 1 deletion(-) + +diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h +index 1f463b3957c7..2af4f8d24282 100644 +--- a/include/net/netns/conntrack.h ++++ b/include/net/netns/conntrack.h +@@ -26,6 +26,7 @@ struct nf_tcp_net { + unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX]; + u8 tcp_loose; + u8 tcp_be_liberal; ++ u8 tcp_no_window_check; + u8 tcp_max_retrans; + u8 tcp_ignore_invalid_rst; + #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c +index 3ac1af6f59fc..0a2badd52b54 100644 +--- a/net/netfilter/nf_conntrack_proto_tcp.c ++++ b/net/netfilter/nf_conntrack_proto_tcp.c +@@ -513,11 +513,15 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir, + struct ip_ct_tcp *state = &ct->proto.tcp; + struct ip_ct_tcp_state *sender = &state->seen[dir]; + struct ip_ct_tcp_state *receiver = &state->seen[!dir]; ++ const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct)); + __u32 seq, ack, sack, end, win, swin; + bool in_recv_win, seq_ok; + s32 receiver_offset; + u16 win_raw; + ++ if (tn->tcp_no_window_check) ++ return NFCT_TCP_ACCEPT; ++ + /* + * Get the required data from the packet. 
+ */ +@@ -1257,7 +1261,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, + IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && + timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK]) + timeout = timeouts[TCP_CONNTRACK_UNACK]; +- else if (ct->proto.tcp.last_win == 0 && ++ else if (!tn->tcp_no_window_check && ct->proto.tcp.last_win == 0 && + timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS]) + timeout = timeouts[TCP_CONNTRACK_RETRANS]; + else +@@ -1573,6 +1577,9 @@ void nf_conntrack_tcp_init_net(struct net *net) + */ + tn->tcp_be_liberal = 0; + ++ /* Skip Windows Check */ ++ tn->tcp_no_window_check = 0; ++ + /* If it's non-zero, we turn off RST sequence number check */ + tn->tcp_ignore_invalid_rst = 0; + +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c +index e9654169b005..84b8e28f0782 100644 +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -637,6 +637,7 @@ enum nf_ct_sysctl_index { + #endif + NF_SYSCTL_CT_PROTO_TCP_LOOSE, + NF_SYSCTL_CT_PROTO_TCP_LIBERAL, ++ NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK, + NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST, + NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS, + NF_SYSCTL_CT_PROTO_TIMEOUT_UDP, +@@ -844,6 +845,14 @@ static struct ctl_table nf_ct_sysctl_table[] = { + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, ++ [NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK] = { ++ .procname = "nf_conntrack_tcp_no_window_check", ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = proc_dou8vec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, + [NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = { + .procname = "nf_conntrack_tcp_ignore_invalid_rst", + .maxlen = sizeof(u8), +@@ -1054,6 +1063,7 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net, + + XASSIGN(LOOSE, &tn->tcp_loose); + XASSIGN(LIBERAL, &tn->tcp_be_liberal); ++ XASSIGN(NO_WINDOW_CHECK, &tn->tcp_no_window_check); + XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans); + XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst); + #undef XASSIGN +-- +2.17.1 + diff --git a/target/linux/qualcommax/patches-6.6/9990-1-qca-skb_recycler-support.patch b/target/linux/qualcommax/patches-6.6/9990-1-qca-skb_recycler-support.patch new file mode 100644 index 00000000000000..b44f50e1921e96 --- /dev/null +++ b/target/linux/qualcommax/patches-6.6/9990-1-qca-skb_recycler-support.patch @@ -0,0 +1,431 @@ +--- a/include/linux/cpuhotplug.h ++++ b/include/linux/cpuhotplug.h +@@ -94,6 +94,7 @@ enum cpuhp_state { + CPUHP_RADIX_DEAD, + CPUHP_PAGE_ALLOC, + CPUHP_NET_DEV_DEAD, ++ CPUHP_SKB_RECYCLER_DEAD, + CPUHP_PCI_XGENE_DEAD, + CPUHP_IOMMU_IOVA_DEAD, + CPUHP_LUSTRE_CFS_DEAD, +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -1059,6 +1059,10 @@ struct sk_buff { + /* only useable after checking ->active_extensions != 0 */ + struct skb_ext *extensions; + #endif ++ ++#ifdef CONFIG_DEBUG_OBJECTS_SKBUFF ++ void *free_addr; ++#endif + }; + + /* if you move pkt_type around you also must adapt those constants */ +@@ -1244,7 +1248,7 @@ static inline void kfree_skb_list(struct + kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED); + } + +-#ifdef CONFIG_TRACEPOINTS ++#ifdef CONFIG_SKB_RECYCLER + void consume_skb(struct sk_buff *skb); + #else + static inline void consume_skb(struct sk_buff *skb) +@@ -1256,6 +1260,9 @@ static inline void consume_skb(struct sk + void __consume_stateless_skb(struct sk_buff *skb); + void __kfree_skb(struct sk_buff *skb); + extern struct kmem_cache *skbuff_cache; ++extern void kfree_skbmem(struct sk_buff 
*skb); ++extern void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason, ++ bool napi_safe); + + void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); + bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, +--- a/net/Kconfig ++++ b/net/Kconfig +@@ -369,6 +369,52 @@ config NET_FLOW_LIMIT + with many clients some protection against DoS by a single (spoofed) + flow that greatly exceeds average workload. + ++config SKB_RECYCLER ++ bool "Generic skb recycling" ++ default y ++ help ++ SKB_RECYCLER is used to implement RX-to-RX skb recycling. ++ This config enables the recycling scheme for bridging and ++ routing workloads. It can reduce skbuff freeing or ++ reallocation overhead. ++ ++config SKB_RECYCLER_MULTI_CPU ++ bool "Cross-CPU recycling for CPU-locked workloads" ++ depends on SMP && SKB_RECYCLER ++ default n ++ ++config SKB_RECYCLER_PREALLOC ++ bool "Enable preallocation of SKBs" ++ depends on SKB_RECYCLER ++ default n ++ help ++ Preallocates SKBs in recycling lists and the number of ++ SKBs are configured through CONFIG_SKB_RECYCLE_MAX_PREALLOC_SKBS. ++ This needs SKB_RECYCLER to be enabled. ++ The number of preallocated SKBs can be passed using ++ SKB_RECYCLE_MAX_PREALLOC_SKBS. ++ ++config SKB_RECYCLE_MAX_PREALLOC_SKBS ++ int "Number of SKBs to be preallocated" ++ depends on SKB_RECYCLER_PREALLOC ++ default 16384 ++ help ++ Number of SKBs each of 4K size to be preallocated for recycling ++ ++config SKB_RECYCLE_SIZE ++ int "Minimum size for a recycled buffer" ++ depends on SKB_RECYCLER ++ default 2304 ++ help ++ Minimum size for a recycled buffer ++ ++config ALLOC_SKB_PAGE_FRAG_DISABLE ++ bool "Disable page fragment based skbuff payload allocations" ++ depends on !SKB_RECYCLER ++ default n ++ help ++ Disable page fragment based allocations for skbuff payloads. ++ + menu "Network testing" + + config NET_PKTGEN +--- a/net/core/Makefile ++++ b/net/core/Makefile +@@ -41,3 +41,4 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o + obj-$(CONFIG_BPF_SYSCALL) += sock_map.o + obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o + obj-$(CONFIG_OF) += of_net.o ++obj-$(CONFIG_SKB_RECYCLER) += skbuff_recycle.o +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -6043,10 +6043,16 @@ static int process_backlog(struct napi_s + + napi->weight = READ_ONCE(dev_rx_weight); + while (again) { +- struct sk_buff *skb; ++ struct sk_buff *skb, *next_skb; + + while ((skb = __skb_dequeue(&sd->process_queue))) { + rcu_read_lock(); ++ ++ next_skb = skb_peek(&sd->process_queue); ++ if (likely(next_skb)) { ++ prefetch(next_skb->data); ++ } ++ + __netif_receive_skb(skb); + rcu_read_unlock(); + input_queue_head_incr(sd); +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -87,6 +87,38 @@ + + #include "dev.h" + #include "sock_destructor.h" ++#include "skbuff_recycle.h" ++ ++struct kmem_cache *skb_data_cache; ++struct kmem_cache *skb_data_cache_2100; ++/* ++ * For low memory profile, NSS_SKB_FIXED_SIZE_2K is enabled and ++ * CONFIG_SKB_RECYCLER is disabled. For premium and enterprise profile ++ * CONFIG_SKB_RECYCLER is enabled and NSS_SKB_FIXED_SIZE_2K is disabled. ++ * Irrespective of NSS_SKB_FIXED_SIZE_2K enabled/disabled, the ++ * CONFIG_SKB_RECYCLER and __LP64__ determines the value of SKB_DATA_CACHE_SIZE ++ */ ++#if defined(CONFIG_SKB_RECYCLER) ++/* ++ * Both caches are kept same size when recycler is enabled so that all the ++ * skbs could be recycled. 
2688 for 64bit arch, 2624 for 32bit arch ++ */ ++#define SKB_DATA_CACHE_SIZE (SKB_DATA_ALIGN(SKB_RECYCLE_SIZE + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ++#define SKB_DATA_CACHE_SIZE_2100 SKB_DATA_CACHE_SIZE ++#else ++/* ++ * DATA CACHE is 2368 for 64bit arch, 2176 for 32bit arch ++ * DATA_CACHE_2100 is 2496 for 64bit arch, 2432 for 32bit arch ++ * DATA CACHE size should always be lesser than that of DATA_CACHE_2100 size ++ */ ++#if defined(__LP64__) ++#define SKB_DATA_CACHE_SIZE (SKB_DATA_ALIGN(1984 + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ++#define SKB_DATA_CACHE_SIZE_2100 (SKB_DATA_ALIGN(2100 + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ++#else ++#define SKB_DATA_CACHE_SIZE (SKB_DATA_ALIGN(1856 + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ++#define SKB_DATA_CACHE_SIZE_2100 (SKB_DATA_ALIGN(2100 + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ++#endif ++#endif + + struct kmem_cache *skbuff_cache __ro_after_init; + static struct kmem_cache *skbuff_fclone_cache __ro_after_init; +@@ -551,21 +583,31 @@ static void *kmalloc_reserve(unsigned in + bool *pfmemalloc) + { + bool ret_pfmemalloc = false; +- size_t obj_size; ++ unsigned int obj_size = *size; + void *obj; + + obj_size = SKB_HEAD_ALIGN(*size); +- if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE && +- !(flags & KMALLOC_NOT_NORMAL_BITS)) { +- obj = kmem_cache_alloc_node(skb_small_head_cache, +- flags | __GFP_NOMEMALLOC | __GFP_NOWARN, +- node); +- *size = SKB_SMALL_HEAD_CACHE_SIZE; ++ if (obj_size > SZ_2K && obj_size <= SKB_DATA_CACHE_SIZE) { ++ obj = kmem_cache_alloc_node(skb_data_cache, ++ flags | __GFP_NOMEMALLOC | __GFP_NOWARN, ++ node); ++ *size = SKB_DATA_CACHE_SIZE; ++ if (obj || !(gfp_pfmemalloc_allowed(flags))) ++ goto out; ++ /* Try again but now we are using pfmemalloc reserves */ ++ ret_pfmemalloc = true; ++ obj = kmem_cache_alloc_node(skb_data_cache, flags, node); ++ goto out; ++ } else if (obj_size > SZ_2K && obj_size <= SKB_DATA_CACHE_SIZE_2100) { ++ obj = kmem_cache_alloc_node(skb_data_cache_2100, ++ flags | __GFP_NOMEMALLOC | __GFP_NOWARN, ++ node); ++ *size = SKB_DATA_CACHE_SIZE_2100; + if (obj || !(gfp_pfmemalloc_allowed(flags))) + goto out; + /* Try again but now we are using pfmemalloc reserves */ + ret_pfmemalloc = true; +- obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node); ++ obj = kmem_cache_alloc_node(skb_data_cache_2100, flags, node); + goto out; + } + +@@ -648,10 +690,12 @@ struct sk_buff *__alloc_skb(unsigned int + * aligned memory blocks, unless SLUB/SLAB debug is enabled. + * Both skb->head and skb_shared_info are cache line aligned. + */ ++ size = SKB_DATA_ALIGN(size); ++ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc); + if (unlikely(!data)) + goto nodata; +- /* kmalloc_size_roundup() might give us more room than requested. ++ /* kmalloc_reserve(size) might give us more room than requested. + * Put skb_shared_info exactly at the end of allocated zone, + * to allow max possible filling before reallocation. + */ +@@ -686,7 +730,7 @@ EXPORT_SYMBOL(__alloc_skb); + /** + * __netdev_alloc_skb - allocate an skbuff for rx on a specific device + * @dev: network device to receive on +- * @len: length to allocate ++ * @length: length to allocate + * @gfp_mask: get_free_pages mask, passed to alloc_skb + * + * Allocate a new &sk_buff and assign it a usage count of one. 
The +@@ -696,29 +740,53 @@ EXPORT_SYMBOL(__alloc_skb); + * + * %NULL is returned if there is no free memory. + */ +-struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, +- gfp_t gfp_mask) ++struct sk_buff *__netdev_alloc_skb(struct net_device *dev, ++ unsigned int length, gfp_t gfp_mask) + { +- struct page_frag_cache *nc; + struct sk_buff *skb; ++ unsigned int len = length; ++ ++#ifdef CONFIG_SKB_RECYCLER ++ skb = skb_recycler_alloc(dev, length, true); ++ if (likely(skb)) ++ return skb; ++ ++ len = SKB_RECYCLE_SIZE; ++ if (unlikely(length > SKB_RECYCLE_SIZE)) ++ len = length; ++ ++ skb = __alloc_skb(len + NET_SKB_PAD, gfp_mask, ++ SKB_ALLOC_RX, NUMA_NO_NODE); ++ if (!skb) ++ goto skb_fail; ++ goto skb_success; ++#else ++ struct page_frag_cache *nc; + bool pfmemalloc; ++ bool page_frag_alloc_enable = true; + void *data; + + len += NET_SKB_PAD; + ++ ++#ifdef CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE ++ page_frag_alloc_enable = false; ++#endif + /* If requested length is either too small or too big, + * we use kmalloc() for skb->head allocation. + */ + if (len <= SKB_WITH_OVERHEAD(1024) || + len > SKB_WITH_OVERHEAD(PAGE_SIZE) || +- (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) { ++ (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA)) || ++ !page_frag_alloc_enable) { + skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); + if (!skb) + goto skb_fail; + goto skb_success; + } + +- len = SKB_HEAD_ALIGN(len); ++ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ len = SKB_DATA_ALIGN(len); + + if (sk_memalloc_socks()) + gfp_mask |= __GFP_MEMALLOC; +@@ -747,6 +815,7 @@ struct sk_buff *__netdev_alloc_skb(struc + if (pfmemalloc) + skb->pfmemalloc = 1; + skb->head_frag = 1; ++#endif + + skb_success: + skb_reserve(skb, NET_SKB_PAD); +@@ -817,7 +886,8 @@ struct sk_buff *__napi_alloc_skb(struct + data = page_frag_alloc_1k(&nc->page_small, gfp_mask); + pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small); + } else { +- len = SKB_HEAD_ALIGN(len); ++ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ len = SKB_DATA_ALIGN(len); + + data = page_frag_alloc(&nc->page, len, gfp_mask); + pfmemalloc = nc->page.pfmemalloc; +@@ -975,7 +1045,7 @@ static void skb_free_head(struct sk_buff + } + } + +-static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason, ++void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason, + bool napi_safe) + { + struct skb_shared_info *shinfo = skb_shinfo(skb); +@@ -1018,7 +1088,7 @@ exit: + /* + * Free an skbuff by memory without cleaning the state. 
+ */
+-static void kfree_skbmem(struct sk_buff *skb)
++void kfree_skbmem(struct sk_buff *skb)
+ {
+ 	struct sk_buff_fclones *fclones;
+
+@@ -1282,7 +1352,6 @@ void skb_tx_error(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(skb_tx_error);
+
+-#ifdef CONFIG_TRACEPOINTS
+ /**
+  * consume_skb - free an skbuff
+  * @skb: buffer to free
+  *
+@@ -1291,13 +1360,48 @@ EXPORT_SYMBOL(skb_tx_error);
+  * Functions identically to kfree_skb, but kfree_skb assumes that the frame
+  * is being dropped after a failure and notes that
+  */
++#ifdef CONFIG_SKB_RECYCLER
+ void consume_skb(struct sk_buff *skb)
+ {
+ 	if (!skb_unref(skb))
+ 		return;
++	prefetch(&skb->destructor);
++
++	/* Tian: Not sure if we need to continue using this,
++	 * since unref does the work in 5.4
++	 */
++
++	/*
++	if (likely(atomic_read(&skb->users) == 1))
++		smp_rmb();
++	else if (likely(!atomic_dec_and_test(&skb->users)))
++		return;
++	*/
++
++	/* If possible we'd like to recycle any skb rather than just free it,
++	 * but in order to do that we need to release any head state too.
++	 * We don't want to do this later because we'll be in a pre-emption
++	 * disabled state.
++	 */
++	skb_release_head_state(skb);
++
++	/* Can we recycle this skb?  If we can then it will be much faster
++	 * for us to recycle this one later than to allocate a new one
++	 * from scratch.
++	 */
++	if (likely(skb->head) && likely(skb_recycler_consume(skb)))
++		return;
+
++#ifdef CONFIG_TRACEPOINTS
+ 	trace_consume_skb(skb, __builtin_return_address(0));
+-	__kfree_skb(skb);
++#endif
++	/* We're not recycling so now we need to do the rest of what we would
++	 * have done in __kfree_skb (above and beyond the skb_release_head_state
++	 * that we already did).
++	 */
++	if (likely(skb->head))
++		skb_release_data(skb, SKB_CONSUMED, false);
++	kfree_skbmem(skb);
+ }
+ EXPORT_SYMBOL(consume_skb);
+ #endif
+@@ -2112,6 +2216,8 @@ int pskb_expand_head(struct sk_buff *skb
+
+ 	if (skb_pfmemalloc(skb))
+ 		gfp_mask |= __GFP_MEMALLOC;
++	size = SKB_DATA_ALIGN(size);
++	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ 	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
+ 	if (!data)
+@@ -4865,6 +4971,14 @@ static void skb_extensions_init(void) {}
+
+ void __init skb_init(void)
+ {
++	skb_data_cache = kmem_cache_create_usercopy("skb_data_cache",
++						SKB_DATA_CACHE_SIZE,
++						0, SLAB_PANIC, 0, SKB_DATA_CACHE_SIZE,
++						NULL);
++	skb_data_cache_2100 = kmem_cache_create_usercopy("skb_data_cache_2100",
++						SKB_DATA_CACHE_SIZE_2100,
++						0, SLAB_PANIC, 0, SKB_DATA_CACHE_SIZE,
++						NULL);
+ 	skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
+ 					      sizeof(struct sk_buff),
+ 					      0,
+@@ -4890,6 +5004,7 @@ void __init skb_init(void)
+ 						SKB_SMALL_HEAD_HEADROOM,
+ 						NULL);
+ 	skb_extensions_init();
++	skb_recycler_init();
+ }
+
+ static int
+@@ -6393,6 +6508,8 @@ static int pskb_carve_inside_header(stru
+ 	if (skb_pfmemalloc(skb))
+ 		gfp_mask |= __GFP_MEMALLOC;
+
++	size = SKB_DATA_ALIGN(size);
++	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ 	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
+ 	if (!data)
+ 		return -ENOMEM;
+@@ -6509,6 +6626,8 @@ static int pskb_carve_inside_nonlinear(s
+ 	if (skb_pfmemalloc(skb))
+ 		gfp_mask |= __GFP_MEMALLOC;
+
++	size = SKB_DATA_ALIGN(size);
++	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ 	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
+ 	if (!data)
+ 		return -ENOMEM;
diff --git a/target/linux/qualcommax/patches-6.6/9999-revert-crypto-api-disallow-identical-driver-names.patch b/target/linux/qualcommax/patches-6.6/9999-revert-crypto-api-disallow-identical-driver-names.patch
new file mode 100644
index 00000000000000..3f7f58dfee5af2
--- /dev/null
+++ b/target/linux/qualcommax/patches-6.6/9999-revert-crypto-api-disallow-identical-driver-names.patch
@@ -0,0 +1,10 @@
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -341,7 +341,6 @@ __crypto_register_alg(struct crypto_alg
+ 	}
+
+ 	if (!strcmp(q->cra_driver_name, alg->cra_name) ||
+-	    !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
+ 	    !strcmp(q->cra_name, alg->cra_driver_name))
+ 		goto err;
+ 	}
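
A note on consuming the MFC event offload hooks added to net/ipv4/ipmr.c above: only one offload module can hold the callback at a time (registration fails if a callback is already installed), and on delete events the destination list is empty (count 0, NULL array). The sketch below is a minimal, hypothetical consumer for illustration only; the module name, logging, and callback body are assumptions rather than part of the patches, and the argument order mirrors the ipmr.c call sites, which pass the group address first even though the typedef names the origin first.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical consumer of the ipmr MFC event offload API patched in above.
 * A real offload module (e.g. qca-mcs) would program acceleration hardware
 * here instead of logging.
 */
#include <linux/module.h>
#include <linux/mroute.h>

/* Argument order mirrors the ipmr.c call sites, which pass group first. */
static void demo_mfc_event(__be32 group, __be32 origin, u32 dest_cnt,
			   u32 dest_dev_idx[], u8 op)
{
	pr_info("mfc %s: src %pI4 grp %pI4, %u destination interface(s)\n",
		op == IPMR_MFC_EVENT_UPDATE ? "update" : "delete",
		&origin, &group, dest_cnt);
}

static int __init demo_init(void)
{
	/* Fails if another offload module already registered a callback. */
	if (!ipmr_register_mfc_event_offload_callback(demo_mfc_event))
		return -EBUSY;
	return 0;
}

static void __exit demo_exit(void)
{
	ipmr_unregister_mfc_event_offload_callback();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Demo ipmr MFC offload event listener");

The IPv6 variants in net/ipv6/ip6mr.c follow the same registration pattern, with struct in6_addr pointers in place of __be32 addresses.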