From 5e3a6df2d09e0649949b6f0a0271acca2c1dd79e Mon Sep 17 00:00:00 2001 From: Umang Agrawal Date: Wed, 3 Jan 2018 19:28:30 +0530 Subject: [PATCH 001/151] power: qpnp-charger: Fix null pointer dereference error Fix to prevent null pointer dereference error in the led brightness set function by an initial check to determine whether the power_supply structure is defined or not. CRs-Fixed: 2166164 Change-Id: Ifcd6e55aa78c4c4d4dc539ac6ffe67263d198b47 Signed-off-by: Umang Agrawal --- drivers/power/qpnp-smbcharger.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/power/qpnp-smbcharger.c b/drivers/power/qpnp-smbcharger.c index 328877a87951..8120fb1514d2 100644 --- a/drivers/power/qpnp-smbcharger.c +++ b/drivers/power/qpnp-smbcharger.c @@ -4243,10 +4243,12 @@ static void smbchg_chg_led_brightness_set(struct led_classdev *cdev, reg = (value > LED_OFF) ? CHG_LED_ON << CHG_LED_SHIFT : CHG_LED_OFF << CHG_LED_SHIFT; - if (value > LED_OFF) - power_supply_set_hi_power_state(chip->bms_psy, 1); - else - power_supply_set_hi_power_state(chip->bms_psy, 0); + if (chip->bms_psy) { + if (value > LED_OFF) + power_supply_set_hi_power_state(chip->bms_psy, 1); + else + power_supply_set_hi_power_state(chip->bms_psy, 0); + } pr_smb(PR_STATUS, "set the charger led brightness to value=%d\n", @@ -4289,11 +4291,16 @@ static void smbchg_chg_led_blink_set(struct smbchg_chip *chip, u8 reg; int rc; + if (chip->bms_psy) { + if (blinking == 0) + power_supply_set_hi_power_state(chip->bms_psy, 0); + else + power_supply_set_hi_power_state(chip->bms_psy, 1); + } + if (blinking == 0) { reg = CHG_LED_OFF << CHG_LED_SHIFT; - power_supply_set_hi_power_state(chip->bms_psy, 0); } else { - power_supply_set_hi_power_state(chip->bms_psy, 1); if (blinking == 1) reg = LED_BLINKING_PATTERN2 << CHG_LED_SHIFT; else if (blinking == 2) From 88de5f11d85da45d2a0a9588ac19665ecacdb3b5 Mon Sep 17 00:00:00 2001 From: Meng Wang Date: Mon, 15 Jan 2018 18:43:33 +0800 Subject: [PATCH 002/151] ASoC: msm: qdsp6v2: update backend name Some backend names are not there. Delete them from current be_name. When the return value of adm_populate_channel_weight is 0, it should keep running, not return error. Change-Id: I447b81d6edfc89db6cb3742c1719e745c6071c12 Signed-off-by: Meng Wang --- sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 58 ++++++++++------------ sound/soc/msm/qdsp6v2/q6adm.c | 4 +- 2 files changed, 28 insertions(+), 34 deletions(-) diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c index 3b280cebe1ff..dd43b5c4c989 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2318,37 +2318,31 @@ static const char *const be_name[] = { "INT_FM_RX", "INT_FM_TX", "AFE_PCM_RX", "AFE_PCM_TX", "AUXPCM_RX", "AUXPCM_TX", "VOICE_PLAYBACK_TX", "VOICE2_PLAYBACK_TX", "INCALL_RECORD_RX", "INCALL_RECORD_TX", "MI2S_RX", "MI2S_TX", -"SEC_I2S_RX", "SLIM_1_RX", "SLIM_1_TX", "SLIM_2_RX", -"SLIM_2_TX", "SLIM_3_RX", "SLIM_3_TX", "SLIM_4_RX", -"SLIM_4_TX", "SLIM_5_RX", "SLIM_5_TX", "SLIM_6_RX", -"SLIM_6_TX", "SLIM_7_RX", "SLIM_7_TX", "SLIM_8_RX", -"SLIM_8_TX", "EXTPROC_RX", "EXTPROC_TX", "EXPROC_EC_TX", -"QUAT_MI2S_RX", "QUAT_MI2S_TX", "SECOND_MI2S_RX", "SECOND_MI2S_TX", -"PRI_MI2S_RX", "PRI_MI2S_TX", "TERT_MI2S_RX", "TERT_MI2S_TX", -"AUDIO_I2S_RX", "SEC_AUXPCM_RX", "SEC_AUXPCM_TX", "SPDIF_RX", -"SECOND_MI2S_RX_SD1", "QUIN_MI2S_RX", "QUIN_MI2S_TX", "SENARY_MI2S_TX", -"PRI_TDM_RX_0", "PRI_TDM_TX_0", "PRI_TDM_RX_1", "PRI_TDM_TX_1", -"PRI_TDM_RX_2", "PRI_TDM_TX_2", "PRI_TDM_RX_3", "PRI_TDM_TX_3", -"PRI_TDM_RX_4", "PRI_TDM_TX_4", "PRI_TDM_RX_5", "PRI_TDM_TX_5", -"PRI_TDM_RX_6", "PRI_TDM_TX_6", "PRI_TDM_RX_7", "PRI_TDM_TX_7", -"SEC_TDM_RX_0", "SEC_TDM_TX_0", "SEC_TDM_RX_1", "SEC_TDM_TX_1", -"SEC_TDM_RX_2", "SEC_TDM_TX_2", "SEC_TDM_RX_3", "SEC_TDM_TX_3", -"SEC_TDM_RX_4", "SEC_TDM_TX_4", "SEC_TDM_RX_5", "SEC_TDM_TX_5", -"SEC_TDM_RX_6", "SEC_TDM_TX_6", "SEC_TDM_RX_7", "SEC_TDM_TX_7", -"TERT_TDM_RX_0", "TERT_TDM_TX_0", "TERT_TDM_RX_1", "TERT_TDM_TX_1", -"TERT_TDM_RX_2", "TERT_TDM_TX_2", "TERT_TDM_RX_3", "TERT_TDM_TX_3", -"TERT_TDM_RX_4", "TERT_TDM_TX_4", "TERT_TDM_RX_5", "TERT_TDM_TX_5", -"TERT_TDM_RX_6", "TERT_TDM_TX_6", "TERT_TDM_RX_7", "TERT_TDM_TX_7", -"QUAT_TDM_RX_0", "QUAT_TDM_TX_0", "QUAT_TDM_RX_1", "QUAT_TDM_TX_1", -"QUAT_TDM_RX_2", "QUAT_TDM_TX_2", "QUAT_TDM_RX_3", "QUAT_TDM_TX_3", -"QUAT_TDM_RX_4", "QUAT_TDM_TX_4", "QUAT_TDM_RX_5", "QUAT_TDM_TX_5", -"QUAT_TDM_RX_6", "QUAT_TDM_TX_6", "QUAT_TDM_RX_7", "QUAT_TDM_TX_7", -"INT_BT_A2DP_RX", "USB_RX", "USB_TX", "DISPLAY_PORT_RX", -"TERT_AUXPCM_RX", "TERT_AUXPCM_TX", "QUAT_AUXPCM_RX", "QUAT_AUXPCM_TX", -"INT0_MI2S_RX", "INT0_MI2S_TX", "INT1_MI2S_RX", "INT1_MI2S_TX", -"INT2_MI2S_RX", "INT2_MI2S_TX", "INT3_MI2S_RX", "INT3_MI2S_TX", -"INT4_MI2S_RX", "INT4_MI2S_TX", "INT5_MI2S_RX", "INT5_MI2S_TX", -"INT6_MI2S_RX", "INT6_MI2S_TX" +"SEC_I2S_RX", "SLIM_1_RX", "SLIM_1_TX", "SLIM_4_RX", +"SLIM_4_TX", "SLIM_3_RX", "SLIM_3_TX", "SLIM_5_TX", +"EXTPROC_RX", "EXTPROC_TX", "EXPROC_EC_TX", "QUAT_MI2S_RX", +"QUAT_MI2S_TX", "SECOND_MI2S_RX", "SECOND_MI2S_TX", "PRI_MI2S_RX", +"PRI_MI2S_TX", "TERT_MI2S_RX", "TERT_MI2S_TX", "AUDIO_I2S_RX", +"SEC_AUXPCM_RX", "SEC_AUXPCM_TX", "SLIM_6_RX", "SLIM_6_TX", +"SPDIF_RX", "SECOND_MI2S_RX_SD1", "SLIM_5_RX", "QUIN_MI2S_RX", +"QUIN_MI2S_TX", "SENARY_MI2S_TX", "PRI_TDM_RX_0", "PRI_TDM_TX_0", +"PRI_TDM_RX_1", "PRI_TDM_TX_1", "PRI_TDM_RX_2", "PRI_TDM_TX_2", +"PRI_TDM_RX_3", "PRI_TDM_TX_3", "PRI_TDM_RX_4", "PRI_TDM_TX_4", +"PRI_TDM_RX_5", "PRI_TDM_TX_5", "PRI_TDM_RX_6", "PRI_TDM_TX_6", +"PRI_TDM_RX_7", "PRI_TDM_TX_7", "SEC_TDM_RX_0", "SEC_TDM_TX_0", +"SEC_TDM_RX_1", "SEC_TDM_TX_1", "SEC_TDM_RX_2", "SEC_TDM_TX_2", +"SEC_TDM_RX_3", "SEC_TDM_TX_3", "SEC_TDM_RX_4", "SEC_TDM_TX_4", +"SEC_TDM_RX_5", "SEC_TDM_TX_5", "SEC_TDM_RX_6", "SEC_TDM_TX_6", +"SEC_TDM_RX_7", "SEC_TDM_TX_7", "TERT_TDM_RX_0", "TERT_TDM_TX_0", +"TERT_TDM_RX_1", "TERT_TDM_TX_1", "TERT_TDM_RX_2", "TERT_TDM_TX_2", +"TERT_TDM_RX_3", "TERT_TDM_TX_3", "TERT_TDM_RX_4", "TERT_TDM_TX_4", +"TERT_TDM_RX_5", "TERT_TDM_TX_5", 
"TERT_TDM_RX_6", "TERT_TDM_TX_6", +"TERT_TDM_RX_7", "TERT_TDM_TX_7", "QUAT_TDM_RX_0", "QUAT_TDM_TX_0", +"QUAT_TDM_RX_1", "QUAT_TDM_TX_1", "QUAT_TDM_RX_2", "QUAT_TDM_TX_2", +"QUAT_TDM_RX_3", "QUAT_TDM_TX_3", "QUAT_TDM_RX_4", "QUAT_TDM_TX_4", +"QUAT_TDM_RX_5", "QUAT_TDM_TX_5", "QUAT_TDM_RX_6", "QUAT_TDM_TX_6", +"QUAT_TDM_RX_7", "QUAT_TDM_TX_7", "INT_BT_A2DP_RX", "SLIM_2_TX", +"AFE_LOOPBACK_TX" }; static SOC_ENUM_SINGLE_DECL(mm1_channel_mux, diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c index f1917e703d89..6c31314bf548 100644 --- a/sound/soc/msm/qdsp6v2/q6adm.c +++ b/sound/soc/msm/qdsp6v2/q6adm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -721,7 +721,7 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id, index = index + ch_mixer->input_channels[channel_index]; ret = adm_populate_channel_weight(&adm_pspd_params[index], ch_mixer, channel_index); - if (!ret) { + if (ret) { pr_err("%s: fail to get channel weight with error %d\n", __func__, ret); goto fail_cmd; From 31b3a8161e688c099b5c0ddeabb94ae0ddec26d0 Mon Sep 17 00:00:00 2001 From: raghavendra ambadas Date: Fri, 3 Aug 2018 12:54:23 +0530 Subject: [PATCH 003/151] fbdev: msm: Fix unintialisation of variables The uninitialised variables in MDP driver can be accessed with incorrect values. This change fixes the errors reported in static analysis of MDP driver code. Change-Id: I69193a1243d17dfb88e51717179e9f2ed2c4b5df Signed-off-by: Raghavendra Ambadas --- drivers/video/msm/mdss/mdss_dba_utils.c | 5 ++--- drivers/video/msm/mdss/mdss_mdp.c | 4 ++-- drivers/video/msm/mdss/mdss_mdp_ctl.c | 2 +- drivers/video/msm/mdss/mdss_mdp_overlay.c | 2 +- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/video/msm/mdss/mdss_dba_utils.c b/drivers/video/msm/mdss/mdss_dba_utils.c index 8804244765a1..3fecf12053b8 100644 --- a/drivers/video/msm/mdss/mdss_dba_utils.c +++ b/drivers/video/msm/mdss/mdss_dba_utils.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -734,7 +734,6 @@ void *mdss_dba_utils_init(struct mdss_dba_utils_init_data *uid) struct mdss_dba_utils_data *udata = NULL; struct msm_dba_reg_info info; struct cec_abstract_init_data cec_abst_init_data; - void *cec_abst_data; int ret = 0; if (!uid) { @@ -823,7 +822,7 @@ void *mdss_dba_utils_init(struct mdss_dba_utils_init_data *uid) udata->cec_abst_data = cec_abstract_init(&cec_abst_init_data); if (IS_ERR_OR_NULL(udata->cec_abst_data)) { pr_err("error initializing cec abstract module\n"); - ret = PTR_ERR(cec_abst_data); + ret = PTR_ERR(udata->cec_abst_data); goto error; } diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c index 393c22948341..789c8650a9ef 100644 --- a/drivers/video/msm/mdss/mdss_mdp.c +++ b/drivers/video/msm/mdss/mdss_mdp.c @@ -782,7 +782,7 @@ void mdss_mdp_irq_clear(struct mdss_data_type *mdata, int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num) { - int irq_idx, idx; + int irq_idx = 0; unsigned long irq_flags; int ret = 0; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); @@ -801,7 +801,7 @@ int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num) spin_lock_irqsave(&mdp_lock, irq_flags); if (mdata->mdp_irq_mask[irq.reg_idx] & irq.irq_mask) { pr_warn("MDSS MDP IRQ-0x%x is already set, mask=%x\n", - irq.irq_mask, mdata->mdp_irq_mask[idx]); + irq.irq_mask, mdata->mdp_irq_mask[irq.reg_idx]); ret = -EBUSY; } else { pr_debug("MDP IRQ mask old=%x new=%x\n", diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c index e9f5252d5a81..8271e27bbad7 100644 --- a/drivers/video/msm/mdss/mdss_mdp_ctl.c +++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c @@ -933,7 +933,7 @@ static u32 mdss_mdp_calc_prefill_line_time(struct mdss_mdp_ctl *ctl, { u32 prefill_us = 0; u32 prefill_amortized = 0; - struct mdss_data_type *mdata; + struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_mdp_mixer *mixer; struct mdss_panel_info *pinfo; u32 fps, v_total; diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c index 0a99acba594b..b232a44131b6 100644 --- a/drivers/video/msm/mdss/mdss_mdp_overlay.c +++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c @@ -1755,7 +1755,7 @@ static bool __is_roi_valid(struct mdss_mdp_pipe *pipe, int mdss_mode_switch(struct msm_fb_data_type *mfd, u32 mode) { - struct mdss_rect l_roi, r_roi; + struct mdss_rect l_roi = {0}, r_roi = {0}; struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd); struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd); struct mdss_mdp_ctl *sctl; From 0c84f0e126e2bb0dacc11927f28938d9481005d4 Mon Sep 17 00:00:00 2001 From: annamraj Date: Wed, 11 Apr 2018 10:42:13 +0530 Subject: [PATCH 004/151] msm: camera: Fix for possible information leak issue Fix a possible information leak caused by an uninitialised variable which can be accessed from userspace in the camera FD driver. Signed-off-by: annamraj Change-Id: I4552c4829e9532d848e46fd123316b26105e310e --- drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c index f49ccfc5282c..2d808608e6de 100644 --- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c +++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation.
All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -440,7 +440,7 @@ static int msm_fd_open(struct file *file) } ctx->mem_pool.fd_device = ctx->fd_device; - ctx->stats = vmalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS); + ctx->stats = vzalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS); if (!ctx->stats) { dev_err(device->dev, "No memory for face statistics\n"); ret = -ENOMEM; From dab5d6eeec68fc3ebcaff7be633a995a82b466dc Mon Sep 17 00:00:00 2001 From: Mohammed Javid Date: Mon, 25 Jun 2018 18:58:20 +0530 Subject: [PATCH 005/151] msm: ipa: Add WLAN FW SSR event Add WLAN FW SSR event to handle FW rejuvenate scenario. Change-Id: I417d46cc07f455792906a18f4b6dc8e5ca8ca3de Acked-by: Pooja Kumari Signed-off-by: Mohammed Javid --- drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 1 + drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 1 + include/uapi/linux/msm_ipa.h | 8 +++++++- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c index 0261e857298a..c46b3542839e 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c @@ -89,6 +89,7 @@ const char *ipa_event_name[] = { __stringify(IPA_QUOTA_REACH), __stringify(IPA_SSR_BEFORE_SHUTDOWN), __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(WLAN_FWR_SSR_BEFORE_SHUTDOWN), }; const char *ipa_hdr_l2_type_name[] = { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index be6b1a49ddcf..a3b106143fa8 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -70,6 +70,7 @@ const char *ipa3_event_name[] = { __stringify(IPA_QUOTA_REACH), __stringify(IPA_SSR_BEFORE_SHUTDOWN), __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(WLAN_FWR_SSR_BEFORE_SHUTDOWN), }; const char *ipa3_hdr_l2_type_name[] = { diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 081bbd91bfec..ca6eb8215b86 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -471,7 +471,13 @@ enum ipa_ssr_event { IPA_SSR_EVENT_MAX, }; -#define IPA_EVENT_MAX_NUM (IPA_SSR_EVENT_MAX) +enum ipa_wlan_fw_ssr_event { + WLAN_FWR_SSR_BEFORE_SHUTDOWN = IPA_SSR_EVENT_MAX, + IPA_WLAN_FW_SSR_EVENT_MAX +#define IPA_WLAN_FW_SSR_EVENT_MAX IPA_WLAN_FW_SSR_EVENT_MAX +}; + +#define IPA_EVENT_MAX_NUM (IPA_WLAN_FW_SSR_EVENT_MAX) /** * enum ipa_rm_resource_name - IPA RM clients identification names From 73f63f7148d97a7a6595354dfa2859458acee2d5 Mon Sep 17 00:00:00 2001 From: AnilKumar Chimata Date: Tue, 7 Aug 2018 19:56:35 +0530 Subject: [PATCH 006/151] defconfig: Enable rng driver for msm8909 Enable legacy rng driver for msm8909 target. 
Change-Id: I3e0c8c92fd0da8ce5e72d37e03dce7c0d3695a8d Signed-off-by: AnilKumar Chimata --- arch/arm/configs/msm8909-perf_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig index 2557519188a3..4a3c275c4866 100644 --- a/arch/arm/configs/msm8909-perf_defconfig +++ b/arch/arm/configs/msm8909-perf_defconfig @@ -316,6 +316,7 @@ CONFIG_SERIAL_MSM_HS=y CONFIG_SERIAL_MSM_SMD=y CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_MSM_SMD_PKT=y CONFIG_MSM_ADSPRPC=y CONFIG_I2C_CHARDEV=y From 58f27a4427fe658b12687a3f1796289575b3b2f1 Mon Sep 17 00:00:00 2001 From: Chetan C R Date: Wed, 25 Jul 2018 18:27:04 +0530 Subject: [PATCH 007/151] defconfig: msm: enable MSM_SYSMON_COMM for 8909 This option adds support for MSM System Monitor library, which provides an API that may be used for notifying subsystems within the SoC about other subsystems' power-up/down state-changes. Change-Id: If410422b4274545daf89613e58bc797064c5f7d4 Signed-off-by: Chetan C R --- arch/arm/configs/msm8909-perf_defconfig | 1 + arch/arm/configs/msm8909_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig index 2557519188a3..11fcf4afca8e 100644 --- a/arch/arm/configs/msm8909-perf_defconfig +++ b/arch/arm/configs/msm8909-perf_defconfig @@ -484,6 +484,7 @@ CONFIG_MSM_QMI_INTERFACE=y CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_SYSMON_COMM=y CONFIG_MSM_PIL=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_PIL_MSS_QDSP6V5=y diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig index de501ebb77c0..39e0c2c3b850 100644 --- a/arch/arm/configs/msm8909_defconfig +++ b/arch/arm/configs/msm8909_defconfig @@ -495,6 +495,7 @@ CONFIG_MSM_QMI_INTERFACE=y CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y CONFIG_MSM_EVENT_TIMER=y CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_SYSMON_COMM=y CONFIG_MSM_PIL=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_PIL_MSS_QDSP6V5=y From b8a0a496f19b76a269535df9e14fefedf3769d70 Mon Sep 17 00:00:00 2001 From: Hardik Arya Date: Thu, 8 Mar 2018 11:50:49 +0530 Subject: [PATCH 008/151] diag: Allocate DCI memory using vzalloc instead of kzalloc Currently there is a possibility of kmalloc failing when system is running low on memory condition. The patch changes the dci memory allocation from kzalloc to vzalloc. 
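As an illustrative sketch of the pattern (generic names, not the exact driver code): kzalloc() needs physically contiguous pages, so a multi-page buffer is prone to failure when memory is low or fragmented, while vzalloc() only needs virtually contiguous memory and is freed with vfree():

        /* large, long-lived DCI buffer: use a vmalloc-backed allocation */
        buf = vzalloc(buf_size);        /* zeroed, virtually contiguous */
        if (!buf)
                return -ENOMEM;
        /* ... use buf ... */
        vfree(buf);                     /* counterpart of vzalloc(), not kfree() */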
CRs-Fixed: 2195818 Change-Id: I92b20d8e77ce5b2a96212f9d0757fbbff2703891 Signed-off-by: Hardik Arya --- drivers/char/diag/diag_dci.c | 41 ++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c index e195d9d0931c..6e24f475d65d 100644 --- a/drivers/char/diag/diag_dci.c +++ b/drivers/char/diag/diag_dci.c @@ -26,6 +26,7 @@ #include #include #include +#include #ifdef CONFIG_DIAG_OVER_USB #include #endif @@ -229,7 +230,7 @@ static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type) switch (type) { case DCI_BUF_PRIMARY: buffer->capacity = IN_BUF_SIZE; - buffer->data = kzalloc(buffer->capacity, GFP_KERNEL); + buffer->data = vzalloc(buffer->capacity); if (!buffer->data) return -ENOMEM; break; @@ -239,7 +240,7 @@ static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type) break; case DCI_BUF_CMD: buffer->capacity = DIAG_MAX_REQ_SIZE + DCI_BUF_SIZE; - buffer->data = kzalloc(buffer->capacity, GFP_KERNEL); + buffer->data = vzalloc(buffer->capacity); if (!buffer->data) return -ENOMEM; break; @@ -2615,7 +2616,7 @@ static int diag_dci_init_remote(void) create_dci_event_mask_tbl(temp->event_mask_composite); } - partial_pkt.data = kzalloc(MAX_DCI_PACKET_SZ, GFP_KERNEL); + partial_pkt.data = vzalloc(MAX_DCI_PACKET_SZ); if (!partial_pkt.data) { pr_err("diag: Unable to create partial pkt data\n"); return -ENOMEM; @@ -2671,7 +2672,7 @@ int diag_dci_init(void) goto err; if (driver->apps_dci_buf == NULL) { - driver->apps_dci_buf = kzalloc(DCI_BUF_SIZE, GFP_KERNEL); + driver->apps_dci_buf = vzalloc(DCI_BUF_SIZE); if (driver->apps_dci_buf == NULL) goto err; } @@ -2688,12 +2689,12 @@ int diag_dci_init(void) return DIAG_DCI_NO_ERROR; err: pr_err("diag: Could not initialize diag DCI buffers"); - kfree(driver->apps_dci_buf); + vfree(driver->apps_dci_buf); driver->apps_dci_buf = NULL; if (driver->diag_dci_wq) destroy_workqueue(driver->diag_dci_wq); - kfree(partial_pkt.data); + vfree(partial_pkt.data); partial_pkt.data = NULL; mutex_destroy(&driver->dci_mutex); mutex_destroy(&dci_log_mask_mutex); @@ -2713,9 +2714,9 @@ void diag_dci_channel_init(void) void diag_dci_exit(void) { - kfree(partial_pkt.data); + vfree(partial_pkt.data); partial_pkt.data = NULL; - kfree(driver->apps_dci_buf); + vfree(driver->apps_dci_buf); driver->apps_dci_buf = NULL; mutex_destroy(&driver->dci_mutex); mutex_destroy(&dci_log_mask_mutex); @@ -2855,7 +2856,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry) new_entry->in_service = 0; INIT_LIST_HEAD(&new_entry->list_write_buf); mutex_init(&new_entry->write_buf_mutex); - new_entry->dci_log_mask = kzalloc(DCI_LOG_MASK_SIZE, GFP_KERNEL); + new_entry->dci_log_mask = vzalloc(DCI_LOG_MASK_SIZE); if (!new_entry->dci_log_mask) { pr_err("diag: Unable to create log mask for client, %d", driver->dci_client_id); @@ -2863,7 +2864,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry) } create_dci_log_mask_tbl(new_entry->dci_log_mask, DCI_LOG_MASK_CLEAN); - new_entry->dci_event_mask = kzalloc(DCI_EVENT_MASK_SIZE, GFP_KERNEL); + new_entry->dci_event_mask = vzalloc(DCI_EVENT_MASK_SIZE); if (!new_entry->dci_event_mask) { pr_err("diag: Unable to create event mask for client, %d", driver->dci_client_id); @@ -2873,7 +2874,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry) new_entry->buffers = kzalloc(new_entry->num_buffers * sizeof(struct diag_dci_buf_peripheral_t), - GFP_KERNEL); + GFP_KERNEL); if (!new_entry->buffers) { 
pr_err("diag: Unable to allocate buffers for peripherals in %s\n", __func__); @@ -2897,7 +2898,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry) if (!proc_buf->buf_primary) goto fail_alloc; proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t), - GFP_KERNEL); + GFP_KERNEL); if (!proc_buf->buf_cmd) goto fail_alloc; err = diag_dci_init_buffer(proc_buf->buf_primary, @@ -2930,7 +2931,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry) if (proc_buf) { mutex_destroy(&proc_buf->health_mutex); if (proc_buf->buf_primary) { - kfree(proc_buf->buf_primary->data); + vfree(proc_buf->buf_primary->data); proc_buf->buf_primary->data = NULL; mutex_destroy( &proc_buf->buf_primary->data_mutex); @@ -2938,7 +2939,7 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry) kfree(proc_buf->buf_primary); proc_buf->buf_primary = NULL; if (proc_buf->buf_cmd) { - kfree(proc_buf->buf_cmd->data); + vfree(proc_buf->buf_cmd->data); proc_buf->buf_cmd->data = NULL; mutex_destroy( &proc_buf->buf_cmd->data_mutex); @@ -2947,9 +2948,9 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry) proc_buf->buf_cmd = NULL; } } - kfree(new_entry->dci_event_mask); + vfree(new_entry->dci_event_mask); new_entry->dci_event_mask = NULL; - kfree(new_entry->dci_log_mask); + vfree(new_entry->dci_log_mask); new_entry->dci_log_mask = NULL; kfree(new_entry->buffers); new_entry->buffers = NULL; @@ -2984,7 +2985,7 @@ int diag_dci_deinit_client(struct diag_dci_client_tbl *entry) * Clear the client's log and event masks, update the cumulative * masks and send the masks to peripherals */ - kfree(entry->dci_log_mask); + vfree(entry->dci_log_mask); entry->dci_log_mask = NULL; diag_dci_invalidate_cumulative_log_mask(token); if (token == DCI_LOCAL_PROC) @@ -2993,7 +2994,7 @@ int diag_dci_deinit_client(struct diag_dci_client_tbl *entry) if (ret != DIAG_DCI_NO_ERROR) { return ret; } - kfree(entry->dci_event_mask); + vfree(entry->dci_event_mask); entry->dci_event_mask = NULL; diag_dci_invalidate_cumulative_event_mask(token); if (token == DCI_LOCAL_PROC) @@ -3057,12 +3058,12 @@ int diag_dci_deinit_client(struct diag_dci_client_tbl *entry) } mutex_lock(&proc_buf->buf_primary->data_mutex); - kfree(proc_buf->buf_primary->data); + vfree(proc_buf->buf_primary->data); proc_buf->buf_primary->data = NULL; mutex_unlock(&proc_buf->buf_primary->data_mutex); mutex_lock(&proc_buf->buf_cmd->data_mutex); - kfree(proc_buf->buf_cmd->data); + vfree(proc_buf->buf_cmd->data); proc_buf->buf_cmd->data = NULL; mutex_unlock(&proc_buf->buf_cmd->data_mutex); From 542d94a947eb8d319a13a0a55ac4f9885d3b9361 Mon Sep 17 00:00:00 2001 From: Li Jinyue Date: Thu, 14 Dec 2017 17:04:54 +0800 Subject: [PATCH 009/151] BACKPORT: futex: Prevent overflow by strengthen input validation UBSAN reports signed integer overflow in kernel/futex.c: UBSAN: Undefined behaviour in kernel/futex.c:2041:18 signed integer overflow: 0 - -2147483648 cannot be represented in type 'int' Add a sanity check to catch negative values of nr_wake and nr_requeue. 
Signed-off-by: Li Jinyue Signed-off-by: Thomas Gleixner Signed-off-by: Erick Reyes Signed-off-by: Oleg Matcovschi Cc: peterz@infradead.org Cc: dvhart@infradead.org Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/1513242294-31786-1-git-send-email-lijinyue@huawei.com Cherry-picked from fbe0e839d1e22d88810f3ee3e2f1479be4c0aa4a Bug: 76106267 Change-Id: I954cc2848678318b60ec3f103d0c15f87b4605a4 Git-repo: https://android.googlesource.com/kernel/msm Git-commit: cf94cf625ff65a10864321006482e547f023926d Signed-off-by: Dennis Cagle --- kernel/futex.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/futex.c b/kernel/futex.c index bd00696741ed..a8c6a9dbedab 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1514,6 +1514,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, struct futex_hash_bucket *hb1, *hb2; struct futex_q *this, *next; + if (nr_wake < 0 || nr_requeue < 0) + return -EINVAL; + if (requeue_pi) { /* * Requeue PI only works on two distinct uaddrs. This From 2a8afe54b477c9ad218a18cdd8d5e4777f2163c8 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 9 Feb 2016 11:15:14 -0800 Subject: [PATCH 010/151] futex: Remove requirement for lock_page() in get_futex_key() commit 65d8fc777f6dcfee12785c057a6b57f679641c90 upstream. When dealing with key handling for shared futexes, we can drastically reduce the usage/need of the page lock. 1) For anonymous pages, the associated futex object is the mm_struct which does not require the page lock. 2) For inode based, keys, we can check under RCU read lock if the page mapping is still valid and take reference to the inode. This just leaves one rare race that requires the page lock in the slow path when examining the swapcache. Additionally realtime users currently have a problem with the page lock being contended for unbounded periods of time during futex operations. Task A get_futex_key() lock_page() ---> preempted Now any other task trying to lock that page will have to wait until task A gets scheduled back in, which is an unbound time. With this patch, we pretty much have a lockless futex_get_key(). Experiments show that this patch can boost/speedup the hashing of shared futexes with the perf futex benchmarks (which is good for measuring such change) by up to 45% when there are high (> 100) thread counts on a 60 core Westmere. Lower counts are pretty much in the noise range or less than 10%, but mid range can be seen at over 30% overall throughput (hash ops/sec). This makes anon-mem shared futexes much closer to its private counterpart. Signed-off-by: Mel Gorman [ Ported on top of thp refcount rework, changelog, comments, fixes. 
] Signed-off-by: Davidlohr Bueso Reviewed-by: Thomas Gleixner Cc: Chris Mason Cc: Darren Hart Cc: Hugh Dickins Cc: Linus Torvalds Cc: Mel Gorman Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: dave@stgolabs.net Link: http://lkml.kernel.org/r/1455045314-8305-3-git-send-email-dave@stgolabs.net Signed-off-by: Ingo Molnar Signed-off-by: Chenbo Feng Signed-off-by: Greg Kroah-Hartman Git-repo: https://android.googlesource.com/kernel/msm Git-commit: 93dcb09e29bb24a86aa7b7eff65e424f7dc98af2 Change-Id: I5c0f1e167973b387e78fb46b1d29fc2a1a9a0d0d Signed-off-by: Srinivasa Rao Kuppala --- kernel/futex.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 91 insertions(+), 7 deletions(-) diff --git a/kernel/futex.c b/kernel/futex.c index bd00696741ed..440f89921ebc 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -400,6 +400,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page, *page_head; + struct address_space *mapping; int err, ro = 0; /* @@ -478,7 +479,19 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) } #endif - lock_page(page_head); + /* + * The treatment of mapping from this point on is critical. The page + * lock protects many things but in this context the page lock + * stabilizes mapping, prevents inode freeing in the shared + * file-backed region case and guards against movement to swap cache. + * + * Strictly speaking the page lock is not needed in all cases being + * considered here and page lock forces unnecessarily serialization + * From this point on, mapping will be re-verified if necessary and + * page lock will be acquired only if it is unavoidable + */ + + mapping = READ_ONCE(page_head->mapping); /* * If page_head->mapping is NULL, then it cannot be a PageAnon @@ -495,18 +508,31 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) * shmem_writepage move it from filecache to swapcache beneath us: * an unlikely race, but we do need to retry for page_head->mapping. */ - if (!page_head->mapping) { - int shmem_swizzled = PageSwapCache(page_head); + if (unlikely(!mapping)) { + int shmem_swizzled; + + /* + * Page lock is required to identify which special case above + * applies. If this is really a shmem page then the page lock + * will prevent unexpected transitions. + */ + lock_page(page); + shmem_swizzled = PageSwapCache(page) || page->mapping; unlock_page(page_head); put_page(page_head); + if (shmem_swizzled) goto again; + return -EFAULT; } /* * Private mappings are handled in a simple way. * + * If the futex key is stored on an anonymous page, then the associated + * object is the mm which is implicitly pinned by the calling process. + * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. @@ -524,16 +550,74 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; + + get_futex_key_refs(key); /* implies smp_mb(); (B) */ + } else { + struct inode *inode; + + /* + * The associated futex object in this case is the inode and + * the page->mapping must be traversed. 
Ordinarily this should + * be stabilised under page lock but it's not strictly + * necessary in this case as we just want to pin the inode, not + * update the radix tree or anything like that. + * + * The RCU read lock is taken as the inode is finally freed + * under RCU. If the mapping still matches expectations then the + * mapping->host can be safely accessed as being a valid inode. + */ + rcu_read_lock(); + + if (READ_ONCE(page_head->mapping) != mapping) { + rcu_read_unlock(); + put_page(page_head); + + goto again; + } + + inode = READ_ONCE(mapping->host); + if (!inode) { + rcu_read_unlock(); + put_page(page_head); + + goto again; + } + + /* + * Take a reference unless it is about to be freed. Previously + * this reference was taken by ihold under the page lock + * pinning the inode in place so i_lock was unnecessary. The + * only way for this check to fail is if the inode was + * truncated in parallel so warn for now if this happens. + * + * We are not calling into get_futex_key_refs() in file-backed + * cases, therefore a successful atomic_inc return below will + * guarantee that get_futex_key() will still imply smp_mb(); (B). + */ + if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) { + rcu_read_unlock(); + put_page(page_head); + + goto again; + } + + /* Should be impossible but lets be paranoid for now */ + if (WARN_ON_ONCE(inode->i_mapping != mapping)) { + err = -EFAULT; + rcu_read_unlock(); + iput(inode); + + goto out; + } + key->both.offset |= FUT_OFF_INODE; /* inode-based key */ - key->shared.inode = page_head->mapping->host; + key->shared.inode = inode; key->shared.pgoff = basepage_index(page); + rcu_read_unlock(); } - get_futex_key_refs(key); /* implies MB (B) */ - out: - unlock_page(page_head); put_page(page_head); return err; } From 03b822fce93bbf8b563f3cf485799716f1f1002e Mon Sep 17 00:00:00 2001 From: Jianyu Zhan Date: Mon, 7 Mar 2016 09:32:24 +0800 Subject: [PATCH 011/151] futex: Replace barrier() in unqueue_me() with READ_ONCE() commit 29b75eb2d56a714190a93d7be4525e617591077a upstream. Commit e91467ecd1ef ("bug in futex unqueue_me") introduced a barrier() in unqueue_me() to prevent the compiler from rereading the lock pointer which might change after a check for NULL. Replace the barrier() with a READ_ONCE() for the following reasons: 1) READ_ONCE() is a weaker form of barrier() that affects only the specific load operation, while barrier() is a general compiler level memory barrier. READ_ONCE() was not available at the time when the barrier was added. 2) Aside of that READ_ONCE() is descriptive and self explainatory while a barrier without comment is not clear to the casual reader. No functional change. 
[ tglx: Massaged changelog ] Change-Id: I41b0f0c77dc827536685dddb60f32a31c1cde559 Signed-off-by: Jianyu Zhan Acked-by: Christian Borntraeger Acked-by: Darren Hart Cc: dave@stgolabs.net Cc: peterz@infradead.org Cc: linux@rasmusvillemoes.dk Cc: akpm@linux-foundation.org Cc: fengguang.wu@intel.com Cc: bigeasy@linutronix.de Link: http://lkml.kernel.org/r/1457314344-5685-1-git-send-email-nasa4836@gmail.com Signed-off-by: Thomas Gleixner Signed-off-by: Davidlohr Bueso Signed-off-by: Greg Kroah-Hartman Git-repo: https://android.googlesource.com/kernel/msm Git-commit: 1920b8a6a6ed80d3a595ff718c0a99c7d4d895c4 Signed-off-by: Srinivasa Rao Kuppala --- kernel/futex.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/kernel/futex.c b/kernel/futex.c index bd00696741ed..ab6c15d9f67b 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1831,8 +1831,12 @@ static int unqueue_me(struct futex_q *q) /* In the common case we don't take the spinlock, which is nice. */ retry: - lock_ptr = q->lock_ptr; - barrier(); + /* + * q->lock_ptr can change between this read and the following spin_lock. + * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and + * optimizing lock_ptr out of the logic below. + */ + lock_ptr = READ_ONCE(q->lock_ptr); if (lock_ptr != NULL) { spin_lock(lock_ptr); /* From 56f8ad3f396f8319d2bb8689acf19b7fd4ba931f Mon Sep 17 00:00:00 2001 From: Vishwanath Raju K Date: Thu, 9 Aug 2018 12:07:06 +0530 Subject: [PATCH 012/151] ARM: dts: msm: Fix address and size width for common cma node for sdx20 "reserved-mem" tuple is set to 1 for "address-cells" and "size-cells". Fix the CMA region attributes as per the that. Change-Id: I6db1f50644150dbf92e4c9721334f3a627aeb922 Signed-off-by: Vishwanath Raju K --- arch/arm/boot/dts/qcom/sdx20.dtsi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm/boot/dts/qcom/sdx20.dtsi b/arch/arm/boot/dts/qcom/sdx20.dtsi index 6059ed7278f6..e7cda3babf4e 100644 --- a/arch/arm/boot/dts/qcom/sdx20.dtsi +++ b/arch/arm/boot/dts/qcom/sdx20.dtsi @@ -42,10 +42,10 @@ &reserved_mem { linux,cma { compatible = "shared-dma-pool"; - alloc-ranges = <0 0x00000000 0 0x90000000>; + alloc-ranges = <0 0x90000000>; reusable; - alignment = <0 0x400000>; - size = <0 0xc00000>; + alignment = <0x400000>; + size = <0xc00000>; linux,cma-default; }; }; From 71e848bc86f5223805687e19429b62300bac9b2f Mon Sep 17 00:00:00 2001 From: Dhanalakshmi Siddani Date: Fri, 10 Aug 2018 21:06:22 +0530 Subject: [PATCH 013/151] ASoC: msm: qdsp6v2: add routing controls to support compress capture Add routing controls to allow MM17, 18 and 19 connection with primary TDM mics to support for compress capture. 
Change-Id: I45fa0207a3f45f11ab9870ece51c2b7d0b8e8247 CRs-Fixed: 2273335 Signed-off-by: Dhanalakshmi Siddani --- sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 48 ++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c index 13878cab3b4d..5bd2e8441bbc 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c @@ -6319,6 +6319,18 @@ static const struct snd_kcontrol_new mmul17_mixer_controls[] = { SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX, MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0, + MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1, + MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2, + MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3, + MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new mmul18_mixer_controls[] = { @@ -6352,6 +6364,18 @@ static const struct snd_kcontrol_new mmul18_mixer_controls[] = { SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX, MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0, + MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1, + MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2, + MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3, + MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new mmul19_mixer_controls[] = { @@ -6385,6 +6409,18 @@ static const struct snd_kcontrol_new mmul19_mixer_controls[] = { SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX, MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0, + MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1, + MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2, + MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3, + MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new mmul20_mixer_controls[] = { @@ -11227,6 +11263,18 @@ static const struct snd_soc_dapm_route intercon[] = { {"MultiMedia17 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"MultiMedia18 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, 
{"MultiMedia19 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, + {"MultiMedia17 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"}, + {"MultiMedia18 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"}, + {"MultiMedia19 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"}, + {"MultiMedia17 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"}, + {"MultiMedia18 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"}, + {"MultiMedia19 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"}, + {"MultiMedia17 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"}, + {"MultiMedia18 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"}, + {"MultiMedia19 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"}, + {"MultiMedia17 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"}, + {"MultiMedia18 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"}, + {"MultiMedia19 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"}, {"MultiMedia28 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"MultiMedia29 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, From 71e737f82e9a7e4936816d73f39cc7674f087011 Mon Sep 17 00:00:00 2001 From: Dhanalakshmi Siddani Date: Fri, 10 Aug 2018 19:03:19 +0530 Subject: [PATCH 014/151] ASoC: msm: add new dai link for compress capture for apq8009 Add new dai link to add support for compress capture on external codec for apq8009 and correct dai link name to use proper FE. CRs-Fixed: 2273335 Change-Id: I5d4ff675090c23af1824840ff7d29de911f6b297 Signed-off-by: Dhanalakshmi Siddani --- sound/soc/msm/apq8009-i2s-ext-codec.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/sound/soc/msm/apq8009-i2s-ext-codec.c b/sound/soc/msm/apq8009-i2s-ext-codec.c index 14734a366096..edea677f08c3 100644 --- a/sound/soc/msm/apq8009-i2s-ext-codec.c +++ b/sound/soc/msm/apq8009-i2s-ext-codec.c @@ -2396,7 +2396,7 @@ static struct snd_soc_dai_link msm_compr_fe_dai[] = { {/* hw:x,43 */ .name = "APQ8009 Compress3", .stream_name = "Compress3", - .cpu_dai_name = "MultiMedia10", + .cpu_dai_name = "MultiMedia17", .platform_name = "msm-compress-dsp", .dynamic = 1, .dpcm_capture = 1, @@ -2406,12 +2406,12 @@ static struct snd_soc_dai_link msm_compr_fe_dai[] = { .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA10, + .be_id = MSM_FRONTEND_DAI_MULTIMEDIA17, }, {/* hw:x,44 */ .name = "APQ8009 Compress4", .stream_name = "Compress4", - .cpu_dai_name = "MultiMedia11", + .cpu_dai_name = "MultiMedia18", .platform_name = "msm-compress-dsp", .dynamic = 1, .dpcm_capture = 1, @@ -2421,7 +2421,22 @@ static struct snd_soc_dai_link msm_compr_fe_dai[] = { .codec_name = "snd-soc-dummy", .ignore_suspend = 1, .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA11, + .be_id = MSM_FRONTEND_DAI_MULTIMEDIA18, + }, + {/* hw:x,45 */ + .name = "APQ8009 Compress5", + .stream_name = "Compress5", + .cpu_dai_name = "MultiMedia19", + .platform_name = "msm-compress-dsp", + .dynamic = 1, + .dpcm_capture = 1, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + .ignore_suspend = 1, + .ignore_pmdown_time = 1, + .be_id = MSM_FRONTEND_DAI_MULTIMEDIA19, }, }; From 3d63731ebe6d6f5d510b21d5811ec15d3984abd7 Mon Sep 17 00:00:00 2001 From: Pratap Nirujogi Date: Wed, 8 Aug 2018 20:43:29 +0530 Subject: [PATCH 015/151] msm: camera: cpp: Check for valid tx level TX and RX FIFOs of Microcontroller are used to exchange commands and messages between Micro FW and CPP driver. TX FIFO depth is 16 32-bit words, incase of errors there is a chance of overflow. 
To prevent possible out of bound access, TX FIFO depth or level is checked for MAX depth before accessing the FIFO. Change-Id: I5adf39b46ff10e358c4a2c03a2de07d44b99cedb Signed-off-by: Pratap Nirujogi Signed-off-by: VijayaKumar T M --- .../platform/msm/camera_v2/pproc/cpp/msm_cpp.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c index 4c00276c376f..71cbc3cec43f 100644 --- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c +++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -833,9 +833,14 @@ static irqreturn_t msm_cpp_irq(int irq_num, void *data) if (irq_status & 0x8) { tx_level = msm_camera_io_r(cpp_dev->base + MSM_CPP_MICRO_FIFO_TX_STAT) >> 2; - for (i = 0; i < tx_level; i++) { - tx_fifo[i] = msm_camera_io_r(cpp_dev->base + - MSM_CPP_MICRO_FIFO_TX_DATA); + if (tx_level < MSM_CPP_TX_FIFO_LEVEL) { + for (i = 0; i < tx_level; i++) { + tx_fifo[i] = msm_camera_io_r(cpp_dev->base + + MSM_CPP_MICRO_FIFO_TX_DATA); + } + } else { + pr_err("Fatal invalid tx level %d", tx_level); + goto err; } spin_lock_irqsave(&cpp_dev->tasklet_lock, flags); queue_cmd = &cpp_dev->tasklet_queue_cmd[cpp_dev->taskletq_idx]; @@ -890,6 +895,7 @@ static irqreturn_t msm_cpp_irq(int irq_num, void *data) pr_debug("DEBUG_R1: 0x%x\n", msm_camera_io_r(cpp_dev->base + 0x8C)); } +err: msm_camera_io_w(irq_status, cpp_dev->base + MSM_CPP_MICRO_IRQGEN_CLR); return IRQ_HANDLED; } From 68baefef28219558bc65b2f05b2809bc227fc6f2 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Wed, 10 May 2017 09:53:40 +0200 Subject: [PATCH 016/151] UPSTREAM: scsi: sg: don't return bogus Sg_requests If the list search in sg_get_rq_mark() fails to find a valid request, we return a bogus element. This then can later lead to a GPF in sg_remove_scat(). So don't return bogus Sg_requests in sg_get_rq_mark() but NULL in case the list search doesn't find a valid request. Bug: 79090045 Signed-off-by: Johannes Thumshirn Reported-by: Andrey Konovalov Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Doug Gilbert Reviewed-by: Hannes Reinecke Acked-by: Doug Gilbert Signed-off-by: Martin K. 
Petersen Signed-off-by: Chenbo Feng Git-repo: https://android.googlesource.com/kernel/msm Git-commit: 58408c68e4d7abc1957ca8cc3fec69619578b06a Change-Id: If95d1a8eef3748c9937201e524184b89a5eaaf2e Bug: 75300370 Signed-off-by: Srinivasa Rao Kuppala --- drivers/scsi/sg.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 73c116ba4aba..c1abb35c896a 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -2124,11 +2124,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id) if ((1 == resp->done) && (!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { resp->done = 2; /* guard against other readers */ - break; + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return resp; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); - return resp; + return NULL; } /* always adds to end of list */ From caf11d3c271fb80327589691ebf6b091e98d17ff Mon Sep 17 00:00:00 2001 From: Gustavo Solaira Date: Tue, 14 Aug 2018 20:45:08 -0700 Subject: [PATCH 017/151] msm: ep_pcie: Apply gpio settings based on the device tree flags Read the device tree gpio flags for all the pins but apply them only for the MDM2AP_STATUS in order to support boards that require an active low signal. Change-Id: Ia1972b4b9046ff947864b7a31781919a31396ae0 Signed-off-by: Gustavo Solaira --- drivers/platform/msm/ep_pcie/ep_pcie_core.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c index 28cf5150ed01..62d79acb95e5 100644 --- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c +++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c @@ -760,6 +760,7 @@ static int ep_pcie_get_resources(struct ep_pcie_dev_t *dev, char prop_name[MAX_PROP_SIZE]; const __be32 *prop; u32 *clkfreq = NULL; + enum of_gpio_flags gpio_flags; EP_PCIE_DBG(dev, "PCIe V%d\n", dev->rev); @@ -868,10 +869,15 @@ static int ep_pcie_get_resources(struct ep_pcie_dev_t *dev, for (i = 0; i < EP_PCIE_MAX_GPIO; i++) { gpio_info = &dev->gpio[i]; - ret = of_get_named_gpio((&pdev->dev)->of_node, - gpio_info->name, 0); + ret = of_get_named_gpio_flags((&pdev->dev)->of_node, + gpio_info->name, 0, &gpio_flags); if (ret >= 0) { gpio_info->num = ret; + if (i == EP_PCIE_GPIO_MDM2AP) { + gpio_info->init = + gpio_flags & OF_GPIO_ACTIVE_LOW; + gpio_info->on = !gpio_info->init; + } ret = 0; EP_PCIE_DBG(dev, "GPIO num for %s is %d\n", gpio_info->name, gpio_info->num); From e8cbe6e62295904d17d3edfc704b5fa1e1e7795f Mon Sep 17 00:00:00 2001 From: Surendar Karka Date: Sat, 5 May 2018 15:50:47 +0530 Subject: [PATCH 018/151] ASoC: msm: qdsp6v2: align param size of PSPD matrix Params size should be multiple of 4 bytes. Aligning param size in multiple of 4 bytes to make 32 bit. 
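The underlying pitfall, shown as a two-line sketch: roundup() is an expression macro that returns the aligned value and does not modify its argument, so the result must be assigned back:

        param_size = roundup(param_size, 4);    /* aligned to 32 bits */
        roundup(param_size, 4);                 /* statement alone has no effect */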
CRs-Fixed: 2235684 Change-Id: Ib568ebd31ef540aa8c6ad756b885f96b02458916 Signed-off-by: Surendar Karka --- sound/soc/msm/qdsp6v2/q6adm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c index 0536e05fb539..7b9d01f01c00 100644 --- a/sound/soc/msm/qdsp6v2/q6adm.c +++ b/sound/soc/msm/qdsp6v2/q6adm.c @@ -602,7 +602,8 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id, ch_mixer->input_channels[channel_index] + ch_mixer->input_channels[channel_index] * ch_mixer->output_channel); - roundup(param_size, 4); + /* Params size should be multiple of 4 bytes i.e 32bit aligned*/ + param_size = roundup(param_size, 4); sz = sizeof(struct adm_cmd_set_pspd_mtmx_strtr_params_v5) + sizeof(struct default_chmixer_param_id_coeff) + From b89d716203c81d3158f1a5a9fabc73f54fe7913d Mon Sep 17 00:00:00 2001 From: Gustavo Solaira Date: Thu, 16 Aug 2018 10:14:05 -0700 Subject: [PATCH 019/151] ARM: dts: msm: Enable reset via PM_RESIN_N for mdm9650 PCIe EP TTP Enable PMIC stage 2 reset for mdm9650 PCIe EP TTP when the reset line is held low for 2 seconds. This is used by the PCIe RC host to force reset the MDM if it crashes or becomes unresponsive. Change-Id: Ifdfab79cb6441d332305e6dd7e83bede2f500940 Signed-off-by: Gustavo Solaira --- arch/arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts b/arch/arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts index 2577e4386ea6..b3b37cc41913 100644 --- a/arch/arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts +++ b/arch/arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts @@ -192,3 +192,18 @@ bias-pull-down; }; }; + +&pmd9650_pon { + interrupts = <0x0 0x8 0x0>, <0x0 0x8 0x1>; + interrupt-names = "kpdpwr", "resin"; + qcom,s3-src = "resin"; + + qcom,pon_2 { + qcom,pon-type = <1>; + qcom,support-reset = <1>; + qcom,s1-timer = <0>; + qcom,s2-timer = <2000>; + qcom,s2-type = <7>; + qcom,pull-up = <1>; + }; +}; From ecef5ef61ab76fc367abd2f57850e7ee450acb04 Mon Sep 17 00:00:00 2001 From: Gustavo Solaira Date: Mon, 13 Aug 2018 15:42:53 -0700 Subject: [PATCH 020/151] ARM: dts: msm: Support IPC with PCIe host for mdm9650 EP TTP Enable MDM2AP_STATUS_N signal as GPIO_64 active low for IPC between mdm9650 EP TTP and the PCIe host. This allows the host to detect the MDM status. An active low signal is needed because there is a level shifter with an active pull up between MDM and the host. Use a different GPIO for CLKREQ_N since GPIO_64 is needed for the status signal. Change-Id: I19830c7c6820267e517a93d27da5183c95fb6ca3 Signed-off-by: Gustavo Solaira --- .../arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/arch/arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts b/arch/arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts index b3b37cc41913..f3645dc64439 100644 --- a/arch/arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts +++ b/arch/arm/boot/dts/qcom/mdm9650-pcie-ep-ttp.dts @@ -13,7 +13,9 @@ /dts-v1/; +#include #include "mdm9650-v1.1-mtp.dtsi" + / { model = "Qualcomm Technologies, Inc. 
MDM 9650 PCIE EP TTP"; compatible = "qcom,mdm9650-ttp", "qcom,mdm9650", "qcom,ttp"; @@ -165,7 +167,8 @@ &pcie_ep { status = "ok"; - mdm2apstatus-gpio = <&tlmm_pinmux 85 0>; + mdm2apstatus-gpio = <&tlmm_pinmux 64 GPIO_ACTIVE_LOW>; + clkreq-gpio = <&tlmm_pinmux 85 0>; }; &pcie0 { @@ -182,14 +185,26 @@ &pcie0_mdm2apstatus_default { mux { - pins = "gpio85"; + pins = "gpio64"; function = "gpio"; }; + config { + pins = "gpio64"; + drive-strength = <2>; + bias-pull-up; + }; +}; + +&pcie0_clkreq_default { + mux { + pins = "gpio85"; + function = "gpio"; + }; config { pins = "gpio85"; drive-strength = <2>; - bias-pull-down; + bias-pull-up; }; }; From 5bd511a9a92d09aa6eac6cd231abdbb31a99f497 Mon Sep 17 00:00:00 2001 From: Animesh Kishore Date: Wed, 28 Mar 2018 00:53:31 +0530 Subject: [PATCH 021/151] mdss: mdp: Fix fudge factor overflow check Fudge adjustment is always 64 bit operation irrespective of underlying architecture is 32/64 bit. Fix max value to compare overflow against. Add warning if adjustments can't go through without overflow. Change-Id: I9c15ea8c1754c9ddb997546dc476bb6d45198524 Signed-off-by: Animesh Kishore --- drivers/video/msm/mdss/mdss_mdp_ctl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c index 8271e27bbad7..01d06f278579 100644 --- a/drivers/video/msm/mdss/mdss_mdp_ctl.c +++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c @@ -76,13 +76,15 @@ static inline u64 fudge_factor(u64 val, u32 numer, u32 denom) u64 result = val; if (val) { - u64 temp = -1UL; + u64 temp = U64_MAX; do_div(temp, val); if (temp > numer) { /* no overflow, so we can do the operation*/ result = (val * (u64)numer); do_div(result, denom); + } else { + pr_warn("Overflow, skip fudge factor\n"); } } return result; From 514463c434ab081f258e68829c2abcc214d1e7d9 Mon Sep 17 00:00:00 2001 From: Kaustubh Pandey Date: Mon, 20 Aug 2018 13:00:36 +0530 Subject: [PATCH 022/151] net: memset smsg to avoid the padding data memset smsg to avoid the padding data of kernel to be shared with user space. Fix is to set fields event to all "0", but there is actually 6 bytes padding between "sktype" and "skflags", so memset was done to set all the padding bits to 0. 
CRs-Fixed: 2287852 Change-Id: I435486b80ad19c5fa54b098680623e7a4f080198 Signed-off-by: Kaustubh Pandey Acked-by: Chinmay Agarwal --- net/core/sockev_nlmcast.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c index 3d7dabef6410..22148bf76e0a 100644 --- a/net/core/sockev_nlmcast.c +++ b/net/core/sockev_nlmcast.c @@ -36,7 +36,6 @@ static struct netlink_kernel_cfg nlcfg = { static void _sockev_event(unsigned long event, __u8 *evstr, int buflen) { - memset(evstr, 0, buflen); switch (event) { case SOCKEV_SOCKET: @@ -96,6 +95,7 @@ static int sockev_client_cb(struct notifier_block *nb, NETLINK_CB(skb).dst_group = SKNLGRP_SOCKEV; smsg = nlmsg_data(nlh); + memset(smsg, 0, sizeof(struct sknlsockevmsg)); smsg->pid = current->pid; _sockev_event(event, smsg->event, sizeof(smsg->event)); smsg->skfamily = sock->sk->sk_family; From 99c65d362205ad1ef96d1804f9183b0c76ccba35 Mon Sep 17 00:00:00 2001 From: Deepak Shankar Date: Tue, 7 Aug 2018 12:03:26 +0530 Subject: [PATCH 023/151] msm: ais: Handle values above the upper bound in msm_cpp_irq api In the msm_cpp_irq function, tx_level is read using msm_camera_io_r(), but this value is never verified to be lower than MSM_CPP_TX_FIFO_LEVEL (16). As tx_level is used as the upper bound for the following loop, any value bigger than 16 will result in a buffer overflow. Hence this case is now handled as an error with an error log. Change-Id: I5752779ddde154560204d4dfc2f48413a725e00a Signed-off-by: Deepak Shankar --- drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c index d265210b7b60..37891ad09817 100644 --- a/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c +++ b/drivers/media/platform/msm/ais/pproc/cpp/msm_cpp.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -836,9 +836,14 @@ static irqreturn_t msm_cpp_irq(int irq_num, void *data) if (irq_status & 0x8) { tx_level = msm_camera_io_r(cpp_dev->base + MSM_CPP_MICRO_FIFO_TX_STAT) >> 2; - for (i = 0; i < tx_level; i++) { - tx_fifo[i] = msm_camera_io_r(cpp_dev->base + - MSM_CPP_MICRO_FIFO_TX_DATA); + if (tx_level < MSM_CPP_TX_FIFO_LEVEL) { + for (i = 0; i < tx_level; i++) { + tx_fifo[i] = msm_camera_io_r(cpp_dev->base + + MSM_CPP_MICRO_FIFO_TX_DATA); + } + } else { + pr_err("Fatal invalid tx level %d", tx_level); + goto err; } spin_lock_irqsave(&cpp_dev->tasklet_lock, flags); queue_cmd = &cpp_dev->tasklet_queue_cmd[cpp_dev->taskletq_idx]; @@ -893,6 +898,7 @@ static irqreturn_t msm_cpp_irq(int irq_num, void *data) pr_debug("DEBUG_R1: 0x%x\n", msm_camera_io_r(cpp_dev->base + 0x8C)); } +err: msm_camera_io_w(irq_status, cpp_dev->base + MSM_CPP_MICRO_IRQGEN_CLR); return IRQ_HANDLED; } From 57aa6092e499be16fd9583fc87615e8126b276f5 Mon Sep 17 00:00:00 2001 From: Deepak Shankar Date: Thu, 16 Aug 2018 14:59:17 +0530 Subject: [PATCH 024/151] msm: ais: Fix out-of-bounds read in string class name The jpeg driver is calling class_create() with a stack variable, which can be overwritten by other stack variables.
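A short sketch of the pattern being fixed (illustrative names, not the exact driver code): class_create() here keeps the name pointer rather than duplicating the string, so the buffer must outlive the call; a local variable on the probe stack does not, while a static buffer does:

        static char devname[DEV_NAME_LEN];      /* lives for the driver lifetime */

        snprintf(devname, sizeof(devname), "%s%d", MSM_JPEG_NAME, id);
        jpeg_class = class_create(THIS_MODULE, devname);  /* name pointer is retained */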
Change-Id: Ief49d44e25c70716dcb474b2635e42e0e8e9ca15 Signed-off-by: Deepak Shankar --- drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_dev.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_dev.c b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_dev.c index a0a6ffd0e69a..a2232e6cc731 100644 --- a/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_dev.c +++ b/drivers/media/platform/msm/ais/jpeg_10/msm_jpeg_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -32,6 +32,8 @@ #define MSM_JPEG_NAME "jpeg" #define DEV_NAME_LEN 10 +static char devname[DEV_NAME_LEN]; + static int msm_jpeg_open(struct inode *inode, struct file *filp) { int rc = 0; @@ -185,7 +187,6 @@ static int msm_jpeg_init_dev(struct platform_device *pdev) struct msm_jpeg_device *msm_jpeg_device_p; const struct of_device_id *device_id; const struct msm_jpeg_priv_data *priv_data; - char devname[DEV_NAME_LEN]; msm_jpeg_device_p = kzalloc(sizeof(struct msm_jpeg_device), GFP_ATOMIC); if (!msm_jpeg_device_p) { From 7dddaced11d13415a0876c0ad76e3d4b0dd04e15 Mon Sep 17 00:00:00 2001 From: Pradeep P V K Date: Mon, 2 Jul 2018 17:37:56 +0530 Subject: [PATCH 025/151] ARM: dts: msm: Add QDSD_BOOT_CTL to sdhci node for MSM8953 MTP. Add TLMM_QDSD_BOOT_CTL register address into sdhc node so that mmc driver should be able to access this register and update it. The mmc driver would need to update this register for proper SD card detection functionality. The default value of this register routes the SD card detect pin to QDSD. If QDSS driver is present, then it updates this register during its probe. But if QDSS driver is disabled (which a debug driver so its not mandatory to enable it), then the default configuration is affecting SD card detection functionality since the card-detect gpio is routed to QDSD. To avoid this, mmc driver has to update it with correct value to ensure proper SD card detection. Change-Id: If1b9843372eb84ee22acec5961c6e69fab941c5d Signed-off-by: Pradeep P V K --- arch/arm/boot/dts/qcom/msm8953.dtsi | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/arm/boot/dts/qcom/msm8953.dtsi b/arch/arm/boot/dts/qcom/msm8953.dtsi index 2c523daaaadf..5862b5c5046e 100644 --- a/arch/arm/boot/dts/qcom/msm8953.dtsi +++ b/arch/arm/boot/dts/qcom/msm8953.dtsi @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1772,8 +1772,10 @@ sdhc_1: sdhci@7824900 { compatible = "qcom,sdhci-msm"; - reg = <0x7824900 0x500>, <0x7824000 0x800>, <0x7824e00 0x200>; - reg-names = "hc_mem", "core_mem", "cmdq_mem"; + reg = <0x7824900 0x500>, <0x7824000 0x800>, <0x7824e00 0x200>, + <0x0119d000 0x4>; + reg-names = "hc_mem", "core_mem", "cmdq_mem", + "tlmm_mem"; interrupts = <0 123 0>, <0 138 0>; interrupt-names = "hc_irq", "pwr_irq"; From 69b2f386db33809d6152ead7734a45ddad487705 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 2 Jan 2016 10:06:29 +0000 Subject: [PATCH 026/151] mmc: sd: limit SD card power limit according to cards capabilities The SD card specification allows cards to error out a SWITCH command where the requested function in a group is not supported. The spec provides for a set of capabilities which indicate which functions are supported. In the case of the power limit, requesting an unsupported power level via the SWITCH command fails, resulting in the power level remaining at the power-on default of 0.72W, even though the host and card may support higher powers levels. This has been seen with SanDisk 8GB cards, which support the default 0.72W and 1.44W (200mA and 400mA) in combination with an iMX6 host, supporting up to 2.88W (800mA). This currently causes us to try to set a power limit function value of '3' (2.88W) which the card errors out on, and thereby causes the power level to remain at 0.72W rather than the desired 1.44W. Arrange to limit the selected current limit by the capabilities reported by the card to avoid the SWITCH command failing. Select the highest current limit that the host and card combination support. Change-Id: I7fbd035cdc606946e997fa4671ec5683acd3ed1b Signed-off-by: Russell King Fixes: a39ca6ae0a08 ("mmc: core: Simplify and fix for SD switch processing") Signed-off-by: Ulf Hansson Git-commit: d9812780a020bcec44565b5950b2a8b31afb5545 Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git Signed-off-by: Pradeep P V K --- drivers/mmc/core/sd.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index b58bf6d1f505..322927acc9dc 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -335,6 +335,7 @@ static int mmc_read_switch(struct mmc_card *card) card->sw_caps.sd3_bus_mode = status[13]; /* Driver Strengths supported by the card */ card->sw_caps.sd3_drv_type = status[9]; + card->sw_caps.sd3_curr_limit = status[7] | status[6] << 8; } out: @@ -592,14 +593,25 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status) * when we set current limit to 200ma, the card will draw 200ma, and * when we set current limit to 400/600/800ma, the card will draw its * maximum 300ma from the host. + * + * The above is incorrect: if we try to set a current limit that is + * not supported by the card, the card can rightfully error out the + * attempt, and remain at the default current limit. This results + * in a 300mA card being limited to 200mA even though the host + * supports 800mA. Failures seen with SanDisk 8GB UHS cards with + * an iMX6 host. 
--rmk */ - if (max_current >= 800) + if (max_current >= 800 && + card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800) current_limit = SD_SET_CURRENT_LIMIT_800; - else if (max_current >= 600) + else if (max_current >= 600 && + card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600) current_limit = SD_SET_CURRENT_LIMIT_600; - else if (max_current >= 400) + else if (max_current >= 400 && + card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400) current_limit = SD_SET_CURRENT_LIMIT_400; - else if (max_current >= 200) + else if (max_current >= 200 && + card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200) current_limit = SD_SET_CURRENT_LIMIT_200; if (current_limit != SD_SET_CURRENT_NO_CHANGE) { From 0337426cd305dd3d196e92ecbfb5399b43a2f7f6 Mon Sep 17 00:00:00 2001 From: Arunk Khandavalli Date: Fri, 24 Aug 2018 15:03:03 +0530 Subject: [PATCH 027/151] nl80211: nl80211_update_ft_ies to validate NL80211_ATTR_IE Current nl80211_update_ft_ies doesn't validate NL80211_ATTR_IE before dereferencing it, which leads to a null pointer exception if not passed. This commit validates this attribute too. Change-Id: Ia40b02fc218bc26a07bc6b2153f425b8cae3bd82 CRs-Fixed: 2261685 Signed-off-by: Arunk Khandavalli Signed-off-by: Srinivas Dasari --- net/wireless/nl80211.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 79eb85a5b698..98fe19e5f26f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -9874,6 +9874,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_MDID] || + !info->attrs[NL80211_ATTR_IE] || !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) return -EINVAL; From 2493f5f8b689c6e2f572ee20d413fab1025bcf33 Mon Sep 17 00:00:00 2001 From: AnilKumar Chimata Date: Thu, 7 Jun 2018 15:48:29 +0530 Subject: [PATCH 028/151] crypto: msm: Fix device stuck issue Device is failed to boot after enabling the full disk encryption with general purpose crypto engine (GPCE) as current logic is bound to cpu, which result in crypto device stuck while processing the crypto requests. This patch adds the changes to remove the cpu bound logic to avoid crypto device stuck issue. Also FIPS changes are snapshot from msm-3.10 commit <95b93df76fda087> (ASoC: msm: Create codec info entries for msm8976). 
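For orientation, the CTR_DRBG interface introduced below (see
drivers/char/hw_random/ctr_drbg.h) is driven as instantiate ->
generate -> reseed on demand -> uninstantiate. A minimal usage sketch
with placeholder entropy and nonce buffers; in the driver the real
entropy comes from the PRNG hardware via msm_rng_direct_read():

    static int drbg_usage_sketch(void)
    {
            struct ctr_drbg_ctx_s ctx;
            uint8_t entropy[16] = {0};      /* placeholder only */
            uint8_t nonce[8] = {0};         /* placeholder only */
            uint8_t out[32];
            enum ctr_drbg_status_t rv;

            memset(&ctx, 0, sizeof(ctx));

            /* all length arguments in this API are in bits */
            rv = ctr_drbg_instantiate(&ctx, entropy, 8 * sizeof(entropy),
                                      nonce, 8 * sizeof(nonce), 1ULL << 31);
            if (rv != CTR_DRBG_SUCCESS)
                    return -1;

            rv = ctr_drbg_generate(&ctx, out, 8 * sizeof(out));
            if (rv == CTR_DRBG_NEEDS_RESEED) {
                    /* feed fresh entropy, then retry the generate */
                    rv = ctr_drbg_reseed(&ctx, entropy, 8 * sizeof(entropy));
                    if (rv == CTR_DRBG_SUCCESS)
                            rv = ctr_drbg_generate(&ctx, out,
                                                   8 * sizeof(out));
            }

            ctr_drbg_uninstantiate(&ctx);
            return rv == CTR_DRBG_SUCCESS ? 0 : -1;
    }

fips_drbg_gen() in fips_drbg.c wraps this retry-on-reseed loop, and
msm_rng_drbg_read() panics if that wrapper reports an error, so a DRBG
failure is treated as fatal rather than ignored.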
Change-Id: I785a7e399f514de7b308cdda10f80abd46fbd110 Signed-off-by: AnilKumar Chimata --- drivers/char/hw_random/Makefile | 2 +- drivers/char/hw_random/ctr_drbg.c | 887 ++++++++ drivers/char/hw_random/ctr_drbg.h | 113 + drivers/char/hw_random/fips_drbg.c | 283 +++ drivers/char/hw_random/fips_drbg.h | 62 + drivers/char/hw_random/msm_fips_selftest.c | 332 +++ drivers/char/hw_random/msm_fips_selftest.h | 31 + drivers/char/hw_random/msm_rng.c | 359 ++- drivers/char/hw_random/msm_rng.h | 48 + drivers/crypto/msm/Makefile | 4 + drivers/crypto/msm/qce.h | 6 +- drivers/crypto/msm/qce50.c | 2330 +++++++------------- drivers/crypto/msm/qce50.h | 82 +- drivers/crypto/msm/qcedev.c | 85 +- drivers/crypto/msm/qcedevi.h | 46 +- drivers/crypto/msm/qcrypto.c | 1091 ++------- drivers/crypto/msm/qcryptoi.h | 72 + 17 files changed, 3263 insertions(+), 2570 deletions(-) create mode 100644 drivers/char/hw_random/ctr_drbg.c create mode 100644 drivers/char/hw_random/ctr_drbg.h create mode 100644 drivers/char/hw_random/fips_drbg.c create mode 100644 drivers/char/hw_random/fips_drbg.h create mode 100644 drivers/char/hw_random/msm_fips_selftest.c create mode 100644 drivers/char/hw_random/msm_fips_selftest.h create mode 100644 drivers/char/hw_random/msm_rng.h create mode 100644 drivers/crypto/msm/qcryptoi.h diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index e444f87b3341..c6f73bc5a60f 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -30,4 +30,4 @@ obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o -obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += msm_rng.o +obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += msm_rng.o fips_drbg.o ctr_drbg.o msm_fips_selftest.o diff --git a/drivers/char/hw_random/ctr_drbg.c b/drivers/char/hw_random/ctr_drbg.c new file mode 100644 index 000000000000..add8e7cce853 --- /dev/null +++ b/drivers/char/hw_random/ctr_drbg.c @@ -0,0 +1,887 @@ +/* + * Copyright (c) 2014, 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ctr_drbg.h" +#include "fips_drbg.h" + +#define AES128_KEY_SIZE (16) +#define AES128_BLOCK_SIZE (16) +#define AES_TEXT_LENGTH (64) +#define MAX_TEXT_LENGTH (2048) +#define CYPHER_OUT_LEN_64 (64) + +static uint8_t df_initial_k[16] = + "\x0\x1\x2\x3\x4\x5\x6\x7\x8\x9\xa\xb\xc\xd\xe\xf"; + +static void _crypto_cipher_test_complete(struct crypto_async_request *req, + int err) +{ + struct msm_ctr_tcrypt_result_s *res; + + if (!req) + return; + + res = req->data; + if (!res) + return; + + if (err == -EINPROGRESS) + return; + res->err = err; + complete(&res->completion); +} + +static int ctr_aes_init(struct ctr_drbg_ctx_s *ctx) +{ + int status = 0; + + ctx->aes_ctx.tfm = crypto_alloc_ablkcipher("qcom-ecb(aes)", 0, 0); + if (IS_ERR_OR_NULL(ctx->aes_ctx.tfm)) { + pr_err("%s: qcom-ecb(aes) failed, ctx->aes_ctx.tfm %pK\n", + __func__, ctx->aes_ctx.tfm); + ctx->aes_ctx.tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0); + if (IS_ERR_OR_NULL(ctx->aes_ctx.tfm)) { + pr_err("%s: qcom-ecb(aes) failed\n", __func__); + status = -ENOMEM; + goto out; + } + } + + ctx->aes_ctx.req = ablkcipher_request_alloc(ctx->aes_ctx.tfm, + GFP_KERNEL); + if (IS_ERR_OR_NULL(ctx->aes_ctx.req)) { + pr_err("%s: Failed to allocate request\n", __func__); + status = -ENOMEM; + goto clr_tfm; + } + + ablkcipher_request_set_callback(ctx->aes_ctx.req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + _crypto_cipher_test_complete, + &ctx->aes_ctx.result); + + memset(&ctx->aes_ctx.input, 0, sizeof(struct msm_ctr_buffer_s)); + memset(&ctx->aes_ctx.output, 0, sizeof(struct msm_ctr_buffer_s)); + + ctx->aes_ctx.input.virt_addr = kmalloc(AES128_BLOCK_SIZE, + GFP_KERNEL | __GFP_DMA); + if (!ctx->aes_ctx.input.virt_addr) { + status = -ENOMEM; + goto clr_req; + } + ctx->aes_ctx.output.virt_addr = kmalloc(AES128_BLOCK_SIZE, + GFP_KERNEL | __GFP_DMA); + if (!ctx->aes_ctx.output.virt_addr) { + status = -ENOMEM; + goto clr_input; + } + /* + * -------------------------------------------------------------------- + * Set DF AES mode + * -------------------------------------------------------------------- + */ + ctx->df_aes_ctx.tfm = crypto_alloc_ablkcipher("qcom-ecb(aes)", 0, 0); + if (IS_ERR_OR_NULL(ctx->df_aes_ctx.tfm)) { + pr_err("%s: qcom-ecb(aes) failed\n", __func__); + ctx->df_aes_ctx.tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0); + if (IS_ERR_OR_NULL(ctx->df_aes_ctx.tfm)) { + pr_err("%s: ecb(aes) failed\n", __func__); + status = -ENOMEM; + goto clr_output; + } + } + + ctx->df_aes_ctx.req = ablkcipher_request_alloc(ctx->df_aes_ctx.tfm, + GFP_KERNEL); + if (IS_ERR_OR_NULL(ctx->df_aes_ctx.req)) { + pr_err("Failed to allocate request\n"); + status = -ENOMEM; + goto clr_df_tfm; + } + + ablkcipher_request_set_callback(ctx->df_aes_ctx.req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + _crypto_cipher_test_complete, + &ctx->df_aes_ctx.result); + + memset(&ctx->df_aes_ctx.input, 0, sizeof(ctx->df_aes_ctx.input)); + memset(&ctx->df_aes_ctx.output, 0, sizeof(ctx->df_aes_ctx.output)); + + ctx->df_aes_ctx.input.virt_addr = kmalloc(AES128_BLOCK_SIZE, + GFP_KERNEL | __GFP_DMA); + if (!ctx->df_aes_ctx.input.virt_addr) { + status = -ENOMEM; + goto clr_df_req; + } + + ctx->df_aes_ctx.output.virt_addr = kmalloc(AES128_BLOCK_SIZE, + GFP_KERNEL | __GFP_DMA); + if (!ctx->df_aes_ctx.output.virt_addr) { + status = -ENOMEM; + goto clr_df_input; + } + + goto out; + 
+clr_df_input: + kzfree(ctx->df_aes_ctx.input.virt_addr); +clr_df_req: + if (ctx->df_aes_ctx.req) + ablkcipher_request_free(ctx->df_aes_ctx.req); +clr_df_tfm: + if (ctx->df_aes_ctx.tfm) + crypto_free_ablkcipher(ctx->df_aes_ctx.tfm); +clr_output: + kzfree(ctx->aes_ctx.output.virt_addr); +clr_input: + kzfree(ctx->aes_ctx.input.virt_addr); +clr_req: + if (ctx->aes_ctx.req) + ablkcipher_request_free(ctx->aes_ctx.req); +clr_tfm: + if (ctx->aes_ctx.tfm) + crypto_free_ablkcipher(ctx->aes_ctx.tfm); +out: + return status; +} + +/* + * Increments the V field in *ctx + */ +static void increment_V(struct ctr_drbg_ctx_s *ctx) +{ + unsigned sum = 1; + int i; + uint8_t *p = &ctx->seed.key_V.V[0]; + + /* + * To make known answer tests work, this has to be done big_endian. + * So we just do it by bytes. + * since we are using AES-128, the key size is 16 bytes. + */ + for (i = 15; sum != 0 && i >= 0; --i) { + sum += p[i]; + p[i] = (sum & 0xff); + sum >>= 8; + } +} + +/* + * The NIST update function. It updates the key and V to new values + * (to prevent backtracking) and optionally stirs in data. data may + * be null, otherwise *data is from 0 to 256 bits long. + * keysched is an optional keyschedule to use as an optimization. It + * must be consistent with the key in *ctx. No changes are made to + * *ctx until it is assured that there will be no failures. Note that + * data_len is in bytes. (That may not be offical NIST + * recommendation, but I do it anyway; they say "or equivalent" and + * this is equivalent enough.) + */ +static enum ctr_drbg_status_t +update(struct ctr_drbg_ctx_s *ctx, const uint8_t *data, size_t data_len) +{ + uint8_t temp[32]; + unsigned int i; + int rc; + struct scatterlist sg_in, sg_out; + + for (i = 0; i < 2; ++i) { + increment_V(ctx); + init_completion(&ctx->aes_ctx.result.completion); + + /* + * Note: personalize these called routines for + * specific testing. + */ + memcpy(ctx->aes_ctx.input.virt_addr, + ctx->seed.key_V.V, + CTR_DRBG_BLOCK_LEN_BYTES); + + crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0); + + /* Encrypt some clear text! */ + sg_init_one(&sg_in, + ctx->aes_ctx.input.virt_addr, + AES128_BLOCK_SIZE); + sg_init_one(&sg_out, + ctx->aes_ctx.output.virt_addr, + AES128_BLOCK_SIZE); + ablkcipher_request_set_crypt(ctx->aes_ctx.req, + &sg_in, + &sg_out, + CTR_DRBG_BLOCK_LEN_BYTES, + NULL); + + rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req); + + switch (rc) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + rc = wait_for_completion_interruptible( + &ctx->aes_ctx.result.completion); + if (!rc && !ctx->aes_ctx.result.err) { + init_completion( + &ctx->aes_ctx.result.completion); + break; + } + default: + pr_debug("ablkcipher_encrypt ret %d err %d\n", + rc, ctx->aes_ctx.result.err); + break; + } + + init_completion(&ctx->aes_ctx.result.completion); + + memcpy(temp + AES128_BLOCK_SIZE * i, + ctx->aes_ctx.output.virt_addr, + AES128_BLOCK_SIZE); + } + + if (data_len > 0) + pr_debug("in update, data_len = %zu\n", data_len); + + for (i = 0; i < data_len; ++i) + ctx->seed.as_bytes[i] = temp[i] ^ data[i]; + + /* now copy the rest of temp to key and V */ + if (32 > data_len) { + memcpy(ctx->seed.as_bytes + data_len, + temp + data_len, + 32 - data_len); + } + + memset(temp, 0, 32); + return CTR_DRBG_SUCCESS; +} + +/* + * Reseeds the CTR_DRBG instance with entropy. entropy_len_bits must + * be exactly 256. 
+ */ +enum ctr_drbg_status_t ctr_drbg_reseed(struct ctr_drbg_ctx_s *ctx, + const void *entropy, + size_t entropy_len_bits) +{ + enum ctr_drbg_status_t update_rv; + uint8_t seed_material[32]; + int rc; + + if (ctx == NULL || entropy == NULL) + return CTR_DRBG_INVALID_ARG; + + update_rv = block_cipher_df(ctx, (uint8_t *)entropy, + (entropy_len_bits / 8), seed_material, 32); + if (update_rv != CTR_DRBG_SUCCESS) { + memset(seed_material, 0, 32); + return CTR_DRBG_GENERAL_ERROR; + } + + rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm, + ctx->seed.key_V.key, AES128_KEY_SIZE); + if (rc) { + memset(seed_material, 0, 32); + pr_debug("set-key in Instantiate failed, returns %d\n", rc); + return CTR_DRBG_GENERAL_ERROR; + } + + pr_debug("ctr_drbg_reseed, to call update\n"); + update_rv = update(ctx, (const uint8_t *)seed_material, 32); + pr_debug("ctr_drbg_reseed, after called update\n"); + if (update_rv != CTR_DRBG_SUCCESS) { + memset(seed_material, 0, 32); + return update_rv; + } + ctx->reseed_counter = 1; /* think 0 but SP 800-90 says 1 */ + + memset(seed_material, 0, 32); + + return CTR_DRBG_SUCCESS; + +} + +/* + * The NIST instantiate function. entropy_len_bits must be exactly + * 256. After reseed_interval generate requests, generated requests + * will fail until the CTR_DRBG instance is reseeded. As per NIST SP + * 800-90, an error is returned if reseed_interval > 2^48. + */ + +enum ctr_drbg_status_t +ctr_drbg_instantiate(struct ctr_drbg_ctx_s *ctx, + const uint8_t *entropy, + size_t entropy_len_bits, + const uint8_t *nonce, + size_t nonce_len_bits, + unsigned long long reseed_interval) +{ + + enum ctr_drbg_status_t update_rv; + uint8_t seed_material[32]; + uint8_t df_input[32]; + int rc; + + if (ctx == NULL || entropy == NULL || nonce == NULL) + return CTR_DRBG_INVALID_ARG; + if (((nonce_len_bits / 8) + (entropy_len_bits / 8)) > 32) { + pr_debug("entropy_len_bits + nonce_len_bits is too long\n"); + pr_debug("nonce len: %zu, entropy: %zu\n", + nonce_len_bits, entropy_len_bits); + return CTR_DRBG_INVALID_ARG_ERR1; + } + + if (reseed_interval > (1ULL << 48)) + return CTR_DRBG_INVALID_ARG_ERR2; + + ctr_aes_init(ctx); + + memset(ctx->seed.as_bytes, 0, sizeof(ctx->seed.as_bytes)); + memcpy(df_input, (uint8_t *)entropy, entropy_len_bits / 8); + memcpy(df_input + (entropy_len_bits / 8), nonce, nonce_len_bits / 8); + + update_rv = block_cipher_df(ctx, df_input, 24, seed_material, 32); + memset(df_input, 0, 32); + + if (update_rv != CTR_DRBG_SUCCESS) { + pr_debug("block_cipher_df failed, returns %d\n", update_rv); + memset(seed_material, 0, 32); + return CTR_DRBG_GENERAL_ERROR; + } + + rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm, + ctx->seed.key_V.key, + AES128_KEY_SIZE); + if (rc) { + pr_debug("crypto_ablkcipher_setkey API failed: %d\n", rc); + memset(seed_material, 0, 32); + return CTR_DRBG_GENERAL_ERROR; + } + update_rv = update(ctx, (const uint8_t *)seed_material, 32); + if (update_rv != CTR_DRBG_SUCCESS) { + memset(seed_material, 0, 32); + return update_rv; + } + + ctx->reseed_counter = 1; /* think 0 but SP 800-90 says 1 */ + ctx->reseed_interval = reseed_interval; + + memset(seed_material, 0, 32); + + pr_debug("return from ctr_drbg_instantiate\n"); + + return CTR_DRBG_SUCCESS; +} + +/* + * Generate random bits. len_bits is specified in bits, as required by + * NIST SP800-90. It fails with CTR_DRBG_NEEDS_RESEED if the number + * of generates since instantiation or the last reseed >= the + * reseed_interval supplied at instantiation. len_bits must be a + * multiple of 8. 
len_bits must not exceed 2^19, as per NIST SP + * 800-90. Optionally stirs in additional_input which is + * additional_input_len_bits long, and is silently rounded up to a + * multiple of 8. CTR_DRBG_INVALID_ARG is returned if any pointer arg + * is null and the corresponding length is non-zero or if + * additioanl_input_len_bits > 256. + */ +enum ctr_drbg_status_t +ctr_drbg_generate_w_data(struct ctr_drbg_ctx_s *ctx, + void *additional_input, + size_t additional_input_len_bits, + void *buffer, + size_t len_bits) +{ + size_t total_blocks = (len_bits + 127) / 128; + enum ctr_drbg_status_t update_rv; + int rv = 0; + size_t i; + int rc; + struct scatterlist sg_in, sg_out; + + if (ctx == NULL) + return CTR_DRBG_INVALID_ARG; + if (buffer == NULL && len_bits > 0) + return CTR_DRBG_INVALID_ARG; + if (len_bits % 8 != 0) + return CTR_DRBG_INVALID_ARG; + if (len_bits > (1<<19)) + return CTR_DRBG_INVALID_ARG; + + if ((additional_input == NULL && additional_input_len_bits > 0) || + additional_input_len_bits > CTR_DRBG_SEED_LEN_BITS) + return CTR_DRBG_INVALID_ARG; + if (ctx->reseed_counter > ctx->reseed_interval) + return CTR_DRBG_NEEDS_RESEED; + + rc = crypto_ablkcipher_setkey(ctx->aes_ctx.tfm, + ctx->seed.key_V.key, + AES128_KEY_SIZE); + if (rc) { + pr_debug("crypto_ablkcipher_setkey API failed: %d\n", rc); + return CTR_DRBG_GENERAL_ERROR; + } + if (rv < 0) + return CTR_DRBG_GENERAL_ERROR; + + if (!ctx->continuous_test_started) { + increment_V(ctx); + init_completion(&ctx->aes_ctx.result.completion); + crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0); + memcpy(ctx->aes_ctx.input.virt_addr, ctx->seed.key_V.V, 16); + sg_init_one(&sg_in, ctx->aes_ctx.input.virt_addr, 16); + sg_init_one(&sg_out, ctx->aes_ctx.output.virt_addr, 16); + ablkcipher_request_set_crypt(ctx->aes_ctx.req, &sg_in, &sg_out, + CTR_DRBG_BLOCK_LEN_BYTES, NULL); + rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req); + switch (rc) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + rc = wait_for_completion_interruptible( + &ctx->aes_ctx.result.completion); + if (!rc && !ctx->aes_ctx.result.err) { + init_completion( + &ctx->aes_ctx.result.completion); + break; + } + /* fall through */ + default: + pr_debug(":crypto_ablkcipher_encrypt returned with %d result %d on iteration\n", + rc, + ctx->aes_ctx.result.err); + break; + } + init_completion(&ctx->aes_ctx.result.completion); + + memcpy(ctx->prev_drn, ctx->aes_ctx.output.virt_addr, 16); + ctx->continuous_test_started = 1; + } + + /* Generate the output */ + for (i = 0; i < total_blocks; ++i) { + /* Increment the counter */ + increment_V(ctx); + if (((len_bits % 128) != 0) && (i == (total_blocks - 1))) { + /* last block and it's a fragment */ + init_completion(&ctx->aes_ctx.result.completion); + + /* + * Note: personalize these called routines for + * specific testing. + */ + + crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0); + + /* Encrypt some clear text! 
*/ + + memcpy(ctx->aes_ctx.input.virt_addr, + ctx->seed.key_V.V, + 16); + sg_init_one(&sg_in, + ctx->aes_ctx.input.virt_addr, + 16); + sg_init_one(&sg_out, + ctx->aes_ctx.output.virt_addr, + 16); + ablkcipher_request_set_crypt(ctx->aes_ctx.req, + &sg_in, + &sg_out, + CTR_DRBG_BLOCK_LEN_BYTES, + NULL); + + rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req); + + switch (rc) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + rc = wait_for_completion_interruptible( + &ctx->aes_ctx.result.completion); + if (!rc && !ctx->aes_ctx.result.err) { + init_completion( + &ctx->aes_ctx.result.completion); + break; + } + /* fall through */ + default: + break; + } + + init_completion(&ctx->aes_ctx.result.completion); + + if (!memcmp(ctx->prev_drn, + ctx->aes_ctx.output.virt_addr, + 16)) + return CTR_DRBG_GENERAL_ERROR; + + memcpy(ctx->prev_drn, ctx->aes_ctx.output.virt_addr, + 16); + rv = 0; + memcpy((uint8_t *)buffer + 16*i, + ctx->aes_ctx.output.virt_addr, + (len_bits % 128)/8); + } else { + /* normal case: encrypt direct to target buffer */ + + init_completion(&ctx->aes_ctx.result.completion); + + /* + * Note: personalize these called routines for + * specific testing. + */ + + crypto_ablkcipher_clear_flags(ctx->aes_ctx.tfm, ~0); + + /* Encrypt some clear text! */ + + memcpy(ctx->aes_ctx.input.virt_addr, + ctx->seed.key_V.V, + 16); + sg_init_one(&sg_in, + ctx->aes_ctx.input.virt_addr, + 16); + sg_init_one(&sg_out, + ctx->aes_ctx.output.virt_addr, + 16); + ablkcipher_request_set_crypt(ctx->aes_ctx.req, + &sg_in, + &sg_out, + CTR_DRBG_BLOCK_LEN_BYTES, + NULL); + + rc = crypto_ablkcipher_encrypt(ctx->aes_ctx.req); + + switch (rc) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + rc = wait_for_completion_interruptible( + &ctx->aes_ctx.result.completion); + if (!rc && !ctx->aes_ctx.result.err) { + init_completion( + &ctx->aes_ctx.result.completion); + break; + } + /* fall through */ + default: + break; + } + + if (!memcmp(ctx->prev_drn, + ctx->aes_ctx.output.virt_addr, + 16)) + return CTR_DRBG_GENERAL_ERROR; + memcpy(ctx->prev_drn, ctx->aes_ctx.output.virt_addr, + 16); + memcpy((uint8_t *)buffer + 16*i, + ctx->aes_ctx.output.virt_addr, + 16); + rv = 0; + } + } + + update_rv = update(ctx, + additional_input, + (additional_input_len_bits + 7) / 8); /* round up */ + if (update_rv != CTR_DRBG_SUCCESS) + return update_rv; + + ctx->reseed_counter += 1; + + return CTR_DRBG_SUCCESS; +} + +/* + * Generate random bits, but with no provided data. 
See notes on + * ctr_drbg_generate_w_data() + */ +enum ctr_drbg_status_t +ctr_drbg_generate(struct ctr_drbg_ctx_s *ctx, + void *buffer, + size_t len_bits) + +{ + return ctr_drbg_generate_w_data(ctx, NULL, 0, buffer, len_bits); +} + +void ctr_aes_deinit(struct ctr_drbg_ctx_s *ctx) +{ + if (ctx->aes_ctx.req) { + ablkcipher_request_free(ctx->aes_ctx.req); + ctx->aes_ctx.req = NULL; + } + if (ctx->aes_ctx.tfm) { + crypto_free_ablkcipher(ctx->aes_ctx.tfm); + ctx->aes_ctx.tfm = NULL; + } + + kzfree(ctx->aes_ctx.input.virt_addr); + ctx->aes_ctx.input.virt_addr = NULL; + kzfree(ctx->aes_ctx.output.virt_addr); + ctx->aes_ctx.output.virt_addr = NULL; + + if (ctx->df_aes_ctx.req) { + ablkcipher_request_free(ctx->df_aes_ctx.req); + ctx->df_aes_ctx.req = NULL; + } + if (ctx->df_aes_ctx.tfm) { + crypto_free_ablkcipher(ctx->df_aes_ctx.tfm); + ctx->df_aes_ctx.tfm = NULL; + } + + kzfree(ctx->df_aes_ctx.input.virt_addr); + ctx->df_aes_ctx.input.virt_addr = NULL; + kzfree(ctx->df_aes_ctx.output.virt_addr); + ctx->df_aes_ctx.output.virt_addr = NULL; +} + +/* + * Zeroizes the context structure. In some future implementation it + * could also free resources. So do call it. + */ +void +ctr_drbg_uninstantiate(struct ctr_drbg_ctx_s *ctx) +{ + ctr_aes_deinit(ctx); + memset(ctx, 0, sizeof(*ctx)); +} + +/* + * the derivation functions to handle biased entropy input. + */ +static enum ctr_drbg_status_t df_bcc_func(struct ctr_drbg_ctx_s *ctx, + uint8_t *key, + uint8_t *input, + uint32_t input_size, + uint8_t *output) +{ + enum ctr_drbg_status_t ret_val = CTR_DRBG_SUCCESS; + uint8_t *p; + int rc; + int i; + int n; + struct scatterlist sg_in, sg_out; + + if (0 != (input_size % CTR_DRBG_BLOCK_LEN_BYTES)) + return CTR_DRBG_INVALID_ARG; + + n = input_size / CTR_DRBG_BLOCK_LEN_BYTES; + + for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++) + ctx->df_aes_ctx.output.virt_addr[i] = 0; + + rc = crypto_ablkcipher_setkey(ctx->df_aes_ctx.tfm, + key, + AES128_KEY_SIZE); + if (rc) { + pr_debug("crypto_ablkcipher_setkey API failed: %d\n", rc); + return CTR_DRBG_GENERAL_ERROR; + } + + p = input; + while (n > 0) { + for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++, p++) + ctx->df_aes_ctx.input.virt_addr[i] = + ctx->df_aes_ctx.output.virt_addr[i] ^ (*p); + + init_completion(&ctx->df_aes_ctx.result.completion); + + /* + * Note: personalize these called routines for + * specific testing. + */ + + crypto_ablkcipher_clear_flags(ctx->df_aes_ctx.tfm, ~0); + + /* Encrypt some clear text! 
*/ + + sg_init_one(&sg_in, ctx->df_aes_ctx.input.virt_addr, 16); + sg_init_one(&sg_out, ctx->df_aes_ctx.output.virt_addr, 16); + + ablkcipher_request_set_crypt(ctx->df_aes_ctx.req, + &sg_in, + &sg_out, + CTR_DRBG_BLOCK_LEN_BYTES, + NULL); + + rc = crypto_ablkcipher_encrypt(ctx->df_aes_ctx.req); + + switch (rc) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + rc = wait_for_completion_interruptible( + &ctx->df_aes_ctx.result.completion); + if (!rc && !ctx->df_aes_ctx.result.err) { + init_completion( + &ctx->df_aes_ctx.result.completion); + break; + } + /* fall through */ + default: + break; + } + + init_completion(&ctx->df_aes_ctx.result.completion); + n--; + } + + for (i = 0; i < CTR_DRBG_BLOCK_LEN_BYTES; i++) + output[i] = ctx->df_aes_ctx.output.virt_addr[i]; + + return ret_val; +} + +/* output_size must <= 512 bits (<= 64) */ +enum ctr_drbg_status_t block_cipher_df(struct ctr_drbg_ctx_s *ctx, + const uint8_t *input, uint32_t input_size, + uint8_t *output, uint32_t output_size) +{ + enum ctr_drbg_status_t ret_val = CTR_DRBG_SUCCESS; + uint32_t s_len = 0; + uint32_t s_pad_len = 0; + uint8_t temp[32]; + uint32_t out_len = 0; + uint8_t siv_string[CYPHER_OUT_LEN_64]; + uint8_t *p_s_string = NULL; + int rc; + struct scatterlist sg_in, sg_out; + + if (output_size > CYPHER_OUT_LEN_64) + return CTR_DRBG_INVALID_ARG; + + s_len = input_size + 9; + + s_pad_len = s_len % 16; + + if (0 != s_pad_len) + s_len += (16 - s_pad_len); + + /* add the length of IV */ + s_len += 16; + + if (s_len > CYPHER_OUT_LEN_64) + pr_debug("error! s_len is too big\n"); + + memset(siv_string, 0, 64); + + p_s_string = siv_string + 16; + + p_s_string[3] = input_size; + p_s_string[7] = output_size; + memcpy(p_s_string + 8, input, input_size); + p_s_string[8 + input_size] = 0x80; + if (0 < s_pad_len) + memset(p_s_string + 9 + input_size, '\0', s_pad_len); + + ret_val = df_bcc_func(ctx, df_initial_k, siv_string, s_len, temp); + if (ret_val != CTR_DRBG_SUCCESS) { + pr_debug("df_bcc_func failed, returned %d\n", ret_val); + goto out; + } + + siv_string[3] = 0x1; + ret_val = df_bcc_func(ctx, df_initial_k, siv_string, s_len, temp + 16); + if (ret_val != CTR_DRBG_SUCCESS) + goto out; + + out_len = 0; + rc = crypto_ablkcipher_setkey(ctx->df_aes_ctx.tfm, temp, + AES128_KEY_SIZE); + if (rc) { + pr_debug("crypto_ablkcipher_setkey API failed: %d\n", rc); + goto out; + } + memcpy(ctx->df_aes_ctx.input.virt_addr, temp + 16, 16); + + while (out_len < output_size) { + + init_completion(&ctx->df_aes_ctx.result.completion); + + /* + * Note: personalize these called routines for + * specific testing. + */ + + crypto_ablkcipher_clear_flags(ctx->df_aes_ctx.tfm, ~0); + + /* Encrypt some clear text! 
*/ + + sg_init_one(&sg_in, ctx->df_aes_ctx.input.virt_addr, 16); + sg_init_one(&sg_out, ctx->df_aes_ctx.output.virt_addr, 16); + ablkcipher_request_set_crypt(ctx->df_aes_ctx.req, + &sg_in, + &sg_out, + CTR_DRBG_BLOCK_LEN_BYTES, + NULL); + + rc = crypto_ablkcipher_encrypt(ctx->df_aes_ctx.req); + + switch (rc) { + case 0: + break; + case -EINPROGRESS: + case -EBUSY: + rc = wait_for_completion_interruptible( + &ctx->df_aes_ctx.result.completion); + if (!rc && !ctx->df_aes_ctx.result.err) { + init_completion( + &ctx->df_aes_ctx.result.completion); + break; + } + /* fall through */ + default: + break; + } + + + init_completion(&ctx->df_aes_ctx.result.completion); + + memcpy(output + out_len, ctx->df_aes_ctx.output.virt_addr, 16); + memcpy(ctx->df_aes_ctx.input.virt_addr, output + out_len, 16); + out_len += 16; + } + +out: + memset(siv_string, 0, 64); + memset(temp, 0, 32); + return ret_val; +} diff --git a/drivers/char/hw_random/ctr_drbg.h b/drivers/char/hw_random/ctr_drbg.h new file mode 100644 index 000000000000..d8afad494908 --- /dev/null +++ b/drivers/char/hw_random/ctr_drbg.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2014, 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __MSM_CTR_DRBG_H__ +#define __MSM_CTR_DRBG_H__ + +/* + * This is the module that is actually follows the details of NIST SP + * 800-90 so it can claim to use a FIPS-approved algorithm. + * + * Added ctr_drbg_generate_w_data which supplies + * additional input to the generate operation. 
+ */ + +#define CTR_DRBG_MAX_REQ_LEN_BITS (1 << 19) +#define CTR_DRBG_SEED_LEN_BITS 256 +#define CTR_DRBG_BLOCK_LEN_BITS 128 +#define CTR_DRBG_BLOCK_LEN_BYTES (CTR_DRBG_BLOCK_LEN_BITS/8) +#define CTR_DRBG_MAX_RESEED_INTERVAL (1ULL << 48) + +#define MSM_AES128_BLOCK_SIZE (16) +#define MSM_ENTROPY_BUFFER_SIZE (16) +#define MSM_NONCE_BUFFER_SIZE (8) + +enum ctr_drbg_status_t { + CTR_DRBG_SUCCESS = 0, + CTR_DRBG_NEEDS_RESEED, + CTR_DRBG_INVALID_ARG, + CTR_DRBG_INVALID_ARG_ERR1, + CTR_DRBG_INVALID_ARG_ERR2, + CTR_DRBG_GENERAL_ERROR = 0xFF, +}; + +union ctr_drbg_seed_t { + uint8_t as_bytes[32]; + uint32_t as_words[8]; + uint64_t as_64[4]; + struct { + uint8_t key[16]; + uint8_t V[16]; + } key_V; +}; + +struct msm_ctr_tcrypt_result_s { + struct completion completion; + int err; +}; + +struct msm_ctr_buffer_s { + unsigned char *virt_addr; +}; + +struct aes_struct_s { + struct crypto_ablkcipher *tfm; + struct ablkcipher_request *req; + struct msm_ctr_buffer_s input; + struct msm_ctr_buffer_s output; + struct msm_ctr_tcrypt_result_s result; +}; + +struct ctr_drbg_ctx_s { + unsigned long long reseed_counter; /* starts at 1 as per SP 800-90 */ + unsigned long long reseed_interval; + union ctr_drbg_seed_t seed; + struct aes_struct_s aes_ctx; + struct aes_struct_s df_aes_ctx; + uint8_t prev_drn[MSM_AES128_BLOCK_SIZE]; + uint8_t continuous_test_started; +}; + +enum ctr_drbg_status_t ctr_drbg_instantiate(struct ctr_drbg_ctx_s *ctx, + const uint8_t *entropy, + size_t entropy_len_bits, + const uint8_t *nonce, + size_t nonce_len_bits, + unsigned long long reseed_interval); + +enum ctr_drbg_status_t ctr_drbg_reseed(struct ctr_drbg_ctx_s *ctx, + const void *entropy, + size_t entropy_len); + +enum ctr_drbg_status_t ctr_drbg_generate_w_data(struct ctr_drbg_ctx_s *ctx, + void *additional_input, + size_t additional_input_len_bits, + void *buffer, + size_t len_bits); + +enum ctr_drbg_status_t ctr_drbg_generate(struct ctr_drbg_ctx_s *ctx, + void *buffer, + size_t len); + +void ctr_drbg_uninstantiate(struct ctr_drbg_ctx_s *ctx); + +enum ctr_drbg_status_t block_cipher_df(struct ctr_drbg_ctx_s *ctx, + const uint8_t *input, + uint32_t input_size, + uint8_t *output, + uint32_t output_size + ); +void ctr_aes_deinit(struct ctr_drbg_ctx_s *ctx); + +#endif /* __MSM_CTR_DRBG_H__ */ diff --git a/drivers/char/hw_random/fips_drbg.c b/drivers/char/hw_random/fips_drbg.c new file mode 100644 index 000000000000..51b364aec903 --- /dev/null +++ b/drivers/char/hw_random/fips_drbg.c @@ -0,0 +1,283 @@ +/* + * Copyright (c) 2014, 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "msm_rng.h" +#include "fips_drbg.h" + +/* The fips-140 random number generator is a wrapper around the CTR_DRBG + * random number generator, which is built according to the + * specifications in NIST SP 800-90 using AES-128. 
+ * + * This wrapper has the following functionality + * a. Entropy collection is via a callback. + * b. A failure of CTR_DRBG because reseeding is needed invisibly + * causes the underlying CTR_DRBG instance to be reseeded with + * new random data and then the generate request is retried. + * c. Limitations in CTR_DRBG (like not allowed more than 65536 bytes + * to be genrated in one request) are worked around. At this level + * it just works. + * d. On success the return value is zero. If the callback was invoked + * and returned a non-zero value, that value is returned. On all other + * errors -1 is returned. + */ + +#ifndef NULL + #define NULL 0 +#endif + +/* 32 bytes = 256 bits = seed length */ +#define MAGIC 0xab10d161 + +#define RESEED_INTERVAL (1 << 31) + +static int get_entropy_callback(void *ctx, void *buf) +{ + struct msm_rng_device *msm_rng_dev = (struct msm_rng_device *)ctx; + int ret_val = -1; + + if (!ctx) + return FIPS140_PRNG_ERR; + + if (!buf) + return FIPS140_PRNG_ERR; + + ret_val = msm_rng_direct_read(msm_rng_dev, buf); + if ((size_t)ret_val != Q_HW_DRBG_BLOCK_BYTES) + return ret_val; + + return 0; +} + +/* Initialize *ctx. Automatically reseed after reseed_interval calls + * to fips_drbg_gen. The underlying CTR_DRBG will automatically be + * reseeded every reseed_interval requests. Values over + * CTR_DRBG_MAX_RESEED_INTERVAL (2^48) or that are zero are silently + * converted to CTR_DRBG_MAX_RESEED_INTERVAL. (It is easy to justify + * lowering values that are too large to CTR_DRBG_MAX_RESEED_INTERVAL + * (the NIST SP800-90 limit): just silently enforcing the rules. + * Silently converted 0 to to CTR_DRBG_MAX_RESEED_INTERVAL is harder. + * The alternative is to return an error. But since + * CTR_DRBG_MAX_RESEED is safe, we relieve the caller of one more + * error to worry about.) 
+ */ +static int +do_fips_drbg_init(struct fips_drbg_ctx_s *ctx, + get_entropy_callback_t callback, + void *callback_ctx, + unsigned long long reseed_interval) +{ + uint8_t entropy_pool[Q_HW_DRBG_BLOCK_BYTES]; + enum ctr_drbg_status_t init_rv; + int rv = -1; + + if (ctx == NULL) + return FIPS140_PRNG_ERR; + if (callback == NULL) + return FIPS140_PRNG_ERR; + if (reseed_interval == 0 || + reseed_interval > CTR_DRBG_MAX_RESEED_INTERVAL) + reseed_interval = CTR_DRBG_MAX_RESEED_INTERVAL; + + /* fill in callback related fields in ctx */ + ctx->get_entropy_callback = callback; + ctx->get_entropy_callback_ctx = callback_ctx; + + if (!ctx->fips_drbg_started) { + rv = (*ctx->get_entropy_callback)(ctx->get_entropy_callback_ctx, + ctx->prev_hw_drbg_block); + if (rv != 0) + return FIPS140_PRNG_ERR; + ctx->fips_drbg_started = 1; + } + + rv = (*ctx->get_entropy_callback)(ctx->get_entropy_callback_ctx, + entropy_pool); + if (rv != 0) { + memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES); + return FIPS140_PRNG_ERR; + } + + if (!memcmp(entropy_pool, ctx->prev_hw_drbg_block, + Q_HW_DRBG_BLOCK_BYTES)) { + memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES); + return FIPS140_PRNG_ERR; + } + + memcpy(ctx->prev_hw_drbg_block, entropy_pool, + Q_HW_DRBG_BLOCK_BYTES); + + init_rv = ctr_drbg_instantiate(&ctx->ctr_drbg_ctx, + entropy_pool, + 8 * MSM_ENTROPY_BUFFER_SIZE, + entropy_pool + MSM_ENTROPY_BUFFER_SIZE, + 8 * 8, + reseed_interval); + + memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES); + + if (init_rv == 0) + ctx->magic = MAGIC; + + return 0; +} + +int fips_drbg_init(struct msm_rng_device *msm_rng_ctx) +{ + int ret_val; + + ret_val = do_fips_drbg_init(msm_rng_ctx->drbg_ctx, + get_entropy_callback, + msm_rng_ctx, + RESEED_INTERVAL + ); + if (ret_val != 0) + ret_val = FIPS140_PRNG_ERR; + + return ret_val; +} + +/* Push new entropy into the CTR_DRBG instance in ctx, combining + * it with the entropy already there. On success, 0 is returned. If + * the callback returns a non-zero value, that value is returned. + * Other errors return -1. + */ +static int +fips_drbg_reseed(struct fips_drbg_ctx_s *ctx) +{ + uint8_t entropy_pool[Q_HW_DRBG_BLOCK_BYTES]; + int rv; + enum ctr_drbg_status_t init_rv; + + if (ctx == NULL) + return FIPS140_PRNG_ERR; + + if (!ctx->fips_drbg_started) { + rv = (*ctx->get_entropy_callback)(ctx->get_entropy_callback_ctx, + ctx->prev_hw_drbg_block); + if (rv != 0) + return FIPS140_PRNG_ERR; + ctx->fips_drbg_started = 1; + } + + rv = (*ctx->get_entropy_callback)(ctx->get_entropy_callback_ctx, + entropy_pool); + if (rv != 0) { + memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES); + return FIPS140_PRNG_ERR; + } + + if (!memcmp(entropy_pool, ctx->prev_hw_drbg_block, + Q_HW_DRBG_BLOCK_BYTES)) { + memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES); + return FIPS140_PRNG_ERR; + } + + memcpy(ctx->prev_hw_drbg_block, entropy_pool, + Q_HW_DRBG_BLOCK_BYTES); + + init_rv = ctr_drbg_reseed(&ctx->ctr_drbg_ctx, + entropy_pool, + 8 * MSM_ENTROPY_BUFFER_SIZE); + + /* Zeroize the buffer for security. */ + memset(entropy_pool, 0, Q_HW_DRBG_BLOCK_BYTES); + + return (init_rv == CTR_DRBG_SUCCESS ? + FIPS140_PRNG_OK : + FIPS140_PRNG_ERR); +} + +/* generate random bytes. len is in bytes On success returns 0. If + * the callback returns a non-zero value, that is returned. Other + * errors return -1. 
*/ +int +fips_drbg_gen(struct fips_drbg_ctx_s *ctx, void *tgt, size_t len) +{ + + /* + * The contorted flow in this function is so that the CTR_DRBG + * stuff can follow NIST SP 800-90, which has the generate function + * fail and return a special code if a reseed is needed. We also work + * around the CTR_DRBG limitation of the maximum request sized being + * 2^19 bits. + */ + enum ctr_drbg_status_t gen_rv; + int rv; + + if (ctx == NULL || ctx->magic != MAGIC) + return FIPS140_PRNG_ERR; + if (tgt == NULL && len > 0) + return FIPS140_PRNG_ERR; + while (len > 0) { + size_t req_len; + + if (len < (CTR_DRBG_MAX_REQ_LEN_BITS / 8)) + req_len = len; + else + req_len = CTR_DRBG_MAX_REQ_LEN_BITS / 8; + + gen_rv = ctr_drbg_generate(&ctx->ctr_drbg_ctx, + tgt, + 8*req_len); + switch (gen_rv) { + case CTR_DRBG_SUCCESS: + tgt = (uint8_t *)tgt + req_len; + len -= req_len; + break; + case CTR_DRBG_NEEDS_RESEED: + rv = fips_drbg_reseed(ctx); + if (rv != 0) + return rv; + break; + default: + return FIPS140_PRNG_ERR; + } + } + + return 0; +} + +/* free resources and zeroize state */ +void +fips_drbg_final(struct fips_drbg_ctx_s *ctx) +{ + ctr_drbg_uninstantiate(&ctx->ctr_drbg_ctx); + ctx->get_entropy_callback = NULL; + ctx->get_entropy_callback_ctx = NULL; + ctx->fips_drbg_started = 0; + memset(ctx->prev_hw_drbg_block, 0, Q_HW_DRBG_BLOCK_BYTES); + ctx->magic = 0; +} diff --git a/drivers/char/hw_random/fips_drbg.h b/drivers/char/hw_random/fips_drbg.h new file mode 100644 index 000000000000..3f16e0eb664e --- /dev/null +++ b/drivers/char/hw_random/fips_drbg.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2014, 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __MSM_FIPS_DRBG_H__ +#define __MSM_FIPS_DRBG_H__ + +#include "ctr_drbg.h" +#include "msm_rng.h" + +#define FIPS140_PRNG_OK (0) +#define FIPS140_PRNG_ERR (-1) + +extern int _do_msm_fips_drbg_init(void *rng_dev); + +typedef int (*get_entropy_callback_t)(void *ctx, void *buf); + +struct fips_drbg_ctx_s { + uint32_t magic; /* for checking that ctx is likely valid */ + get_entropy_callback_t get_entropy_callback; + void *get_entropy_callback_ctx; + struct ctr_drbg_ctx_s ctr_drbg_ctx; + uint8_t fips_drbg_started; + uint8_t prev_hw_drbg_block[Q_HW_DRBG_BLOCK_BYTES]; +}; + +/* + * initialize *ctx, requesting automatic reseed after reseed_interval + * calls to qpsi_rng_gen. callback is a function to get entropy. + * callback_ctx is a pointer to any context structure that function + * may need. (Pass NULL if no context structure is needed.) callback + * must return zero or a positive number on success, and a + * negative number on an error. + */ +int fips_drbg_init(struct msm_rng_device *msm_rng_ctx); + +/* generated random data. Returns 0 on success, -1 on failures */ +int fips_drbg_gen(struct fips_drbg_ctx_s *ctx, void *tgt, size_t len); + + +/* + * free resources and make zero state. 
+ * Failure to call fips_drbg_final is not a security issue, since + * CTR_DRBG provides backtracking resistance by updating Key and V + * immediately after the data has been generated but before the + * generate function returns. But it is a resource issue (except at + * program termination), as it abandons a FILE structure and a file + * descriptor. + */ +void fips_drbg_final(struct fips_drbg_ctx_s *ctx); + +#endif /* __MSM_FIPS_DRBG_H__ */ diff --git a/drivers/char/hw_random/msm_fips_selftest.c b/drivers/char/hw_random/msm_fips_selftest.c new file mode 100644 index 000000000000..554c379b68ce --- /dev/null +++ b/drivers/char/hw_random/msm_fips_selftest.c @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2014, 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include "fips_drbg.h" +#include "ctr_drbg.h" +#include "msm_rng.h" +#include "msm_fips_selftest.h" + +#define CTRAES128_ENTROPY_BYTES (16) +#define CTRAES128_NONCE_BYTES (8) +#define CTRAES128_MAX_OUTPUT_BYTES (64) + +struct ctr_drbg_testcase_s { + char *name; + char *entropy_string; + char *nonce_string; + char *reseed_entropy_string; + char *expected_string; +}; + +static struct ctr_drbg_testcase_s t0 = { + .name = "use_pr_0", + .entropy_string = "\x8f\xb9\x57\x3a\x54\x62\x53\xcd" + "\xbf\x62\x15\xa1\x80\x5a\x41\x38", + .nonce_string = "\x7c\x2c\xe6\x54\x02\xbc\xa6\x83", + .reseed_entropy_string = "\xbc\x5a\xd8\x9a\xe1\x8c\x49\x1f" + "\x90\xa2\xae\x9e\x7e\x2c\xf9\x9d", + .expected_string = "\x07\x62\x82\xe8\x0e\x65\xd7\x70" + "\x1a\x35\xb3\x44\x63\x68\xb6\x16" + "\xf8\xd9\x62\x23\xb9\xb5\x11\x64" + "\x23\xa3\xa2\x32\xc7\x2c\xea\xbf" + "\x4a\xcc\xc4\x0a\xc6\x19\xd6\xaa" + "\x68\xae\xdb\x8b\x26\x70\xb8\x07" + "\xcc\xe9\x9f\xc2\x1b\x8f\xa5\x16" + "\xef\x75\xb6\x8f\xc0\x6c\x87\xc7", +}; + +static struct ctr_drbg_testcase_s t1 = { + .name = "use_pr_1", + .entropy_string = "\xa3\x56\xf3\x9a\xce\x48\x59\xb1" + "\xe1\x99\x49\x40\x22\x8e\xa4\xeb", + .nonce_string = "\xff\x33\xe9\x51\x39\xf7\x67\xf1", + .reseed_entropy_string = "\x66\x8f\x0f\xe2\xd8\xa9\xa9\x29" + "\x20\xfc\xb9\xf3\x55\xd6\xc3\x4c", + .expected_string = "\xa1\x06\x61\x65\x7b\x98\x0f\xac" + "\xce\x77\x91\xde\x7f\x6f\xe6\x1e" + "\x88\x15\xe5\xe2\x4c\xce\xb8\xa6" + "\x63\xf2\xe8\x2f\x5b\xfb\x16\x92" + "\x06\x2a\xf3\xa8\x59\x05\xe0\x5a" + "\x92\x9a\x07\x65\xc7\x41\x29\x3a" + "\x4b\x1d\x15\x3e\x02\x14\x7b\xdd" + "\x74\x5e\xbd\x70\x07\x4d\x6c\x08", +}; + +static struct ctr_drbg_testcase_s *testlist[] = { + &t0, &t1, /* list of tests */ +}; + +static int allzeroP(void *p, size_t len) +{ + size_t i; + + for (i = 0; i < len; ++i) + if (((uint8_t *)p)[i] != 0) + return 0; + + return 1; +} + +/* + * basic test. return value is error count. 
+ */ +int fips_ctraes128_df_known_answer_test(struct ctr_debg_test_inputs_s *tcase) +{ + struct ctr_drbg_ctx_s ctx; + enum ctr_drbg_status_t rv; + + if (tcase->observed_string_len > CTRAES128_MAX_OUTPUT_BYTES) { + pr_debug("known answer test output is bigger than 64\n"); + return 1; + } + + memset(&ctx, 0, sizeof(ctx)); + + ctx.continuous_test_started = 1; + + rv = ctr_drbg_instantiate(&ctx, + tcase->entropy_string, + 8 * CTRAES128_ENTROPY_BYTES, + tcase->nonce_string, + 8 * CTRAES128_NONCE_BYTES, + 1<<19); + if (rv != CTR_DRBG_SUCCESS) { + pr_err("test instantiate failed with code %d\n", rv); + return 1; + } + + rv = ctr_drbg_reseed(&ctx, + tcase->reseed_entropy_string, + 8 * CTRAES128_ENTROPY_BYTES); + if (rv != CTR_DRBG_SUCCESS) { + pr_err("test reseed failed with code %d\n", rv); + return 1; + } + + rv = ctr_drbg_generate(&ctx, + tcase->observed_string, + tcase->observed_string_len * 8); + if (rv != CTR_DRBG_SUCCESS) { + pr_err("test generate (2) failed with code %d\n", rv); + return 1; + } + + rv = ctr_drbg_generate(&ctx, + tcase->observed_string, + tcase->observed_string_len * 8); + if (rv != CTR_DRBG_SUCCESS) { + pr_err("test generate (2) failed with code %d\n", rv); + return 1; + } + + ctr_drbg_uninstantiate(&ctx); + + if (!allzeroP(&ctx.seed, sizeof(ctx.seed))) { + pr_err("test Final failed to zero the context\n"); + return 1; + } + + pr_info("DRBG counter test done\n"); + return 0; + +} + +static int fips_drbg_healthcheck_sanitytest(void) +{ + struct ctr_drbg_ctx_s *p_ctx = NULL; + enum ctr_drbg_status_t rv = CTR_DRBG_SUCCESS; + char entropy_string[MSM_ENTROPY_BUFFER_SIZE]; + char nonce[MSM_NONCE_BUFFER_SIZE]; + char buffer[32]; + + pr_info("start DRBG health check sanity test\n"); + p_ctx = kzalloc(sizeof(struct ctr_drbg_ctx_s), GFP_KERNEL); + if (!p_ctx) + return CTR_DRBG_GENERAL_ERROR; + + /* + * test DRGB Instantiaion function error handling. + * Sends a NULL pointer as DTR-DRBG context. + */ + rv = ctr_drbg_instantiate(NULL, + entropy_string, + 8 * CTRAES128_ENTROPY_BYTES, + nonce, + 8 * CTRAES128_NONCE_BYTES, + 1<<19); + if (rv == CTR_DRBG_SUCCESS) { + rv = CTR_DRBG_INVALID_ARG; + pr_err("failed to handle NULL pointer of CTR context\n"); + goto outbuf; + } + + /* + * test DRGB Instantiaion function error handling. + * Sends a NULL pointer as entropy input. + */ + rv = ctr_drbg_instantiate(p_ctx, + NULL, + 8 * CTRAES128_ENTROPY_BYTES, + nonce, + 8 * CTRAES128_NONCE_BYTES, + 1<<19); + if (rv == CTR_DRBG_SUCCESS) { + rv = CTR_DRBG_INVALID_ARG; + pr_err("failed to handle NULL pointer of entropy string\n"); + goto outbuf; + } + + rv = ctr_drbg_instantiate(p_ctx, + entropy_string, + 8 * CTRAES128_ENTROPY_BYTES, + NULL, + 8 * CTRAES128_NONCE_BYTES, + 1<<19); + if (rv == CTR_DRBG_SUCCESS) { + rv = CTR_DRBG_INVALID_ARG; + pr_err("failed to handle NULL pointer of nonce string\n"); + goto outbuf; + } + + /* + * test DRGB Instantiaion function error handling. + * Sends very long seed length. + */ + rv = ctr_drbg_instantiate(p_ctx, + entropy_string, + 8 * CTRAES128_ENTROPY_BYTES, + nonce, + 32 * CTRAES128_NONCE_BYTES, + 1<<19); + if (rv == CTR_DRBG_SUCCESS) { + rv = CTR_DRBG_INVALID_ARG; + pr_err("failed to handle incorrect seed size\n"); + goto outbuf; + } + + rv = ctr_drbg_instantiate(p_ctx, + entropy_string, + 8 * CTRAES128_ENTROPY_BYTES, + nonce, + 8 * CTRAES128_NONCE_BYTES, + 1<<19); + if (rv != CTR_DRBG_SUCCESS) { + pr_err("Instantiation failed to handle CTR-DRBG instance\n"); + goto outbuf; + } + + /* + * test DRGB generator function error handling. + * set output string as NULL. 
+ */ + rv = ctr_drbg_generate(p_ctx, NULL, 256); + if (rv == CTR_DRBG_SUCCESS) { + pr_err("failed to handle incorrect buffer pointer\n"); + rv = CTR_DRBG_INVALID_ARG; + goto outdrbg; + } + + rv = ctr_drbg_generate(p_ctx, &buffer, 1 << 20); + if (rv == CTR_DRBG_SUCCESS) { + pr_err("failed to handle too long output length\n"); + rv = CTR_DRBG_INVALID_ARG; + goto outdrbg; + } + + rv = ctr_drbg_generate(p_ctx, &buffer, 177); + if (rv == CTR_DRBG_SUCCESS) { + pr_err("failed to handle incorrect output length\n"); + rv = CTR_DRBG_INVALID_ARG; + goto outdrbg; + } + + pr_info("DRBG health check sanity test passed\n"); + rv = CTR_DRBG_SUCCESS; + +outdrbg: + ctr_drbg_uninstantiate(p_ctx); + +outbuf: + kzfree(p_ctx); + + return rv; +} + +int fips_self_test(void) +{ + struct ctr_debg_test_inputs_s cavs_input; + uint8_t entropy[CTRAES128_ENTROPY_BYTES]; + uint8_t nonce[CTRAES128_NONCE_BYTES]; + uint8_t reseed_entropy[CTRAES128_ENTROPY_BYTES]; + uint8_t expected[CTRAES128_MAX_OUTPUT_BYTES]; + uint8_t observed[CTRAES128_MAX_OUTPUT_BYTES]; + unsigned int i; + int errors = 0; + int ret; + unsigned int len; + + cavs_input.entropy_string = entropy; + cavs_input.nonce_string = nonce; + cavs_input.reseed_entropy_string = reseed_entropy; + cavs_input.observed_string = observed; + cavs_input.observed_string_len = CTRAES128_MAX_OUTPUT_BYTES; + + ret = fips_drbg_healthcheck_sanitytest(); + if (CTR_DRBG_SUCCESS != ret) { + pr_err("DRBG health check fail\n"); + errors++; + return errors; + } + + len = sizeof(testlist)/sizeof(struct ctr_drbg_testcase_s *); + for (i = 0; i < len; ++i) { + memcpy(entropy, + testlist[i]->entropy_string, + CTRAES128_ENTROPY_BYTES); + memcpy(nonce, + testlist[i]->nonce_string, + CTRAES128_NONCE_BYTES); + memcpy(reseed_entropy, + testlist[i]->reseed_entropy_string, + CTRAES128_ENTROPY_BYTES); + memcpy(expected, + testlist[i]->expected_string, + CTRAES128_MAX_OUTPUT_BYTES); + + pr_debug("starting test %s\n", testlist[i]->name); + ret = fips_ctraes128_df_known_answer_test(&cavs_input); + pr_debug("completed test %s\n\n", testlist[i]->name); + if (ret) { + pr_debug("got error from drbg known answer test\n"); + return 1; + } + + if (memcmp(expected, + cavs_input.observed_string, + CTRAES128_MAX_OUTPUT_BYTES) != 0) { + errors++; + pr_info("%s: generate failed\n", testlist[i]->name); + return 1; + } + pr_info("%s: generate PASSED\n", testlist[i]->name); + } + if (errors == 0) + pr_debug("All tests passed\n"); + else + pr_debug("%d tests failed\n", errors); + + return errors; +} diff --git a/drivers/char/hw_random/msm_fips_selftest.h b/drivers/char/hw_random/msm_fips_selftest.h new file mode 100644 index 000000000000..ea6e111239de --- /dev/null +++ b/drivers/char/hw_random/msm_fips_selftest.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2014, 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef __MSM_FIPS_SELFTEST_H__ +#define __MSM_FIPS_SELFTEST_H__ + +struct ctr_debg_test_inputs_s { + char *entropy_string; /* must by 16 bytes */ + char *nonce_string; /* must be 8 bytes */ + char *reseed_entropy_string; /* must be 16 bytes */ + char *observed_string; /* length is defined + in observed_string_len */ + int observed_string_len; +}; + +int fips_ctraes128_df_known_answer_test(struct ctr_debg_test_inputs_s *tcase); + +int fips_self_test(void); + +#endif /* __MSM_FIPS_SELFTEST_H__ */ diff --git a/drivers/char/hw_random/msm_rng.c b/drivers/char/hw_random/msm_rng.c index e3dda3e21de4..2bc308bf1615 100644 --- a/drivers/char/hw_random/msm_rng.c +++ b/drivers/char/hw_random/msm_rng.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2013, 2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2013,2015,2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -28,12 +28,13 @@ #include #include #include -#include -#include #include - +#include "msm_rng.h" +#include "ctr_drbg.h" +#include "fips_drbg.h" +#include "msm_fips_selftest.h" #define DRIVER_NAME "msm_rng" @@ -52,22 +53,28 @@ #define MAX_HW_FIFO_DEPTH 16 /* FIFO is 16 words deep */ #define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide */ -struct msm_rng_device { - struct platform_device *pdev; - void __iomem *base; - struct clk *prng_clk; - uint32_t qrng_perf_client; - struct mutex rng_lock; +enum { + FIPS_NOT_STARTED = 0, + DRBG_FIPS_STARTED, }; struct msm_rng_device msm_rng_device_info; -static struct msm_rng_device *msm_rng_dev_cached; -struct mutex cached_rng_lock; + +enum fips_status g_fips140_status; + +#ifdef CONFIG_FIPS_ENABLE +static int fips_mode_enabled = FIPS_NOT_STARTED; +#define FIPS140_STATUS FIPS140_STATUS_FAIL +#else +#define FIPS140_STATUS FIPS140_STATUS_NA +#endif + static long msm_rng_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { long ret = 0; + pr_debug("ioctl: cmd = %d\n", cmd); switch (cmd) { case QRNG_IOCTL_RESET_BUS_BANDWIDTH: pr_info("calling msm_rng_bus_scale(LOW)\n"); @@ -89,8 +96,7 @@ static long msm_rng_ioctl(struct file *filp, unsigned int cmd, * back to caller * */ -static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, - void *data, size_t max) +int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, void *data) { struct platform_device *pdev; void __iomem *base; @@ -103,18 +109,104 @@ static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, pdev = msm_rng_dev->pdev; base = msm_rng_dev->base; + mutex_lock(&msm_rng_dev->rng_lock); + + if (msm_rng_dev->qrng_perf_client) { + ret = msm_bus_scale_client_update_request( + msm_rng_dev->qrng_perf_client, 1); + if (ret) + pr_err("bus_scale_client_update_req failed\n"); + } + /* enable PRNG clock */ + ret = clk_prepare_enable(msm_rng_dev->prng_clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable clock in callback\n"); + goto err; + } + /* read random data from h/w */ + do { + /* check status bit if data is available */ + while (!(readl_relaxed(base + PRNG_STATUS_OFFSET) + & 0x00000001)) { + if (failed == 10) { + dev_err(&pdev->dev, + "Data not available after retry\n"); + break; + } + dev_err(&pdev->dev, "msm_rng:Data not available\n"); + msleep_interruptible(10); + failed++; + } + + /* read FIFO */ + val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET); + if (!val) + break; /* no data to read so just bail */ + + /* write data back to 
callers pointer */ + *(retdata++) = val; + currsize += 4; + + } while (currsize < Q_HW_DRBG_BLOCK_BYTES); + + /* vote to turn off clock */ + clk_disable_unprepare(msm_rng_dev->prng_clk); +err: + if (msm_rng_dev->qrng_perf_client) { + ret = msm_bus_scale_client_update_request( + msm_rng_dev->qrng_perf_client, 0); + if (ret) + dev_err(&pdev->dev, + "bus_scale_client_update_req failed ret %d\n", + ret); + } + mutex_unlock(&msm_rng_dev->rng_lock); + + return currsize; +} + +static int msm_rng_drbg_read(struct hwrng *rng, + void *data, size_t max, bool wait) +{ + struct msm_rng_device *msm_rng_dev; + struct platform_device *pdev; + void __iomem *base; + size_t currsize = 0; + u32 val; + u32 *retdata = data; + int ret, ret1; + int failed = 0; + + msm_rng_dev = (struct msm_rng_device *)rng->priv; + pdev = msm_rng_dev->pdev; + base = msm_rng_dev->base; + /* no room for word data */ if (max < 4) return 0; mutex_lock(&msm_rng_dev->rng_lock); + /* read random data from CTR-AES based DRBG */ + if (FIPS140_DRBG_ENABLED == msm_rng_dev->fips140_drbg_enabled) { + ret1 = fips_drbg_gen(msm_rng_dev->drbg_ctx, data, max); + if (FIPS140_PRNG_ERR == ret1) { + mutex_unlock(&msm_rng_dev->rng_lock); + panic("random number generator generator error\n"); + } + } else { + ret1 = 1; + } + if (msm_rng_dev->qrng_perf_client) { ret = msm_bus_scale_client_update_request( msm_rng_dev->qrng_perf_client, 1); if (ret) - pr_err("bus_scale_client_update_req failed!\n"); + dev_err(&pdev->dev, + "bus_scale_client_update_req failed %d\n", ret); } + + /* read random data from h/w */ /* enable PRNG clock */ ret = clk_prepare_enable(msm_rng_dev->prng_clk); if (ret) { @@ -127,10 +219,11 @@ static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, while (!(readl_relaxed(base + PRNG_STATUS_OFFSET) & 0x00000001)) { if (failed == 10) { - pr_err("Data not available after retry\n"); + dev_err(&pdev->dev, + "Data not available after retry\n"); break; } - pr_err("msm_rng:Data not available!\n"); + dev_err(&pdev->dev, "msm_rng:Data not available\n"); msleep_interruptible(10); failed++; } @@ -141,14 +234,14 @@ static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, break; /* no data to read so just bail */ /* write data back to callers pointer */ - *(retdata++) = val; + if (0 != ret1) + *(retdata++) = val; currsize += 4; + /* make sure we stay on 32bit boundary */ if ((max - currsize) < 4) break; - } while (currsize < max); - /* vote to turn off clock */ clk_disable_unprepare(msm_rng_dev->prng_clk); err: @@ -156,29 +249,113 @@ static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, ret = msm_bus_scale_client_update_request( msm_rng_dev->qrng_perf_client, 0); if (ret) - pr_err("bus_scale_client_update_req failed!\n"); + dev_err(&pdev->dev, + "bus_scale_client_update_req failed\n"); } + mutex_unlock(&msm_rng_dev->rng_lock); - val = 0L; return currsize; } -static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) + +#ifdef CONFIG_FIPS_ENABLE +static void _fips_drbg_init_error(struct msm_rng_device *msm_rng_dev) { - struct msm_rng_device *msm_rng_dev; - int rv = 0; + unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME); + clk_put(msm_rng_dev->prng_clk); + iounmap(msm_rng_dev->base); + kzfree(msm_rng_dev->drbg_ctx); + kzfree(msm_rng_dev); + /* random number generator initialization error */ + BUG(); +} +#else +static inline void _fips_drbg_init_error(struct msm_rng_device *msm_rng_dev) +{ +} +#endif - msm_rng_dev = (struct msm_rng_device *)rng->priv; - rv = msm_rng_direct_read(msm_rng_dev, data, max); 
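
For orientation (an editorial aside, not part of the patch): msm_rng_drbg_read() above pulls its output from the CTR-AES DRBG when the FIPS DRBG is enabled and otherwise hands back raw PRNG FIFO words, and the reworked msm_rng_read() that follows is the .read hook the driver registers with the kernel's hwrng core. A minimal sketch of how such a callback is wired up is below; only the <linux/hw_random.h> API (struct hwrng, hwrng_register, hwrng_unregister) is real, every demo_* name is a hypothetical stand-in.

#include <linux/module.h>
#include <linux/hw_random.h>
#include <linux/string.h>

/* Hypothetical backend: fill 'data' with up to 'max' random bytes and
 * return how many were produced; msm_rng_read() above satisfies the same
 * contract from the PRNG FIFO and/or the FIPS DRBG. */
static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	memset(data, 0xa5, max);	/* placeholder bytes, not real entropy */
	return max;
}

static struct hwrng demo_rng = {
	.name = "demo_rng",
	.read = demo_rng_read,
};

static int __init demo_rng_init(void)
{
	/* after this, the core exposes the device through /dev/hwrng */
	return hwrng_register(&demo_rng);
}

static void __exit demo_rng_exit(void)
{
	hwrng_unregister(&demo_rng);
}

module_init(demo_rng_init);
module_exit(demo_rng_exit);
MODULE_LICENSE("GPL v2");

Once hwrng_register() succeeds the device is consumable through /dev/hwrng, which is also how the msm_rng instance registered further down in this patch is used.
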
+#ifdef CONFIG_FIPS_ENABLE +int _do_msm_fips_drbg_init(void *rng_dev) +{ + struct msm_rng_device *msm_rng_dev = (struct msm_rng_device *) rng_dev; + + int ret; + + if (!msm_rng_dev) + return 1; + + ret = fips_drbg_init(msm_rng_dev); + if (!ret) { + pr_debug("start fips self test\n"); + ret = fips_self_test(); + if (ret) { + msm_rng_dev->fips140_drbg_enabled = + FIPS140_DRBG_DISABLED; + _fips_drbg_init_error(msm_rng_dev); + } else { + msm_rng_dev->fips140_drbg_enabled = + FIPS140_DRBG_ENABLED; + } + } else { + msm_rng_dev->fips140_drbg_enabled = FIPS140_DRBG_DISABLED; + _fips_drbg_init_error(msm_rng_dev); + } - return rv; + return ret; +} +#else +int _do_msm_fips_drbg_init(void *rng_dev) +{ + return 0; } +#endif +#ifdef CONFIG_FIPS_ENABLE +static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct msm_rng_device *msm_rng_dev = (struct msm_rng_device *)rng->priv; + unsigned char a[Q_HW_DRBG_BLOCK_BYTES]; + int read_size; + unsigned char *p = data; + + switch (fips_mode_enabled) { + case DRBG_FIPS_STARTED: + return msm_rng_drbg_read(rng, data, max, wait); + case FIPS_NOT_STARTED: + if (g_fips140_status != FIPS140_STATUS_PASS) { + do { + read_size = msm_rng_direct_read(msm_rng_dev, a); + if (read_size <= 0) + break; + if ((max - read_size > 0)) { + memcpy(p, a, read_size); + p += read_size; + max -= read_size; + } else { + memcpy(p, a, max); + break; + } + } while (1); + return p - (unsigned char *)data; + } + fips_mode_enabled = DRBG_FIPS_STARTED; + return msm_rng_drbg_read(rng, data, max, wait); + default: + return 0; + } + return 0; +} +#else +static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + return msm_rng_drbg_read(rng, data, max, wait); +} +#endif static struct hwrng msm_rng = { .name = DRIVER_NAME, .read = msm_rng_read, - .quality = 700, }; static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev) @@ -191,7 +368,7 @@ static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev) ret = msm_bus_scale_client_update_request( msm_rng_dev->qrng_perf_client, 1); if (ret) - pr_err("bus_scale_client_update_req failed!\n"); + pr_err("bus_scale_client_update_req failed\n"); } /* Enable the PRNG CLK */ ret = clk_prepare_enable(msm_rng_dev->prng_clk); @@ -230,7 +407,7 @@ static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev) ret = msm_bus_scale_client_update_request( msm_rng_dev->qrng_perf_client, 0); if (ret) - pr_err("bus_scale_client_update_req failed!\n"); + pr_err("bus_scale_client_update_req failed\n"); } return 0; @@ -242,18 +419,31 @@ static const struct file_operations msm_rng_fops = { static struct class *msm_rng_class; static struct cdev msm_rng_cdev; +#ifdef CONFIG_FIPS_ENABLE + +static void _first_msm_drbg_init(struct msm_rng_device *msm_rng_dev) +{ + fips_reg_drbg_callback((void *)msm_rng_dev); +} +#else +static void _first_msm_drbg_init(struct msm_rng_device *msm_rng_dev) +{ + _do_msm_fips_drbg_init(msm_rng_dev); +} +#endif + static int msm_rng_probe(struct platform_device *pdev) { struct resource *res; struct msm_rng_device *msm_rng_dev = NULL; void __iomem *base = NULL; - bool configure_qrng = true; int error = 0; int ret = 0; struct device *dev; - struct msm_bus_scale_pdata *qrng_platform_support = NULL; + bool configure_qrng = true; + g_fips140_status = FIPS140_STATUS; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "invalid address\n"); @@ -276,6 +466,13 @@ static int msm_rng_probe(struct platform_device *pdev) } msm_rng_dev->base = base; + 
msm_rng_dev->drbg_ctx = kzalloc(sizeof(*msm_rng_dev->drbg_ctx), + GFP_KERNEL); + if (!msm_rng_dev->drbg_ctx) { + error = -ENOMEM; + goto err_alloc_drbg_ctx; + } + /* create a handle for clock control */ if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node, "qcom,msm-rng-iface-clk"))) @@ -317,7 +514,6 @@ static int msm_rng_probe(struct platform_device *pdev) } mutex_init(&msm_rng_dev->rng_lock); - mutex_init(&cached_rng_lock); /* register with hwrng framework */ msm_rng.priv = (unsigned long) msm_rng_dev; @@ -343,15 +539,19 @@ static int msm_rng_probe(struct platform_device *pdev) goto unregister_chrdev; } cdev_init(&msm_rng_cdev, &msm_rng_fops); - msm_rng_dev_cached = msm_rng_dev; + + _first_msm_drbg_init(msm_rng_dev); + return error; unregister_chrdev: unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME); rollback_clk: clk_put(msm_rng_dev->prng_clk); -err_clk_get: +err_alloc_drbg_ctx: iounmap(msm_rng_dev->base); +err_clk_get: + kzfree(msm_rng_dev->drbg_ctx); err_iomap: kzfree(msm_rng_dev); err_exit: @@ -370,68 +570,15 @@ static int msm_rng_remove(struct platform_device *pdev) if (msm_rng_dev->qrng_perf_client) msm_bus_scale_unregister_client(msm_rng_dev->qrng_perf_client); - kzfree(msm_rng_dev); - msm_rng_dev_cached = NULL; - return 0; -} - -static int qrng_get_random(struct crypto_rng *tfm, u8 *rdata, - unsigned int dlen) -{ - int sizeread = 0; - int rv = -EFAULT; - - if (!msm_rng_dev_cached) { - pr_err("%s: msm_rng_dev is not initialized.\n", __func__); - rv = -ENODEV; - goto err_exit; - } - - if (!rdata) { - pr_err("%s: data buffer is null!\n", __func__); - rv = -EINVAL; - goto err_exit; - } - - if (signal_pending(current) || - mutex_lock_interruptible(&cached_rng_lock)) { - pr_err("%s: mutex lock interrupted!\n", __func__); - rv = -ERESTARTSYS; - goto err_exit; + if (msm_rng_dev->drbg_ctx) { + fips_drbg_final(msm_rng_dev->drbg_ctx); + kzfree(msm_rng_dev->drbg_ctx); + msm_rng_dev->drbg_ctx = NULL; } - sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen); - - if (sizeread == dlen) - rv = 0; - - mutex_unlock(&cached_rng_lock); -err_exit: - return rv; - -} - -static int qrng_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen) -{ + kzfree(msm_rng_dev); return 0; } -static struct crypto_alg rng_alg = { - .cra_name = "qrng", - .cra_driver_name = "fips_hw_qrng", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_RNG, - .cra_ctxsize = 0, - .cra_type = &crypto_rng_type, - .cra_module = THIS_MODULE, - .cra_u = { - .rng = { - .rng_make_random = qrng_get_random, - .rng_reset = qrng_reset, - .seedsize = 0, - } - } -}; - static struct of_device_id qrng_match[] = { { .compatible = "qcom,msm-rng", }, @@ -450,35 +597,21 @@ static struct platform_driver rng_driver = { static int __init msm_rng_init(void) { - int ret; - - msm_rng_dev_cached = NULL; - ret = platform_driver_register(&rng_driver); - if (ret) { - pr_err("%s: platform_driver_register error:%d\n", - __func__, ret); - goto err_exit; - } - ret = crypto_register_alg(&rng_alg); - if (ret) { - pr_err("%s: crypto_register_algs error:%d\n", - __func__, ret); - goto err_exit; - } - -err_exit: - return ret; + return platform_driver_register(&rng_driver); } module_init(msm_rng_init); static void __exit msm_rng_exit(void) { - crypto_unregister_alg(&rng_alg); platform_driver_unregister(&rng_driver); } module_exit(msm_rng_exit); +#ifdef CONFIG_FIPS_ENABLE +EXPORT_SYMBOL(fips_ctraes128_df_known_answer_test); +#endif +EXPORT_SYMBOL(_do_msm_fips_drbg_init); MODULE_AUTHOR("The Linux Foundation"); MODULE_DESCRIPTION("Qualcomm MSM 
Random Number Driver");
diff --git a/drivers/char/hw_random/msm_rng.h b/drivers/char/hw_random/msm_rng.h
new file mode 100644
index 000000000000..83fdde6c1a3f
--- /dev/null
+++ b/drivers/char/hw_random/msm_rng.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2014, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __MSM_RNG_HEADER__
+#define __MSM_RNG_HEADER__
+
+#include
+#include
+
+struct _fips_drbg_ctx;
+extern enum fips_status g_fips140_status;
+
+#define FIPS140_DRBG_ENABLED (1)
+#define FIPS140_DRBG_DISABLED (0)
+
+#define Q_HW_DRBG_BLOCK_BYTES (32)
+
+extern void fips_reg_drbg_callback(void *src);
+
+struct msm_rng_device {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct clk *prng_clk;
+ uint32_t qrng_perf_client;
+ struct mutex rng_lock;
+ struct fips_drbg_ctx_s *drbg_ctx;
+ int fips140_drbg_enabled;
+};
+
+/*
+ *
+ * This function reads the hardware random bit generator
+ * directly and returns the data to the caller.
+ *
+ */
+int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev, void *data);
+
+#endif
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
index 993840ca12eb..f13fe65f7a8d 100644
--- a/drivers/crypto/msm/Makefile
+++ b/drivers/crypto/msm/Makefile
@@ -7,6 +7,10 @@ endif
 ifdef CONFIG_COMPAT
 obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += compat_qcedev.o
 endif
+ifeq ($(CONFIG_FIPS_ENABLE), y)
+ obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev_fips.o
+ obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto_fips.o
+endif
 obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
 obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += ice.o
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index 7455a122922c..6937c9e20a47 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -1,6 +1,6 @@
 /* Qualcomm Crypto Engine driver API
 *
- * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2015, 2018 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -157,8 +157,8 @@ struct qce_req {
 unsigned int authsize; /* authentication key kength */
 unsigned char nonce[MAX_NONCE];/* nonce for ccm mode */
 unsigned char *assoc; /* Ptr to formatted associated data */
- unsigned int assoclen; /* Formatted associated data length */
- struct scatterlist *asg; /* Formatted associated data sg */
+ size_t assoclen; /* Formatted associated data length */
+ size_t trail_assoclen; /* trail associated data length */
 unsigned char *enckey; /* cipher key */
 unsigned int encklen; /* cipher key length */
 unsigned char *iv; /* initialization vector */
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 3060077aa15e..4061a5d996f4 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -1,6 +1,6 @@
 /* Qualcomm Crypto Engine driver.
 *
- * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -38,22 +38,16 @@ #include "qcryptohw_50.h" #include "qce_ota.h" -#define CRYPTO_CONFIG_RESET 0xE01EF -#define MAX_SPS_DESC_FIFO_SIZE 0xfff0 -#define QCE_MAX_NUM_DSCR 0x200 +#define CRYPTO_CONFIG_RESET 0xE001F +#define QCE_MAX_NUM_DSCR 0x500 #define QCE_SECTOR_SIZE 0x200 #define CE_CLK_100MHZ 100000000 #define CE_CLK_DIV 1000000 #define CRYPTO_CORE_MAJOR_VER_NUM 0x05 #define CRYPTO_CORE_MINOR_VER_NUM 0x03 -#define CRYPTO_CORE_STEP_VER_NUM 0x1 - -#define CRYPTO_REQ_USER_PAT 0xdead0000 static DEFINE_MUTEX(bam_register_lock); -static DEFINE_MUTEX(qce_iomap_mutex); - struct bam_registration_info { struct list_head qlist; unsigned long handle; @@ -64,40 +58,6 @@ struct bam_registration_info { }; static LIST_HEAD(qce50_bam_list); -/* Used to determine the mode */ -#define MAX_BUNCH_MODE_REQ 2 -/* Max number of request supported */ -#define MAX_QCE_BAM_REQ 8 -/* Interrupt flag will be set for every SET_INTR_AT_REQ request */ -#define SET_INTR_AT_REQ (MAX_QCE_BAM_REQ / 2) -/* To create extra request space to hold dummy request */ -#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ (MAX_QCE_BAM_REQ + 1) -/* Allocate the memory for MAX_QCE_BAM_REQ + 1 (for dummy request) */ -#define MAX_QCE_ALLOC_BAM_REQ MAX_QCE_BAM_REQ_WITH_DUMMY_REQ -/* QCE driver modes */ -#define IN_INTERRUPT_MODE 0 -#define IN_BUNCH_MODE 1 -/* Dummy request data length */ -#define DUMMY_REQ_DATA_LEN 64 -/* Delay timer to expire when in bunch mode */ -#define DELAY_IN_JIFFIES 5 -/* Index to point the dummy request */ -#define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ - -#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec)) - -enum qce_owner { - QCE_OWNER_NONE = 0, - QCE_OWNER_CLIENT = 1, - QCE_OWNER_TIMEOUT = 2 -}; - -struct dummy_request { - struct qce_sha_req sreq; - struct scatterlist sg; - struct ahash_request areq; -}; - /* * CE HW device structure. * Each engine has an instance of the structure. 
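
The struct qce_device comment above is the hinge of the qce50.c changes: the hunks that follow remove the multi-request machinery and move per-request state (the qce_cb completion pointer, nents counts, IVs, direction and mode) back into this one per-engine structure, so in effect one request's state at a time lives on the engine and its completion is reported through the stored callback, as the *_complete() helpers further down show. A small illustrative sketch of that model follows; all demo_* names are invented and are not the driver's symbols.

#include <linux/types.h>
#include <linux/errno.h>

/* One request's state at a time is parked on the engine; the callback and
 * cookie stored at submit time are used when the transfer finishes. */
typedef void (*demo_comp_fn)(void *cookie, int result);

struct demo_engine {
	void *cur_cookie;	/* request currently using the engine */
	demo_comp_fn cur_cb;	/* completion supplied with that request */
	bool busy;
};

static int demo_submit(struct demo_engine *eng, void *cookie, demo_comp_fn cb)
{
	if (eng->busy)
		return -EBUSY;	/* caller must serialize or retry */
	eng->cur_cookie = cookie;
	eng->cur_cb = cb;
	eng->busy = true;
	/* ... program the crypto registers / queue BAM descriptors ... */
	return 0;
}

static void demo_transfer_done(struct demo_engine *eng, int result)
{
	eng->busy = false;
	eng->cur_cb(eng->cur_cookie, result);	/* report back to the submitter */
}

The driver's real submit paths additionally queue SPS/BAM descriptors and read the outcome from the shared result dump, but the ownership-and-callback shape is the same.
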
@@ -111,8 +71,6 @@ struct qce_device { unsigned char *coh_vmem; /* Allocated coherent virtual memory */ dma_addr_t coh_pmem; /* Allocated coherent physical memory */ int memsize; /* Memory allocated */ - unsigned char *iovec_vmem; /* Allocate iovec virtual memory */ - int iovec_memsize; /* Memory allocated */ uint32_t bam_mem; /* bam physical address, from DT */ uint32_t bam_mem_size; /* bam io size, from DT */ int is_shared; /* CE HW is shared */ @@ -128,40 +86,37 @@ struct qce_device { struct clk *ce_core_clk; /* Handle to CE clk */ struct clk *ce_clk; /* Handle to CE clk */ struct clk *ce_bus_clk; /* Handle to CE AXI clk*/ + bool no_get_around; - bool no_ccm_mac_status_get_around; + qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */ + + int assoc_nents; + int ivsize; + int authsize; + int src_nents; + int dst_nents; + + dma_addr_t phy_iv_in; + unsigned char dec_iv[16]; + int dir; + void *areq; + enum qce_cipher_mode_enum mode; + struct qce_ce_cfg_reg_setting reg; + struct ce_sps_data ce_sps; + uint32_t engines_avail; + dma_addr_t phy_ota_src; + dma_addr_t phy_ota_dst; + unsigned int ota_size; unsigned int ce_opp_freq_hz; + bool use_sw_aes_cbc_ecb_ctr_algo; bool use_sw_aead_algo; bool use_sw_aes_xts_algo; bool use_sw_ahash_algo; bool use_sw_hmac_algo; bool use_sw_aes_ccm_algo; - uint32_t engines_avail; - struct qce_ce_cfg_reg_setting reg; - struct ce_bam_info ce_bam_info; - struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ]; - unsigned int ce_request_index; - enum qce_owner owner; - atomic_t no_of_queued_req; - struct timer_list timer; - struct dummy_request dummyreq; - unsigned int mode; - unsigned int intr_cadence; - unsigned int dev_no; - struct qce_driver_stats qce_stats; - atomic_t bunch_cmd_seq; - atomic_t last_intr_seq; - bool cadence_flag; - uint8_t *dummyreq_in_buf; }; -static void print_notify_debug(struct sps_event_notify *notify); -static void _sps_producer_callback(struct sps_event_notify *notify); -static int qce_dummy_req(struct qce_device *pce_dev); - -static int _qce50_disp_stats; - /* Standard initialization vector for SHA-1, source: FIPS 180-2 */ static uint32_t _std_init_vector_sha1[] = { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 @@ -218,8 +173,8 @@ static int count_sg(struct scatterlist *sg, int nbytes) int i; for (i = 0; nbytes > 0; i++, sg = scatterwalk_sg_next(sg)) { - if (NULL == sg) { - pr_err("qce50.c: count_sg, sg = NULL"); + if (sg == NULL) { + pr_err("qce50: count_sg, sg = NULL\n"); break; } nbytes -= sg->length; @@ -233,8 +188,8 @@ static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int i; for (i = 0; i < nents; ++i) { - if (NULL == sg) { - pr_err("qce50.c: qce_dma_map_sg, sg = NULL"); + if (sg == NULL) { + pr_err("qce50: qce_dma_map_sg, sg = NULL\n"); break; } dma_map_sg(dev, sg, 1, direction); @@ -250,8 +205,8 @@ static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int i; for (i = 0; i < nents; ++i) { - if (NULL == sg) { - pr_err("qce50.c: qce_dma_unmap_sg, sg = NULL"); + if (sg == NULL) { + pr_err("qce50: qce_dma_unmap_sg, sg = NULL\n"); break; } dma_unmap_sg(dev, sg, 1, direction); @@ -277,34 +232,18 @@ static int _probe_ce_engine(struct qce_device *pce_dev) pce_dev->phy_iobase, maj_rev, min_rev, step_rev); return -EIO; } else { - /* - * The majority of crypto HW bugs have been fixed in 5.3.0 and - * above. That allows a single sps transfer of consumer - * pipe, and a single sps transfer of producer pipe - * for a crypto request. no_get_around flag indicates this. 
- * - * In 5.3.1, the CCM MAC_FAILED in result dump issue is - * fixed. no_ccm_mac_status_get_around flag indicates this. - */ pce_dev->no_get_around = (min_rev >= CRYPTO_CORE_MINOR_VER_NUM) ? true : false; - if (min_rev > CRYPTO_CORE_MINOR_VER_NUM) - pce_dev->no_ccm_mac_status_get_around = true; - else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) && - (step_rev >= CRYPTO_CORE_STEP_VER_NUM)) - pce_dev->no_ccm_mac_status_get_around = true; - else - pce_dev->no_ccm_mac_status_get_around = false; } - pce_dev->ce_bam_info.minor_version = min_rev; + pce_dev->ce_sps.minor_version = min_rev; pce_dev->engines_avail = readl_relaxed(pce_dev->iobase + CRYPTO_ENGINES_AVAIL); dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n", maj_rev, min_rev, step_rev, pce_dev->phy_iobase); - pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE; + pce_dev->ce_sps.ce_burst_size = MAX_CE_BAM_BURST_SIZE; dev_info(pce_dev->pdev, "CE device = 0x%x\n" @@ -314,25 +253,21 @@ static int _probe_ce_engine(struct qce_device *pce_dev) "IO base BAM = 0x%pK\n" "BAM IRQ %d\n" "Engines Availability = 0x%x\n", - pce_dev->ce_bam_info.ce_device, + pce_dev->ce_sps.ce_device, pce_dev->iobase, - pce_dev->ce_bam_info.dest_pipe_index, - pce_dev->ce_bam_info.src_pipe_index, - pce_dev->ce_bam_info.bam_iobase, - pce_dev->ce_bam_info.bam_irq, + pce_dev->ce_sps.dest_pipe_index, + pce_dev->ce_sps.src_pipe_index, + pce_dev->ce_sps.bam_iobase, + pce_dev->ce_sps.bam_irq, pce_dev->engines_avail); return 0; }; static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo( - struct qce_device *pce_dev, - int req_info, struct qce_sha_req *sreq) + struct qce_device *pce_dev, struct qce_sha_req *sreq) { - struct ce_sps_data *pce_sps_data; - struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr; - pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; - cmdlistptr = &pce_sps_data->cmdlistptr; switch (sreq->alg) { case QCE_HASH_SHA1: return &cmdlistptr->auth_sha1; @@ -356,7 +291,7 @@ static int _ce_setup_hash(struct qce_device *pce_dev, struct qce_sha_req *sreq, struct qce_cmdlist_info *cmdlistinfo) { - uint32_t auth32[(SHA256_DIGEST_SIZE / sizeof(uint32_t))+1]; + uint32_t auth32[(SHA256_DIGEST_SIZE / sizeof(uint32_t)) + 1]; uint32_t diglen; int i; uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = { @@ -481,28 +416,24 @@ static int _ce_setup_hash(struct qce_device *pce_dev, if (sreq->size) pce->data = sreq->size; else - pce->data = pce_dev->ce_bam_info.ce_burst_size; + pce->data = pce_dev->ce_sps.ce_burst_size; return 0; } static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo( - struct qce_device *pce_dev, - int req_info, struct qce_req *creq) + struct qce_device *pce_dev, struct qce_req *creq) { - struct ce_sps_data *pce_sps_data; - struct qce_cmdlistptr_ops *cmdlistptr; - - pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; - cmdlistptr = &pce_sps_data->cmdlistptr; switch (creq->alg) { case CIPHER_ALG_DES: switch (creq->mode) { case QCE_MODE_CBC: if (creq->auth_alg == QCE_HASH_SHA1_HMAC) - return &cmdlistptr->aead_hmac_sha1_cbc_des; + return &pce_dev->ce_sps. + cmdlistptr.aead_hmac_sha1_cbc_des; else if (creq->auth_alg == QCE_HASH_SHA256_HMAC) - return &cmdlistptr->aead_hmac_sha256_cbc_des; + return &pce_dev->ce_sps. 
+ cmdlistptr.aead_hmac_sha256_cbc_des; else return NULL; break; @@ -514,9 +445,11 @@ static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo( switch (creq->mode) { case QCE_MODE_CBC: if (creq->auth_alg == QCE_HASH_SHA1_HMAC) - return &cmdlistptr->aead_hmac_sha1_cbc_3des; + return &pce_dev->ce_sps. + cmdlistptr.aead_hmac_sha1_cbc_3des; else if (creq->auth_alg == QCE_HASH_SHA256_HMAC) - return &cmdlistptr->aead_hmac_sha256_cbc_3des; + return &pce_dev->ce_sps. + cmdlistptr.aead_hmac_sha256_cbc_3des; else return NULL; break; @@ -529,21 +462,21 @@ static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo( case QCE_MODE_CBC: if (creq->encklen == AES128_KEY_SIZE) { if (creq->auth_alg == QCE_HASH_SHA1_HMAC) - return &cmdlistptr-> + return &pce_dev->ce_sps.cmdlistptr. aead_hmac_sha1_cbc_aes_128; else if (creq->auth_alg == QCE_HASH_SHA256_HMAC) - return &cmdlistptr-> + return &pce_dev->ce_sps.cmdlistptr. aead_hmac_sha256_cbc_aes_128; else return NULL; } else if (creq->encklen == AES256_KEY_SIZE) { if (creq->auth_alg == QCE_HASH_SHA1_HMAC) - return &cmdlistptr-> + return &pce_dev->ce_sps.cmdlistptr. aead_hmac_sha1_cbc_aes_256; else if (creq->auth_alg == QCE_HASH_SHA256_HMAC) - return &cmdlistptr-> + return &pce_dev->ce_sps.cmdlistptr. aead_hmac_sha256_cbc_aes_256; else return NULL; @@ -598,14 +531,19 @@ static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req, return -EINVAL; } - /* only support cbc mode */ - if (q_req->mode != QCE_MODE_CBC) + switch (q_req->mode) { + case QCE_MODE_CBC: + pce_dev->mode = q_req->mode; + break; + default: return -EINVAL; - - _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); - pce = cmdlistinfo->encr_cntr_iv; - for (i = 0; i < enciv_in_word; i++, pce++) - pce->data = enciv32[i]; + } + if (q_req->mode != QCE_MODE_ECB) { + _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); + pce = cmdlistinfo->encr_cntr_iv; + for (i = 0; i < enciv_in_word; i++, pce++) + pce->data = enciv32[i]; + } /* * write encr key @@ -676,19 +614,13 @@ static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req, return 0; -} +}; static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo( - struct qce_device *pce_dev, - int req_info, struct qce_req *creq) + struct qce_device *pce_dev, struct qce_req *creq) { - struct ce_request_info *preq_info; - struct ce_sps_data *pce_sps_data; - struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; - cmdlistptr = &pce_sps_data->cmdlistptr; if (creq->alg != CIPHER_ALG_AES) { switch (creq->alg) { case CIPHER_ALG_DES: @@ -855,6 +787,7 @@ static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256; break; } + pce_dev->mode = creq->mode; switch (creq->alg) { case CIPHER_ALG_DES: @@ -1123,19 +1056,16 @@ static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req, return 0; } -static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) +static void _qce_dump_descr_fifos(struct qce_device *pce_dev) { int i, j, ents; - struct ce_sps_data *pce_sps_data; - struct sps_iovec *iovec; + struct sps_iovec *iovec = pce_dev->ce_sps.in_transfer.iovec; uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD; - pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; - iovec = pce_sps_data->in_transfer.iovec; pr_info("==============================================\n"); pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n"); 
pr_info("==============================================\n"); - for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) { + for (i = 0; i < pce_dev->ce_sps.in_transfer.iovec_count; i++) { pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, iovec->addr, iovec->size, iovec->flags); if (iovec->flags & cmd_flags) { @@ -1156,8 +1086,8 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) pr_info("==============================================\n"); pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n"); pr_info("==============================================\n"); - iovec = pce_sps_data->out_transfer.iovec; - for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) { + iovec = pce_dev->ce_sps.out_transfer.iovec; + for (i = 0; i < pce_dev->ce_sps.out_transfer.iovec_count; i++) { pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i, iovec->addr, iovec->size, iovec->flags); iovec++; @@ -1166,9 +1096,9 @@ static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info) #ifdef QCE_DEBUG -static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info) +static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev) { - _qce_dump_descr_fifos(pce_dev, req_info); + _qce_dump_descr_fifos(pce_dev); } #define QCE_WRITE_REG(val, addr) \ @@ -1179,7 +1109,7 @@ static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info) #else -static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info) +static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev) { } @@ -1191,7 +1121,7 @@ static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info) static int _ce_setup_hash_direct(struct qce_device *pce_dev, struct qce_sha_req *sreq) { - uint32_t auth32[(SHA256_DIGEST_SIZE / sizeof(uint32_t))+1]; + uint32_t auth32[(SHA256_DIGEST_SIZE / sizeof(uint32_t)) + 1]; uint32_t diglen; bool use_hw_key = false; bool use_pipe_key = false; @@ -1458,9 +1388,7 @@ static int _ce_setup_aead_direct(struct qce_device *pce_dev, return -EINVAL; } - - - + pce_dev->mode = q_req->mode; /* write CNTR0_IV0_REG */ if (q_req->mode != QCE_MODE_ECB) { _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); @@ -1697,6 +1625,7 @@ static int _ce_setup_cipher_direct(struct qce_device *pce_dev, encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256; break; } + pce_dev->mode = creq->mode; switch (creq->alg) { case CIPHER_ALG_DES: @@ -1952,8 +1881,7 @@ static int _ce_f9_setup_direct(struct qce_device *pce_dev, CRYPTO_CONFIG_REG)); /* write go */ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | - (1 << CRYPTO_CLR_CNTXT)), - pce_dev->iobase + CRYPTO_GOPROC_REG); + (1 << CRYPTO_CLR_CNTXT)), pce_dev->iobase + CRYPTO_GOPROC_REG); /* * Ensure previous instructions (setting the GO register) * was completed before issuing a DMA transfer request @@ -2030,8 +1958,7 @@ static int _ce_f8_setup_direct(struct qce_device *pce_dev, CRYPTO_CONFIG_REG)); /* write go */ QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | - (1 << CRYPTO_CLR_CNTXT)), - pce_dev->iobase + CRYPTO_GOPROC_REG); + (1 << CRYPTO_CLR_CNTXT)), pce_dev->iobase + CRYPTO_GOPROC_REG); /* * Ensure previous instructions (setting the GO register) * was completed before issuing a DMA transfer request @@ -2041,99 +1968,77 @@ static int _ce_f8_setup_direct(struct qce_device *pce_dev, } -static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info) +static int _qce_unlock_other_pipes(struct qce_device *pce_dev) { int rc = 0; - struct ce_sps_data *pce_sps_data = 
&pce_dev->ce_request_info - [req_info].ce_sps; if (pce_dev->no_get_around || pce_dev->support_cmd_dscr == false) return rc; - rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe, - GET_PHYS_ADDR(pce_sps_data-> - cmdlistptr.unlock_all_pipes.cmdlist), - 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK)); + pce_dev->ce_sps.consumer.event.callback = NULL; + rc = sps_transfer_one(pce_dev->ce_sps.consumer.pipe, + GET_PHYS_ADDR(pce_dev->ce_sps.cmdlistptr.unlock_all_pipes.cmdlist), + 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK)); if (rc) { - pr_err("sps_xfr_one() fail rc=%d", rc); + pr_err("sps_xfr_one() fail rc=%d\n", rc); rc = -EINVAL; } return rc; } -static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info, - bool is_complete); -static int _aead_complete(struct qce_device *pce_dev, int req_info) +static int _aead_complete(struct qce_device *pce_dev) { struct aead_request *areq; unsigned char mac[SHA256_DIGEST_SIZE]; - uint32_t ccm_fail_status = 0; + uint32_t status; uint32_t result_dump_status; - int32_t result_status = 0; - struct ce_request_info *preq_info; - struct ce_sps_data *pce_sps_data; - qce_comp_func_ptr_t qce_callback; - - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; - qce_callback = preq_info->qce_cb; - areq = (struct aead_request *) preq_info->areq; + int32_t result_status; + + areq = (struct aead_request *) pce_dev->areq; if (areq->src != areq->dst) { - qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } - qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); - qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, preq_info->assoc_nents, + qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, DMA_TO_DEVICE); /* check MAC */ - memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]), + memcpy(mac, (char *)(&pce_dev->ce_sps.result->auth_iv[0]), SHA256_DIGEST_SIZE); /* read status before unlock */ - if (preq_info->dir == QCE_DECRYPT) { - if (pce_dev->no_get_around) - if (pce_dev->no_ccm_mac_status_get_around) - ccm_fail_status = be32_to_cpu(pce_sps_data-> - result->status); - else - ccm_fail_status = be32_to_cpu(pce_sps_data-> - result_null->status); - else - ccm_fail_status = readl_relaxed(pce_dev->iobase + - CRYPTO_STATUS_REG); - } - if (_qce_unlock_other_pipes(pce_dev, req_info)) { - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, mac, NULL, -ENXIO); + status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); + + if (_qce_unlock_other_pipes(pce_dev)) { + pce_dev->qce_cb(areq, mac, NULL, -ENXIO); return -ENXIO; } - result_dump_status = be32_to_cpu(pce_sps_data->result->status); - pce_sps_data->result->status = 0; + result_status = 0; + result_dump_status = be32_to_cpu(pce_dev->ce_sps.result->status); + pce_dev->ce_sps.result->status = 0; if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) | (1 << CRYPTO_HSD_ERR))) { pr_err("aead operation error. Status %x\n", result_dump_status); result_status = -ENXIO; - } else if (pce_sps_data->consumer_status | - pce_sps_data->producer_status) { + } else if (pce_dev->ce_sps.consumer_status | + pce_dev->ce_sps.producer_status) { pr_err("aead sps operation error. 
sps status %x %x\n", - pce_sps_data->consumer_status, - pce_sps_data->producer_status); + pce_dev->ce_sps.consumer_status, + pce_dev->ce_sps.producer_status); result_status = -ENXIO; } - if (preq_info->mode == QCE_MODE_CCM) { + if (pce_dev->mode == QCE_MODE_CCM) { /* * Not from result dump, instead, use the status we just * read of device for MAC_FAILED. */ - if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) && - (ccm_fail_status & (1 << CRYPTO_MAC_FAILED))) + if (result_status == 0 && (status & (1 << CRYPTO_MAC_FAILED))) result_status = -EBADMSG; - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, mac, NULL, result_status); + pce_dev->qce_cb(areq, mac, NULL, result_status); } else { uint32_t ivsize = 0; @@ -2141,166 +2046,145 @@ static int _aead_complete(struct qce_device *pce_dev, int req_info) unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE]; aead = crypto_aead_reqtfm(areq); ivsize = crypto_aead_ivsize(aead); - if (pce_dev->ce_bam_info.minor_version != 0) - dma_unmap_single(pce_dev->pdev, preq_info->phy_iv_in, + if (pce_dev->ce_sps.minor_version != 0) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in, ivsize, DMA_TO_DEVICE); - memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv), + memcpy(iv, (char *)(pce_dev->ce_sps.result->encr_cntr_iv), sizeof(iv)); - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, mac, iv, result_status); + pce_dev->qce_cb(areq, mac, iv, result_status); } return 0; }; -static int _sha_complete(struct qce_device *pce_dev, int req_info) +static int _sha_complete(struct qce_device *pce_dev) { struct ahash_request *areq; unsigned char digest[SHA256_DIGEST_SIZE]; uint32_t bytecount32[2]; - int32_t result_status = 0; + int32_t result_status; uint32_t result_dump_status; - struct ce_request_info *preq_info; - struct ce_sps_data *pce_sps_data; - qce_comp_func_ptr_t qce_callback; - - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; - qce_callback = preq_info->qce_cb; - areq = (struct ahash_request *) preq_info->areq; + + areq = (struct ahash_request *) pce_dev->areq; if (!areq) { pr_err("sha operation error. areq is NULL\n"); return -ENXIO; } - qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, DMA_TO_DEVICE); - memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]), + memcpy(digest, (char *)(&pce_dev->ce_sps.result->auth_iv[0]), SHA256_DIGEST_SIZE); _byte_stream_to_net_words(bytecount32, - (unsigned char *)pce_sps_data->result->auth_byte_count, + (unsigned char *)pce_dev->ce_sps.result->auth_byte_count, 2 * CRYPTO_REG_SIZE); - if (_qce_unlock_other_pipes(pce_dev, req_info)) { - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, digest, (char *)bytecount32, + if (_qce_unlock_other_pipes(pce_dev)) { + pce_dev->qce_cb(areq, digest, (char *)bytecount32, -ENXIO); return -ENXIO; } - result_dump_status = be32_to_cpu(pce_sps_data->result->status); - pce_sps_data->result->status = 0; + result_status = 0; + result_dump_status = be32_to_cpu(pce_dev->ce_sps.result->status); + pce_dev->ce_sps.result->status = 0; if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) | (1 << CRYPTO_HSD_ERR))) { pr_err("sha operation error. Status %x\n", result_dump_status); result_status = -ENXIO; - } else if (pce_sps_data->consumer_status) { + } else if (pce_dev->ce_sps.consumer_status) { pr_err("sha sps operation error. 
sps status %x\n", - pce_sps_data->consumer_status); + pce_dev->ce_sps.consumer_status); result_status = -ENXIO; } - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, digest, (char *)bytecount32, result_status); + pce_dev->qce_cb(areq, digest, (char *)bytecount32, + result_status); return 0; -} +}; -static int _f9_complete(struct qce_device *pce_dev, int req_info) +static int _f9_complete(struct qce_device *pce_dev) { uint32_t mac_i; - int32_t result_status = 0; + int32_t result_status; uint32_t result_dump_status; - struct ce_request_info *preq_info; - struct ce_sps_data *pce_sps_data; - qce_comp_func_ptr_t qce_callback; - void *areq; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; - qce_callback = preq_info->qce_cb; - areq = preq_info->areq; - dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, - preq_info->ota_size, DMA_TO_DEVICE); + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, + pce_dev->ota_size, DMA_TO_DEVICE); _byte_stream_to_net_words(&mac_i, - (char *)(&pce_sps_data->result->auth_iv[0]), + (char *)(&pce_dev->ce_sps.result->auth_iv[0]), CRYPTO_REG_SIZE); - if (_qce_unlock_other_pipes(pce_dev, req_info)) { - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, NULL, NULL, -ENXIO); + if (_qce_unlock_other_pipes(pce_dev)) { + pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO); return -ENXIO; } - result_dump_status = be32_to_cpu(pce_sps_data->result->status); - pce_sps_data->result->status = 0; + result_status = 0; + result_dump_status = be32_to_cpu(pce_dev->ce_sps.result->status); + pce_dev->ce_sps.result->status = 0; if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) | (1 << CRYPTO_HSD_ERR))) { pr_err("f9 operation error. Status %x\n", result_dump_status); result_status = -ENXIO; - } else if (pce_sps_data->consumer_status | - pce_sps_data->producer_status) { + } else if (pce_dev->ce_sps.consumer_status | + pce_dev->ce_sps.producer_status) { pr_err("f9 sps operation error. sps status %x %x\n", - pce_sps_data->consumer_status, - pce_sps_data->producer_status); + pce_dev->ce_sps.consumer_status, + pce_dev->ce_sps.producer_status); result_status = -ENXIO; } - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, (char *)&mac_i, NULL, result_status); + pce_dev->qce_cb(pce_dev->areq, (char *)&mac_i, NULL, result_status); return 0; } -static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info) +static int _ablk_cipher_complete(struct qce_device *pce_dev) { struct ablkcipher_request *areq; unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE]; - int32_t result_status = 0; + int32_t result_status; uint32_t result_dump_status; - struct ce_request_info *preq_info; - struct ce_sps_data *pce_sps_data; - qce_comp_func_ptr_t qce_callback; - - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; - qce_callback = preq_info->qce_cb; - areq = (struct ablkcipher_request *) preq_info->areq; + + areq = (struct ablkcipher_request *) pce_dev->areq; + if (areq->src != areq->dst) { qce_dma_unmap_sg(pce_dev->pdev, areq->dst, - preq_info->dst_nents, DMA_FROM_DEVICE); + pce_dev->dst_nents, DMA_FROM_DEVICE); } - qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); - if (_qce_unlock_other_pipes(pce_dev, req_info)) { - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, NULL, NULL, -ENXIO); + if (_qce_unlock_other_pipes(pce_dev)) { + pce_dev->qce_cb(areq, NULL, NULL, -ENXIO); return -ENXIO; } - result_dump_status = be32_to_cpu(pce_sps_data->result->status); - pce_sps_data->result->status = 0; + result_status = 0; + result_dump_status = be32_to_cpu(pce_dev->ce_sps.result->status); + pce_dev->ce_sps.result->status = 0; if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) | (1 << CRYPTO_HSD_ERR))) { pr_err("ablk_cipher operation error. Status %x\n", result_dump_status); result_status = -ENXIO; - } else if (pce_sps_data->consumer_status | - pce_sps_data->producer_status) { + } else if (pce_dev->ce_sps.consumer_status | + pce_dev->ce_sps.producer_status) { pr_err("ablk_cipher sps operation error. sps status %x %x\n", - pce_sps_data->consumer_status, - pce_sps_data->producer_status); + pce_dev->ce_sps.consumer_status, + pce_dev->ce_sps.producer_status); result_status = -ENXIO; } - if (preq_info->mode == QCE_MODE_ECB) { - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status | - result_status); + if (pce_dev->mode == QCE_MODE_ECB) { + pce_dev->qce_cb(areq, NULL, NULL, + pce_dev->ce_sps.consumer_status | + result_status); } else { - if (pce_dev->ce_bam_info.minor_version == 0) { - if (preq_info->mode == QCE_MODE_CBC) { - if (preq_info->dir == QCE_DECRYPT) - memcpy(iv, (char *)preq_info->dec_iv, + if (pce_dev->ce_sps.minor_version == 0) { + if (pce_dev->mode == QCE_MODE_CBC) { + if (pce_dev->dir == QCE_DECRYPT) + memcpy(iv, (char *)pce_dev->dec_iv, sizeof(iv)); else memcpy(iv, (unsigned char *) @@ -2308,15 +2192,15 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info) areq->src->length - 16), sizeof(iv)); } - if ((preq_info->mode == QCE_MODE_CTR) || - (preq_info->mode == QCE_MODE_XTS)) { + if ((pce_dev->mode == QCE_MODE_CTR) || + (pce_dev->mode == QCE_MODE_XTS)) { uint32_t num_blk = 0; uint32_t cntr_iv3 = 0; unsigned long long cntr_iv64 = 0; unsigned char *b = (unsigned char *)(&cntr_iv3); memcpy(iv, areq->info, sizeof(iv)); - if (preq_info->mode != QCE_MODE_XTS) + if (pce_dev->mode != QCE_MODE_XTS) num_blk = areq->nbytes/16; else num_blk = 1; @@ -2338,71 +2222,54 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info) } } else { memcpy(iv, - (char *)(pce_sps_data->result->encr_cntr_iv), + (char *)(pce_dev->ce_sps.result->encr_cntr_iv), sizeof(iv)); } - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, NULL, iv, result_status); + pce_dev->qce_cb(areq, NULL, iv, result_status); } return 0; -} +}; -static int _f8_complete(struct qce_device *pce_dev, int req_info) +static int _f8_complete(struct qce_device *pce_dev) { - int32_t result_status = 0; + int32_t result_status; uint32_t result_dump_status; - uint32_t result_dump_status2; - struct ce_request_info *preq_info; - struct ce_sps_data *pce_sps_data; - qce_comp_func_ptr_t qce_callback; - void *areq; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; - qce_callback = preq_info->qce_cb; - areq = preq_info->areq; - if (preq_info->phy_ota_dst) - dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, - preq_info->ota_size, DMA_FROM_DEVICE); - if (preq_info->phy_ota_src) - dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, - preq_info->ota_size, (preq_info->phy_ota_dst) ? 
+ if (pce_dev->phy_ota_dst != 0) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, + pce_dev->ota_size, DMA_FROM_DEVICE); + if (pce_dev->phy_ota_src != 0) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, + pce_dev->ota_size, (pce_dev->phy_ota_dst) ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); - if (_qce_unlock_other_pipes(pce_dev, req_info)) { - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, NULL, NULL, -ENXIO); + if (_qce_unlock_other_pipes(pce_dev)) { + pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO); return -ENXIO; } - result_dump_status = be32_to_cpu(pce_sps_data->result->status); - result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2); + result_status = 0; + result_dump_status = be32_to_cpu(pce_dev->ce_sps.result->status); + pce_dev->ce_sps.result->status = 0; - if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) - | (1 << CRYPTO_HSD_ERR)))) { - pr_err( - "f8 oper error. Dump Sta %x Sta2 %x req %d\n", - result_dump_status, result_dump_status2, req_info); + if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR) + | (1 << CRYPTO_HSD_ERR))) { + pr_err("f8 operation error. Status %x\n", result_dump_status); result_status = -ENXIO; - } else if (pce_sps_data->consumer_status | - pce_sps_data->producer_status) { + } else if (pce_dev->ce_sps.consumer_status | + pce_dev->ce_sps.producer_status) { pr_err("f8 sps operation error. sps status %x %x\n", - pce_sps_data->consumer_status, - pce_sps_data->producer_status); + pce_dev->ce_sps.consumer_status, + pce_dev->ce_sps.producer_status); result_status = -ENXIO; } - pce_sps_data->result->status = 0; - pce_sps_data->result->status2 = 0; - qce_free_req_info(pce_dev, req_info, true); - qce_callback(areq, NULL, NULL, result_status); + pce_dev->qce_cb(pce_dev->areq, NULL, NULL, result_status); return 0; } -static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info) +static void _qce_sps_iovec_count_init(struct qce_device *pce_dev) { - struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info] - .ce_sps; - pce_sps_data->in_transfer.iovec_count = 0; - pce_sps_data->out_transfer.iovec_count = 0; + pce_dev->ce_sps.in_transfer.iovec_count = 0; + pce_dev->ce_sps.out_transfer.iovec_count = 0; } static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag) @@ -2415,7 +2282,7 @@ static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag) iovec->flags |= flag; } -static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len, +static int _qce_sps_add_data(uint32_t addr, uint32_t len, struct sps_transfer *sps_bam_pipe) { struct sps_iovec *iovec = sps_bam_pipe->iovec + @@ -2434,11 +2301,11 @@ static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len, else data_cnt = len; iovec->size = data_cnt; - iovec->addr = SPS_GET_LOWER_ADDR(paddr); - iovec->flags = SPS_GET_UPPER_ADDR(paddr); + iovec->addr = addr; + iovec->flags = 0; sps_bam_pipe->iovec_count++; iovec++; - paddr += data_cnt; + addr += data_cnt; len -= data_cnt; } return 0; @@ -2448,21 +2315,20 @@ static int _qce_sps_add_sg_data(struct qce_device *pce_dev, struct scatterlist *sg_src, uint32_t nbytes, struct sps_transfer *sps_bam_pipe) { - uint32_t data_cnt, len; - dma_addr_t addr; + uint32_t addr, data_cnt, len; struct sps_iovec *iovec = sps_bam_pipe->iovec + sps_bam_pipe->iovec_count; while (nbytes > 0) { - if (NULL == sg_src) { - pr_err("qce50.c: _qce_sps_add_sg_data, sg_src = NULL"); + if (sg_src == NULL) { + pr_err("qce50: _qce_sps_add_sg_data, sg = NULL\n"); break; } len = 
min(nbytes, sg_dma_len(sg_src)); nbytes -= len; addr = sg_dma_address(sg_src); - if (pce_dev->ce_bam_info.minor_version == 0) - len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size); + if (pce_dev->ce_sps.minor_version == 0) + len = ALIGN(len, pce_dev->ce_sps.ce_burst_size); while (len > 0) { if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) { pr_err("Num of descrptor %d exceed max (%d)", @@ -2473,13 +2339,13 @@ static int _qce_sps_add_sg_data(struct qce_device *pce_dev, if (len > SPS_MAX_PKT_SIZE) { data_cnt = SPS_MAX_PKT_SIZE; iovec->size = data_cnt; - iovec->addr = SPS_GET_LOWER_ADDR(addr); - iovec->flags = SPS_GET_UPPER_ADDR(addr); + iovec->addr = addr; + iovec->flags = 0; } else { data_cnt = len; iovec->size = data_cnt; - iovec->addr = SPS_GET_LOWER_ADDR(addr); - iovec->flags = SPS_GET_UPPER_ADDR(addr); + iovec->addr = addr; + iovec->flags = 0; } iovec++; sps_bam_pipe->iovec_count++; @@ -2495,53 +2361,38 @@ static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag, struct qce_cmdlist_info *cmdptr, struct sps_transfer *sps_bam_pipe) { - dma_addr_t paddr = GET_PHYS_ADDR(cmdptr->cmdlist); struct sps_iovec *iovec = sps_bam_pipe->iovec + sps_bam_pipe->iovec_count; iovec->size = cmdptr->size; - iovec->addr = SPS_GET_LOWER_ADDR(paddr); - iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag; + iovec->addr = GET_PHYS_ADDR(cmdptr->cmdlist); + iovec->flags = SPS_IOVEC_FLAG_CMD | flag; sps_bam_pipe->iovec_count++; - if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) { - pr_err("Num of descrptor %d exceed max (%d)", - sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR); - return -ENOMEM; - } + return 0; } -static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info) +static int _qce_sps_transfer(struct qce_device *pce_dev) { int rc = 0; - struct ce_sps_data *pce_sps_data; - - pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps; - pce_sps_data->out_transfer.user = - (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT | - (unsigned int) req_info)); - pce_sps_data->in_transfer.user = - (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT | - (unsigned int) req_info)); - _qce_dump_descr_fifos_dbg(pce_dev, req_info); - - if (pce_sps_data->in_transfer.iovec_count) { - rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe, - &pce_sps_data->in_transfer); + + _qce_dump_descr_fifos_dbg(pce_dev); + if (pce_dev->ce_sps.in_transfer.iovec_count) { + rc = sps_transfer(pce_dev->ce_sps.consumer.pipe, + &pce_dev->ce_sps.in_transfer); if (rc) { pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n", - (uintptr_t)pce_dev->ce_bam_info.consumer.pipe, - rc); - goto ret; + (uintptr_t)pce_dev->ce_sps.consumer.pipe, rc); + _qce_dump_descr_fifos(pce_dev); + return rc; } } - rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe, - &pce_sps_data->out_transfer); - if (rc) + rc = sps_transfer(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.out_transfer); + if (rc) { pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n", - (uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc); -ret: - if (rc) - _qce_dump_descr_fifos(pce_dev, req_info); + (uintptr_t)pce_dev->ce_sps.producer.pipe, rc); + return rc; + } return rc; } @@ -2578,7 +2429,7 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev, /* Allocate endpoint context */ sps_pipe_info = sps_alloc_endpoint(); if (!sps_pipe_info) { - pr_err("sps_alloc_endpoint() failed!!! is_producer=%d", + pr_err("sps_alloc_endpoint() failed!!! 
is_producer=%d\n", is_producer); rc = -ENOMEM; goto out; @@ -2601,7 +2452,7 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev, * CE peripheral where as destination should * be system memory. */ - sps_connect_info->source = pce_dev->ce_bam_info.bam_handle; + sps_connect_info->source = pce_dev->ce_sps.bam_handle; sps_connect_info->destination = SPS_DEV_HANDLE_MEM; /* Producer pipe will handle this connection */ sps_connect_info->mode = SPS_MODE_SRC; @@ -2613,20 +2464,18 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev, * CE peripheral */ sps_connect_info->source = SPS_DEV_HANDLE_MEM; - sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle; + sps_connect_info->destination = pce_dev->ce_sps.bam_handle; sps_connect_info->mode = SPS_MODE_DEST; sps_connect_info->options = - SPS_O_AUTO_ENABLE; + SPS_O_AUTO_ENABLE | SPS_O_EOT; } /* Producer pipe index */ - sps_connect_info->src_pipe_index = - pce_dev->ce_bam_info.src_pipe_index; + sps_connect_info->src_pipe_index = pce_dev->ce_sps.src_pipe_index; /* Consumer pipe index */ - sps_connect_info->dest_pipe_index = - pce_dev->ce_bam_info.dest_pipe_index; + sps_connect_info->dest_pipe_index = pce_dev->ce_sps.dest_pipe_index; /* Set pipe group */ - sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index; + sps_connect_info->lock_group = pce_dev->ce_sps.pipe_pair_index; sps_connect_info->event_thresh = 0x10; /* * Max. no of scatter/gather buffers that can @@ -2638,10 +2487,8 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev, * descriptor memory (256 bytes + 8 bytes). But in order to be * in power of 2, we are allocating 512 bytes of memory. */ - sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ * + sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec); - if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE) - sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE; sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev, sps_connect_info->desc.size, &sps_connect_info->desc.phys_base, @@ -2663,21 +2510,12 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev, } sps_event->mode = SPS_TRIGGER_CALLBACK; - sps_event->xfer_done = NULL; - sps_event->user = (void *)pce_dev; - if (is_producer) { + if (is_producer) sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE; - sps_event->callback = _sps_producer_callback; - rc = sps_register_event(ep->pipe, sps_event); - if (rc) { - pr_err("Producer callback registration failed rc=%d\n", - rc); - goto sps_connect_err; - } - } else { + else sps_event->options = SPS_O_EOT; - sps_event->callback = NULL; - } + sps_event->xfer_done = NULL; + sps_event->user = (void *)pce_dev; pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n", is_producer ? 
"PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)", @@ -2735,12 +2573,12 @@ static void qce_sps_release_bam(struct qce_device *pce_dev) if (pbam->cnt > 0) goto ret; - if (pce_dev->ce_bam_info.bam_handle) { - sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle); + if (pce_dev->ce_sps.bam_handle) { + sps_deregister_bam_device(pce_dev->ce_sps.bam_handle); pr_debug("deregister bam handle 0x%lx\n", - pce_dev->ce_bam_info.bam_handle); - pce_dev->ce_bam_info.bam_handle = 0; + pce_dev->ce_sps.bam_handle); + pce_dev->ce_sps.bam_handle = 0; } iounmap(pbam->bam_iobase); pr_debug("delete bam 0x%x\n", pbam->bam_mem); @@ -2773,9 +2611,9 @@ static int qce_sps_get_bam(struct qce_device *pce_dev) if (pbam) { pr_debug("found bam 0x%x\n", pbam->bam_mem); pbam->cnt++; - pce_dev->ce_bam_info.bam_handle = pbam->handle; - pce_dev->ce_bam_info.bam_mem = pbam->bam_mem; - pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase; + pce_dev->ce_sps.bam_handle = pbam->handle; + pce_dev->ce_sps.bam_mem = pbam->bam_mem; + pce_dev->ce_sps.bam_iobase = pbam->bam_iobase; pce_dev->pbam = pbam; pce_dev->support_cmd_dscr = pbam->support_cmd_dscr; goto ret; @@ -2799,11 +2637,11 @@ static int qce_sps_get_bam(struct qce_device *pce_dev) pr_err("Can not map BAM io memory\n"); goto ret; } - pce_dev->ce_bam_info.bam_mem = pbam->bam_mem; - pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase; + pce_dev->ce_sps.bam_mem = pbam->bam_mem; + pce_dev->ce_sps.bam_iobase = pbam->bam_iobase; pbam->handle = 0; pr_debug("allocate bam 0x%x\n", pbam->bam_mem); - bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase + + bam_cfg = readl_relaxed(pce_dev->ce_sps.bam_iobase + CRYPTO_BAM_CNFG_BITS_REG); pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ? true : false; @@ -2814,8 +2652,8 @@ static int qce_sps_get_bam(struct qce_device *pce_dev) } pce_dev->support_cmd_dscr = pbam->support_cmd_dscr; - bam.phys_addr = pce_dev->ce_bam_info.bam_mem; - bam.virt_addr = pce_dev->ce_bam_info.bam_iobase; + bam.phys_addr = pce_dev->ce_sps.bam_mem; + bam.virt_addr = (void *)pce_dev->ce_sps.bam_iobase; /* * This event thresold value is only significant for BAM-to-BAM @@ -2830,7 +2668,7 @@ static int qce_sps_get_bam(struct qce_device *pce_dev) */ bam.summing_threshold = 64; /* SPS driver wll handle the crypto BAM IRQ */ - bam.irq = (u32)pce_dev->ce_bam_info.bam_irq; + bam.irq = (u32)pce_dev->ce_sps.bam_irq; /* * Set flag to indicate BAM global device control is managed * remotely. @@ -2840,16 +2678,15 @@ static int qce_sps_get_bam(struct qce_device *pce_dev) else bam.manage = SPS_BAM_MGR_LOCAL; - bam.ee = pce_dev->ce_bam_info.bam_ee; - bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL; - bam.options |= SPS_BAM_CACHED_WP; + bam.ee = 1; + pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr); pr_debug("bam virtual base=0x%pK\n", bam.virt_addr); /* Register CE Peripheral BAM device to SPS driver */ rc = sps_register_bam_device(&bam, &pbam->handle); if (rc) { - pr_err("sps_register_bam_device() failed! err=%d", rc); + pr_err("sps_register_bam_device() failed! err=%d\n", rc); rc = -EIO; iounmap(pbam->bam_iobase); kfree(pbam); @@ -2858,7 +2695,7 @@ static int qce_sps_get_bam(struct qce_device *pce_dev) pce_dev->pbam = pbam; list_add_tail(&pbam->qlist, &qce50_bam_list); - pce_dev->ce_bam_info.bam_handle = pbam->handle; + pce_dev->ce_sps.bam_handle = pbam->handle; ret: mutex_unlock(&bam_register_lock); @@ -2887,67 +2724,52 @@ static int qce_sps_init(struct qce_device *pce_dev) if (rc) return rc; pr_debug("BAM device registered. 
bam_handle=0x%lx\n", - pce_dev->ce_bam_info.bam_handle); + pce_dev->ce_sps.bam_handle); - rc = qce_sps_init_ep_conn(pce_dev, - &pce_dev->ce_bam_info.producer, true); + rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.producer, true); if (rc) goto sps_connect_producer_err; - rc = qce_sps_init_ep_conn(pce_dev, - &pce_dev->ce_bam_info.consumer, false); + rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.consumer, false); if (rc) goto sps_connect_consumer_err; + pce_dev->ce_sps.out_transfer.user = pce_dev->ce_sps.producer.pipe; + pce_dev->ce_sps.in_transfer.user = pce_dev->ce_sps.consumer.pipe; pr_info(" Qualcomm MSM CE-BAM at 0x%016llx irq %d\n", - (unsigned long long)pce_dev->ce_bam_info.bam_mem, - (unsigned int)pce_dev->ce_bam_info.bam_irq); + (unsigned long long)pce_dev->ce_sps.bam_mem, + (unsigned int)pce_dev->ce_sps.bam_irq); return rc; sps_connect_consumer_err: - qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer); + qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer); sps_connect_producer_err: qce_sps_release_bam(pce_dev); return rc; } -static inline int qce_alloc_req_info(struct qce_device *pce_dev) -{ - int i; - int request_index = pce_dev->ce_request_index; - - for (i = 0; i < MAX_QCE_BAM_REQ; i++) { - request_index++; - if (request_index >= MAX_QCE_BAM_REQ) - request_index = 0; - if (atomic_xchg(&pce_dev->ce_request_info[request_index]. - in_use, true) == false) { - pce_dev->ce_request_index = request_index; - return request_index; - } - } - pr_warn("pcedev %d no reqs available no_of_queued_req %d\n", - pce_dev->dev_no, atomic_read( - &pce_dev->no_of_queued_req)); - return -EBUSY; -} - -static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info, - bool is_complete) +/** + * De-initialize SPS HW connected with CE core + * + * This function deinitialize SPS endpoints and then + * deregisters BAM resources from SPS driver. + * + * This function should only be called once typically + * during driver remove. 
+ * + * @pce_dev - Pointer to qce_device structure + * + */ +static void qce_sps_exit(struct qce_device *pce_dev) { - pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST; - if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use, - false) == true) { - if (req_info < MAX_QCE_BAM_REQ && is_complete) - atomic_dec(&pce_dev->no_of_queued_req); - } else - pr_warn("request info %d free already\n", req_info); + qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.consumer); + qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer); + qce_sps_release_bam(pce_dev); } static void print_notify_debug(struct sps_event_notify *notify) { - phys_addr_t addr = - DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags, - notify->data.transfer.iovec.addr); + phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags, + notify->data.transfer.iovec.addr); pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n", notify->event_id, &addr, notify->data.transfer.iovec.size, @@ -2955,173 +2777,91 @@ static void print_notify_debug(struct sps_event_notify *notify) notify->data.transfer.user); } -static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info) +static void _aead_sps_producer_callback(struct sps_event_notify *notify) { - struct ce_request_info *preq_info; - - preq_info = &pce_dev->ce_request_info[req_info]; + struct qce_device *pce_dev = (struct qce_device *) + ((struct sps_event_notify *)notify)->user; + int rc = 0; - switch (preq_info->xfer_type) { - case QCE_XFER_CIPHERING: - _ablk_cipher_complete(pce_dev, req_info); - break; - case QCE_XFER_HASHING: - _sha_complete(pce_dev, req_info); - break; - case QCE_XFER_AEAD: - _aead_complete(pce_dev, req_info); - break; - case QCE_XFER_F8: - _f8_complete(pce_dev, req_info); - break; - case QCE_XFER_F9: - _f9_complete(pce_dev, req_info); - break; - default: - qce_free_req_info(pce_dev, req_info, true); - break; + pce_dev->ce_sps.notify = *notify; + print_notify_debug(notify); + if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) { + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; + _aead_complete(pce_dev); + } else { + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; + pce_dev->ce_sps.out_transfer.iovec_count = 0; + _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), + CRYPTO_RESULT_DUMP_SIZE, + &pce_dev->ce_sps.out_transfer); + _qce_set_flag(&pce_dev->ce_sps.out_transfer, + SPS_IOVEC_FLAG_INT); + rc = sps_transfer(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.out_transfer); + if (rc) { + pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n", + (uintptr_t)pce_dev->ce_sps.producer.pipe, rc); + } } } -static void qce_multireq_timeout(unsigned long data) +static void _sha_sps_producer_callback(struct sps_event_notify *notify) { - struct qce_device *pce_dev = (struct qce_device *)data; - int ret = 0; - int last_seq; - unsigned long flags; - - last_seq = atomic_read(&pce_dev->bunch_cmd_seq); - if (last_seq == 0 || - last_seq != atomic_read(&pce_dev->last_intr_seq)) { - atomic_set(&pce_dev->last_intr_seq, last_seq); - mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES)); - return; - } - /* last bunch mode command time out */ - - /* - * From here to dummy request finish sps request and set owner back - * to none, we disable interrupt. - * So it won't get preempted or interrupted. If bam inerrupts happen - * between, and completion callback gets called from BAM, a new - * request may be issued by the client driver. Deadlock may happen. 
- */ - local_irq_save(flags); - if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT) - != QCE_OWNER_NONE) { - local_irq_restore(flags); - mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES)); - return; - } - - ret = qce_dummy_req(pce_dev); - if (ret) - pr_warn("pcedev %d: Failed to insert dummy req\n", - pce_dev->dev_no); - cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE); - pce_dev->mode = IN_INTERRUPT_MODE; - local_irq_restore(flags); + struct qce_device *pce_dev = (struct qce_device *) + ((struct sps_event_notify *)notify)->user; - del_timer(&(pce_dev->timer)); - pce_dev->qce_stats.no_of_timeouts++; - pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no); + pce_dev->ce_sps.notify = *notify; + print_notify_debug(notify); + _sha_complete(pce_dev); } -void qce_get_driver_stats(void *handle) +static void _f9_sps_producer_callback(struct sps_event_notify *notify) { - struct qce_device *pce_dev = (struct qce_device *) handle; + struct qce_device *pce_dev = (struct qce_device *) + ((struct sps_event_notify *)notify)->user; - if (!_qce50_disp_stats) - return; - pr_info("Engine %d timeout occuured %d\n", pce_dev->dev_no, - pce_dev->qce_stats.no_of_timeouts); - pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no, - pce_dev->qce_stats.no_of_dummy_reqs); - if (pce_dev->mode) - pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no); - else - pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no); - pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no, - atomic_read(&pce_dev->no_of_queued_req)); + pce_dev->ce_sps.notify = *notify; + print_notify_debug(notify); + _f9_complete(pce_dev); } -EXPORT_SYMBOL(qce_get_driver_stats); -void qce_clear_driver_stats(void *handle) +static void _f8_sps_producer_callback(struct sps_event_notify *notify) { - struct qce_device *pce_dev = (struct qce_device *) handle; + struct qce_device *pce_dev = (struct qce_device *) + ((struct sps_event_notify *)notify)->user; - pce_dev->qce_stats.no_of_timeouts = 0; - pce_dev->qce_stats.no_of_dummy_reqs = 0; + pce_dev->ce_sps.notify = *notify; + print_notify_debug(notify); + _f8_complete(pce_dev); } -EXPORT_SYMBOL(qce_clear_driver_stats); -static void _sps_producer_callback(struct sps_event_notify *notify) +static void _ablk_cipher_sps_producer_callback(struct sps_event_notify *notify) { struct qce_device *pce_dev = (struct qce_device *) ((struct sps_event_notify *)notify)->user; int rc = 0; - unsigned int req_info; - struct ce_sps_data *pce_sps_data; - struct ce_request_info *preq_info; + pce_dev->ce_sps.notify = *notify; print_notify_debug(notify); - - req_info = (unsigned int)((uintptr_t)notify->data.transfer.user); - if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) { - pr_warn("request information %d out of range\n", req_info); - return; - } - - req_info = req_info & 0x00ff; - if (req_info < 0 || req_info >= MAX_QCE_ALLOC_BAM_REQ) { - pr_warn("request information %d out of range\n", req_info); - return; - } - - preq_info = &pce_dev->ce_request_info[req_info]; - - pce_sps_data = &preq_info->ce_sps; - if ((preq_info->xfer_type == QCE_XFER_CIPHERING || - preq_info->xfer_type == QCE_XFER_AEAD) && - pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) { - pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; - pce_sps_data->out_transfer.iovec_count = 0; - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) { + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; + _ablk_cipher_complete(pce_dev); + } 
else { + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; + pce_dev->ce_sps.out_transfer.iovec_count = 0; + _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer); - _qce_set_flag(&pce_sps_data->out_transfer, + &pce_dev->ce_sps.out_transfer); + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); - rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe, - &pce_sps_data->out_transfer); + rc = sps_transfer(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.out_transfer); if (rc) { pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n", - (uintptr_t)pce_dev->ce_bam_info.producer.pipe, - rc); + (uintptr_t)pce_dev->ce_sps.producer.pipe, rc); } - return; } - - _qce_req_complete(pce_dev, req_info); -} - -/** - * De-initialize SPS HW connected with CE core - * - * This function deinitialize SPS endpoints and then - * deregisters BAM resources from SPS driver. - * - * This function should only be called once typically - * during driver remove. - * - * @pce_dev - Pointer to qce_device structure - * - */ -static void qce_sps_exit(struct qce_device *pce_dev) -{ - qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer); - qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer); - qce_sps_release_bam(pce_dev); -} +}; static void qce_add_cmd_element(struct qce_device *pdev, struct sps_command_element **cmd_ptr, u32 addr, @@ -3137,13 +2877,13 @@ static void qce_add_cmd_element(struct qce_device *pdev, (*cmd_ptr)++; } -static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index, +static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, unsigned char **pvaddr, enum qce_cipher_mode_enum mode, bool key_128) { struct sps_command_element *ce_vaddr; uintptr_t ce_vaddr_start; - struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr; struct qce_cmdlist_info *pcl_info = NULL; int i = 0; uint32_t encr_cfg = 0; @@ -3151,9 +2891,8 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index, uint32_t xts_key_reg = 0; uint32_t iv_reg = 0; - cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), - pdev->ce_bam_info.ce_burst_size); + pdev->ce_sps.ce_burst_size); ce_vaddr = (struct sps_command_element *)(*pvaddr); ce_vaddr_start = (uintptr_t)(*pvaddr); /* @@ -3310,23 +3049,22 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index, return 0; } -static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index, +static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, unsigned char **pvaddr, enum qce_cipher_alg_enum alg, bool mode_cbc) { struct sps_command_element *ce_vaddr; uintptr_t ce_vaddr_start; - struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr; struct qce_cmdlist_info *pcl_info = NULL; int i = 0; uint32_t encr_cfg = 0; uint32_t key_reg = 0; uint32_t iv_reg = 0; - cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), - pdev->ce_bam_info.ce_burst_size); + pdev->ce_sps.ce_burst_size); ce_vaddr = (struct sps_command_element *)(*pvaddr); ce_vaddr_start = (uintptr_t)(*pvaddr); @@ -3424,65 +3162,21 @@ static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index, return 0; } -static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev, - int cri_index, unsigned char 
**pvaddr) -{ - struct sps_command_element *ce_vaddr; - uintptr_t ce_vaddr_start; - struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info - [cri_index].ce_sps.cmdlistptr; - struct qce_cmdlist_info *pcl_info = NULL; - - *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), - pdev->ce_bam_info.ce_burst_size); - ce_vaddr_start = (uintptr_t)(*pvaddr); - ce_vaddr = (struct sps_command_element *)(*pvaddr); - - cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr; - pcl_info = &(cmdlistptr->cipher_null); - - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, - pdev->ce_bam_info.ce_burst_size, NULL); - - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, - pdev->reg.encr_cfg_aes_ecb_128, NULL); - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, - NULL); - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0, - NULL); - - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, - 0, NULL); - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, - 0, NULL); - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0, - NULL); - - qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG, - ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) | - (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc); - - pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start; - *pvaddr = (unsigned char *) ce_vaddr; - return 0; -} - -static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index, +static int _setup_auth_cmdlistptrs(struct qce_device *pdev, unsigned char **pvaddr, enum qce_hash_alg_enum alg, bool key_128) { struct sps_command_element *ce_vaddr; uintptr_t ce_vaddr_start; - struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr; struct qce_cmdlist_info *pcl_info = NULL; int i = 0; uint32_t key_reg = 0; uint32_t auth_cfg = 0; uint32_t iv_reg = 0; - cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), - pdev->ce_bam_info.ce_burst_size); + pdev->ce_sps.ce_burst_size); ce_vaddr_start = (uintptr_t)(*pvaddr); ce_vaddr = (struct sps_command_element *)(*pvaddr); @@ -3647,7 +3341,6 @@ static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index, } static int _setup_aead_cmdlistptrs(struct qce_device *pdev, - int cri_index, unsigned char **pvaddr, uint32_t alg, uint32_t mode, @@ -3656,7 +3349,7 @@ static int _setup_aead_cmdlistptrs(struct qce_device *pdev, { struct sps_command_element *ce_vaddr; uintptr_t ce_vaddr_start; - struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr; struct qce_cmdlist_info *pcl_info = NULL; uint32_t key_reg; uint32_t iv_reg; @@ -3664,9 +3357,8 @@ static int _setup_aead_cmdlistptrs(struct qce_device *pdev, uint32_t enciv_in_word; uint32_t encr_cfg; - cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), - pdev->ce_bam_info.ce_burst_size); + pdev->ce_sps.ce_burst_size); ce_vaddr_start = (uintptr_t)(*pvaddr); ce_vaddr = (struct sps_command_element *)(*pvaddr); @@ -3862,13 +3554,12 @@ static int _setup_aead_cmdlistptrs(struct qce_device *pdev, return 0; } -static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index, +static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, unsigned char **pvaddr, bool key_128) { struct sps_command_element *ce_vaddr; uintptr_t ce_vaddr_start; - struct qce_cmdlistptr_ops *cmdlistptr = 
&pdev->ce_request_info - [cri_index].ce_sps.cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr; struct qce_cmdlist_info *pcl_info = NULL; int i = 0; uint32_t encr_cfg = 0; @@ -3876,7 +3567,7 @@ static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index, uint32_t key_reg = 0; *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), - pdev->ce_bam_info.ce_burst_size); + pdev->ce_sps.ce_burst_size); ce_vaddr_start = (uintptr_t)(*pvaddr); ce_vaddr = (struct sps_command_element *)(*pvaddr); @@ -3996,20 +3687,19 @@ static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index, return 0; } -static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index, +static int _setup_f8_cmdlistptrs(struct qce_device *pdev, unsigned char **pvaddr, enum qce_ota_algo_enum alg) { struct sps_command_element *ce_vaddr; uintptr_t ce_vaddr_start; - struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr; struct qce_cmdlist_info *pcl_info = NULL; int i = 0; uint32_t encr_cfg = 0; uint32_t key_reg = 4; - cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), - pdev->ce_bam_info.ce_burst_size); + pdev->ce_sps.ce_burst_size); ce_vaddr = (struct sps_command_element *)(*pvaddr); ce_vaddr_start = (uintptr_t)(*pvaddr); @@ -4082,20 +3772,19 @@ static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index, return 0; } -static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index, +static int _setup_f9_cmdlistptrs(struct qce_device *pdev, unsigned char **pvaddr, enum qce_ota_algo_enum alg) { struct sps_command_element *ce_vaddr; uintptr_t ce_vaddr_start; - struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr; struct qce_cmdlist_info *pcl_info = NULL; int i = 0; uint32_t auth_cfg = 0; uint32_t iv_reg = 0; - cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), - pdev->ce_bam_info.ce_burst_size); + pdev->ce_sps.ce_burst_size); ce_vaddr_start = (uintptr_t)(*pvaddr); ce_vaddr = (struct sps_command_element *)(*pvaddr); @@ -4165,16 +3854,15 @@ static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index, } static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev, - int cri_index, unsigned char **pvaddr) + unsigned char **pvaddr) { struct sps_command_element *ce_vaddr; uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr); - struct qce_cmdlistptr_ops *cmdlistptr; + struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr; struct qce_cmdlist_info *pcl_info = NULL; - cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr; *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)), - pdev->ce_bam_info.ce_burst_size); + pdev->ce_sps.ce_burst_size); ce_vaddr = (struct sps_command_element *)(*pvaddr); cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr; pcl_info = &(cmdlistptr->unlock_all_pipes); @@ -4191,7 +3879,7 @@ static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev, return 0; } -static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index, +static int qce_setup_cmdlistptrs(struct qce_device *pdev, unsigned char **pvaddr) { struct sps_command_element *ce_vaddr = @@ -4203,76 +3891,56 @@ static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index, */ ce_vaddr = (struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr), - 
pdev->ce_bam_info.ce_burst_size); + pdev->ce_sps.ce_burst_size); *pvaddr = (unsigned char *) ce_vaddr; - _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC, - true); - _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR, - true); - _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB, - true); - _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS, - true); - _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC, - false); - _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR, - false); - _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB, - false); - _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS, - false); - - _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES, - true); - _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES, - false); - _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES, - true); - _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES, - false); - - _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1, - false); - _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256, - false); - - _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC, - false); - _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC, - false); - - _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC, - true); - _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC, - false); - - _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES, - QCE_MODE_CBC, DES_KEY_SIZE, true); - _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES, - QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true); - _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES, - QCE_MODE_CBC, AES128_KEY_SIZE, true); - _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES, - QCE_MODE_CBC, AES256_KEY_SIZE, true); - _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES, - QCE_MODE_CBC, DES_KEY_SIZE, false); - _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES, - QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false); - _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES, - QCE_MODE_CBC, AES128_KEY_SIZE, false); - _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES, - QCE_MODE_CBC, AES256_KEY_SIZE, false); - - _setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr); - - _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true); - _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false); - _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI); - _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G); - _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI); - _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G); - _setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr); + _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, true); + _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, true); + _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, true); + _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, true); + _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, false); + _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, false); + _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, false); + _setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, false); + + _setup_cipher_des_cmdlistptrs(pdev, pvaddr, 
CIPHER_ALG_DES, true); + _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, false); + _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, true); + _setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, false); + + _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1, false); + _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256, false); + + _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1_HMAC, false); + _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256_HMAC, false); + + _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, true); + _setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, false); + + _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, QCE_MODE_CBC, + DES_KEY_SIZE, true); + _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, QCE_MODE_CBC, + DES3_EDE_KEY_SIZE, true); + _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_CBC, + AES128_KEY_SIZE, true); + _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_CBC, + AES256_KEY_SIZE, true); + _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, QCE_MODE_CBC, + DES_KEY_SIZE, false); + _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, QCE_MODE_CBC, + DES3_EDE_KEY_SIZE, false); + _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_CBC, + AES128_KEY_SIZE, false); + _setup_aead_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_AES, QCE_MODE_CBC, + AES256_KEY_SIZE, false); + + _setup_aead_ccm_cmdlistptrs(pdev, pvaddr, true); + _setup_aead_ccm_cmdlistptrs(pdev, pvaddr, false); + _setup_f8_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_KASUMI); + _setup_f8_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_SNOW3G); + _setup_f9_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_KASUMI); + _setup_f9_cmdlistptrs(pdev, pvaddr, QCE_OTA_ALGO_SNOW3G); + _setup_unlock_pipe_cmdlistptrs(pdev, pvaddr); return 0; } @@ -4280,57 +3948,33 @@ static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index, static int qce_setup_ce_sps_data(struct qce_device *pce_dev) { unsigned char *vaddr; - int i; - unsigned char *iovec_vaddr; - int iovec_memsize; vaddr = pce_dev->coh_vmem; vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr), - pce_dev->ce_bam_info.ce_burst_size); - iovec_vaddr = pce_dev->iovec_vmem; - iovec_memsize = pce_dev->iovec_memsize; - for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) { - /* Allow for 256 descriptor (cmd and data) entries per pipe */ - pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec = - (struct sps_iovec *)iovec_vaddr; - pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys = - virt_to_phys(pce_dev->ce_request_info[i]. - ce_sps.in_transfer.iovec); - iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE; - iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE; - pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec = - (struct sps_iovec *)iovec_vaddr; - pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys = - virt_to_phys(pce_dev->ce_request_info[i]. 
- ce_sps.out_transfer.iovec); - iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE; - iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE; - if (pce_dev->support_cmd_dscr) - qce_setup_cmdlistptrs(pce_dev, i, &vaddr); - vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr), - pce_dev->ce_bam_info.ce_burst_size); - pce_dev->ce_request_info[i].ce_sps.result_dump = - (uintptr_t)vaddr; - pce_dev->ce_request_info[i].ce_sps.result_dump_phy = - GET_PHYS_ADDR((uintptr_t)vaddr); - pce_dev->ce_request_info[i].ce_sps.result = - (struct ce_result_dump_format *)vaddr; - vaddr += CRYPTO_RESULT_DUMP_SIZE; - - pce_dev->ce_request_info[i].ce_sps.result_dump_null = - (uintptr_t)vaddr; - pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy = - GET_PHYS_ADDR((uintptr_t)vaddr); - pce_dev->ce_request_info[i].ce_sps.result_null = - (struct ce_result_dump_format *)vaddr; - vaddr += CRYPTO_RESULT_DUMP_SIZE; - - pce_dev->ce_request_info[i].ce_sps.ignore_buffer = - (uintptr_t)vaddr; - vaddr += pce_dev->ce_bam_info.ce_burst_size * 2; - } - if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize || - iovec_memsize < 0) + pce_dev->ce_sps.ce_burst_size); + /* Allow for 256 descriptor (cmd and data) entries per pipe */ + pce_dev->ce_sps.in_transfer.iovec = (struct sps_iovec *)vaddr; + pce_dev->ce_sps.in_transfer.iovec_phys = + (uintptr_t)GET_PHYS_ADDR(vaddr); + vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec); + + pce_dev->ce_sps.out_transfer.iovec = (struct sps_iovec *)vaddr; + pce_dev->ce_sps.out_transfer.iovec_phys = + (uintptr_t)GET_PHYS_ADDR(vaddr); + vaddr += QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec); + + if (pce_dev->support_cmd_dscr) + qce_setup_cmdlistptrs(pce_dev, &vaddr); + vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr), + pce_dev->ce_sps.ce_burst_size); + pce_dev->ce_sps.result_dump = (uintptr_t)vaddr; + pce_dev->ce_sps.result = (struct ce_result_dump_format *)vaddr; + vaddr += CRYPTO_RESULT_DUMP_SIZE; + + pce_dev->ce_sps.ignore_buffer = (uintptr_t)vaddr; + vaddr += pce_dev->ce_sps.ce_burst_size * 2; + + if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize) panic("qce50: Not enough coherent memory. 
Allocate %x , need %lx\n", pce_dev->memsize, (uintptr_t)vaddr - (uintptr_t)pce_dev->coh_vmem); @@ -4339,8 +3983,8 @@ static int qce_setup_ce_sps_data(struct qce_device *pce_dev) static int qce_init_ce_cfg_val(struct qce_device *pce_dev) { - uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1; - uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index; + uint32_t beats = (pce_dev->ce_sps.ce_burst_size >> 3) - 1; + uint32_t pipe_pair = pce_dev->ce_sps.pipe_pair_index; pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) | BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) | @@ -4511,122 +4155,6 @@ static int qce_init_ce_cfg_val(struct qce_device *pce_dev) return 0; } -static void _qce_ccm_get_around_input(struct qce_device *pce_dev, - struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir) -{ - struct qce_cmdlist_info *cmdlistinfo; - struct ce_sps_data *pce_sps_data; - - pce_sps_data = &preq_info->ce_sps; - if ((dir == QCE_DECRYPT) && pce_dev->no_get_around && - !(pce_dev->no_ccm_mac_status_get_around)) { - cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null; - _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo, - &pce_sps_data->in_transfer); - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer), - pce_dev->ce_bam_info.ce_burst_size, - &pce_sps_data->in_transfer); - _qce_set_flag(&pce_sps_data->in_transfer, - SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD); - } -} - -static void _qce_ccm_get_around_output(struct qce_device *pce_dev, - struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir) -{ - struct ce_sps_data *pce_sps_data; - - pce_sps_data = &preq_info->ce_sps; - - if ((dir == QCE_DECRYPT) && pce_dev->no_get_around && - !(pce_dev->no_ccm_mac_status_get_around)) { - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer), - pce_dev->ce_bam_info.ce_burst_size, - &pce_sps_data->out_transfer); - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null), - CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer); - } -} - -/* QCE_DUMMY_REQ */ -static void qce_dummy_complete(void *cookie, unsigned char *digest, - unsigned char *authdata, int ret) -{ - if (!cookie) - pr_err("invalid cookie\n"); -} - -static int qce_dummy_req(struct qce_device *pce_dev) -{ - int ret = 0; - - if (!(atomic_xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX]. 
- in_use, true) == false)) - return -EBUSY; - ret = qce_process_sha_req(pce_dev, NULL); - pce_dev->qce_stats.no_of_dummy_reqs++; - return ret; -} - -static int select_mode(struct qce_device *pce_dev, - struct ce_request_info *preq_info) -{ - struct ce_sps_data *pce_sps_data = &preq_info->ce_sps; - unsigned int no_of_queued_req; - unsigned int cadence; - - if (!pce_dev->no_get_around) { - _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT); - return 0; - } - - /* - * claim ownership of device - */ -again: - if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT) - != QCE_OWNER_NONE) { - ndelay(40); - goto again; - } - no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req); - if (pce_dev->mode == IN_INTERRUPT_MODE) { - if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) { - pce_dev->mode = IN_BUNCH_MODE; - pr_debug("pcedev %d mode switch to BUNCH\n", - pce_dev->dev_no); - _qce_set_flag(&pce_sps_data->out_transfer, - SPS_IOVEC_FLAG_INT); - pce_dev->intr_cadence = 0; - atomic_set(&pce_dev->bunch_cmd_seq, 1); - atomic_set(&pce_dev->last_intr_seq, 1); - mod_timer(&(pce_dev->timer), - (jiffies + DELAY_IN_JIFFIES)); - } else { - _qce_set_flag(&pce_sps_data->out_transfer, - SPS_IOVEC_FLAG_INT); - } - } else { - pce_dev->intr_cadence++; - cadence = (preq_info->req_len >> 7) + 1; - if (cadence > SET_INTR_AT_REQ) - cadence = SET_INTR_AT_REQ; - if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence - == cadence) && pce_dev->cadence_flag)) - atomic_inc(&pce_dev->bunch_cmd_seq); - else { - _qce_set_flag(&pce_sps_data->out_transfer, - SPS_IOVEC_FLAG_INT); - pce_dev->intr_cadence = 0; - atomic_set(&pce_dev->bunch_cmd_seq, 0); - atomic_set(&pce_dev->last_intr_seq, 0); - pce_dev->cadence_flag = ~pce_dev->cadence_flag; - } - } - - return 0; -} - static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req) { struct qce_device *pce_dev = (struct qce_device *) handle; @@ -4637,17 +4165,8 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req) int rc = 0; int ce_burst_size; struct qce_cmdlist_info *cmdlistinfo = NULL; - int req_info = -1; - struct ce_request_info *preq_info; - struct ce_sps_data *pce_sps_data; - req_info = qce_alloc_req_info(pce_dev); - if (req_info < 0) - return -EBUSY; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; - - ce_burst_size = pce_dev->ce_bam_info.ce_burst_size; + ce_burst_size = pce_dev->ce_sps.ce_burst_size; totallen_in = areq->cryptlen + areq->assoclen; if (q_req->dir == QCE_ENCRYPT) { q_req->cryptlen = areq->cryptlen; @@ -4659,56 +4178,56 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req) hw_pad_out = authsize; } - /* - * For crypto 5.0 that has burst size alignment requirement - * for data descritpor, - * the agent above(qcrypto) prepares the src scatter list with - * memory starting with associated data, followed by - * data stream to be ciphered. - * The destination scatter list is pointing to the same - * data area as source. - */ - if (pce_dev->ce_bam_info.minor_version == 0) - preq_info->src_nents = count_sg(areq->src, totallen_in); - else - preq_info->src_nents = count_sg(areq->src, areq->cryptlen); + if (pce_dev->ce_sps.minor_version == 0) { + /* + * For crypto 5.0 that has burst size alignment requirement + * for data descritpor, + * the agent above(qcrypto) prepares the src scatter list with + * memory starting with associated data, followed by + * data stream to be ciphered. + * The destination scatter list is pointing to the same + * data area as source. 
+ */ + pce_dev->src_nents = count_sg(areq->src, totallen_in); + } else { + pce_dev->src_nents = count_sg(areq->src, areq->cryptlen); + } - preq_info->assoc_nents = count_sg(areq->assoc, areq->assoclen); + pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen); + pce_dev->authsize = q_req->authsize; /* associated data input */ - qce_dma_map_sg(pce_dev->pdev, areq->assoc, preq_info->assoc_nents, + qce_dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, DMA_TO_DEVICE); /* cipher input */ - qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); /* cipher + mac output for encryption */ if (areq->src != areq->dst) { - if (pce_dev->ce_bam_info.minor_version == 0) + if (pce_dev->ce_sps.minor_version == 0) /* * The destination scatter list is pointing to the same * data area as src. * Note, the associated data will be pass-through * at the begining of destination area. */ - preq_info->dst_nents = count_sg(areq->dst, + pce_dev->dst_nents = count_sg(areq->dst, out_len + areq->assoclen); else - preq_info->dst_nents = count_sg(areq->dst, out_len); + pce_dev->dst_nents = count_sg(areq->dst, out_len); - qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + qce_dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } else { - preq_info->dst_nents = preq_info->src_nents; + pce_dev->dst_nents = pce_dev->src_nents; } if (pce_dev->support_cmd_dscr) { - cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info, - q_req); + cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, q_req); if (cmdlistinfo == NULL) { pr_err("Unsupported cipher algorithm %d, mode %d\n", q_req->alg, q_req->mode); - qce_free_req_info(pce_dev, req_info, false); return -EINVAL; } /* set up crypto device */ @@ -4719,33 +4238,34 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req) rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in, areq->assoclen); } - if (rc < 0) goto bad; - preq_info->mode = q_req->mode; - /* setup for callback, and issue command to bam */ - preq_info->areq = q_req->areq; - preq_info->qce_cb = q_req->qce_cb; - preq_info->dir = q_req->dir; - - /* setup xfer type for producer callback handling */ - preq_info->xfer_type = QCE_XFER_AEAD; - preq_info->req_len = totallen_in; - - _qce_sps_iovec_count_init(pce_dev, req_info); + pce_dev->areq = q_req->areq; + pce_dev->qce_cb = q_req->qce_cb; + + /* Register callback event for EOT (End of transfer) event. 
*/ + pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback; + pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; + rc = sps_register_event(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.producer.event); + if (rc) { + pr_err("Producer callback registration failed rc = %d\n", rc); + goto bad; + } + _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.in_transfer); - if (pce_dev->ce_bam_info.minor_version == 0) { + if (pce_dev->ce_sps.minor_version == 0) { if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in, - &pce_sps_data->in_transfer)) + &pce_dev->ce_sps.in_transfer)) goto bad; - _qce_set_flag(&pce_sps_data->in_transfer, + _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); /* @@ -4754,91 +4274,85 @@ static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req) */ if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len + areq->assoclen + hw_pad_out, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; if (totallen_in > SPS_MAX_PKT_SIZE) { - _qce_set_flag(&pce_sps_data->out_transfer, + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); - pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; + pce_dev->ce_sps.producer.event.options = + SPS_O_DESC_DONE; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } else { if (_qce_sps_add_data(GET_PHYS_ADDR( - pce_sps_data->result_dump), + pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; - _qce_set_flag(&pce_sps_data->out_transfer, + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); - pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; } - rc = _qce_sps_transfer(pce_dev, req_info); } else { if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen, - &pce_sps_data->in_transfer)) + &pce_dev->ce_sps.in_transfer)) goto bad; if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen, - &pce_sps_data->in_transfer)) + &pce_dev->ce_sps.in_transfer)) goto bad; - _qce_set_flag(&pce_sps_data->in_transfer, + _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); - - _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir); - if (pce_dev->no_get_around) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, - &pce_sps_data->cmdlistptr.unlock_all_pipes, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.cmdlistptr.unlock_all_pipes, + &pce_dev->ce_sps.in_transfer); /* Pass through to ignore associated data*/ if (_qce_sps_add_data( - GET_PHYS_ADDR(pce_sps_data->ignore_buffer), + GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer), areq->assoclen, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; if (_qce_sps_add_sg_data(pce_dev, areq->dst, out_len, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; /* Pass through to ignore hw_pad (padding of the MAC data) */ if (_qce_sps_add_data( - GET_PHYS_ADDR(pce_sps_data->ignore_buffer), - hw_pad_out, &pce_sps_data->out_transfer)) + GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer), + hw_pad_out, &pce_dev->ce_sps.out_transfer)) goto bad; if (pce_dev->no_get_around || totallen_in <= SPS_MAX_PKT_SIZE) { if (_qce_sps_add_data( - GET_PHYS_ADDR(pce_sps_data->result_dump), + GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + 
&pce_dev->ce_sps.out_transfer)) goto bad; - pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; } else { - pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } - - _qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir); - - select_mode(pce_dev, preq_info); - rc = _qce_sps_transfer(pce_dev, req_info); - cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + _qce_set_flag(&pce_dev->ce_sps.out_transfer, + SPS_IOVEC_FLAG_INT); } + rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: - if (preq_info->assoc_nents) { + if (pce_dev->assoc_nents) { qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, - preq_info->assoc_nents, DMA_TO_DEVICE); + pce_dev->assoc_nents, DMA_TO_DEVICE); } - if (preq_info->src_nents) { - qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + if (pce_dev->src_nents) { + qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); } if (areq->src != areq->dst) { - qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } - qce_free_req_info(pce_dev, req_info, false); return rc; } @@ -4853,10 +4367,10 @@ static int _qce_suspend(void *handle) qce_enable_clk(pce_dev); - sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe; + sps_pipe_info = pce_dev->ce_sps.consumer.pipe; sps_disconnect(sps_pipe_info); - sps_pipe_info = pce_dev->ce_bam_info.producer.pipe; + sps_pipe_info = pce_dev->ce_sps.producer.pipe; sps_disconnect(sps_pipe_info); qce_disable_clk(pce_dev); @@ -4875,8 +4389,8 @@ static int _qce_resume(void *handle) qce_enable_clk(pce_dev); - sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe; - sps_connect_info = &pce_dev->ce_bam_info.consumer.connect; + sps_pipe_info = pce_dev->ce_sps.consumer.pipe; + sps_connect_info = &pce_dev->ce_sps.consumer.connect; memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size); rc = sps_connect(sps_pipe_info, sps_connect_info); if (rc) { @@ -4884,18 +4398,16 @@ static int _qce_resume(void *handle) (uintptr_t)sps_pipe_info, rc); return rc; } - sps_pipe_info = pce_dev->ce_bam_info.producer.pipe; - sps_connect_info = &pce_dev->ce_bam_info.producer.connect; + sps_pipe_info = pce_dev->ce_sps.producer.pipe; + sps_connect_info = &pce_dev->ce_sps.producer.connect; memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size); rc = sps_connect(sps_pipe_info, sps_connect_info); if (rc) pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n", (uintptr_t)sps_pipe_info, rc); - rc = sps_register_event(sps_pipe_info, - &pce_dev->ce_bam_info.producer.event); - if (rc) - pr_err("Producer callback registration failed rc = %d\n", rc); + pce_dev->ce_sps.out_transfer.user = pce_dev->ce_sps.producer.pipe; + pce_dev->ce_sps.in_transfer.user = pce_dev->ce_sps.consumer.pipe; qce_disable_clk(pce_dev); return rc; @@ -4906,26 +4418,19 @@ EXPORT_SYMBOL(qce_pm_table); int qce_aead_req(void *handle, struct qce_req *q_req) { - struct qce_device *pce_dev = (struct qce_device *)handle; + struct qce_device *pce_dev; struct aead_request *areq; uint32_t authsize; struct crypto_aead *aead; uint32_t ivsize; uint32_t totallen; - int rc = 0; + int rc; struct qce_cmdlist_info *cmdlistinfo = NULL; - int req_info = -1; - struct ce_sps_data *pce_sps_data; - struct ce_request_info *preq_info; if (q_req->mode == QCE_MODE_CCM) return _qce_aead_ccm_req(handle, q_req); - 
req_info = qce_alloc_req_info(pce_dev); - if (req_info < 0) - return -EBUSY; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; + pce_dev = (struct qce_device *) handle; areq = (struct aead_request *) q_req->areq; aead = crypto_aead_reqtfm(areq); ivsize = crypto_aead_ivsize(aead); @@ -4945,83 +4450,87 @@ int qce_aead_req(void *handle, struct qce_req *q_req) totallen = q_req->cryptlen + areq->assoclen + ivsize; if (pce_dev->support_cmd_dscr) { - cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev, - req_info, q_req); + cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev, q_req); if (cmdlistinfo == NULL) { pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n", q_req->alg, q_req->mode, q_req->encklen, q_req->authsize); - qce_free_req_info(pce_dev, req_info, false); return -EINVAL; } /* set up crypto device */ rc = _ce_setup_aead(pce_dev, q_req, totallen, areq->assoclen + ivsize, cmdlistinfo); - if (rc < 0) { - qce_free_req_info(pce_dev, req_info, false); + if (rc < 0) return -EINVAL; - } - } + }; - preq_info->assoc_nents = count_sg(areq->assoc, areq->assoclen); - /* - * For crypto 5.0 that has burst size alignment requirement - * for data descritpor, - * the agent above(qcrypto) prepares the src scatter list with - * memory starting with associated data, followed by - * iv, and data stream to be ciphered. - */ - if (pce_dev->ce_bam_info.minor_version == 0) - preq_info->src_nents = count_sg(areq->src, totallen); - else - preq_info->src_nents = count_sg(areq->src, q_req->cryptlen); + pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen); + + if (pce_dev->ce_sps.minor_version == 0) { + /* + * For crypto 5.0 that has burst size alignment requirement + * for data descritpor, + * the agent above(qcrypto) prepares the src scatter list with + * memory starting with associated data, followed by + * iv, and data stream to be ciphered. + */ + pce_dev->src_nents = count_sg(areq->src, totallen); + } else { + pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen); + }; - preq_info->phy_iv_in = 0; + pce_dev->ivsize = q_req->ivsize; + pce_dev->authsize = q_req->authsize; + pce_dev->phy_iv_in = 0; /* associated data input */ - qce_dma_map_sg(pce_dev->pdev, areq->assoc, preq_info->assoc_nents, + qce_dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, DMA_TO_DEVICE); /* cipher input */ - qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); /* cipher output for encryption */ if (areq->src != areq->dst) { - if (pce_dev->ce_bam_info.minor_version == 0) + if (pce_dev->ce_sps.minor_version == 0) /* * The destination scatter list is pointing to the same * data area as source. 
*/ - preq_info->dst_nents = count_sg(areq->dst, totallen); + pce_dev->dst_nents = count_sg(areq->dst, totallen); else - preq_info->dst_nents = count_sg(areq->dst, + pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen); - qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + qce_dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); } /* cipher iv for input */ - if (pce_dev->ce_bam_info.minor_version != 0) - preq_info->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv, + if (pce_dev->ce_sps.minor_version != 0) + pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv, ivsize, DMA_TO_DEVICE); /* setup for callback, and issue command to bam */ - preq_info->areq = q_req->areq; - preq_info->qce_cb = q_req->qce_cb; - preq_info->dir = q_req->dir; - - /* setup xfer type for producer callback handling */ - preq_info->xfer_type = QCE_XFER_AEAD; - preq_info->req_len = totallen; - - _qce_sps_iovec_count_init(pce_dev, req_info); + pce_dev->areq = q_req->areq; + pce_dev->qce_cb = q_req->qce_cb; + + /* Register callback event for EOT (End of transfer) event. */ + pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback; + pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; + rc = sps_register_event(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.producer.event); + if (rc) { + pr_err("Producer callback registration failed rc = %d\n", rc); + goto bad; + } + _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) { _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.in_transfer); } else { rc = _ce_setup_aead_direct(pce_dev, q_req, totallen, areq->assoclen + ivsize); @@ -5029,95 +4538,97 @@ int qce_aead_req(void *handle, struct qce_req *q_req) goto bad; } - preq_info->mode = q_req->mode; - - if (pce_dev->ce_bam_info.minor_version == 0) { + if (pce_dev->ce_sps.minor_version == 0) { if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen, - &pce_sps_data->in_transfer)) + &pce_dev->ce_sps.in_transfer)) goto bad; - _qce_set_flag(&pce_sps_data->in_transfer, + _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; if (totallen > SPS_MAX_PKT_SIZE) { - _qce_set_flag(&pce_sps_data->out_transfer, + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); - pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; + pce_dev->ce_sps.producer.event.options = + SPS_O_DESC_DONE; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } else { if (_qce_sps_add_data(GET_PHYS_ADDR( - pce_sps_data->result_dump), + pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; - _qce_set_flag(&pce_sps_data->out_transfer, + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); - pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; } - rc = _qce_sps_transfer(pce_dev, req_info); } else { if (_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen, - &pce_sps_data->in_transfer)) + &pce_dev->ce_sps.in_transfer)) goto bad; - if (_qce_sps_add_data((uint32_t)preq_info->phy_iv_in, ivsize, - &pce_sps_data->in_transfer)) + if (_qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize, + &pce_dev->ce_sps.in_transfer)) goto bad; if (_qce_sps_add_sg_data(pce_dev, areq->src, q_req->cryptlen, - &pce_sps_data->in_transfer)) 
+ &pce_dev->ce_sps.in_transfer)) goto bad; - _qce_set_flag(&pce_sps_data->in_transfer, + _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); if (pce_dev->no_get_around) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, - &pce_sps_data->cmdlistptr.unlock_all_pipes, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.cmdlistptr.unlock_all_pipes, + &pce_dev->ce_sps.in_transfer); /* Pass through to ignore associated + iv data*/ if (_qce_sps_add_data( - GET_PHYS_ADDR(pce_sps_data->ignore_buffer), + GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer), (ivsize + areq->assoclen), - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; if (_qce_sps_add_sg_data(pce_dev, areq->dst, q_req->cryptlen, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) { if (_qce_sps_add_data( - GET_PHYS_ADDR(pce_sps_data->result_dump), + GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; - pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; } else { - pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } - select_mode(pce_dev, preq_info); - rc = _qce_sps_transfer(pce_dev, req_info); - cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + _qce_set_flag(&pce_dev->ce_sps.out_transfer, + SPS_IOVEC_FLAG_INT); } + rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: - if (preq_info->assoc_nents) + if (pce_dev->assoc_nents) { qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, - preq_info->assoc_nents, DMA_TO_DEVICE); - if (preq_info->src_nents) - qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + pce_dev->assoc_nents, DMA_TO_DEVICE); + } + if (pce_dev->src_nents) { + qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); - if (areq->src != areq->dst) - qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents, + } + if (areq->src != areq->dst) { + qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, DMA_FROM_DEVICE); - if (preq_info->phy_iv_in) - dma_unmap_single(pce_dev->pdev, preq_info->phy_iv_in, + } + if (pce_dev->phy_iv_in) { + dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in, ivsize, DMA_TO_DEVICE); - qce_free_req_info(pce_dev, req_info, false); + } return rc; } @@ -5130,50 +4641,38 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) struct ablkcipher_request *areq = (struct ablkcipher_request *) c_req->areq; struct qce_cmdlist_info *cmdlistinfo = NULL; - int req_info = -1; - struct ce_sps_data *pce_sps_data; - struct ce_request_info *preq_info; - - req_info = qce_alloc_req_info(pce_dev); - if (req_info < 0) - return -EBUSY; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; - preq_info->src_nents = 0; - preq_info->dst_nents = 0; + pce_dev->src_nents = 0; + pce_dev->dst_nents = 0; /* cipher input */ - preq_info->src_nents = count_sg(areq->src, areq->nbytes); + pce_dev->src_nents = count_sg(areq->src, areq->nbytes); - qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents, + qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); /* cipher output */ if (areq->src != areq->dst) { - preq_info->dst_nents = count_sg(areq->dst, areq->nbytes); + pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes); qce_dma_map_sg(pce_dev->pdev, areq->dst, - preq_info->dst_nents, DMA_FROM_DEVICE); + pce_dev->dst_nents, DMA_FROM_DEVICE); } else { - preq_info->dst_nents = preq_info->src_nents; - } - preq_info->dir = c_req->dir; - if ((pce_dev->ce_bam_info.minor_version == 0) && - (preq_info->dir == QCE_DECRYPT) && - (c_req->mode == QCE_MODE_CBC)) { - memcpy(preq_info->dec_iv, (unsigned char *) - sg_virt(areq->src) + areq->src->length - 16, + pce_dev->dst_nents = pce_dev->src_nents; + } + pce_dev->dir = c_req->dir; + if ((pce_dev->ce_sps.minor_version == 0) && (c_req->dir == QCE_DECRYPT) + && (c_req->mode == QCE_MODE_CBC)) { + memcpy(pce_dev->dec_iv, (unsigned char *)sg_virt(areq->src) + + areq->src->length - 16, NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE); } /* set up crypto device */ if (pce_dev->support_cmd_dscr) { - cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, - req_info, c_req); + cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, c_req); if (cmdlistinfo == NULL) { pr_err("Unsupported cipher algorithm %d, mode %d\n", c_req->alg, c_req->mode); - qce_free_req_info(pce_dev, req_info, false); return -EINVAL; } rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0, @@ -5184,66 +4683,66 @@ int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) if (rc < 0) goto bad; - preq_info->mode = c_req->mode; - /* setup for client callback, and issue command to BAM */ - preq_info->areq = areq; - preq_info->qce_cb = c_req->qce_cb; - - /* setup xfer type for producer callback handling */ - preq_info->xfer_type = QCE_XFER_CIPHERING; - preq_info->req_len = areq->nbytes; - - _qce_sps_iovec_count_init(pce_dev, req_info); + pce_dev->areq = areq; + pce_dev->qce_cb = c_req->qce_cb; + + /* Register callback event for EOT (End of transfer) event. 
*/ + pce_dev->ce_sps.producer.event.callback = + _ablk_cipher_sps_producer_callback; + pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; + rc = sps_register_event(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.producer.event); + if (rc) { + pr_err("Producer callback registration failed rc = %d\n", rc); + goto bad; + } + _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.in_transfer); if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes, - &pce_sps_data->in_transfer)) + &pce_dev->ce_sps.in_transfer)) goto bad; - _qce_set_flag(&pce_sps_data->in_transfer, + _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); if (pce_dev->no_get_around) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, - &pce_sps_data->cmdlistptr.unlock_all_pipes, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.cmdlistptr.unlock_all_pipes, + &pce_dev->ce_sps.in_transfer); if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; if (pce_dev->no_get_around || areq->nbytes <= SPS_MAX_PKT_SIZE) { - pce_sps_data->producer_state = QCE_PIPE_STATE_COMP; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP; if (_qce_sps_add_data( - GET_PHYS_ADDR(pce_sps_data->result_dump), + GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; } else { - pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE; + pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE; } - - select_mode(pce_dev, preq_info); - rc = _qce_sps_transfer(pce_dev, req_info); - cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); + rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; - - return 0; + return 0; bad: if (areq->src != areq->dst) { - if (preq_info->dst_nents) { + if (pce_dev->dst_nents) { qce_dma_unmap_sg(pce_dev->pdev, areq->dst, - preq_info->dst_nents, DMA_FROM_DEVICE); + pce_dev->dst_nents, DMA_FROM_DEVICE); } } - if (preq_info->src_nents) { + if (pce_dev->src_nents) { qce_dma_unmap_sg(pce_dev->pdev, areq->src, - preq_info->src_nents, + pce_dev->src_nents, (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); } - qce_free_req_info(pce_dev, req_info, false); return rc; } EXPORT_SYMBOL(qce_ablk_cipher_req); @@ -5253,36 +4752,17 @@ int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) struct qce_device *pce_dev = (struct qce_device *) handle; int rc; - struct ahash_request *areq; + struct ahash_request *areq = (struct ahash_request *)sreq->areq; struct qce_cmdlist_info *cmdlistinfo = NULL; - int req_info = -1; - struct ce_sps_data *pce_sps_data; - struct ce_request_info *preq_info; - bool is_dummy = false; - - if (!sreq) { - sreq = &(pce_dev->dummyreq.sreq); - req_info = DUMMY_REQ_INDEX; - is_dummy = true; - } else { - req_info = qce_alloc_req_info(pce_dev); - if (req_info < 0) - return -EBUSY; - } - - areq = (struct ahash_request *)sreq->areq; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; - preq_info->src_nents = count_sg(sreq->src, sreq->size); - qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents, + pce_dev->src_nents = count_sg(sreq->src, sreq->size); + qce_dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents, DMA_TO_DEVICE); if (pce_dev->support_cmd_dscr) { - cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq); + cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, sreq); if (cmdlistinfo == NULL) { pr_err("Unsupported hash algorithm %d\n", sreq->alg); - qce_free_req_info(pce_dev, req_info, false); return -EINVAL; } rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo); @@ -5292,61 +4772,57 @@ int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) if (rc < 0) goto bad; - preq_info->areq = areq; - preq_info->qce_cb = sreq->qce_cb; + pce_dev->areq = areq; + pce_dev->qce_cb = sreq->qce_cb; - /* setup xfer type for producer callback handling */ - preq_info->xfer_type = QCE_XFER_HASHING; - preq_info->req_len = sreq->size; - - _qce_sps_iovec_count_init(pce_dev, req_info); + /* Register callback event for EOT (End of transfer) event. */ + pce_dev->ce_sps.producer.event.callback = _sha_sps_producer_callback; + pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; + rc = sps_register_event(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.producer.event); + if (rc) { + pr_err("Producer callback registration failed rc = %d\n", rc); + goto bad; + } + _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.in_transfer); if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes, - &pce_sps_data->in_transfer)) + &pce_dev->ce_sps.in_transfer)) goto bad; /* always ensure there is input data. 
ZLT does not work for bam-ndp */ if (!areq->nbytes) _qce_sps_add_data( - GET_PHYS_ADDR(pce_sps_data->ignore_buffer), - pce_dev->ce_bam_info.ce_burst_size, - &pce_sps_data->in_transfer); - _qce_set_flag(&pce_sps_data->in_transfer, + GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer), + pce_dev->ce_sps.ce_burst_size, + &pce_dev->ce_sps.in_transfer); + _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); if (pce_dev->no_get_around) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, - &pce_sps_data->cmdlistptr.unlock_all_pipes, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.cmdlistptr.unlock_all_pipes, + &pce_dev->ce_sps.in_transfer); - if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + if (_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer)) + &pce_dev->ce_sps.out_transfer)) goto bad; - - if (is_dummy) { - _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT); - rc = _qce_sps_transfer(pce_dev, req_info); - } else { - select_mode(pce_dev, preq_info); - rc = _qce_sps_transfer(pce_dev, req_info); - cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); - } + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); + rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; - return 0; + return 0; bad: - if (preq_info->src_nents) { + if (pce_dev->src_nents) { qce_dma_unmap_sg(pce_dev->pdev, sreq->src, - preq_info->src_nents, DMA_TO_DEVICE); + pce_dev->src_nents, DMA_TO_DEVICE); } - qce_free_req_info(pce_dev, req_info, false); return rc; } EXPORT_SYMBOL(qce_process_sha_req); - int qce_f8_req(void *handle, struct qce_f8_req *req, void *cookie, qce_comp_func_ptr_t qce_cb) { @@ -5355,25 +4831,15 @@ int qce_f8_req(void *handle, struct qce_f8_req *req, dma_addr_t dst; int rc; struct qce_cmdlist_info *cmdlistinfo; - int req_info = -1; - struct ce_request_info *preq_info; - struct ce_sps_data *pce_sps_data; - - req_info = qce_alloc_req_info(pce_dev); - if (req_info < 0) - return -EBUSY; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; switch (req->algorithm) { case QCE_OTA_ALGO_KASUMI: - cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi; + cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_kasumi; break; case QCE_OTA_ALGO_SNOW3G: - cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g; + cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_snow3g; break; default: - qce_free_req_info(pce_dev, req_info, false); return -EINVAL; }; @@ -5381,13 +4847,11 @@ int qce_f8_req(void *handle, struct qce_f8_req *req, /* don't support key stream mode */ - if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) { - qce_free_req_info(pce_dev, req_info, false); + if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) return -EINVAL; - } /* F8 cipher input */ - preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, + pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->data_in, req->data_len, (req->data_in == req->data_out) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); @@ -5396,13 +4860,13 @@ int qce_f8_req(void *handle, struct qce_f8_req *req, if (req->data_in != req->data_out) { dst = dma_map_single(pce_dev->pdev, req->data_out, req->data_len, DMA_FROM_DEVICE); - preq_info->phy_ota_dst = dst; + pce_dev->phy_ota_dst = dst; } else { /* in place ciphering */ - dst = preq_info->phy_ota_src; - preq_info->phy_ota_dst = 0; + dst = pce_dev->phy_ota_src; + pce_dev->phy_ota_dst = 0; } - preq_info->ota_size = req->data_len; + pce_dev->ota_size = req->data_len; /* set up crypto device */ @@ -5416,52 +4880,54 @@ int qce_f8_req(void *handle, struct qce_f8_req *req, goto bad; /* setup for callback, and issue command to sps */ - preq_info->areq = cookie; - preq_info->qce_cb = qce_cb; - - /* setup xfer type for producer callback handling */ - preq_info->xfer_type = QCE_XFER_F8; - preq_info->req_len = req->data_len; - - _qce_sps_iovec_count_init(pce_dev, req_info); + pce_dev->areq = cookie; + pce_dev->qce_cb = qce_cb; + + /* Register producer callback event for DESC_DONE event. */ + pce_dev->ce_sps.producer.event.callback = + _f8_sps_producer_callback; + pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; + rc = sps_register_event(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.producer.event); + if (rc) { + pr_err("Producer callback registration failed rc = %d\n", rc); + goto bad; + } + _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.in_transfer); - _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len, - &pce_sps_data->in_transfer); - - _qce_set_flag(&pce_sps_data->in_transfer, - SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); + _qce_sps_add_data((uint32_t)pce_dev->phy_ota_src, req->data_len, + &pce_dev->ce_sps.in_transfer); + _qce_set_flag(&pce_dev->ce_sps.in_transfer, + SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, - &pce_sps_data->cmdlistptr.unlock_all_pipes, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.cmdlistptr.unlock_all_pipes, + &pce_dev->ce_sps.in_transfer); _qce_sps_add_data((uint32_t)dst, req->data_len, - &pce_sps_data->out_transfer); + &pce_dev->ce_sps.out_transfer); - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer); - - select_mode(pce_dev, preq_info); - rc = _qce_sps_transfer(pce_dev, req_info); - cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + &pce_dev->ce_sps.out_transfer); + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); + rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: - if (preq_info->phy_ota_dst != 0) - dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, + if (pce_dev->phy_ota_dst != 0) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, req->data_len, DMA_FROM_DEVICE); - if (preq_info->phy_ota_src != 0) - dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, + if (pce_dev->phy_ota_src != 0) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, req->data_len, (req->data_in == req->data_out) ? 
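/*
 * Sketch of the in-place vs. out-of-place buffer mapping used by the
 * F8 (OTA) request above: linear buffers go through dma_map_single(),
 * and an in-place request reuses one bidirectional mapping for both
 * directions. The helper name and signature are illustrative.
 */
static dma_addr_t example_map_f8_io(struct device *dev,
				    void *data_in, void *data_out,
				    size_t len, dma_addr_t *phy_src)
{
	*phy_src = dma_map_single(dev, data_in, len,
				  (data_in == data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	if (data_in == data_out)
		return *phy_src;	/* in-place ciphering */

	return dma_map_single(dev, data_out, len, DMA_FROM_DEVICE);
}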
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); - qce_free_req_info(pce_dev, req_info, false); return rc; } EXPORT_SYMBOL(qce_f8_req); @@ -5478,32 +4944,22 @@ int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, dma_addr_t dst = 0; int rc = 0; struct qce_cmdlist_info *cmdlistinfo; - int req_info = -1; - struct ce_request_info *preq_info; - struct ce_sps_data *pce_sps_data; - - req_info = qce_alloc_req_info(pce_dev); - if (req_info < 0) - return -EBUSY; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; switch (req->algorithm) { case QCE_OTA_ALGO_KASUMI: - cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi; + cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_kasumi; break; case QCE_OTA_ALGO_SNOW3G: - cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g; + cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f8_snow3g; break; default: - qce_free_req_info(pce_dev, req_info, false); return -EINVAL; }; total = num_pkt * req->data_len; /* F8 cipher input */ - preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, + pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->data_in, total, (req->data_in == req->data_out) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); @@ -5512,14 +4968,14 @@ int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, if (req->data_in != req->data_out) { dst = dma_map_single(pce_dev->pdev, req->data_out, total, DMA_FROM_DEVICE); - preq_info->phy_ota_dst = dst; + pce_dev->phy_ota_dst = dst; } else { /* in place ciphering */ - dst = preq_info->phy_ota_src; - preq_info->phy_ota_dst = 0; + dst = pce_dev->phy_ota_src; + pce_dev->phy_ota_dst = 0; } - preq_info->ota_size = total; + pce_dev->ota_size = total; /* set up crypto device */ if (pce_dev->support_cmd_dscr) @@ -5532,49 +4988,52 @@ int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, goto bad; /* setup for callback, and issue command to sps */ - preq_info->areq = cookie; - preq_info->qce_cb = qce_cb; - - /* setup xfer type for producer callback handling */ - preq_info->xfer_type = QCE_XFER_F8; - preq_info->req_len = total; - - _qce_sps_iovec_count_init(pce_dev, req_info); + pce_dev->areq = cookie; + pce_dev->qce_cb = qce_cb; + + /* Register producer callback event for DESC_DONE event. 
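/*
 * Sketch of the per-request producer-pipe registration this series
 * restores: a DESC_DONE callback is armed on the producer pipe before
 * the transfer is queued. The sps_pipe/sps_register_event declarations
 * come from the tree's SPS/BAM header; the helper names below are
 * illustrative.
 */
static void example_producer_cb(struct sps_event_notify *notify)
{
	/* unmap buffers and invoke the client's qce_cb from here */
}

static int example_arm_desc_done(struct sps_pipe *pipe,
				 struct sps_register_event *ev)
{
	ev->callback = example_producer_cb;
	ev->options  = SPS_O_DESC_DONE;	/* notify per completed descriptor */
	return sps_register_event(pipe, ev);
}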
*/ + pce_dev->ce_sps.producer.event.callback = + _f8_sps_producer_callback; + pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; + rc = sps_register_event(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.producer.event); + if (rc) { + pr_err("Producer callback registration failed rc = %d\n", rc); + goto bad; + } + _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.in_transfer); - _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total, - &pce_sps_data->in_transfer); - _qce_set_flag(&pce_sps_data->in_transfer, + _qce_sps_add_data((uint32_t)pce_dev->phy_ota_src, total, + &pce_dev->ce_sps.in_transfer); + _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, - &pce_sps_data->cmdlistptr.unlock_all_pipes, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.cmdlistptr.unlock_all_pipes, + &pce_dev->ce_sps.in_transfer); _qce_sps_add_data((uint32_t)dst, total, - &pce_sps_data->out_transfer); + &pce_dev->ce_sps.out_transfer); - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer); - - select_mode(pce_dev, preq_info); - rc = _qce_sps_transfer(pce_dev, req_info); - cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + &pce_dev->ce_sps.out_transfer); + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); + rc = _qce_sps_transfer(pce_dev); if (rc == 0) return 0; bad: - if (preq_info->phy_ota_dst) - dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total, + if (pce_dev->phy_ota_dst) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total, DMA_FROM_DEVICE); - dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total, + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total, (req->data_in == req->data_out) ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); - qce_free_req_info(pce_dev, req_info, false); return rc; } EXPORT_SYMBOL(qce_f8_multi_pkt_req); @@ -5585,31 +5044,22 @@ int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie, struct qce_device *pce_dev = (struct qce_device *) handle; int rc; struct qce_cmdlist_info *cmdlistinfo; - int req_info = -1; - struct ce_sps_data *pce_sps_data; - struct ce_request_info *preq_info; - - req_info = qce_alloc_req_info(pce_dev); - if (req_info < 0) - return -EBUSY; - preq_info = &pce_dev->ce_request_info[req_info]; - pce_sps_data = &preq_info->ce_sps; + switch (req->algorithm) { case QCE_OTA_ALGO_KASUMI: - cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi; + cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f9_kasumi; break; case QCE_OTA_ALGO_SNOW3G: - cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g; + cmdlistinfo = &pce_dev->ce_sps.cmdlistptr.f9_snow3g; break; default: - qce_free_req_info(pce_dev, req_info, false); return -EINVAL; }; - preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message, + pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message, req->msize, DMA_TO_DEVICE); - preq_info->ota_size = req->msize; + pce_dev->ota_size = req->msize; if (pce_dev->support_cmd_dscr) rc = _ce_f9_setup(pce_dev, req, cmdlistinfo); @@ -5619,40 +5069,43 @@ int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie, goto bad; /* setup for callback, and issue command to sps */ - preq_info->areq = cookie; - preq_info->qce_cb = qce_cb; - - /* setup xfer type for producer callback handling */ - preq_info->xfer_type = QCE_XFER_F9; - preq_info->req_len = req->msize; + pce_dev->areq = cookie; + pce_dev->qce_cb = qce_cb; + + /* Register producer callback event for DESC_DONE event. */ + pce_dev->ce_sps.producer.event.callback = _f9_sps_producer_callback; + pce_dev->ce_sps.producer.event.options = SPS_O_DESC_DONE; + rc = sps_register_event(pce_dev->ce_sps.producer.pipe, + &pce_dev->ce_sps.producer.event); + if (rc) { + pr_err("Producer callback registration failed rc = %d\n", rc); + goto bad; + } - _qce_sps_iovec_count_init(pce_dev, req_info); + _qce_sps_iovec_count_init(pce_dev); if (pce_dev->support_cmd_dscr) _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo, - &pce_sps_data->in_transfer); - _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize, - &pce_sps_data->in_transfer); - _qce_set_flag(&pce_sps_data->in_transfer, + &pce_dev->ce_sps.in_transfer); + _qce_sps_add_data((uint32_t)pce_dev->phy_ota_src, req->msize, + &pce_dev->ce_sps.in_transfer); + _qce_set_flag(&pce_dev->ce_sps.in_transfer, SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD); _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK, - &pce_sps_data->cmdlistptr.unlock_all_pipes, - &pce_sps_data->in_transfer); + &pce_dev->ce_sps.cmdlistptr.unlock_all_pipes, + &pce_dev->ce_sps.in_transfer); - _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump), + _qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump), CRYPTO_RESULT_DUMP_SIZE, - &pce_sps_data->out_transfer); - - select_mode(pce_dev, preq_info); - rc = _qce_sps_transfer(pce_dev, req_info); - cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE); + &pce_dev->ce_sps.out_transfer); + _qce_set_flag(&pce_dev->ce_sps.out_transfer, SPS_IOVEC_FLAG_INT); + rc = _qce_sps_transfer(pce_dev); if (rc) goto bad; return 0; bad: - dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, req->msize, DMA_TO_DEVICE); - qce_free_req_info(pce_dev, req_info, false); return rc; } 
EXPORT_SYMBOL(qce_f9_req); @@ -5693,38 +5146,30 @@ static int __qce_get_device_tree_data(struct platform_device *pdev, if (of_property_read_u32((&pdev->dev)->of_node, "qcom,bam-pipe-pair", - &pce_dev->ce_bam_info.pipe_pair_index)) { + &pce_dev->ce_sps.pipe_pair_index)) { pr_err("Fail to get bam pipe pair information.\n"); return -EINVAL; } if (of_property_read_u32((&pdev->dev)->of_node, "qcom,ce-device", - &pce_dev->ce_bam_info.ce_device)) { + &pce_dev->ce_sps.ce_device)) { pr_err("Fail to get CE device information.\n"); return -EINVAL; } if (of_property_read_u32((&pdev->dev)->of_node, "qcom,ce-hw-instance", - &pce_dev->ce_bam_info.ce_hw_instance)) { + &pce_dev->ce_sps.ce_hw_instance)) { pr_err("Fail to get CE hw instance information.\n"); return -EINVAL; } - if (of_property_read_u32((&pdev->dev)->of_node, - "qcom,bam-ee", - &pce_dev->ce_bam_info.bam_ee)) { - pr_info("BAM Apps EE is not defined, setting to default 1\n"); - pce_dev->ce_bam_info.bam_ee = 1; - } if (of_property_read_u32((&pdev->dev)->of_node, "qcom,ce-opp-freq", &pce_dev->ce_opp_freq_hz)) { pr_info("CE operating frequency is not defined, setting to default 100MHZ\n"); pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ; } - pce_dev->ce_bam_info.dest_pipe_index = - 2 * pce_dev->ce_bam_info.pipe_pair_index; - pce_dev->ce_bam_info.src_pipe_index = - pce_dev->ce_bam_info.dest_pipe_index + 1; + pce_dev->ce_sps.dest_pipe_index = 2 * pce_dev->ce_sps.pipe_pair_index; + pce_dev->ce_sps.src_pipe_index = pce_dev->ce_sps.dest_pipe_index + 1; resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crypto-base"); @@ -5754,15 +5199,15 @@ static int __qce_get_device_tree_data(struct platform_device *pdev, resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (resource) { - pce_dev->ce_bam_info.bam_irq = resource->start; + pce_dev->ce_sps.bam_irq = resource->start; } else { pr_err("CRYPTO BAM IRQ unavailable.\n"); goto err_dev; } return rc; err_dev: - if (pce_dev->ce_bam_info.bam_iobase) - iounmap(pce_dev->ce_bam_info.bam_iobase); + if (pce_dev->ce_sps.bam_iobase) + iounmap(pce_dev->ce_sps.bam_iobase); err_getting_bam_info: if (pce_dev->iobase) @@ -5785,21 +5230,12 @@ static int __qce_init_clk(struct qce_device *pce_dev) goto exit_put_core_src_clk; } } else { - if (pce_dev->support_only_core_src_clk) { - rc = PTR_ERR(pce_dev->ce_core_src_clk); - pce_dev->ce_core_src_clk = NULL; - pr_err("Unable to get CE core src clk\n"); - return rc; - } else { - pr_warn("Unable to get CE core src clk, set to NULL\n"); - pce_dev->ce_core_src_clk = NULL; - } + pr_warn("Unable to get CE core src clk, set to NULL\n"); + pce_dev->ce_core_src_clk = NULL; } if (pce_dev->support_only_core_src_clk) { pce_dev->ce_core_clk = NULL; - pce_dev->ce_clk = NULL; - pce_dev->ce_bus_clk = NULL; } else { pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk"); if (IS_ERR(pce_dev->ce_core_clk)) { @@ -5807,31 +5243,30 @@ static int __qce_init_clk(struct qce_device *pce_dev) pr_err("Unable to get CE core clk\n"); goto exit_put_core_src_clk; } - pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk"); - if (IS_ERR(pce_dev->ce_clk)) { - rc = PTR_ERR(pce_dev->ce_clk); - pr_err("Unable to get CE interface clk\n"); - goto exit_put_core_clk; - } + } - pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk"); - if (IS_ERR(pce_dev->ce_bus_clk)) { - rc = PTR_ERR(pce_dev->ce_bus_clk); - pr_err("Unable to get CE BUS interface clk\n"); - goto exit_put_iface_clk; - } + pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk"); + if (IS_ERR(pce_dev->ce_clk)) { + rc = PTR_ERR(pce_dev->ce_clk); + 
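/*
 * Sketch of the device-tree parsing pattern in the hunk above: the BAM
 * pipe pair is mandatory and fails the probe when absent, while the CE
 * operating frequency is optional with a 100 MHz default. Property
 * names are the ones used by the patch; the helper is illustrative.
 */
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_parse_ce_dt(struct platform_device *pdev,
			       u32 *pipe_pair, u32 *opp_freq_hz)
{
	struct device_node *np = pdev->dev.of_node;

	if (of_property_read_u32(np, "qcom,bam-pipe-pair", pipe_pair)) {
		dev_err(&pdev->dev, "missing qcom,bam-pipe-pair\n");
		return -EINVAL;			/* mandatory property */
	}

	if (of_property_read_u32(np, "qcom,ce-opp-freq", opp_freq_hz))
		*opp_freq_hz = 100000000;	/* optional, default 100 MHz */

	/* consumer/producer pipe indices derive as 2*pair and 2*pair + 1 */
	return 0;
}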
pr_err("Unable to get CE interface clk\n"); + goto exit_put_core_clk; + } + + pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk"); + if (IS_ERR(pce_dev->ce_bus_clk)) { + rc = PTR_ERR(pce_dev->ce_bus_clk); + pr_err("Unable to get CE BUS interface clk\n"); + goto exit_put_iface_clk; } return rc; exit_put_iface_clk: - if (pce_dev->ce_clk) - clk_put(pce_dev->ce_clk); + clk_put(pce_dev->ce_clk); exit_put_core_clk: if (pce_dev->ce_core_clk) clk_put(pce_dev->ce_core_clk); exit_put_core_src_clk: - if (pce_dev->ce_core_src_clk) - clk_put(pce_dev->ce_core_src_clk); + clk_put(pce_dev->ce_core_src_clk); pr_err("Unable to init CE clks, rc = %d\n", rc); return rc; } @@ -5853,23 +5288,16 @@ int qce_enable_clk(void *handle) struct qce_device *pce_dev = (struct qce_device *)handle; int rc = 0; - if (pce_dev->ce_core_src_clk) { - rc = clk_prepare_enable(pce_dev->ce_core_src_clk); - if (rc) { - pr_err("Unable to enable/prepare CE core src clk\n"); - return rc; - } + if (pce_dev->support_only_core_src_clk) { + if (pce_dev->ce_core_src_clk) + rc = clk_prepare_enable(pce_dev->ce_core_src_clk); + } else { + if (pce_dev->ce_core_clk) + rc = clk_prepare_enable(pce_dev->ce_core_clk); } - - if (pce_dev->support_only_core_src_clk) + if (rc) { + pr_err("Unable to enable/prepare CE core clk\n"); return rc; - - if (pce_dev->ce_core_clk) { - rc = clk_prepare_enable(pce_dev->ce_core_clk); - if (rc) { - pr_err("Unable to enable/prepare CE core clk\n"); - goto exit_disable_core_src_clk; - } } if (pce_dev->ce_clk) { @@ -5890,14 +5318,12 @@ int qce_enable_clk(void *handle) return rc; exit_disable_ce_clk: - if (pce_dev->ce_clk) - clk_disable_unprepare(pce_dev->ce_clk); + clk_disable_unprepare(pce_dev->ce_clk); exit_disable_core_clk: - if (pce_dev->ce_core_clk) - clk_disable_unprepare(pce_dev->ce_core_clk); -exit_disable_core_src_clk: - if (pce_dev->ce_core_src_clk) + if (pce_dev->support_only_core_src_clk) clk_disable_unprepare(pce_dev->ce_core_src_clk); + else + clk_disable_unprepare(pce_dev->ce_core_clk); return rc; } EXPORT_SYMBOL(qce_enable_clk); @@ -5911,52 +5337,22 @@ int qce_disable_clk(void *handle) clk_disable_unprepare(pce_dev->ce_bus_clk); if (pce_dev->ce_clk) clk_disable_unprepare(pce_dev->ce_clk); - if (pce_dev->ce_core_clk) - clk_disable_unprepare(pce_dev->ce_core_clk); - if (pce_dev->ce_core_src_clk) - clk_disable_unprepare(pce_dev->ce_core_src_clk); + if (pce_dev->support_only_core_src_clk) { + if (pce_dev->ce_core_src_clk) + clk_disable_unprepare(pce_dev->ce_core_src_clk); + } else { + if (pce_dev->ce_core_clk) + clk_disable_unprepare(pce_dev->ce_core_clk); + } return rc; } EXPORT_SYMBOL(qce_disable_clk); -/* dummy req setup */ -static int setup_dummy_req(struct qce_device *pce_dev) -{ - char *input = - "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs"; - int len = DUMMY_REQ_DATA_LEN; - - memcpy(pce_dev->dummyreq_in_buf, input, len); - sg_set_buf(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len); - sg_mark_end(&pce_dev->dummyreq.sg); - - pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1; - pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete; - pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg; - pce_dev->dummyreq.sreq.auth_data[0] = 0; - pce_dev->dummyreq.sreq.auth_data[1] = 0; - pce_dev->dummyreq.sreq.auth_data[2] = 0; - pce_dev->dummyreq.sreq.auth_data[3] = 0; - pce_dev->dummyreq.sreq.first_blk = 1; - pce_dev->dummyreq.sreq.last_blk = 1; - pce_dev->dummyreq.sreq.size = len; - pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq; - pce_dev->dummyreq.sreq.flags = 0; - 
pce_dev->dummyreq.sreq.authkey = NULL; - - pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src; - pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size; - - return 0; -} - /* crypto engine open function. */ void *qce_open(struct platform_device *pdev, int *rc) { struct qce_device *pce_dev; - int i; - static int pcedev_no = 1; pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL); if (!pce_dev) { @@ -5966,7 +5362,6 @@ void *qce_open(struct platform_device *pdev, int *rc) } pce_dev->pdev = &pdev->dev; - mutex_lock(&qce_iomap_mutex); if (pdev->dev.of_node) { *rc = __qce_get_device_tree_data(pdev, pce_dev); if (*rc) @@ -5977,33 +5372,19 @@ void *qce_open(struct platform_device *pdev, int *rc) goto err_pce_dev; } - for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) - atomic_set(&pce_dev->ce_request_info[i].in_use, false); - pce_dev->ce_request_index = 0; - - pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ; + pce_dev->memsize = 10 * PAGE_SIZE; pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev, pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL); - if (pce_dev->coh_vmem == NULL) { *rc = -ENOMEM; pr_err("Can not allocate coherent memory for sps data\n"); goto err_iobase; } - pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE * - MAX_QCE_ALLOC_BAM_REQ * 2; - pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL); - if (pce_dev->iovec_vmem == NULL) - goto err_mem; - - pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL); - if (pce_dev->dummyreq_in_buf == NULL) - goto err_mem; - *rc = __qce_init_clk(pce_dev); if (*rc) goto err_mem; + *rc = qce_enable_clk(pce_dev); if (*rc) goto err_enable_clk; @@ -6020,18 +5401,7 @@ void *qce_open(struct platform_device *pdev, int *rc) goto err; qce_setup_ce_sps_data(pce_dev); qce_disable_clk(pce_dev); - setup_dummy_req(pce_dev); - atomic_set(&pce_dev->no_of_queued_req, 0); - pce_dev->mode = IN_INTERRUPT_MODE; - init_timer(&(pce_dev->timer)); - pce_dev->timer.function = qce_multireq_timeout; - pce_dev->timer.data = (unsigned long)pce_dev; - pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES; - pce_dev->intr_cadence = 0; - pce_dev->dev_no = pcedev_no; - pcedev_no++; - pce_dev->owner = QCE_OWNER_NONE; - mutex_unlock(&qce_iomap_mutex); + return pce_dev; err: qce_disable_clk(pce_dev); @@ -6040,8 +5410,6 @@ void *qce_open(struct platform_device *pdev, int *rc) __qce_deinit_clk(pce_dev); err_mem: - kfree(pce_dev->dummyreq_in_buf); - kfree(pce_dev->iovec_vmem); if (pce_dev->coh_vmem) dma_free_coherent(pce_dev->pdev, pce_dev->memsize, pce_dev->coh_vmem, pce_dev->coh_pmem); @@ -6049,7 +5417,6 @@ void *qce_open(struct platform_device *pdev, int *rc) if (pce_dev->iobase) iounmap(pce_dev->iobase); err_pce_dev: - mutex_unlock(&qce_iomap_mutex); kfree(pce_dev); return NULL; } @@ -6063,7 +5430,6 @@ int qce_close(void *handle) if (handle == NULL) return -ENODEV; - mutex_lock(&qce_iomap_mutex); qce_enable_clk(pce_dev); qce_sps_exit(pce_dev); @@ -6072,12 +5438,10 @@ int qce_close(void *handle) if (pce_dev->coh_vmem) dma_free_coherent(pce_dev->pdev, pce_dev->memsize, pce_dev->coh_vmem, pce_dev->coh_pmem); - kfree(pce_dev->dummyreq_in_buf); - kfree(pce_dev->iovec_vmem); qce_disable_clk(pce_dev); __qce_deinit_clk(pce_dev); - mutex_unlock(&qce_iomap_mutex); + kfree(handle); return 0; @@ -6112,7 +5476,7 @@ int qce_hw_support(void *handle, struct ce_hw_support *ce_support) ce_support->hw_key = pce_dev->support_hw_key; ce_support->aes_ccm = true; ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res; - if 
(pce_dev->ce_bam_info.minor_version) + if (pce_dev->ce_sps.minor_version) ce_support->aligned_only = false; else ce_support->aligned_only = true; @@ -6129,30 +5493,12 @@ int qce_hw_support(void *handle, struct ce_hw_support *ce_support) pce_dev->use_sw_hmac_algo; ce_support->use_sw_aes_ccm_algo = pce_dev->use_sw_aes_ccm_algo; - ce_support->ce_device = pce_dev->ce_bam_info.ce_device; - ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance; - if (pce_dev->no_get_around) - ce_support->max_request = MAX_QCE_BAM_REQ; - else - ce_support->max_request = 1; + ce_support->ce_device = pce_dev->ce_sps.ce_device; + ce_support->ce_hw_instance = pce_dev->ce_sps.ce_hw_instance; return 0; } EXPORT_SYMBOL(qce_hw_support); -void qce_dump_req(void *handle) -{ - int i; - bool req_in_use; - struct qce_device *pce_dev = (struct qce_device *)handle; - - for (i = 0; i < MAX_QCE_BAM_REQ; i++) { - req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use); - pr_info("qce_dump_req %d %d\n", i, req_in_use); - if (req_in_use == true) - _qce_dump_descr_fifos(pce_dev, i); - } -} -EXPORT_SYMBOL(qce_dump_req); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Crypto Engine driver"); diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h index 765eede0b583..8b832c80a6e3 100644 --- a/drivers/crypto/msm/qce50.h +++ b/drivers/crypto/msm/qce50.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,8 +22,8 @@ ((uintptr_t)pce_dev->coh_vmem + \ ((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem)) #define GET_PHYS_ADDR(x) \ - (phys_addr_t)(((uintptr_t)pce_dev->coh_pmem + \ - ((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem))) + ((uintptr_t)pce_dev->coh_pmem + \ + ((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem)) #define CRYPTO_REG_SIZE 4 #define NUM_OF_CRYPTO_AUTH_IV_REG 16 @@ -38,9 +38,6 @@ #define QCE_MAX_NUM_DESC 128 #define SPS_MAX_PKT_SIZE (32 * 1024 - 64) -/* default bam ipc log level */ -#define QCE_BAM_DEFAULT_IPC_LOGLVL 2 - /* State of consumer/producer Pipe */ enum qce_pipe_st_enum { QCE_PIPE_STATE_IDLE = 0, @@ -49,15 +46,6 @@ enum qce_pipe_st_enum { QCE_PIPE_STATE_LAST }; -enum qce_xfer_type_enum { - QCE_XFER_HASHING, - QCE_XFER_CIPHERING, - QCE_XFER_AEAD, - QCE_XFER_F8, - QCE_XFER_F9, - QCE_XFER_TYPE_LAST -}; - struct qce_sps_ep_conn_data { struct sps_pipe *pipe; struct sps_connect connect; @@ -126,7 +114,6 @@ struct qce_cmdlistptr_ops { struct qce_cmdlist_info aead_hmac_sha256_cbc_3des; struct qce_cmdlist_info aead_aes_128_ccm; struct qce_cmdlist_info aead_aes_256_ccm; - struct qce_cmdlist_info cipher_null; struct qce_cmdlist_info f8_kasumi; struct qce_cmdlist_info f8_snow3g; struct qce_cmdlist_info f9_kasumi; @@ -178,67 +165,40 @@ struct qce_ce_cfg_reg_setting { uint32_t auth_cfg_snow3g; }; -struct ce_bam_info { +/* DM data structure with buffers, commandlists & commmand pointer lists */ +struct ce_sps_data { + uint32_t bam_irq; uint32_t bam_mem; void __iomem *bam_iobase; + + struct qce_sps_ep_conn_data producer; + struct qce_sps_ep_conn_data consumer; + struct sps_event_notify notify; + struct scatterlist *src; + struct scatterlist *dst; uint32_t ce_device; uint32_t ce_hw_instance; - uint32_t bam_ee; unsigned int pipe_pair_index; unsigned int src_pipe_index; unsigned int dest_pipe_index; unsigned long bam_handle; - int ce_burst_size; - uint32_t 
minor_version; - struct qce_sps_ep_conn_data producer; - struct qce_sps_ep_conn_data consumer; -}; -/* SPS data structure with buffers, commandlists & commmand pointer lists */ -struct ce_sps_data { + enum qce_pipe_st_enum consumer_state; /* Consumer pipe state */ enum qce_pipe_st_enum producer_state; /* Producer pipe state */ + int consumer_status; /* consumer pipe status */ int producer_status; /* producer pipe status */ + struct sps_transfer in_transfer; struct sps_transfer out_transfer; - struct qce_cmdlistptr_ops cmdlistptr; - uint32_t result_dump; /* reuslt dump virtual address */ - uint32_t result_dump_null; - uint32_t result_dump_phy; /* result dump physical address (32 bits) */ - uint32_t result_dump_null_phy; - - uint32_t ignore_buffer; /* ignore buffer virtual address */ - struct ce_result_dump_format *result; /* ponter to result dump */ - struct ce_result_dump_format *result_null; -}; -struct ce_request_info { - atomic_t in_use; - bool in_prog; - enum qce_xfer_type_enum xfer_type; - struct ce_sps_data ce_sps; - qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */ - void *user; - void *areq; - int assoc_nents; - int src_nents; - int dst_nents; - dma_addr_t phy_iv_in; - unsigned char dec_iv[16]; - int dir; - enum qce_cipher_mode_enum mode; - dma_addr_t phy_ota_src; - dma_addr_t phy_ota_dst; - unsigned int ota_size; - unsigned int req_len; -}; + int ce_burst_size; -struct qce_driver_stats { - int no_of_timeouts; - int no_of_dummy_reqs; - int current_mode; - int outstanding_reqs; + struct qce_cmdlistptr_ops cmdlistptr; + uint32_t result_dump; + uint32_t ignore_buffer; + struct ce_result_dump_format *result; + uint32_t minor_version; }; - #endif /* _DRIVERS_CRYPTO_MSM_QCE50_H */ diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c index a55f236961b8..d9a99b0c56b5 100644 --- a/drivers/crypto/msm/qcedev.c +++ b/drivers/crypto/msm/qcedev.c @@ -1,6 +1,6 @@ /* Qualcomm CE device driver. * - * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -35,12 +35,21 @@ #include "qcedevi.h" #include "qce.h" +#ifdef CONFIG_COMPAT #include #include "compat_qcedev.h" +#endif #define CACHE_LINE_SIZE 32 #define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE +#ifndef U32_MAX +#define U32_MAX ((u32)(~0U)) +#endif + +/* are FIPS integrity tests done ?? 
*/ +static bool is_fips_qcedev_integritytest_done; + static uint8_t _std_init_vector_sha1_uint8[] = { 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76, @@ -172,6 +181,12 @@ static int qcedev_open(struct inode *inode, struct file *file) struct qcedev_handle *handle; struct qcedev_control *podev; + /* IF FIPS tests not passed, return error */ + if (((g_fips140_status == FIPS140_STATUS_FAIL) || + (g_fips140_status == FIPS140_STATUS_PASS_CRYPTO)) && + is_fips_qcedev_integritytest_done) + return -ENXIO; + podev = qcedev_minor_to_control(MINOR(inode->i_rdev)); if (podev == NULL) { pr_err("%s: no such device %d\n", __func__, @@ -1739,6 +1754,7 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg) mutex_unlock(&hash_access_lock); return err; } + if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) { pr_err("Invalid sha_ctxt.diglen %d\n", handle->sha_ctxt.diglen); @@ -1781,6 +1797,7 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg) mutex_unlock(&hash_access_lock); return err; } + if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) { pr_err("Invalid sha_ctxt.diglen %d\n", handle->sha_ctxt.diglen); @@ -1797,7 +1814,55 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg) return -EFAULT; } break; + /* + * This IOCTL call can be called only once + * by FIPS Integrity test. + */ + case QCEDEV_IOCTL_UPDATE_FIPS_STATUS: + { + enum fips_status status; + void *drbg_call_back = NULL; + + if (is_fips_qcedev_integritytest_done) + return -EPERM; + + if (copy_from_user(&status, (void __user *)arg, + sizeof(enum fips_status))) + return -EFAULT; + + g_fips140_status = _fips_update_status(status); + pr_debug("qcedev: FIPS140-2 Global status flag: %d\n", + g_fips140_status); + is_fips_qcedev_integritytest_done = true; + + if (g_fips140_status == FIPS140_STATUS_FAIL) { + pr_err("qcedev: FIPS140-2 Integrity test failed\n"); + break; + } + + if (!(_do_msm_fips_drbg_init(drbg_call_back)) && + (g_fips140_status != FIPS140_STATUS_NA)) + g_fips140_status = FIPS140_STATUS_PASS; + } + + pr_debug("qcedev: FIPS140-2 Global status flag: %d\n", + g_fips140_status); + + break; + + /* Read only IOCTL call to read the + current FIPS140-2 Status */ + case QCEDEV_IOCTL_QUERY_FIPS_STATUS: + { + enum fips_status status; + + status = g_fips140_status; + if (copy_to_user((void __user *)arg, &status, + sizeof(enum fips_status))) + return -EFAULT; + } + break; default: return -ENOTTY; } @@ -1873,6 +1938,24 @@ static int qcedev_probe(struct platform_device *pdev) goto err; } } + /* + * FIPS140-2 Known Answer Tests: + * IN case of any failure, do not Init the module + */ + is_fips_qcedev_integritytest_done = false; + if (g_fips140_status != FIPS140_STATUS_NA) { + if (_fips_qcedev_cipher_selftest(&qce_dev[0]) || + _fips_qcedev_sha_selftest(&qce_dev[0])) { + pr_err("qcedev: FIPS140-2 Known Answer Tests : Failed\n"); + BUG_ON(1); + rc = -1; + } else { + pr_debug("qcedev: FIPS140-2 Known Answer Tests : Successful\n"); + rc = 0; + } + } else { + pr_debug("qcedev: FIPS140-2 Known Answer Tests : Skipped\n"); + } if (rc >= 0) return 0; diff --git a/drivers/crypto/msm/qcedevi.h b/drivers/crypto/msm/qcedevi.h index ca358ac3d4c6..fced4537e33c 100644 --- a/drivers/crypto/msm/qcedevi.h +++ b/drivers/crypto/msm/qcedevi.h @@ -1,6 +1,6 @@ /* QTI crypto Driver * - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2015, 2018 The Linux Foundation. All rights reserved. 
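/*
 * Sketch of the open()-time gate added in qcedev_open() above: once
 * the FIPS integrity test has completed, a FAIL (or crypto-only PASS)
 * status makes the device node refuse new handles. The status enum
 * and the two globals are the driver's own; this helper is
 * illustrative.
 */
static int example_fips_open_allowed(void)
{
	if (((g_fips140_status == FIPS140_STATUS_FAIL) ||
	     (g_fips140_status == FIPS140_STATUS_PASS_CRYPTO)) &&
	    is_fips_qcedev_integritytest_done)
		return -ENXIO;	/* block new users after a failed test */

	return 0;
}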
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -32,6 +32,7 @@ enum qcedev_crypto_oper_type { }; struct qcedev_handle; +extern enum fips_status g_fips140_status; struct qcedev_cipher_req { struct ablkcipher_request creq; @@ -121,4 +122,47 @@ void qcedev_cipher_req_cb(void *cookie, unsigned char *icv, void qcedev_sha_req_cb(void *cookie, unsigned char *digest, unsigned char *authdata, int ret); +extern int _do_msm_fips_drbg_init(void *rng_dev); + +#ifdef CONFIG_FIPS_ENABLE + +/* + * Self test for Cipher algorithms + */ +int _fips_qcedev_cipher_selftest(struct qcedev_control *podev); + +/* + * Self test for SHA / HMAC + */ + +int _fips_qcedev_sha_selftest(struct qcedev_control *podev); + +/* + * Update FIPs Global status Status + */ +static inline enum fips_status _fips_update_status(enum fips_status status) +{ + return (status == FIPS140_STATUS_PASS) ? + FIPS140_STATUS_QCRYPTO_ALLOWED : + FIPS140_STATUS_FAIL; +} + +#else + +static inline int _fips_qcedev_cipher_selftest(struct qcedev_control *podev) +{ + return 0; +} +static inline int _fips_qcedev_sha_selftest(struct qcedev_control *podev) +{ + return 0; +} + +static inline enum fips_status _fips_update_status(enum fips_status status) +{ + return FIPS140_STATUS_NA; +} + +#endif /* CONFIG_FIPS_ENABLE */ + #endif /* __CRYPTO_MSM_QCEDEVI_H */ diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c index f03115371c95..57401242e373 100644 --- a/drivers/crypto/msm/qcrypto.c +++ b/drivers/crypto/msm/qcrypto.c @@ -1,6 +1,6 @@ /* Qualcomm Crypto driver * - * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2010-2017, 2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,7 +14,6 @@ #include #include -#include #include #include #include @@ -24,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -32,7 +30,6 @@ #include #include #include -#include #include #include @@ -48,12 +45,11 @@ #include #include - +#include "qcryptoi.h" #include "qce.h" #define DEBUG_MAX_FNAME 16 -#define DEBUG_MAX_RW_BUF 4096 -#define QCRYPTO_BIG_NUMBER 9999999 /* a big number */ +#define DEBUG_MAX_RW_BUF 2048 /* * For crypto 5.0 which has burst size alignment requirement. @@ -62,20 +58,8 @@ #define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000 - - -/* Status of response workq */ -enum resp_workq_sts { - NOT_SCHEDULED = 0, - IS_SCHEDULED = 1, - SCHEDULE_AGAIN = 2 -}; - -/* Status of req processing by CEs */ -enum req_processing_sts { - STOPPED = 0, - IN_PROGRESS = 1 -}; +/* are FIPS self tests done ?? 
*/ +static bool is_fips_qcrypto_tests_done; enum qcrypto_bus_state { BUS_NO_BANDWIDTH = 0, @@ -126,20 +110,15 @@ static struct dentry *_debug_dent; static char _debug_read_buf[DEBUG_MAX_RW_BUF]; static bool _qcrypto_init_assign; struct crypto_priv; -struct qcrypto_req_control { - unsigned int index; - bool in_use; - struct crypto_engine *pce; - struct crypto_async_request *req; - struct qcrypto_resp_ctx *arsp; - int res; /* execution result */ -}; - struct crypto_engine { struct list_head elist; void *qce; /* qce handle */ struct platform_device *pdev; /* platform device */ + struct crypto_async_request *req; /* current active request */ + struct qcrypto_resp_ctx *arsp; /* rsp associated with req */ + int res; /* execution result */ struct crypto_priv *pcp; + struct tasklet_struct done_tasklet; uint32_t bus_scale_handle; struct crypto_queue req_queue; /* * request queue for those requests @@ -165,18 +144,8 @@ struct crypto_engine { u32 last_active_seq; bool check_flag; - /*Added to support multi-requests*/ - unsigned int max_req; - struct qcrypto_req_control *preq_pool; - atomic_t req_count; - bool issue_req; /* an request is being issued to qce */ - bool first_engine; /* this engine is the first engine or not */ - unsigned int irq_cpu; /* the cpu running the irq of this engine */ - unsigned int max_req_used; /* debug stats */ }; -#define MAX_SMP_CPU 8 - struct crypto_priv { /* CE features supported by target device*/ struct msm_ce_hw_support platform_support; @@ -204,99 +173,10 @@ struct crypto_priv { * that waiting for an available * engine. */ - struct llist_head ordered_resp_list; /* Queue to maintain - * responses in sequence. - */ - atomic_t resp_cnt; - struct workqueue_struct *resp_wq; - struct work_struct resp_work; /* - * Workq to send responses - * in sequence. 
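/*
 * Sketch of the per-engine completion tasklet restored above (in place
 * of the ordered-response workqueue): the qce completion callback only
 * records the result and schedules the tasklet, and the handler, the
 * req_done() function later in this patch, completes the request and
 * dispatches the next one. The init below would sit in the engine
 * probe path; handler and helper names here are illustrative.
 */
#include <linux/interrupt.h>

static void example_done_tasklet(unsigned long data)
{
	struct crypto_engine *pengine = (struct crypto_engine *)data;

	/* complete the finished request, then start the next queued one */
	(void)pengine;
}

static void example_engine_setup(struct crypto_engine *pengine)
{
	tasklet_init(&pengine->done_tasklet, example_done_tasklet,
		     (unsigned long)pengine);
}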
- */ - enum resp_workq_sts sched_resp_workq_status; - enum req_processing_sts ce_req_proc_sts; - int cpu_getting_irqs_frm_first_ce; - struct crypto_engine *first_engine; - struct crypto_engine *scheduled_eng; /* last engine scheduled */ - - /* debug stats */ - unsigned no_avail; - unsigned resp_stop; - unsigned resp_start; - unsigned max_qlen; - unsigned int queue_work_eng3; - unsigned int queue_work_not_eng3; - unsigned int queue_work_not_eng3_nz; - unsigned int max_resp_qlen; - unsigned int max_reorder_cnt; - unsigned int cpu_req[MAX_SMP_CPU+1]; }; static struct crypto_priv qcrypto_dev; static struct crypto_engine *_qcrypto_static_assign_engine( struct crypto_priv *cp); -static struct crypto_engine *_avail_eng(struct crypto_priv *cp); -static struct qcrypto_req_control *qcrypto_alloc_req_control( - struct crypto_engine *pce) -{ - int i; - struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool; - unsigned int req_count; - - for (i = 0; i < pce->max_req; i++) { - if (xchg(&pqcrypto_req_control->in_use, true) == false) { - req_count = atomic_inc_return(&pce->req_count); - if (req_count > pce->max_req_used) - pce->max_req_used = req_count; - return pqcrypto_req_control; - } - pqcrypto_req_control++; - } - return NULL; -} - -static void qcrypto_free_req_control(struct crypto_engine *pce, - struct qcrypto_req_control *preq) -{ - /* do this before free req */ - preq->req = NULL; - preq->arsp = NULL; - /* free req */ - if (xchg(&preq->in_use, false) == false) { - pr_warn("request info %pK free already\n", preq); - } else { - atomic_dec(&pce->req_count); - } -} - -static struct qcrypto_req_control *find_req_control_for_areq( - struct crypto_engine *pce, - struct crypto_async_request *areq) -{ - int i; - struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool; - - for (i = 0; i < pce->max_req; i++) { - if (pqcrypto_req_control->req == areq) - return pqcrypto_req_control; - pqcrypto_req_control++; - } - return NULL; -} - -static void qcrypto_init_req_control(struct crypto_engine *pce, - struct qcrypto_req_control *pqcrypto_req_control) -{ - int i; - - pce->preq_pool = pqcrypto_req_control; - atomic_set(&pce->req_count, 0); - for (i = 0; i < pce->max_req; i++) { - pqcrypto_req_control->index = i; - pqcrypto_req_control->in_use = false; - pqcrypto_req_control->pce = pce; - pqcrypto_req_control++; - } -} static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp, unsigned int device) @@ -416,14 +296,14 @@ struct qcrypto_cipher_ctx { u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN]; - struct crypto_ablkcipher *cipher_aes192_fb; - - struct crypto_ahash *ahash_aead_aes192_fb; + union { + struct crypto_ablkcipher *cipher_fb; + struct crypto_aead *aead_fb; + } fallback; }; struct qcrypto_resp_ctx { struct list_head list; - struct llist_node llist; struct crypto_async_request *async_req; /* async req */ int res; /* execution result */ }; @@ -449,27 +329,12 @@ struct qcrypto_cipher_req_ctx { struct scatterlist ssg; /* Source Data sg */ unsigned char *data; /* Incoming data pointer*/ - struct aead_request *aead_req; - struct ahash_request *fb_hash_req; - uint8_t fb_ahash_digest[SHA256_DIGEST_SIZE]; - struct scatterlist fb_ahash_sg[3]; - char *fb_ahash_assoc_iv; - char *fb_aes_iv; - unsigned int fb_ahash_length; - struct ablkcipher_request *fb_aes_req; - struct scatterlist *fb_aes_src; - struct scatterlist *fb_aes_dst; - unsigned int fb_aes_cryptlen; }; #define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE #define SHA_MAX_STATE_SIZE (SHA256_DIGEST_SIZE / sizeof(u32)) #define 
SHA_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE - -#define MSM_QCRYPTO_REQ_QUEUE_LENGTH 768 -#define COMPLETION_CB_BACKLOG_LENGTH_STOP 400 -#define COMPLETION_CB_BACKLOG_LENGTH_START \ - (COMPLETION_CB_BACKLOG_LENGTH_STOP / 2) +#define MSM_QCRYPTO_REQ_QUEUE_LENGTH 50 static uint8_t _std_init_vector_sha1_uint8[] = { 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, @@ -685,11 +550,11 @@ static void qcrypto_bw_reaper_work(struct work_struct *work) (active_seq == pengine->last_active_seq)) { /* check if engine is stuck */ - if (atomic_read(&pengine->req_count) > 0) { + if (pengine->req) { if (pengine->check_flag) dev_warn(&pengine->pdev->dev, - "The engine appears to be stuck seq %d.\n", - active_seq); + "The engine appears to be stuck seq %d req %pK.\n", + active_seq, pengine->req); pengine->check_flag = false; goto ret; } @@ -742,8 +607,8 @@ static size_t qcrypto_sg_copy_from_buffer(struct scatterlist *sgl, size_t offset, len; for (i = 0, offset = 0; i < nents; ++i) { - if (NULL == sgl) { - pr_err("qcrypto.c: qcrypto_sg_copy_from_buffer, sgl = NULL"); + if (sgl == NULL) { + pr_err("qcrypto: sg_copy_from_buffer, sg = NULL\n"); break; } len = sg_copy_from_buffer(sgl, 1, buf, buflen); @@ -763,8 +628,8 @@ static size_t qcrypto_sg_copy_to_buffer(struct scatterlist *sgl, size_t offset, len; for (i = 0, offset = 0; i < nents; ++i) { - if (NULL == sgl) { - pr_err("qcrypto.c: qcrypto_sg_copy_from_buffer, sgl = NULL"); + if (sgl == NULL) { + pr_err("qcrypto: sg_copy_to_buffer, sg = NULL\n"); break; } len = sg_copy_to_buffer(sgl, 1, buf, buflen); @@ -819,6 +684,11 @@ static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm) struct qcrypto_alg *q_alg; struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + /* IF FIPS tests not passed, return error */ + if (((g_fips140_status == FIPS140_STATUS_FAIL) || + (g_fips140_status == FIPS140_STATUS_PASS_CRYPTO)) && + is_fips_qcrypto_tests_done) + return -ENXIO; q_alg = container_of(alg, struct qcrypto_alg, cipher_alg); ctx->flags = 0; @@ -848,6 +718,12 @@ static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm) struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg, sha_alg); + /* IF FIPS tests not passed, return error */ + if (((g_fips140_status == FIPS140_STATUS_FAIL) || + (g_fips140_status == FIPS140_STATUS_PASS_CRYPTO)) && + is_fips_qcrypto_tests_done) + return -ENXIO; + crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx)); /* update context with ptr to cp */ sha_ctx->cp = q_alg->cp; @@ -919,15 +795,15 @@ static int _qcrypto_cra_aes_ablkcipher_init(struct crypto_tfm *tfm) struct crypto_priv *cp = &qcrypto_dev; if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) { - ctx->cipher_aes192_fb = NULL; + ctx->fallback.cipher_fb = NULL; return _qcrypto_cra_ablkcipher_init(tfm); } - ctx->cipher_aes192_fb = crypto_alloc_ablkcipher(name, 0, + ctx->fallback.cipher_fb = crypto_alloc_ablkcipher(name, 0, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->cipher_aes192_fb)) { + if (IS_ERR(ctx->fallback.cipher_fb)) { pr_err("Error allocating fallback algo %s\n", name); - ret = PTR_ERR(ctx->cipher_aes192_fb); - ctx->cipher_aes192_fb = NULL; + ret = PTR_ERR(ctx->fallback.cipher_fb); + ctx->fallback.cipher_fb = NULL; return ret; } return _qcrypto_cra_ablkcipher_init(tfm); @@ -955,68 +831,6 @@ static int _qcrypto_cra_aead_sha256_init(struct crypto_tfm *tfm) return rc; } -static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_tfm *tfm) -{ - int rc; - struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_priv *cp = &qcrypto_dev; - - 
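/*
 * Sketch of the software-fallback allocation used above for AES-192:
 * when the CE hardware lacks 192-bit key support, a fallback
 * ablkcipher transform is allocated and used by setkey/encrypt. The
 * legacy ablkcipher API matches this kernel generation; the helper
 * name is illustrative.
 */
#include <linux/crypto.h>

static int example_alloc_aes_fallback(struct crypto_ablkcipher **fb,
				      const char *name)
{
	*fb = crypto_alloc_ablkcipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(*fb)) {
		int ret = PTR_ERR(*fb);

		*fb = NULL;
		pr_err("Error allocating fallback algo %s\n", name);
		return ret;
	}
	return 0;
}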
tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx); - rc = _qcrypto_cipher_cra_init(tfm); - if (rc) - return rc; - ctx->cipher_aes192_fb = NULL; - ctx->ahash_aead_aes192_fb = NULL; - if (!cp->ce_support.aes_key_192) { - ctx->cipher_aes192_fb = crypto_alloc_ablkcipher( - "cbc(aes)", 0, 0); - if (IS_ERR(ctx->cipher_aes192_fb)) { - ctx->cipher_aes192_fb = NULL; - } else { - ctx->ahash_aead_aes192_fb = crypto_alloc_ahash( - "hmac(sha1)", 0, 0); - if (IS_ERR(ctx->ahash_aead_aes192_fb)) { - ctx->ahash_aead_aes192_fb = NULL; - crypto_free_ablkcipher(ctx->cipher_aes192_fb); - ctx->cipher_aes192_fb = NULL; - } - } - } - ctx->auth_alg = QCE_HASH_SHA1_HMAC; - return 0; -} - -static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_tfm *tfm) -{ - int rc; - struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_priv *cp = &qcrypto_dev; - - tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx); - rc = _qcrypto_cipher_cra_init(tfm); - if (rc) - return rc; - ctx->cipher_aes192_fb = NULL; - ctx->ahash_aead_aes192_fb = NULL; - if (!cp->ce_support.aes_key_192) { - ctx->cipher_aes192_fb = crypto_alloc_ablkcipher( - "cbc(aes)", 0, 0); - if (IS_ERR(ctx->cipher_aes192_fb)) { - ctx->cipher_aes192_fb = NULL; - } else { - ctx->ahash_aead_aes192_fb = crypto_alloc_ahash( - "hmac(sha256)", 0, 0); - if (IS_ERR(ctx->ahash_aead_aes192_fb)) { - ctx->ahash_aead_aes192_fb = NULL; - crypto_free_ablkcipher(ctx->cipher_aes192_fb); - ctx->cipher_aes192_fb = NULL; - } - } - } - ctx->auth_alg = QCE_HASH_SHA256_HMAC; - return 0; -} - static int _qcrypto_cra_aead_ccm_init(struct crypto_tfm *tfm) { int rc; @@ -1052,9 +866,9 @@ static void _qcrypto_cra_aes_ablkcipher_exit(struct crypto_tfm *tfm) struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); _qcrypto_cra_ablkcipher_exit(tfm); - if (ctx->cipher_aes192_fb) - crypto_free_ablkcipher(ctx->cipher_aes192_fb); - ctx->cipher_aes192_fb = NULL; + if (ctx->fallback.cipher_fb) + crypto_free_ablkcipher(ctx->fallback.cipher_fb); + ctx->fallback.cipher_fb = NULL; } static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm) @@ -1063,21 +877,7 @@ static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm) if (!list_empty(&ctx->rsp_queue)) pr_err("_qcrypto__cra_aead_exit: requests still outstanding"); -} - -static void _qcrypto_cra_aead_aes_exit(struct crypto_tfm *tfm) -{ - struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); - - if (!list_empty(&ctx->rsp_queue)) - pr_err("_qcrypto__cra_aead_exit: requests still outstanding"); - if (ctx->cipher_aes192_fb) - crypto_free_ablkcipher(ctx->cipher_aes192_fb); - if (ctx->ahash_aead_aes192_fb) - crypto_free_ahash(ctx->ahash_aead_aes192_fb); - ctx->cipher_aes192_fb = NULL; - ctx->ahash_aead_aes192_fb = NULL; -} +}; static int _disp_stats(int id) { @@ -1086,7 +886,6 @@ static int _disp_stats(int id) unsigned long flags; struct crypto_priv *cp = &qcrypto_dev; struct crypto_engine *pe; - int i; pstat = &_qcrypto_stat; len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1, @@ -1208,18 +1007,6 @@ static int _disp_stats(int id) len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, " AHASH operation fail : %llu\n", pstat->ahash_op_fail); - len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, - " resp start, resp stop, max rsp queue reorder-cnt : %u %u %u %u\n", - cp->resp_start, cp->resp_stop, - cp->max_resp_qlen, cp->max_reorder_cnt); - len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, - " max queue legnth, no avail : %u %u\n", - cp->max_qlen, cp->no_avail); - len += 
scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, - " work queue : %u %u %u\n", - cp->queue_work_eng3, - cp->queue_work_not_eng3, - cp->queue_work_not_eng3_nz); len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, "\n"); spin_lock_irqsave(&cp->lock, flags); @@ -1227,9 +1014,8 @@ static int _disp_stats(int id) len += scnprintf( _debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, - " Engine %4d Req max %d : %llu\n", + " Engine %4d Req : %llu\n", pe->unit, - pe->max_req_used, pe->total_req ); len += scnprintf( @@ -1239,17 +1025,8 @@ static int _disp_stats(int id) pe->unit, pe->err_req ); - qce_get_driver_stats(pe->qce); } spin_unlock_irqrestore(&cp->lock, flags); - - for (i = 0; i < MAX_SMP_CPU+1; i++) - if (cp->cpu_req[i]) - len += scnprintf( - _debug_read_buf + len, - DEBUG_MAX_RW_BUF - len - 1, - "CPU %d Issue Req : %d\n", - i, cp->cpu_req[i]); return len; } @@ -1259,29 +1036,18 @@ static void _qcrypto_remove_engine(struct crypto_engine *pengine) struct qcrypto_alg *q_alg; struct qcrypto_alg *n; unsigned long flags; - struct crypto_engine *pe; cp = pengine->pcp; spin_lock_irqsave(&cp->lock, flags); list_del(&pengine->elist); - if (pengine->first_engine) { - cp->first_engine = NULL; - pe = list_first_entry(&cp->engine_list, struct crypto_engine, - elist); - if (pe) { - pe->first_engine = true; - cp->first_engine = pe; - } - } if (cp->next_engine == pengine) cp->next_engine = NULL; - if (cp->scheduled_eng == pengine) - cp->scheduled_eng = NULL; spin_unlock_irqrestore(&cp->lock, flags); cp->total_units--; + tasklet_kill(&pengine->done_tasklet); cancel_work_sync(&pengine->bw_reaper_ws); cancel_work_sync(&pengine->bw_allocate_ws); del_timer_sync(&pengine->bw_reaper_timer); @@ -1290,8 +1056,6 @@ static void _qcrypto_remove_engine(struct crypto_engine *pengine) msm_bus_scale_unregister_client(pengine->bus_scale_handle); pengine->bus_scale_handle = 0; - kzfree(pengine->preq_pool); - if (cp->total_units) return; @@ -1351,10 +1115,10 @@ static int _qcrypto_setkey_aes_192_fallback(struct crypto_ablkcipher *cipher, int ret; ctx->enc_key_len = AES_KEYSIZE_192; - ctx->cipher_aes192_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; - ctx->cipher_aes192_fb->base.crt_flags |= + ctx->fallback.cipher_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; + ctx->fallback.cipher_fb->base.crt_flags |= (cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); - ret = crypto_ablkcipher_setkey(ctx->cipher_aes192_fb, key, + ret = crypto_ablkcipher_setkey(ctx->fallback.cipher_fb, key, AES_KEYSIZE_192); if (ret) { tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; @@ -1375,7 +1139,7 @@ static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, return 0; if ((len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192) - && ctx->cipher_aes192_fb) + && ctx->fallback.cipher_fb) return _qcrypto_setkey_aes_192_fallback(cipher, key); if (_qcrypto_check_aes_keylen(cipher, cp, len)) { @@ -1484,80 +1248,14 @@ static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key, return 0; }; -static void seq_response(struct work_struct *work) +static void _qcrypto_tfm_complete(struct crypto_priv *cp, u32 type, + void *tfm_ctx) { - struct crypto_priv *cp = container_of(work, struct crypto_priv, - resp_work); - struct llist_node *list; - struct llist_node *rev = NULL; - struct crypto_engine *pengine; - unsigned long flags; - int total_unit; - -again: - list = llist_del_all(&cp->ordered_resp_list); - - if (!list) - goto end; - - while (list) { - struct llist_node *t = list; - list = llist_next(list); - - t->next = 
rev; - rev = t; - } - - while (rev) { - struct qcrypto_resp_ctx *arsp; - struct crypto_async_request *areq; - - arsp = container_of(rev, struct qcrypto_resp_ctx, llist); - rev = llist_next(rev); - - areq = arsp->async_req; - local_bh_disable(); - areq->complete(areq, arsp->res); - local_bh_enable(); - atomic_dec(&cp->resp_cnt); - } - - if (atomic_read(&cp->resp_cnt) < COMPLETION_CB_BACKLOG_LENGTH_START && - (cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS) - == STOPPED)) { - cp->resp_start++; - for (total_unit = cp->total_units; total_unit-- > 0;) { - spin_lock_irqsave(&cp->lock, flags); - pengine = _avail_eng(cp); - spin_unlock_irqrestore(&cp->lock, flags); - if (pengine) - _start_qcrypto_process(cp, pengine); - else - break; - } - } -end: - if (cmpxchg(&cp->sched_resp_workq_status, SCHEDULE_AGAIN, - IS_SCHEDULED) == SCHEDULE_AGAIN) - goto again; - else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED, - NOT_SCHEDULED) == SCHEDULE_AGAIN) - goto end; -} - -#define SCHEUDLE_RSP_QLEN_THRESHOLD 64 - -static void _qcrypto_tfm_complete(struct crypto_engine *pengine, u32 type, - void *tfm_ctx, - struct qcrypto_resp_ctx *cur_arsp, - int res) -{ - struct crypto_priv *cp = pengine->pcp; unsigned long flags; struct qcrypto_resp_ctx *arsp; struct list_head *plist; - unsigned int resp_qlen; - unsigned int cnt = 0; + struct crypto_async_request *areq; + bool pending_list; switch (type) { case CRYPTO_ALG_TYPE_AHASH: @@ -1569,108 +1267,66 @@ static void _qcrypto_tfm_complete(struct crypto_engine *pengine, u32 type, plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue; break; } - +again: spin_lock_irqsave(&cp->lock, flags); - - cur_arsp->res = res; - while (!list_empty(plist)) { + if (list_empty(plist)) { + arsp = NULL; /* nothing to do */ + pending_list = false; + } else { arsp = list_first_entry(plist, - struct qcrypto_resp_ctx, list); + struct qcrypto_resp_ctx, list); if (arsp->res == -EINPROGRESS) - break; - else { - list_del(&arsp->list); - llist_add(&arsp->llist, &cp->ordered_resp_list); - atomic_inc(&cp->resp_cnt); - cnt++; - } - } - resp_qlen = atomic_read(&cp->resp_cnt); - if (resp_qlen > cp->max_resp_qlen) - cp->max_resp_qlen = resp_qlen; - if (cnt > cp->max_reorder_cnt) - cp->max_reorder_cnt = cnt; - if ((resp_qlen >= COMPLETION_CB_BACKLOG_LENGTH_STOP) && - cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS, - STOPPED) == IN_PROGRESS) { - cp->resp_stop++; + arsp = NULL; /* still in progress */ + else + list_del(&arsp->list); /* request is complete */ + if (list_empty(plist)) + pending_list = false; + else + pending_list = true; } - spin_unlock_irqrestore(&cp->lock, flags); - -retry: - if (!llist_empty(&cp->ordered_resp_list)) { - unsigned int cpu; - - if (pengine->first_engine) { - cpu = WORK_CPU_UNBOUND; - cp->queue_work_eng3++; - } else { - cp->queue_work_not_eng3++; - cpu = cp->cpu_getting_irqs_frm_first_ce; - /* - * If source not the first engine, and there - * are outstanding requests going on first engine, - * skip scheduling of work queue to anticipate - * more may be coming. If the response queue - * length exceeds threshold, to avoid further - * delay, schedule work queue immediately. 
- */ - if (cp->first_engine && atomic_read( - &cp->first_engine->req_count)) { - if (resp_qlen < SCHEUDLE_RSP_QLEN_THRESHOLD) - return; - cp->queue_work_not_eng3_nz++; - } - } - if (cmpxchg(&cp->sched_resp_workq_status, NOT_SCHEDULED, - IS_SCHEDULED) == NOT_SCHEDULED) - queue_work_on(cpu, cp->resp_wq, &cp->resp_work); - else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED, - SCHEDULE_AGAIN) == NOT_SCHEDULED) - goto retry; + if (arsp) { + areq = arsp->async_req; + areq->complete(areq, arsp->res); + if (pending_list) + goto again; } } -static void req_done(struct qcrypto_req_control *pqcrypto_req_control) +static void req_done(unsigned long data) { - struct crypto_engine *pengine; struct crypto_async_request *areq; + struct crypto_engine *pengine = (struct crypto_engine *)data; struct crypto_priv *cp; + unsigned long flags; struct qcrypto_resp_ctx *arsp; + int res; u32 type = 0; void *tfm_ctx = NULL; - unsigned int cpu; - int res; - pengine = pqcrypto_req_control->pce; cp = pengine->pcp; - areq = pqcrypto_req_control->req; - arsp = pqcrypto_req_control->arsp; - res = pqcrypto_req_control->res; - qcrypto_free_req_control(pengine, pqcrypto_req_control); + spin_lock_irqsave(&cp->lock, flags); + areq = pengine->req; + arsp = pengine->arsp; + res = pengine->res; + pengine->req = NULL; + pengine->arsp = NULL; if (areq) { type = crypto_tfm_alg_type(areq->tfm); tfm_ctx = crypto_tfm_ctx(areq->tfm); + arsp->res = res; } - cpu = smp_processor_id(); - pengine->irq_cpu = cpu; - if (pengine->first_engine) { - if (cpu != cp->cpu_getting_irqs_frm_first_ce) - cp->cpu_getting_irqs_frm_first_ce = cpu; - } + spin_unlock_irqrestore(&cp->lock, flags); + _start_qcrypto_process(cp, pengine); if (areq) - _qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, res); - if (ACCESS_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS) - _start_qcrypto_process(cp, pengine); + _qcrypto_tfm_complete(cp, type, tfm_ctx); } static void _qce_ahash_complete(void *cookie, unsigned char *digest, unsigned char *authdata, int ret) { struct ahash_request *areq = (struct ahash_request *) cookie; - struct crypto_async_request *async_req; struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm); struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq); @@ -1679,27 +1335,17 @@ static void _qce_ahash_complete(void *cookie, unsigned char *digest, uint32_t diglen = crypto_ahash_digestsize(ahash); uint32_t *auth32 = (uint32_t *)authdata; struct crypto_engine *pengine; - struct qcrypto_req_control *pqcrypto_req_control; - async_req = &areq->base; pstat = &_qcrypto_stat; pengine = rctx->pengine; - pqcrypto_req_control = find_req_control_for_areq(pengine, - async_req); - if (pqcrypto_req_control == NULL) { - pr_err("async request not found\n"); - return; - } - #ifdef QCRYPTO_DEBUG dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %pK ret %d\n", areq, ret); #endif if (digest) { memcpy(rctx->digest, digest, diglen); - if (rctx->last_blk) - memcpy(areq->result, digest, diglen); + memcpy(areq->result, digest, diglen); } if (authdata) { rctx->byte_count[0] = auth32[0]; @@ -1714,43 +1360,34 @@ static void _qce_ahash_complete(void *cookie, unsigned char *digest, rctx->first_blk = 0; if (ret) { - pqcrypto_req_control->res = -ENXIO; + pengine->res = -ENXIO; pstat->ahash_op_fail++; } else { - pqcrypto_req_control->res = 0; + pengine->res = 0; pstat->ahash_op_success++; } if (cp->ce_support.aligned_only) { areq->src = rctx->orig_src; kfree(rctx->data); } - req_done(pqcrypto_req_control); + + 
tasklet_schedule(&pengine->done_tasklet); }; static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb, unsigned char *iv, int ret) { struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie; - struct crypto_async_request *async_req; struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq); struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm); struct crypto_priv *cp = ctx->cp; struct crypto_stat *pstat; struct qcrypto_cipher_req_ctx *rctx; struct crypto_engine *pengine; - struct qcrypto_req_control *pqcrypto_req_control; - async_req = &areq->base; pstat = &_qcrypto_stat; rctx = ablkcipher_request_ctx(areq); pengine = rctx->pengine; - pqcrypto_req_control = find_req_control_for_areq(pengine, - async_req); - if (pqcrypto_req_control == NULL) { - pr_err("async request not found\n"); - return; - } - #ifdef QCRYPTO_DEBUG dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %pK ret %d\n", areq, ret); @@ -1759,10 +1396,10 @@ static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb, memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk)); if (ret) { - pqcrypto_req_control->res = -ENXIO; + pengine->res = -ENXIO; pstat->ablk_cipher_op_fail++; } else { - pqcrypto_req_control->res = 0; + pengine->res = 0; pstat->ablk_cipher_op_success++; } @@ -1783,7 +1420,8 @@ static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb, areq->nbytes); kzfree(rctx->data); } - req_done(pqcrypto_req_control); + + tasklet_schedule(&pengine->done_tasklet); }; @@ -1791,25 +1429,16 @@ static void _qce_aead_complete(void *cookie, unsigned char *icv, unsigned char *iv, int ret) { struct aead_request *areq = (struct aead_request *) cookie; - struct crypto_async_request *async_req; struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm); struct crypto_priv *cp = ctx->cp; struct qcrypto_cipher_req_ctx *rctx; struct crypto_stat *pstat; struct crypto_engine *pengine; - struct qcrypto_req_control *pqcrypto_req_control; - async_req = &areq->base; pstat = &_qcrypto_stat; rctx = aead_request_ctx(areq); pengine = rctx->pengine; - pqcrypto_req_control = find_req_control_for_areq(pengine, - async_req); - if (pqcrypto_req_control == NULL) { - pr_err("async request not found\n"); - return; - } if (rctx->mode == QCE_MODE_CCM) { if (cp->ce_support.aligned_only) { @@ -1905,8 +1534,9 @@ static void _qce_aead_complete(void *cookie, unsigned char *icv, else pstat->aead_op_success++; - pqcrypto_req_control->res = ret; - req_done(pqcrypto_req_control); + pengine->res = ret; + + tasklet_schedule(&pengine->done_tasklet); } static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) @@ -1998,9 +1628,8 @@ static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen, } static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine, - struct qcrypto_req_control *pqcrypto_req_control) + struct crypto_async_request *async_req) { - struct crypto_async_request *async_req; struct qce_req qreq; int ret; struct qcrypto_cipher_req_ctx *rctx; @@ -2008,7 +1637,6 @@ static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine, struct ablkcipher_request *req; struct crypto_ablkcipher *tfm; - async_req = pqcrypto_req_control->req; req = container_of(async_req, struct ablkcipher_request, base); cipher_ctx = crypto_tfm_ctx(async_req->tfm); rctx = ablkcipher_request_ctx(req); @@ -2064,16 +1692,14 @@ static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine, } static int 
_qcrypto_process_ahash(struct crypto_engine *pengine, - struct qcrypto_req_control *pqcrypto_req_control) + struct crypto_async_request *async_req) { - struct crypto_async_request *async_req; struct ahash_request *req; struct qce_sha_req sreq; struct qcrypto_sha_req_ctx *rctx; struct qcrypto_sha_ctx *sha_ctx; int ret = 0; - async_req = pqcrypto_req_control->req; req = container_of(async_req, struct ahash_request, base); rctx = ahash_request_ctx(req); @@ -2123,19 +1749,16 @@ static int _qcrypto_process_ahash(struct crypto_engine *pengine, } static int _qcrypto_process_aead(struct crypto_engine *pengine, - struct qcrypto_req_control *pqcrypto_req_control) + struct crypto_async_request *async_req) { - struct crypto_async_request *async_req; struct qce_req qreq; int ret = 0; struct qcrypto_cipher_req_ctx *rctx; struct qcrypto_cipher_ctx *cipher_ctx; - struct aead_request *req; - struct crypto_aead *aead; + struct aead_request *req = container_of(async_req, + struct aead_request, base); + struct crypto_aead *aead = crypto_aead_reqtfm(req); - async_req = pqcrypto_req_control->req; - req = container_of(async_req, struct aead_request, base); - aead = crypto_aead_reqtfm(req); rctx = aead_request_ctx(req); rctx->pengine = pengine; cipher_ctx = crypto_tfm_ctx(async_req->tfm); @@ -2365,25 +1988,12 @@ static int _start_qcrypto_process(struct crypto_priv *cp, struct ahash_request *ahash_req; struct aead_request *aead_req; struct qcrypto_resp_ctx *arsp; - struct qcrypto_req_control *pqcrypto_req_control; - unsigned int cpu = MAX_SMP_CPU; - - if (ACCESS_ONCE(cp->ce_req_proc_sts) == STOPPED) - return 0; - - if (in_interrupt()) { - cpu = smp_processor_id(); - if (cpu >= MAX_SMP_CPU) - cpu = MAX_SMP_CPU - 1; - } else - cpu = MAX_SMP_CPU; pstat = &_qcrypto_stat; again: spin_lock_irqsave(&cp->lock, flags); - if (pengine->issue_req || - atomic_read(&pengine->req_count) >= (pengine->max_req)) { + if (pengine->req) { spin_unlock_irqrestore(&cp->lock, flags); return 0; } @@ -2410,12 +2020,6 @@ static int _start_qcrypto_process(struct crypto_priv *cp, return 0; } } - pqcrypto_req_control = qcrypto_alloc_req_control(pengine); - if (pqcrypto_req_control == NULL) { - pr_err("Allocation of request failed\n"); - spin_unlock_irqrestore(&cp->lock, flags); - return 0; - } /* add associated rsp entry to tfm response queue */ type = crypto_tfm_alg_type(async_req->tfm); @@ -2438,7 +2042,7 @@ static int _start_qcrypto_process(struct crypto_priv *cp, arsp = &cipher_rctx->rsp_entry; list_add_tail( &arsp->list, - &((struct qcrypto_cipher_ctx *)tfm_ctx) + &((struct qcrypto_sha_ctx *)tfm_ctx) ->rsp_queue); break; case CRYPTO_ALG_TYPE_AEAD: @@ -2449,23 +2053,18 @@ static int _start_qcrypto_process(struct crypto_priv *cp, arsp = &cipher_rctx->rsp_entry; list_add_tail( &arsp->list, - &((struct qcrypto_cipher_ctx *)tfm_ctx) + &((struct qcrypto_sha_ctx *)tfm_ctx) ->rsp_queue); break; } arsp->res = -EINPROGRESS; arsp->async_req = async_req; - pqcrypto_req_control->pce = pengine; - pqcrypto_req_control->req = async_req; - pqcrypto_req_control->arsp = arsp; + pengine->req = async_req; + pengine->arsp = arsp; pengine->active_seq++; pengine->check_flag = true; - pengine->issue_req = true; - cp->cpu_req[cpu]++; - smp_mb(); /* make it visible */ - spin_unlock_irqrestore(&cp->lock, flags); if (backlog_eng) backlog_eng->complete(backlog_eng, -EINPROGRESS); @@ -2473,26 +2072,25 @@ static int _start_qcrypto_process(struct crypto_priv *cp, backlog_cp->complete(backlog_cp, -EINPROGRESS); switch (type) { case CRYPTO_ALG_TYPE_ABLKCIPHER: - ret = 
_qcrypto_process_ablkcipher(pengine, - pqcrypto_req_control); + ret = _qcrypto_process_ablkcipher(pengine, async_req); break; case CRYPTO_ALG_TYPE_AHASH: - ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control); + ret = _qcrypto_process_ahash(pengine, async_req); break; case CRYPTO_ALG_TYPE_AEAD: - ret = _qcrypto_process_aead(pengine, pqcrypto_req_control); + ret = _qcrypto_process_aead(pengine, async_req); break; default: ret = -EINVAL; }; - - pengine->issue_req = false; - smp_mb(); /* make it visible */ - pengine->total_req++; if (ret) { + arsp->res = ret; pengine->err_req++; - qcrypto_free_req_control(pengine, pqcrypto_req_control); + spin_lock_irqsave(&cp->lock, flags); + pengine->req = NULL; + pengine->arsp = NULL; + spin_unlock_irqrestore(&cp->lock, flags); if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) pstat->ablk_cipher_op_fail++; @@ -2502,49 +2100,21 @@ static int _start_qcrypto_process(struct crypto_priv *cp, else pstat->aead_op_fail++; - _qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, ret); + _qcrypto_tfm_complete(cp, type, tfm_ctx); goto again; }; return ret; } -static inline struct crypto_engine *_next_eng(struct crypto_priv *cp, - struct crypto_engine *p) -{ - - if (p == NULL || list_is_last(&p->elist, &cp->engine_list)) - p = list_first_entry(&cp->engine_list, struct crypto_engine, - elist); - else - p = list_entry(p->elist.next, struct crypto_engine, elist); - return p; -} static struct crypto_engine *_avail_eng(struct crypto_priv *cp) { - /* call this function with spinlock set */ - struct crypto_engine *q = NULL; - struct crypto_engine *p = cp->scheduled_eng; - struct crypto_engine *q1; - int eng_cnt = cp->total_units; + struct crypto_engine *pe = NULL; - if (unlikely(list_empty(&cp->engine_list))) { - pr_err("%s: no valid ce to schedule\n", __func__); - return NULL; - } - - p = _next_eng(cp, p); - q1 = p; - while (eng_cnt-- > 0) { - if (!p->issue_req && atomic_read(&p->req_count) < p->max_req) { - q = p; - break; - } - p = _next_eng(cp, p); - if (q1 == p) - break; + list_for_each_entry(pe, &cp->engine_list, elist) { + if (pe->req == NULL) + return pe; } - cp->scheduled_eng = q; - return q; + return NULL; } static int _qcrypto_queue_req(struct crypto_priv *cp, @@ -2561,8 +2131,6 @@ static int _qcrypto_queue_req(struct crypto_priv *cp, } else { ret = crypto_enqueue_request(&cp->req_queue, req); pengine = _avail_eng(cp); - if (cp->req_queue.qlen > cp->max_qlen) - cp->max_qlen = cp->req_queue.qlen; } if (pengine) { switch (pengine->bw_state) { @@ -2588,11 +2156,9 @@ static int _qcrypto_queue_req(struct crypto_priv *cp, pengine = NULL; break; } - } else { - cp->no_avail++; } spin_unlock_irqrestore(&cp->lock, flags); - if (pengine && (ACCESS_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS)) + if (pengine) _start_qcrypto_process(cp, pengine); return ret; } @@ -2604,7 +2170,7 @@ static int _qcrypto_enc_aes_192_fallback(struct ablkcipher_request *req) struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); int err; - ablkcipher_request_set_tfm(req, ctx->cipher_aes192_fb); + ablkcipher_request_set_tfm(req, ctx->fallback.cipher_fb); err = crypto_ablkcipher_encrypt(req); ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); return err; @@ -2617,7 +2183,7 @@ static int _qcrypto_dec_aes_192_fallback(struct ablkcipher_request *req) struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); int err; - ablkcipher_request_set_tfm(req, ctx->cipher_aes192_fb); + ablkcipher_request_set_tfm(req, ctx->fallback.cipher_fb); err = crypto_ablkcipher_decrypt(req); 
ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); return err; @@ -2641,7 +2207,7 @@ static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req) if ((ctx->enc_key_len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192) && - ctx->cipher_aes192_fb) + ctx->fallback.cipher_fb) return _qcrypto_enc_aes_192_fallback(req); rctx = ablkcipher_request_ctx(req); @@ -2671,7 +2237,7 @@ static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req) if ((ctx->enc_key_len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192) && - ctx->cipher_aes192_fb) + ctx->fallback.cipher_fb) return _qcrypto_enc_aes_192_fallback(req); rctx = ablkcipher_request_ctx(req); @@ -2701,7 +2267,7 @@ static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req) if ((ctx->enc_key_len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192) && - ctx->cipher_aes192_fb) + ctx->fallback.cipher_fb) return _qcrypto_enc_aes_192_fallback(req); rctx = ablkcipher_request_ctx(req); @@ -2885,7 +2451,7 @@ static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req) if ((ctx->enc_key_len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192) && - ctx->cipher_aes192_fb) + ctx->fallback.cipher_fb) return _qcrypto_dec_aes_192_fallback(req); rctx = ablkcipher_request_ctx(req); @@ -2915,7 +2481,7 @@ static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req) if ((ctx->enc_key_len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192) && - ctx->cipher_aes192_fb) + ctx->fallback.cipher_fb) return _qcrypto_dec_aes_192_fallback(req); rctx = ablkcipher_request_ctx(req); @@ -2945,7 +2511,7 @@ static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req) if ((ctx->enc_key_len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192) && - ctx->cipher_aes192_fb) + ctx->fallback.cipher_fb) return _qcrypto_dec_aes_192_fallback(req); rctx = ablkcipher_request_ctx(req); @@ -3167,7 +2733,6 @@ static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); struct rtattr *rta = (struct rtattr *)key; struct crypto_authenc_key_param *param; - int ret; if (!RTA_OK(rta, keylen)) goto badkey; @@ -3193,20 +2758,6 @@ static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len); memcpy(ctx->auth_key, key, ctx->auth_key_len); - if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb && - ctx->ahash_aead_aes192_fb) { - crypto_ahash_clear_flags(ctx->ahash_aead_aes192_fb, ~0); - ret = crypto_ahash_setkey(ctx->ahash_aead_aes192_fb, - ctx->auth_key, ctx->auth_key_len); - if (ret) - goto badkey; - crypto_ablkcipher_clear_flags(ctx->cipher_aes192_fb, ~0); - ret = crypto_ablkcipher_setkey(ctx->cipher_aes192_fb, - ctx->enc_key, ctx->enc_key_len); - if (ret) - goto badkey; - } - return 0; badkey: ctx->enc_key_len = 0; @@ -3256,253 +2807,6 @@ static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead, return ret; }; -static void _qcrypto_aead_aes_192_fb_a_cb(struct qcrypto_cipher_req_ctx *rctx, - int res) -{ - struct aead_request *req; - struct crypto_async_request *areq; - - req = rctx->aead_req; - areq = &req->base; - if (rctx->fb_aes_req) - ablkcipher_request_free(rctx->fb_aes_req); - if (rctx->fb_hash_req) - ahash_request_free(rctx->fb_hash_req); - rctx->fb_aes_req = NULL; - rctx->fb_hash_req = NULL; - kfree(rctx->fb_ahash_assoc_iv); - kfree(rctx->fb_aes_iv); - areq->complete(areq, res); -} - -static void _aead_aes_fb_stage2_ahash_complete( - struct crypto_async_request *base, int err) -{ - struct 
qcrypto_cipher_req_ctx *rctx; - struct aead_request *req; - struct qcrypto_cipher_ctx *ctx; - - rctx = base->data; - req = rctx->aead_req; - ctx = crypto_tfm_ctx(req->base.tfm); - /* copy icv */ - if (err == 0) - scatterwalk_map_and_copy(rctx->fb_ahash_digest, - req->dst, - req->cryptlen, - ctx->authsize, 1); - _qcrypto_aead_aes_192_fb_a_cb(rctx, err); -} - - -static int _start_aead_aes_fb_stage2_hmac(struct qcrypto_cipher_req_ctx *rctx) -{ - struct ahash_request *ahash_req; - - ahash_req = rctx->fb_hash_req; - ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - _aead_aes_fb_stage2_ahash_complete, rctx); - - return crypto_ahash_digest(ahash_req); -} - -static void _aead_aes_fb_stage2_decrypt_complete( - struct crypto_async_request *base, int err) -{ - struct qcrypto_cipher_req_ctx *rctx; - - rctx = base->data; - _qcrypto_aead_aes_192_fb_a_cb(rctx, err); -} - -static int _start_aead_aes_fb_stage2_decrypt( - struct qcrypto_cipher_req_ctx *rctx) -{ - struct ablkcipher_request *aes_req; - - aes_req = rctx->fb_aes_req; - ablkcipher_request_set_callback(aes_req, CRYPTO_TFM_REQ_MAY_BACKLOG, - _aead_aes_fb_stage2_decrypt_complete, rctx); - return crypto_ablkcipher_decrypt(aes_req); -} - -static void _aead_aes_fb_stage1_ahash_complete( - struct crypto_async_request *base, int err) -{ - struct qcrypto_cipher_req_ctx *rctx; - struct aead_request *req; - struct qcrypto_cipher_ctx *ctx; - - rctx = base->data; - req = rctx->aead_req; - ctx = crypto_tfm_ctx(req->base.tfm); - - /* compare icv */ - if (err == 0) { - unsigned char tmp[ctx->authsize]; - - scatterwalk_map_and_copy(tmp, req->src, - req->cryptlen - ctx->authsize, ctx->authsize, 0); - if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0) - err = -EBADMSG; - } - if (err) - _qcrypto_aead_aes_192_fb_a_cb(rctx, err); - else { - err = _start_aead_aes_fb_stage2_decrypt(rctx); - if (err != -EINPROGRESS && err != -EBUSY) - _qcrypto_aead_aes_192_fb_a_cb(rctx, err); - } -} - -static void _aead_aes_fb_stage1_encrypt_complete( - struct crypto_async_request *base, int err) -{ - struct qcrypto_cipher_req_ctx *rctx; - struct aead_request *req; - struct qcrypto_cipher_ctx *ctx; - - rctx = base->data; - req = rctx->aead_req; - ctx = crypto_tfm_ctx(req->base.tfm); - - memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize); - - if (err) { - _qcrypto_aead_aes_192_fb_a_cb(rctx, err); - return; - } - - err = _start_aead_aes_fb_stage2_hmac(rctx); - - /* copy icv */ - if (err == 0) { - scatterwalk_map_and_copy(rctx->fb_ahash_digest, - req->dst, - req->cryptlen, - ctx->authsize, 1); - } - if (err != -EINPROGRESS && err != -EBUSY) - _qcrypto_aead_aes_192_fb_a_cb(rctx, err); -} - -static int _qcrypto_aead_aes_192_fallback(struct aead_request *req, - bool is_encrypt) -{ - struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req); - struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); - struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req); - struct ablkcipher_request *aes_req = NULL; - struct ahash_request *ahash_req = NULL; - int rc = -EINVAL; - int nbytes; - int num_sg; - - rctx->fb_ahash_assoc_iv = NULL; - rctx->fb_aes_iv = NULL; - aes_req = ablkcipher_request_alloc(ctx->cipher_aes192_fb, GFP_KERNEL); - if (!aes_req) - return -ENOMEM; - ahash_req = ahash_request_alloc(ctx->ahash_aead_aes192_fb, GFP_KERNEL); - if (!ahash_req) - goto ret; - rctx->fb_aes_req = aes_req; - rctx->fb_hash_req = ahash_req; - rctx->aead_req = req; - num_sg = qcrypto_count_sg(req->assoc, req->assoclen); - rctx->fb_ahash_assoc_iv = kzalloc(req->assoclen + - 
crypto_aead_ivsize(aead_tfm), GFP_ATOMIC); - if (!rctx->fb_ahash_assoc_iv) - goto ret; - if (req->assoclen) - qcrypto_sg_copy_to_buffer(req->assoc, num_sg, - rctx->fb_ahash_assoc_iv, req->assoclen); - memcpy(rctx->fb_ahash_assoc_iv + req->assoclen, - req->iv, crypto_aead_ivsize(aead_tfm)); - memset(rctx->fb_ahash_sg, 0, sizeof(rctx->fb_ahash_sg)); - sg_set_buf(&rctx->fb_ahash_sg[0], rctx->fb_ahash_assoc_iv, - req->assoclen + crypto_aead_ivsize(aead_tfm)); - sg_mark_end(&rctx->fb_ahash_sg[1]); - - nbytes = req->cryptlen; - if (is_encrypt) { - sg_chain(&rctx->fb_ahash_sg[0], 2, req->dst); - } else { - sg_chain(&rctx->fb_ahash_sg[0], 2, req->src); - nbytes -= ctx->authsize; - } - rctx->fb_ahash_length = nbytes + crypto_aead_ivsize(aead_tfm) - + req->assoclen; - rctx->fb_aes_src = req->src; - rctx->fb_aes_dst = req->dst; - rctx->fb_aes_cryptlen = nbytes; - rctx->ivsize = crypto_aead_ivsize(aead_tfm); - rctx->fb_aes_iv = kzalloc(rctx->ivsize, GFP_ATOMIC); - if (!rctx->fb_aes_iv) - goto ret; - memcpy(rctx->fb_aes_iv, req->iv, rctx->ivsize); - ablkcipher_request_set_crypt(aes_req, rctx->fb_aes_src, - rctx->fb_aes_dst, - rctx->fb_aes_cryptlen, rctx->fb_aes_iv); - ahash_request_set_crypt(ahash_req, &rctx->fb_ahash_sg[0], - rctx->fb_ahash_digest, - rctx->fb_ahash_length); - - if (is_encrypt) { - - ablkcipher_request_set_callback(aes_req, - CRYPTO_TFM_REQ_MAY_BACKLOG, - _aead_aes_fb_stage1_encrypt_complete, rctx); - - rc = crypto_ablkcipher_encrypt(aes_req); - if (rc == 0) { - memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize); - rc = _start_aead_aes_fb_stage2_hmac(rctx); - if (rc == 0) { - /* copy icv */ - scatterwalk_map_and_copy(rctx->fb_ahash_digest, - req->dst, - req->cryptlen, - ctx->authsize, 1); - } - } - if (rc == -EINPROGRESS || rc == -EBUSY) - return rc; - goto ret; - - } else { - ahash_request_set_callback(ahash_req, - CRYPTO_TFM_REQ_MAY_BACKLOG, - _aead_aes_fb_stage1_ahash_complete, rctx); - - rc = crypto_ahash_digest(ahash_req); - if (rc == 0) { - unsigned char tmp[ctx->authsize]; - - /* compare icv */ - scatterwalk_map_and_copy(tmp, - req->src, req->cryptlen - ctx->authsize, - ctx->authsize, 0); - if (memcmp(rctx->fb_ahash_digest, tmp, - ctx->authsize) != 0) - rc = -EBADMSG; - else - rc = _start_aead_aes_fb_stage2_decrypt(rctx); - } - if (rc == -EINPROGRESS || rc == -EBUSY) - return rc; - goto ret; - } -ret: - if (aes_req) - ablkcipher_request_free(aes_req); - if (ahash_req) - ahash_request_free(ahash_req); - kfree(rctx->fb_ahash_assoc_iv); - kfree(rctx->fb_aes_iv); - return rc; -} - static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req) { struct qcrypto_cipher_req_ctx *rctx; @@ -3523,14 +2827,10 @@ static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req) rctx->dir = QCE_ENCRYPT; rctx->mode = QCE_MODE_CBC; rctx->iv = req->iv; - rctx->aead_req = req; if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) pstat->aead_sha1_aes_enc++; else pstat->aead_sha256_aes_enc++; - if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb && - ctx->ahash_aead_aes192_fb) - return _qcrypto_aead_aes_192_fallback(req, true); return _qcrypto_queue_req(cp, ctx->pengine, &req->base); } @@ -3553,16 +2853,11 @@ static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req) rctx->dir = QCE_DECRYPT; rctx->mode = QCE_MODE_CBC; rctx->iv = req->iv; - rctx->aead_req = req; if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) pstat->aead_sha1_aes_dec++; else pstat->aead_sha256_aes_dec++; - - if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb && - ctx->ahash_aead_aes192_fb) - return 
_qcrypto_aead_aes_192_fallback(req, false); return _qcrypto_queue_req(cp, ctx->pengine, &req->base); } @@ -3583,7 +2878,6 @@ static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req) rctx->dir = QCE_ENCRYPT; rctx->mode = QCE_MODE_CBC; rctx->iv = req->giv; /* generated iv */ - rctx->aead_req = areq; memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); /* avoid consecutive packets going out with same IV */ @@ -3593,11 +2887,6 @@ static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req) pstat->aead_sha1_aes_enc++; else pstat->aead_sha256_aes_enc++; - if (ctx->enc_key_len == AES_KEYSIZE_192 && ctx->cipher_aes192_fb && - ctx->ahash_aead_aes192_fb) { - areq->iv = req->giv; - return _qcrypto_aead_aes_192_fallback(areq, true); - } return _qcrypto_queue_req(cp, ctx->pengine, &areq->base); } @@ -3616,7 +2905,6 @@ static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req) rctx->dir = QCE_ENCRYPT; rctx->mode = QCE_MODE_CBC; rctx->iv = req->iv; - rctx->aead_req = req; if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) pstat->aead_sha1_des_enc++; @@ -3640,7 +2928,6 @@ static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req) rctx->dir = QCE_DECRYPT; rctx->mode = QCE_MODE_CBC; rctx->iv = req->iv; - rctx->aead_req = req; if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) pstat->aead_sha1_des_dec++; @@ -3666,7 +2953,6 @@ static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req) rctx->dir = QCE_ENCRYPT; rctx->mode = QCE_MODE_CBC; rctx->iv = req->giv; /* generated iv */ - rctx->aead_req = areq; memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); /* avoid consecutive packets going out with same IV */ @@ -3693,7 +2979,6 @@ static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req) rctx->dir = QCE_ENCRYPT; rctx->mode = QCE_MODE_CBC; rctx->iv = req->iv; - rctx->aead_req = req; if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) pstat->aead_sha1_3des_enc++; @@ -3717,7 +3002,6 @@ static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req) rctx->dir = QCE_DECRYPT; rctx->mode = QCE_MODE_CBC; rctx->iv = req->iv; - rctx->aead_req = req; if (ctx->auth_alg == QCE_HASH_SHA1_HMAC) pstat->aead_sha1_3des_dec++; @@ -3743,7 +3027,6 @@ static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req) rctx->dir = QCE_ENCRYPT; rctx->mode = QCE_MODE_CBC; rctx->iv = req->giv; /* generated iv */ - rctx->aead_req = areq; memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); /* avoid consecutive packets going out with same IV */ @@ -4019,8 +3302,8 @@ static int _sha_update(struct ahash_request *req, uint32_t sha_block_size) break; len += sg_last->length; sg_last = scatterwalk_sg_next(sg_last); - if (NULL == sg_last) { - pr_err("qcrypto.c: _sha_update, sg_last = NULL"); + if (sg_last == NULL) { + pr_err("qcrypto: _sha_update, sg = NULL\n"); break; } } @@ -4047,7 +3330,7 @@ static int _sha_update(struct ahash_request *req, uint32_t sha_block_size) if (sg_last) sg_mark_end(sg_last); else - pr_err("qcrypto: _sha_update, sg_last= NULL"); + pr_err("qcrypto: _sha_update, sg = NULL\n"); memset(rctx->sg, 0, sizeof(rctx->sg)); sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len); @@ -4055,11 +3338,12 @@ static int _sha_update(struct ahash_request *req, uint32_t sha_block_size) sg_chain(rctx->sg, 2, req->src); req->src = rctx->sg; } - } else + } else { if (sg_last) sg_mark_end(sg_last); else - pr_err("qcrypto.c: _sha_update, sg_last = NULL"); + pr_err("qcrypto: _sha_update, sg = NULL\n"); + } req->nbytes = nbytes; rctx->trailing_buf_len = 
trailing_buf_len; @@ -4227,7 +3511,7 @@ static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, ret = wait_for_completion_interruptible( &ahash_req_complete); - reinit_completion(&sha_ctx->ahash_req_complete); + init_completion(&sha_ctx->ahash_req_complete); } kzfree(in_buf); @@ -4423,7 +3707,7 @@ static int _sha_hmac_inner_hash(struct ahash_request *req, if (ret == -EINPROGRESS || ret == -EBUSY) { ret = wait_for_completion_interruptible(&sha_ctx->ahash_req_complete); - reinit_completion(&sha_ctx->ahash_req_complete); + init_completion(&sha_ctx->ahash_req_complete); } return ret; @@ -4513,6 +3797,23 @@ static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size) return 0; } +/* + * Fill up fips_selftest_data structure + */ + +static void _qcrypto_fips_selftest_d(struct fips_selftest_data *selftest_d, + struct ce_hw_support *ce_support, + char *prefix) +{ + strlcpy(selftest_d->algo_prefix, prefix, CRYPTO_MAX_ALG_NAME); + selftest_d->prefix_ahash_algo = ce_support->use_sw_ahash_algo; + selftest_d->prefix_hmac_algo = ce_support->use_sw_hmac_algo; + selftest_d->prefix_aes_xts_algo = ce_support->use_sw_aes_xts_algo; + selftest_d->prefix_aes_cbc_ecb_ctr_algo = + ce_support->use_sw_aes_cbc_ecb_ctr_algo; + selftest_d->prefix_aead_algo = ce_support->use_sw_aead_algo; + selftest_d->ce_device = ce_support->ce_device; +} int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev) { @@ -4986,8 +4287,8 @@ static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = { .cra_alignmask = 0, .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, - .cra_init = _qcrypto_cra_aead_aes_sha1_init, - .cra_exit = _qcrypto_cra_aead_aes_exit, + .cra_init = _qcrypto_cra_aead_sha1_init, + .cra_exit = _qcrypto_cra_aead_exit, .cra_u = { .aead = { .ivsize = AES_BLOCK_SIZE, @@ -5064,8 +4365,8 @@ static struct crypto_alg _qcrypto_aead_sha256_hmac_algos[] = { .cra_alignmask = 0, .cra_type = &crypto_aead_type, .cra_module = THIS_MODULE, - .cra_init = _qcrypto_cra_aead_aes_sha256_init, - .cra_exit = _qcrypto_cra_aead_aes_exit, + .cra_init = _qcrypto_cra_aead_sha256_init, + .cra_exit = _qcrypto_cra_aead_exit, .cra_u = { .aead = { .ivsize = AES_BLOCK_SIZE, @@ -5192,7 +4493,10 @@ static int _qcrypto_probe(struct platform_device *pdev) struct msm_ce_hw_support *platform_support; struct crypto_engine *pengine; unsigned long flags; - struct qcrypto_req_control *pqcrypto_req_control = NULL; + + /* For FIPS140-2 Power on self tests */ + struct fips_selftest_data selftest_d; + char prefix[10] = ""; pengine = kzalloc(sizeof(*pengine), GFP_KERNEL); if (!pengine) { @@ -5213,6 +4517,7 @@ static int _qcrypto_probe(struct platform_device *pdev) pengine->qce = handle; pengine->pcp = cp; pengine->pdev = pdev; + pengine->req = NULL; pengine->signature = 0xdeadbeef; init_timer(&(pengine->bw_reaper_timer)); @@ -5224,9 +4529,8 @@ static int _qcrypto_probe(struct platform_device *pdev) pengine->active_seq = 0; pengine->last_active_seq = 0; pengine->check_flag = false; - pengine->max_req_used = 0; - pengine->issue_req = false; + tasklet_init(&pengine->done_tasklet, req_done, (unsigned long)pengine); crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH); mutex_lock(&cp->engine_lock); @@ -5234,23 +4538,12 @@ static int _qcrypto_probe(struct platform_device *pdev) pengine->unit = cp->total_units; spin_lock_irqsave(&cp->lock, flags); - pengine->first_engine = list_empty(&cp->engine_list); - if (pengine->first_engine) - cp->first_engine = pengine; list_add_tail(&pengine->elist, 
&cp->engine_list); cp->next_engine = pengine; spin_unlock_irqrestore(&cp->lock, flags); qce_hw_support(pengine->qce, &cp->ce_support); pengine->ce_hw_instance = cp->ce_support.ce_hw_instance; - pengine->max_req = cp->ce_support.max_request; - pqcrypto_req_control = kzalloc(sizeof(struct qcrypto_req_control) * - pengine->max_req, GFP_KERNEL); - if (pqcrypto_req_control == NULL) { - rc = -ENOMEM; - goto err; - } - qcrypto_init_req_control(pengine, pqcrypto_req_control); if (cp->ce_support.bam) { cp->platform_support.ce_shared = cp->ce_support.is_shared; cp->platform_support.shared_ce_resource = 0; @@ -5298,7 +4591,7 @@ static int _qcrypto_probe(struct platform_device *pdev) if (cp->total_units != 1) { mutex_unlock(&cp->engine_lock); - return 0; + goto fips_selftest; } /* register crypto cipher algorithms the device supports */ @@ -5589,6 +4882,33 @@ static int _qcrypto_probe(struct platform_device *pdev) mutex_unlock(&cp->engine_lock); +fips_selftest: + /* + * FIPS140-2 Known Answer Tests : + * IN case of any failure, do not Init the module + */ + is_fips_qcrypto_tests_done = false; + + if (g_fips140_status != FIPS140_STATUS_NA) { + + _qcrypto_prefix_alg_cra_name(prefix, 0); + _qcrypto_fips_selftest_d(&selftest_d, &cp->ce_support, prefix); + if (_fips_qcrypto_sha_selftest(&selftest_d) || + _fips_qcrypto_cipher_selftest(&selftest_d) || + _fips_qcrypto_aead_selftest(&selftest_d)) { + pr_err("qcrypto: FIPS140-2 Known Answer Tests : Failed\n"); + BUG_ON(1); + rc = -1; + goto err; + } else + pr_info("qcrypto: FIPS140-2 Known Answer Tests: Successful\n"); + if (g_fips140_status != FIPS140_STATUS_PASS) + g_fips140_status = FIPS140_STATUS_PASS_CRYPTO; + + } else + pr_info("qcrypto: FIPS140-2 Known Answer Tests: Skipped\n"); + + is_fips_qcrypto_tests_done = true; return 0; err: @@ -5604,8 +4924,7 @@ static int _qcrypto_engine_in_use(struct crypto_engine *pengine) { struct crypto_priv *cp = pengine->pcp; - if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen - || cp->req_queue.qlen) + if (pengine->req || pengine->req_queue.qlen || cp->req_queue.qlen) return 1; return 0; } @@ -5760,27 +5079,13 @@ static ssize_t _debug_stats_write(struct file *file, const char __user *buf, unsigned long flags; struct crypto_priv *cp = &qcrypto_dev; struct crypto_engine *pe; - int i; memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat)); spin_lock_irqsave(&cp->lock, flags); list_for_each_entry(pe, &cp->engine_list, elist) { pe->total_req = 0; pe->err_req = 0; - qce_clear_driver_stats(pe->qce); - pe->max_req_used = 0; } - cp->max_qlen = 0; - cp->resp_start = 0; - cp->resp_stop = 0; - cp->no_avail = 0; - cp->max_resp_qlen = 0; - cp->queue_work_eng3 = 0; - cp->queue_work_not_eng3 = 0; - cp->queue_work_not_eng3_nz = 0; - cp->max_reorder_cnt = 0; - for (i = 0; i < MAX_SMP_CPU + 1; i++) - cp->cpu_req[i] = 0; spin_unlock_irqrestore(&cp->lock, flags); return count; } @@ -5830,21 +5135,11 @@ static int __init _qcrypto_init(void) return rc; INIT_LIST_HEAD(&pcp->alg_list); INIT_LIST_HEAD(&pcp->engine_list); - init_llist_head(&pcp->ordered_resp_list); spin_lock_init(&pcp->lock); mutex_init(&pcp->engine_lock); - pcp->resp_wq = alloc_workqueue("qcrypto_seq_response_wq", - WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); - if (!pcp->resp_wq) { - pr_err("Error allocating workqueue\n"); - return -ENOMEM; - } - INIT_WORK(&pcp->resp_work, seq_response); pcp->total_units = 0; pcp->platform_support.bus_scale_table = NULL; pcp->next_engine = NULL; - pcp->scheduled_eng = NULL; - pcp->ce_req_proc_sts = IN_PROGRESS; 
crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH); return platform_driver_register(&_qualcomm_crypto); } diff --git a/drivers/crypto/msm/qcryptoi.h b/drivers/crypto/msm/qcryptoi.h new file mode 100644 index 000000000000..fe89e99ddf0f --- /dev/null +++ b/drivers/crypto/msm/qcryptoi.h @@ -0,0 +1,72 @@ +/* QTI Crypto driver + * + * Copyright (c) 2014, 2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __CRYPTO_MSM_QCRYPTOI_H +#define __CRYPTO_MSM_QCRYPTOI_H + +/* The structure to hold data + * that selftests require + */ +struct fips_selftest_data { + + char algo_prefix[10]; + unsigned int ce_device; + bool prefix_ahash_algo; + bool prefix_hmac_algo; + bool prefix_aes_xts_algo; + bool prefix_aes_cbc_ecb_ctr_algo; + bool prefix_aead_algo; +}; + +extern enum fips_status g_fips140_status; + +#ifdef CONFIG_FIPS_ENABLE +/* + * Sha/HMAC self tests + */ +int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d); + +/* +* Cipher algorithm self tests +*/ +int _fips_qcrypto_cipher_selftest(struct fips_selftest_data *selftest_d); + +/* + * AEAD algorithm self tests + */ +int _fips_qcrypto_aead_selftest(struct fips_selftest_data *selftest_d); + +#else + +static inline +int _fips_qcrypto_sha_selftest(struct fips_selftest_data *selftest_d) +{ + return 0; +} + +static inline +int _fips_qcrypto_cipher_selftest(struct fips_selftest_data *selftest_d) +{ + return 0; +} + +static +inline int _fips_qcrypto_aead_selftest(struct fips_selftest_data *selftest_d) +{ + return 0; +} + +#endif /* CONFIG_FIPS_ENABLE*/ + +#endif /* __CRYPTO_MSM_QCRYPTOI_H */ From ecd8cd0052dfc37d5e1c69d3953da80436e2a8cc Mon Sep 17 00:00:00 2001 From: Gustavo Solaira Date: Wed, 22 Aug 2018 21:18:57 -0700 Subject: [PATCH 029/151] ARM: dts: msm: Enable mhi_dev IPC_RTR export for mdm9650 CV2X Enable ipc_router export over mhi_dev transport to communicate with an External AP over PCIe. Change-Id: I75f8ec917cfc9ec3c7ea5eb59140ac8e770e9e8b Signed-off-by: Gustavo Solaira --- arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi b/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi index 50901501edff..037ac98cdbd9 100644 --- a/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi +++ b/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi @@ -56,6 +56,15 @@ pps { use-system-time-ts; }; + + qcom,ipc_router_external_ap_xprt { + compatible = "qcom,ipc-router-mhi-dev-xprt"; + qcom,out-chan-id = <20>; + qcom,in-chan-id = <21>; + qcom,xprt-remote = "external-ap"; + qcom,xprt-linkid = <2>; + qcom,xprt-version = <1>; + }; }; &cnss_pcie { From 31b5a03c73a2fec6822dccc135a14645d573e304 Mon Sep 17 00:00:00 2001 From: Mao Jinlong Date: Fri, 27 Oct 2017 11:15:55 +0800 Subject: [PATCH 030/151] Revert "rtc: alarm: Add power-on alarm feature" This reverts commit 2e1a4aefef66db901f9a906b79e30187f10dbecb. Power off alarm is not set via alarmtimer now. Remove the changes of power off alarm's previous design. 
CRs-Fixed: 2132189 Change-Id: I0f60bec0d94c93c4f2a89ae86a1b0a0d04aa9e48 Signed-off-by: Mao Jinlong --- fs/timerfd.c | 32 +++++------ include/linux/alarmtimer.h | 4 -- include/uapi/linux/time.h | 1 - kernel/time/alarmtimer.c | 114 +------------------------------------ 4 files changed, 15 insertions(+), 136 deletions(-) diff --git a/fs/timerfd.c b/fs/timerfd.c index 31374ec8f9bd..94de69ec6af6 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -50,8 +50,7 @@ static DEFINE_SPINLOCK(cancel_lock); static inline bool isalarm(struct timerfd_ctx *ctx) { return ctx->clockid == CLOCK_REALTIME_ALARM || - ctx->clockid == CLOCK_BOOTTIME_ALARM || - ctx->clockid == CLOCK_POWEROFF_ALARM; + ctx->clockid == CLOCK_BOOTTIME_ALARM; } /* @@ -143,8 +142,7 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) { spin_lock(&ctx->cancel_lock); if ((ctx->clockid == CLOCK_REALTIME || - ctx->clockid == CLOCK_REALTIME_ALARM || - ctx->clockid == CLOCK_POWEROFF_ALARM) && + ctx->clockid == CLOCK_REALTIME_ALARM) && (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) { if (!ctx->might_cancel) { ctx->might_cancel = true; @@ -176,7 +174,6 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags, enum hrtimer_mode htmode; ktime_t texp; int clockid = ctx->clockid; - enum alarmtimer_type type; htmode = (flags & TFD_TIMER_ABSTIME) ? HRTIMER_MODE_ABS: HRTIMER_MODE_REL; @@ -187,8 +184,10 @@ static int timerfd_setup(struct timerfd_ctx *ctx, int flags, ctx->tintv = timespec_to_ktime(ktmr->it_interval); if (isalarm(ctx)) { - type = clock2alarm(ctx->clockid); - alarm_init(&ctx->t.alarm, type, timerfd_alarmproc); + alarm_init(&ctx->t.alarm, + ctx->clockid == CLOCK_REALTIME_ALARM ? + ALARM_REALTIME : ALARM_BOOTTIME, + timerfd_alarmproc); } else { hrtimer_init(&ctx->t.tmr, clockid, htmode); hrtimer_set_expires(&ctx->t.tmr, texp); @@ -387,7 +386,6 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) { int ufd; struct timerfd_ctx *ctx; - enum alarmtimer_type type; /* Check the TFD_* constants for consistency. */ BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC); @@ -398,8 +396,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) clockid != CLOCK_REALTIME && clockid != CLOCK_REALTIME_ALARM && clockid != CLOCK_BOOTTIME && - clockid != CLOCK_BOOTTIME_ALARM && - clockid != CLOCK_POWEROFF_ALARM)) + clockid != CLOCK_BOOTTIME_ALARM)) return -EINVAL; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -410,12 +407,13 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) spin_lock_init(&ctx->cancel_lock); ctx->clockid = clockid; - if (isalarm(ctx)) { - type = clock2alarm(ctx->clockid); - alarm_init(&ctx->t.alarm, type, timerfd_alarmproc); - } else { + if (isalarm(ctx)) + alarm_init(&ctx->t.alarm, + ctx->clockid == CLOCK_REALTIME_ALARM ? 
+ ALARM_REALTIME : ALARM_BOOTTIME, + timerfd_alarmproc); + else hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS); - } ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); @@ -487,10 +485,6 @@ static int do_timerfd_settime(int ufd, int flags, ret = timerfd_setup(ctx, flags, new); spin_unlock_irq(&ctx->wqh.lock); - - if (ctx->clockid == CLOCK_POWEROFF_ALARM) - set_power_on_alarm(); - fdput(f); return ret; } diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 9657030eacee..9de8bdba63cd 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -5,12 +5,10 @@ #include #include #include -#include enum alarmtimer_type { ALARM_REALTIME, ALARM_BOOTTIME, - ALARM_POWEROFF_REALTIME, ALARM_NUMTYPE, }; @@ -50,8 +48,6 @@ int alarm_start_relative(struct alarm *alarm, ktime_t start); void alarm_restart(struct alarm *alarm); int alarm_try_to_cancel(struct alarm *alarm); int alarm_cancel(struct alarm *alarm); -void set_power_on_alarm(void); -enum alarmtimer_type clock2alarm(clockid_t clockid); u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); u64 alarm_forward_now(struct alarm *alarm, ktime_t interval); diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h index ebc357119745..8f96bff58dee 100644 --- a/include/uapi/linux/time.h +++ b/include/uapi/linux/time.h @@ -60,7 +60,6 @@ struct itimerval { #define CLOCK_BOOTTIME_ALARM 9 #define CLOCK_SGI_CYCLE 10 /* Hardware specific */ #define CLOCK_TAI 11 -#define CLOCK_POWEROFF_ALARM 12 #define MAX_CLOCKS 16 #define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC) diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 0a359d67d927..58a0a755ae45 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -29,7 +29,6 @@ #ifdef CONFIG_MSM_PM #include "lpm-levels.h" #endif -#include /** * struct alarm_base - Alarm timer bases @@ -51,84 +50,12 @@ static ktime_t freezer_delta; static DEFINE_SPINLOCK(freezer_delta_lock); static struct wakeup_source *ws; -static struct delayed_work work; -static struct workqueue_struct *power_off_alarm_workqueue; #ifdef CONFIG_RTC_CLASS /* rtc timer and device for setting alarm wakeups at suspend */ static struct rtc_timer rtctimer; static struct rtc_device *rtcdev; static DEFINE_SPINLOCK(rtcdev_lock); -static struct mutex power_on_alarm_lock; - -/** - * set_power_on_alarm - set power on alarm value into rtc register - * - * Get the soonest power off alarm timer and set the alarm value into rtc - * register. - */ -void set_power_on_alarm(void) -{ - int rc; - struct timespec wall_time, alarm_ts; - long alarm_secs = 0l; - long rtc_secs, alarm_time, alarm_delta; - struct rtc_time rtc_time; - struct rtc_wkalrm alarm; - struct rtc_device *rtc; - struct timerqueue_node *next; - unsigned long flags; - struct alarm_base *base = &alarm_bases[ALARM_POWEROFF_REALTIME]; - - rc = mutex_lock_interruptible(&power_on_alarm_lock); - if (rc != 0) - return; - - spin_lock_irqsave(&base->lock, flags); - next = timerqueue_getnext(&base->timerqueue); - spin_unlock_irqrestore(&base->lock, flags); - - rtc = alarmtimer_get_rtcdev(); - if (!rtc) - goto exit; - - if (next) { - alarm_ts = ktime_to_timespec(next->expires); - alarm_secs = alarm_ts.tv_sec; - } - - if (!alarm_secs) - goto disable_alarm; - - getnstimeofday(&wall_time); - - /* - * alarm_secs have to be bigger than "wall_time +1". - * It is to make sure that alarm time will be always - * bigger than wall time. 
- */ - if (alarm_secs <= wall_time.tv_sec + 1) - goto disable_alarm; - - rtc_read_time(rtc, &rtc_time); - rtc_tm_to_time(&rtc_time, &rtc_secs); - alarm_delta = wall_time.tv_sec - rtc_secs; - alarm_time = alarm_secs - alarm_delta; - - rtc_time_to_tm(alarm_time, &alarm.time); - alarm.enabled = 1; - rc = rtc_set_alarm(rtc, &alarm); - if (rc) - goto disable_alarm; - - mutex_unlock(&power_on_alarm_lock); - return; - -disable_alarm: - rtc_timer_cancel(rtc, &rtc->aie_timer); -exit: - mutex_unlock(&power_on_alarm_lock); -} static void alarmtimer_triggered_func(void *p) { @@ -200,8 +127,6 @@ static void alarmtimer_rtc_remove_device(struct device *dev, static inline void alarmtimer_rtc_timer_init(void) { - mutex_init(&power_on_alarm_lock); - rtc_timer_init(&rtctimer, NULL, NULL); } @@ -228,14 +153,8 @@ struct rtc_device *alarmtimer_get_rtcdev(void) static inline int alarmtimer_rtc_interface_setup(void) { return 0; } static inline void alarmtimer_rtc_interface_remove(void) { } static inline void alarmtimer_rtc_timer_init(void) { } -void set_power_on_alarm(void) { } #endif -static void alarm_work_func(struct work_struct *unused) -{ - set_power_on_alarm(); -} - /** * alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue * @base: pointer to the base where the timer is being run @@ -305,10 +224,6 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer) } spin_unlock_irqrestore(&base->lock, flags); - /* set next power off alarm */ - if (alarm->type == ALARM_POWEROFF_REALTIME) - queue_delayed_work(power_off_alarm_workqueue, &work, 0); - return ret; } @@ -341,8 +256,6 @@ static int alarmtimer_suspend(struct device *dev) int i; int ret = 0; - cancel_delayed_work_sync(&work); - spin_lock_irqsave(&freezer_delta_lock, flags); min = freezer_delta; freezer_delta = ktime_set(0, 0); @@ -406,8 +319,6 @@ static int alarmtimer_suspend(struct device *dev) int i; int ret; - cancel_delayed_work_sync(&work); - spin_lock_irqsave(&freezer_delta_lock, flags); min = freezer_delta; freezer_delta = ktime_set(0, 0); @@ -647,14 +558,12 @@ EXPORT_SYMBOL_GPL(alarm_forward_now); * clock2alarm - helper that converts from clockid to alarmtypes * @clockid: clockid. 
*/ -enum alarmtimer_type clock2alarm(clockid_t clockid) +static enum alarmtimer_type clock2alarm(clockid_t clockid) { if (clockid == CLOCK_REALTIME_ALARM) return ALARM_REALTIME; if (clockid == CLOCK_BOOTTIME_ALARM) return ALARM_BOOTTIME; - if (clockid == CLOCK_POWEROFF_ALARM) - return ALARM_POWEROFF_REALTIME; return -1; } @@ -1050,13 +959,10 @@ static int __init alarmtimer_init(void) posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock); posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock); - posix_timers_register_clock(CLOCK_POWEROFF_ALARM, &alarm_clock); /* Initialize alarm bases */ alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME; alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real; - alarm_bases[ALARM_POWEROFF_REALTIME].base_clockid = CLOCK_REALTIME; - alarm_bases[ALARM_POWEROFF_REALTIME].gettime = &ktime_get_real; alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME; alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime; for (i = 0; i < ALARM_NUMTYPE; i++) { @@ -1078,24 +984,8 @@ static int __init alarmtimer_init(void) goto out_drv; } ws = wakeup_source_register("alarmtimer"); - if (!ws) { - error = -ENOMEM; - goto out_ws; - } - - INIT_DELAYED_WORK(&work, alarm_work_func); - power_off_alarm_workqueue = - create_singlethread_workqueue("power_off_alarm"); - if (!power_off_alarm_workqueue) { - error = -ENOMEM; - goto out_wq; - } - return 0; -out_wq: - wakeup_source_unregister(ws); -out_ws: - platform_device_unregister(pdev); + out_drv: platform_driver_unregister(&alarmtimer_driver); out_if: From 06b6d50c555b9551461fb17011280716b1b656af Mon Sep 17 00:00:00 2001 From: gaolez Date: Wed, 16 May 2018 13:11:47 +0800 Subject: [PATCH 031/151] ARM: dts: msm: Add wlan naples support to apq8009 dragon refboard Add wlan naples support to apq8009 dragon refboard. Change-Id: I8ff523b881004814b18403703e652e1890fe7d5a Signed-off-by: Gaole Zhang --- arch/arm/boot/dts/qcom/apq8009-dragon.dtsi | 83 +++++++++++++++++++++- 1 file changed, 82 insertions(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/qcom/apq8009-dragon.dtsi b/arch/arm/boot/dts/qcom/apq8009-dragon.dtsi index f651fa2c0c6c..e1bf02e7b5d2 100644 --- a/arch/arm/boot/dts/qcom/apq8009-dragon.dtsi +++ b/arch/arm/boot/dts/qcom/apq8009-dragon.dtsi @@ -80,6 +80,33 @@ status = "disabled"; }; + cnss_sdio: qcom,cnss_sdio { + compatible = "qcom,cnss_sdio"; + subsys-name = "AR6320"; + /** + * There is no vdd-wlan on board and this is not for DSRC. + * IO and XTAL share the same vreg. 
+ */ + vdd-wlan-io-supply = <&pm8916_l5>; + qcom,wlan-ramdump-dynamic = <0x200000>; + qcom,msm-bus,name = "msm-cnss"; + qcom,msm-bus,num-cases = <4>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <79 512 0 0>, /* No vote */ + <79 512 6250 200000>, /* 50 Mbps */ + <79 512 25000 200000>, /* 200 Mbps */ + <79 512 2048000 4096000>; /* MAX */ + }; + + qcom,wcnss-wlan@a000000 { + status = "disabled"; + }; + + qcom,pronto@a21b000 { + status = "disabled"; + }; + bluetooth: bt_qca9379 { compatible = "qca,qca9379"; qca,bt-reset-gpio = <&msm_gpio 47 0>; /* BT_EN */ @@ -105,8 +132,62 @@ }; }; +&msm_gpio { + sdc2_wlan_gpio_on: sdc2_wlan_gpio_on { + mux { + pins = "gpio43"; + function = "gpio"; + }; + config { + pins = "gpio43"; + drive-strength = <10>; + bias-pull-up; + output-high; + }; + }; + + sdc2_wlan_gpio_off: sdc2_wlan_gpio_off { + mux { + pins = "gpio43"; + function = "gpio"; + }; + config { + pins = "gpio43"; + drive-strength = <2>; + bias-disable; + output-low; + }; + }; +}; + &sdhc_2 { - status = "disabled"; + /delete-property/cd-gpios; + #address-cells = <0>; + interrupt-parent = <&sdhc_2>; + interrupts = <0 1 2>; + #interrupt-cells = <1>; + interrupt-map-mask = <0xffffffff>; + interrupt-map = <0 &intc 0 125 0>, + <1 &intc 0 221 0>, + <2 &msm_gpio 40 0x1>; + interrupt-names = "hc_irq", "pwr_irq", "sdiowakeup_irq"; + + qcom,vdd-voltage-level = <1800000 2950000>; + qcom,vdd-current-level = <15000 400000>; + + qcom,vdd-io-voltage-level = <1800000 1800000>; + qcom,vdd-io-current-level = <200 50000>; + qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>; + qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104"; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on + &sdc2_wlan_gpio_on>; + pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off + &sdc2_wlan_gpio_off>; + qcom,nonremovable; + qcom,core_3_0v_support; + status = "ok"; }; &usb_otg { From 7a22262828748d8e0c00661b10bec54d7d11fac4 Mon Sep 17 00:00:00 2001 From: Manoj Prabhu B Date: Tue, 28 Aug 2018 15:15:27 +0530 Subject: [PATCH 032/151] diag: Prevent out of bound access while initializing msg mask Move the mask_info mutex initialization outside mask structure to facilitate prevention of out of bound access while initializing msg mask during md session creation. Use separate msg_mask_tbl_count for ODL session msg mask and regular msg mask to prevent out of bound access in a possible race condition of accessing mask ranges. The chances of accessing uninitialized mask is prevented by adding null pointer checks for the mask structure and its member pointer. 
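The guarded iteration reduces to the sketch below (a simplified illustration of the pattern in the hunks that follow, not the full driver code: types are pared down to the fields used here, and the driver's locking and the actual mask updates are omitted):

  struct msg_mask_entry {
          unsigned int ssid_first;
          unsigned int ssid_last;
          unsigned int *ptr;                 /* per-range payload, may be NULL */
  };

  struct md_session {
          unsigned char msg_mask_tbl_count;  /* count captured at ODL session creation */
  };

  struct drv_state {
          unsigned char msg_mask_tbl_count;  /* regular (non-session) msg mask count */
  };

  static void walk_msg_masks(const struct md_session *info,
                             const struct drv_state *drv,
                             struct msg_mask_entry *mask)
  {
          /* use the session's own count when an ODL session owns the masks */
          unsigned char count = info ? info->msg_mask_tbl_count
                                     : drv->msg_mask_tbl_count;
          unsigned char i;

          for (i = 0; i < count; i++, mask++) {
                  if (!mask->ptr)            /* skip ranges that were never allocated */
                          continue;
                  /* ... update mask->ptr for [ssid_first, ssid_last] ... */
          }
  }

The same NULL check is applied to the log mask and build-time mask walkers so a table entry whose allocation failed is never dereferenced.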
Change-Id: I87497c67daff8cc1797a1266d50456bdbd3a9c23 Signed-off-by: Manoj Prabhu B --- drivers/char/diag/diag_masks.c | 105 +++++++++++++++++++++++------- drivers/char/diag/diag_masks.h | 7 +- drivers/char/diag/diagchar.h | 1 + drivers/char/diag/diagchar_core.c | 13 ++-- drivers/char/diag/diagfwd_cntl.c | 4 +- 5 files changed, 97 insertions(+), 33 deletions(-) diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index 4b24df4b6e4c..7c4ed8ebb09f 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -142,6 +142,9 @@ static void diag_send_log_mask_update(uint8_t peripheral, int equip_id) mutex_lock(&mask_info->lock); for (i = 0; i < MAX_EQUIP_ID; i++, mask++) { + if (!mask->ptr) + continue; + if (equip_id != i && equip_id != ALL_EQUIP_ID) continue; @@ -293,11 +296,12 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) int temp_len = 0; uint8_t *buf = NULL; uint8_t *temp = NULL; + uint8_t msg_mask_tbl_count_local = 0; uint32_t mask_size = 0; struct diag_mask_info *mask_info = NULL; struct diag_msg_mask_t *mask = NULL; struct diag_ctrl_msg_mask header; - uint8_t msg_mask_tbl_count_local; + struct diag_md_session_t *md_session_info = NULL; if (peripheral >= NUM_PERIPHERALS) return; @@ -309,10 +313,11 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) return; } - if (driver->md_session_mask != 0 && - (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral))) + if ((driver->md_session_mask != 0) && + (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral))) { + md_session_info = driver->md_session_map[peripheral]; mask_info = driver->md_session_map[peripheral]->msg_mask; - else + } else mask_info = &msg_mask; if (!mask_info) @@ -325,7 +330,10 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) return; } buf = mask_info->update_buf; - msg_mask_tbl_count_local = driver->msg_mask_tbl_count; + if (md_session_info) + msg_mask_tbl_count_local = md_session_info->msg_mask_tbl_count; + else + msg_mask_tbl_count_local = driver->msg_mask_tbl_count; mutex_unlock(&driver->msg_mask_lock); mutex_lock(&mask_info->lock); switch (mask_info->status) { @@ -344,6 +352,8 @@ static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last) } for (i = 0; i < msg_mask_tbl_count_local; i++, mask++) { + if (!mask->ptr) + continue; mutex_lock(&driver->msg_mask_lock); if (((mask->ssid_first > first) || (mask->ssid_last_tools < last)) && first != ALL_SSID) { @@ -496,6 +506,7 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len, { int i; int write_len = 0; + uint8_t msg_mask_tbl_count = 0; struct diag_msg_mask_t *mask_ptr = NULL; struct diag_msg_ssid_query_t rsp; struct diag_ssid_range_t ssid_range; @@ -525,15 +536,17 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len, return 0; } mutex_lock(&driver->msg_mask_lock); + msg_mask_tbl_count = (info) ? 
info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; rsp.cmd_code = DIAG_CMD_MSG_CONFIG; rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE; rsp.status = MSG_STATUS_SUCCESS; rsp.padding = 0; - rsp.count = driver->msg_mask_tbl_count; + rsp.count = msg_mask_tbl_count; memcpy(dest_buf, &rsp, sizeof(rsp)); write_len += sizeof(rsp); mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr; - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask_ptr++) { + for (i = 0; i < msg_mask_tbl_count; i++, mask_ptr++) { if (write_len + sizeof(ssid_range) > dest_len) { pr_err("diag: In %s, Truncating response due to size limitations of rsp buffer\n", __func__); @@ -579,6 +592,8 @@ static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len, rsp.padding = 0; build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr; for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) { + if (!build_mask->ptr) + continue; if (build_mask->ssid_first != req->ssid_first) continue; num_entries = req->ssid_last - req->ssid_first + 1; @@ -610,6 +625,7 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, int i; int write_len = 0; uint32_t mask_size = 0; + uint8_t msg_mask_tbl_count = 0; struct diag_msg_mask_t *mask = NULL; struct diag_build_mask_req_t *req = NULL; struct diag_msg_build_mask_t rsp; @@ -640,6 +656,8 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, } mutex_lock(&driver->msg_mask_lock); + msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; req = (struct diag_build_mask_req_t *)src_buf; rsp.cmd_code = DIAG_CMD_MSG_CONFIG; rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK; @@ -655,7 +673,9 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len, mutex_unlock(&driver->md_session_lock); return -EINVAL; } - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { + if (!mask->ptr) + continue; if ((req->ssid_first < mask->ssid_first) || (req->ssid_first > mask->ssid_last_tools)) { continue; @@ -692,6 +712,7 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, struct diag_msg_mask_t *mask_next = NULL; uint32_t *temp = NULL; struct diag_md_session_t *info = NULL; + uint8_t msg_mask_tbl_count = 0; mutex_lock(&driver->md_session_lock); info = diag_md_session_get_pid(pid); @@ -724,8 +745,12 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, mutex_unlock(&driver->md_session_lock); return -EINVAL; } - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { - if (i < (driver->msg_mask_tbl_count - 1)) { + msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { + if (!mask->ptr) + continue; + if (i < (msg_mask_tbl_count - 1)) { mask_next = mask; mask_next++; } else @@ -827,6 +852,7 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, struct diag_msg_mask_t *mask = NULL; struct diag_mask_info *mask_info = NULL; struct diag_md_session_t *info = NULL; + uint8_t msg_mask_tbl_count = 0; mutex_lock(&driver->md_session_lock); info = diag_md_session_get_pid(pid); @@ -861,9 +887,11 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, mutex_unlock(&driver->md_session_lock); return -EINVAL; } + msg_mask_tbl_count = (info) ? info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; mask_info->status = (req->rt_mask) ? 
DIAG_CTRL_MASK_ALL_ENABLED : DIAG_CTRL_MASK_ALL_DISABLED; - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { if (mask && mask->ptr) { mutex_lock(&mask->lock); memset(mask->ptr, req->rt_mask, @@ -1451,7 +1479,8 @@ static int diag_create_msg_mask_table(void) mutex_lock(&msg_mask.lock); mutex_lock(&driver->msg_mask_lock); driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT; - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + for (i = 0; (i < driver->msg_mask_tbl_count) && mask; + i++, mask++) { range.ssid_first = msg_mask_tbl[i].ssid_first; range.ssid_last = msg_mask_tbl[i].ssid_last; err = diag_create_msg_mask_table_entry(mask, &range); @@ -1476,7 +1505,8 @@ static int diag_create_build_time_mask(void) mutex_lock(&driver->msg_mask_lock); driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT; build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr; - for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) { + for (i = 0; (i < driver->bt_msg_mask_tbl_count) && build_mask; + i++, build_mask++) { range.ssid_first = msg_mask_tbl[i].ssid_first; range.ssid_last = msg_mask_tbl[i].ssid_last; err = diag_create_msg_mask_table_entry(build_mask, &range); @@ -1599,7 +1629,7 @@ static int diag_create_log_mask_table(void) mutex_lock(&log_mask.lock); mask = (struct diag_log_mask_t *)(log_mask.ptr); - for (i = 0; i < MAX_EQUIP_ID; i++, mask++) { + for (i = 0; (i < MAX_EQUIP_ID) && mask; i++, mask++) { mask->equip_id = i; mask->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]); mask->num_items_tools = mask->num_items; @@ -1643,7 +1673,6 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len, } kmemleak_not_leak(mask_info->update_buf); } - mutex_init(&mask_info->lock); return 0; } @@ -1667,9 +1696,10 @@ int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) struct diag_log_mask_t *src_mask = NULL; struct diag_log_mask_t *dest_mask = NULL; - if (!src) + if (!src || !dest) return -EINVAL; + mutex_init(&dest->lock); err = __diag_mask_init(dest, LOG_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; @@ -1732,9 +1762,11 @@ static int diag_msg_mask_init(void) int err = 0; int i; + mutex_init(&msg_mask.lock); err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; + err = diag_create_msg_mask_table(); if (err) { pr_err("diag: Unable to create msg masks, err: %d\n", err); @@ -1749,7 +1781,8 @@ static int diag_msg_mask_init(void) return 0; } -int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) +int diag_msg_mask_copy(struct diag_md_session_t *new_session, + struct diag_mask_info *dest, struct diag_mask_info *src) { int i; int err = 0; @@ -1760,17 +1793,25 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) if (!src || !dest) return -EINVAL; - err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE); - if (err) - return err; + mutex_init(&dest->lock); mutex_lock(&dest->lock); mutex_lock(&driver->msg_mask_lock); + new_session->msg_mask_tbl_count = + driver->msg_mask_tbl_count; + err = __diag_mask_init(dest, + (new_session->msg_mask_tbl_count * + sizeof(struct diag_msg_mask_t)), APPS_BUF_SIZE); + if (err) { + mutex_unlock(&driver->msg_mask_lock); + mutex_unlock(&dest->lock); + return err; + } src_mask = (struct diag_msg_mask_t *)src->ptr; dest_mask = (struct diag_msg_mask_t *)dest->ptr; dest->mask_len = src->mask_len; dest->status = src->status; - for (i = 0; i < driver->msg_mask_tbl_count; i++) { + for (i = 0; i < 
new_session->msg_mask_tbl_count; i++) { range.ssid_first = src_mask->ssid_first; range.ssid_last = src_mask->ssid_last; err = diag_create_msg_mask_table_entry(dest_mask, &range); @@ -1786,10 +1827,12 @@ int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src) return err; } -void diag_msg_mask_free(struct diag_mask_info *mask_info) +void diag_msg_mask_free(struct diag_mask_info *mask_info, + struct diag_md_session_t *session_info) { int i; struct diag_msg_mask_t *mask = NULL; + uint8_t msg_mask_tbl_count = 0; if (!mask_info || !mask_info->ptr) return; @@ -1803,7 +1846,10 @@ void diag_msg_mask_free(struct diag_mask_info *mask_info) mutex_unlock(&mask_info->lock); return; } - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + msg_mask_tbl_count = (session_info) ? + session_info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { kfree(mask->ptr); mask->ptr = NULL; } @@ -1834,6 +1880,7 @@ static int diag_build_time_mask_init(void) int err = 0; /* There is no need for update buffer for Build Time masks */ + mutex_init(&msg_bt_mask.lock); err = __diag_mask_init(&msg_bt_mask, MSG_MASK_SIZE, 0); if (err) return err; @@ -1867,6 +1914,7 @@ static int diag_log_mask_init(void) int err = 0; int i; + mutex_init(&log_mask.lock); err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; @@ -1901,6 +1949,7 @@ static int diag_event_mask_init(void) int err = 0; int i; + mutex_init(&event_mask.lock); err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; @@ -1922,6 +1971,7 @@ int diag_event_mask_copy(struct diag_mask_info *dest, if (!src || !dest) return -EINVAL; + mutex_init(&dest->lock); err = __diag_mask_init(dest, EVENT_MASK_SIZE, APPS_BUF_SIZE); if (err) return err; @@ -1961,6 +2011,7 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count, struct diag_mask_info *mask_info = NULL; struct diag_msg_mask_t *mask = NULL; unsigned char *ptr = NULL; + uint8_t msg_mask_tbl_count = 0; if (!buf || count == 0) return -EINVAL; @@ -1993,7 +2044,11 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count, mutex_unlock(&mask_info->lock); return -EINVAL; } - for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) { + msg_mask_tbl_count = (info) ? 
info->msg_mask_tbl_count : + driver->msg_mask_tbl_count; + for (i = 0; i < msg_mask_tbl_count; i++, mask++) { + if (!mask->ptr) + continue; ptr = mask_info->update_buf; len = 0; mutex_lock(&mask->lock); @@ -2068,6 +2123,8 @@ int diag_copy_to_user_log_mask(char __user *buf, size_t count, return -EINVAL; } for (i = 0; i < MAX_EQUIP_ID; i++, mask++) { + if (!mask->ptr) + continue; ptr = mask_info->update_buf; len = 0; mutex_lock(&mask->lock); diff --git a/drivers/char/diag/diag_masks.h b/drivers/char/diag/diag_masks.h index 6edeee954d74..a736ff269e8d 100644 --- a/drivers/char/diag/diag_masks.h +++ b/drivers/char/diag/diag_masks.h @@ -160,12 +160,13 @@ int diag_masks_init(void); void diag_masks_exit(void); int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src); -int diag_msg_mask_copy(struct diag_mask_info *dest, - struct diag_mask_info *src); +int diag_msg_mask_copy(struct diag_md_session_t *new_session, + struct diag_mask_info *dest, struct diag_mask_info *src); int diag_event_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src); void diag_log_mask_free(struct diag_mask_info *mask_info); -void diag_msg_mask_free(struct diag_mask_info *mask_info); +void diag_msg_mask_free(struct diag_mask_info *mask_info, + struct diag_md_session_t *session_info); void diag_event_mask_free(struct diag_mask_info *mask_info); int diag_process_apps_masks(unsigned char *buf, int len, int pid); void diag_send_updates_peripheral(uint8_t peripheral); diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index f265cc7f01b8..42c0d7ca94f7 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -406,6 +406,7 @@ struct diag_md_session_t { int pid; int peripheral_mask; uint8_t hdlc_disabled; + uint8_t msg_mask_tbl_count; struct timer_list hdlc_reset_timer; struct diag_mask_info *msg_mask; struct diag_mask_info *log_mask; diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index b686f5b79f7d..4a04fd58ed95 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -1255,7 +1255,8 @@ static void diag_md_session_exit(void) diag_log_mask_free(session_info->log_mask); kfree(session_info->log_mask); session_info->log_mask = NULL; - diag_msg_mask_free(session_info->msg_mask); + diag_msg_mask_free(session_info->msg_mask, + session_info); kfree(session_info->msg_mask); session_info->msg_mask = NULL; diag_event_mask_free(session_info->event_mask); @@ -1328,7 +1329,9 @@ int diag_md_session_create(int mode, int peripheral_mask, int proc) "return value of event copy. err %d\n", err); goto fail_peripheral; } - err = diag_msg_mask_copy(new_session->msg_mask, &msg_mask); + new_session->msg_mask_tbl_count = 0; + err = diag_msg_mask_copy(new_session, new_session->msg_mask, + &msg_mask); if (err) { DIAG_LOG(DIAG_DEBUG_USERSPACE, "return value of msg copy. 
err %d\n", err); @@ -1364,7 +1367,8 @@ int diag_md_session_create(int mode, int peripheral_mask, int proc) diag_event_mask_free(new_session->event_mask); kfree(new_session->event_mask); new_session->event_mask = NULL; - diag_msg_mask_free(new_session->msg_mask); + diag_msg_mask_free(new_session->msg_mask, + new_session); kfree(new_session->msg_mask); new_session->msg_mask = NULL; kfree(new_session); @@ -1392,7 +1396,8 @@ static void diag_md_session_close(int pid) diag_log_mask_free(session_info->log_mask); kfree(session_info->log_mask); session_info->log_mask = NULL; - diag_msg_mask_free(session_info->msg_mask); + diag_msg_mask_free(session_info->msg_mask, + session_info); kfree(session_info->msg_mask); session_info->msg_mask = NULL; diag_event_mask_free(session_info->event_mask); diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index e2b2343d988d..c8f3e8e9c08b 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -519,7 +519,7 @@ static void process_ssid_range_report(uint8_t *buf, uint32_t len, mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr; found = 0; for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) { - if (!mask_ptr || !ssid_range) { + if (!mask_ptr->ptr || !ssid_range) { found = 1; break; } @@ -591,7 +591,7 @@ static void diag_build_time_mask_update(uint8_t *buf, num_items = range->ssid_last - range->ssid_first + 1; for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) { - if (!build_mask) { + if (!build_mask->ptr) { found = 1; break; } From 56cf270b752118317481cd08bf340a7557cafb2c Mon Sep 17 00:00:00 2001 From: Mohammed Javid Date: Fri, 8 Jun 2018 16:55:32 +0530 Subject: [PATCH 033/151] msm: ipa3: Add mutex to prevent race condition There is a race condition between ipa3_nat_init_cmd and ipa_read_nat4. The two thread will R/W the critical global variables. This will result in race conditions and possibly buffer overread/ overwrite issues. Add code to prevent this race condition. 
Change-Id: I6bf9a837ae941cf3ad9413da6e44821916acf196 Acked-by: Pooja Kumari Signed-off-by: Mohammed Javid --- drivers/platform/msm/ipa/ipa_v2/ipa_nat.c | 11 +++++++++++ drivers/platform/msm/ipa/ipa_v3/ipa_nat.c | 10 ++++++++++ 2 files changed, 21 insertions(+) diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c index 1be68b31656b..56d699e3801c 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c @@ -344,6 +344,9 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("Detected overflow\n"); return -EPERM; } + + mutex_lock(&ipa_ctx->nat_mem.lock); + /* Check Table Entry offset is not beyond allocated size */ tmp = init->ipv4_rules_offset + @@ -353,6 +356,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n", init->ipv4_rules_offset, (init->table_entries + 1), tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); return -EPERM; } @@ -360,6 +364,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) if (init->expn_rules_offset > UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) { IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); return -EPERM; } /* Check Expn Table Entry offset is not @@ -371,6 +376,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n", init->expn_rules_offset, init->expn_table_entries, tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); return -EPERM; } @@ -378,6 +384,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) if (init->index_offset > UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) { IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); return -EPERM; } /* Check Indx Table Entry offset is not @@ -389,6 +396,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n", init->index_offset, (init->table_entries + 1), tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); return -EPERM; } @@ -396,6 +404,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) if (init->index_expn_offset > (UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries))) { IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa_ctx->nat_mem.lock); return -EPERM; } /* Check Expn Table entry offset is not @@ -407,6 +416,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n", init->index_expn_offset, init->expn_table_entries, tmp, ipa_ctx->nat_mem.size); + mutex_unlock(&ipa_ctx->nat_mem.lock); return -EPERM; } @@ -555,6 +565,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) free_nop: kfree(reg_write_nop); bail: + mutex_unlock(&ipa_ctx->nat_mem.lock); return result; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c index 92cd32d6488a..e11b4280400f 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c @@ -368,6 +368,8 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("Detected overflow\n"); return -EPERM; } + mutex_lock(&ipa3_ctx->nat_mem.lock); + /* Check Table Entry offset is not beyond allocated size */ tmp = init->ipv4_rules_offset + @@ -377,6 +379,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n", init->ipv4_rules_offset, 
(init->table_entries + 1), tmp, ipa3_ctx->nat_mem.size); + mutex_unlock(&ipa3_ctx->nat_mem.lock); return -EPERM; } @@ -384,6 +387,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) if (init->expn_rules_offset > (UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries))) { IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa3_ctx->nat_mem.lock); return -EPERM; } /* Check Expn Table Entry offset is not @@ -395,6 +399,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n", init->expn_rules_offset, init->expn_table_entries, tmp, ipa3_ctx->nat_mem.size); + mutex_unlock(&ipa3_ctx->nat_mem.lock); return -EPERM; } @@ -402,6 +407,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) if (init->index_offset > UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) { IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa3_ctx->nat_mem.lock); return -EPERM; } /* Check Indx Table Entry offset is not @@ -413,6 +419,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n", init->index_offset, (init->table_entries + 1), tmp, ipa3_ctx->nat_mem.size); + mutex_unlock(&ipa3_ctx->nat_mem.lock); return -EPERM; } @@ -420,6 +427,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) if (init->index_expn_offset > UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) { IPAERR_RL("Detected overflow\n"); + mutex_unlock(&ipa3_ctx->nat_mem.lock); return -EPERM; } /* Check Expn Table entry offset is not @@ -431,6 +439,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n", init->index_expn_offset, init->expn_table_entries, tmp, ipa3_ctx->nat_mem.size); + mutex_unlock(&ipa3_ctx->nat_mem.lock); return -EPERM; } @@ -580,6 +589,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init) free_nop: ipahal_destroy_imm_cmd(nop_cmd_pyld); bail: + mutex_unlock(&ipa3_ctx->nat_mem.lock); return result; } From a8434bcb8db69d62ef9f3f4b33feb99a59e3e8cb Mon Sep 17 00:00:00 2001 From: Mohammed Javid Date: Mon, 27 Aug 2018 15:32:35 +0530 Subject: [PATCH 034/151] msm:ipa: Prevent NAT table deletion only if public ip is not assigned Currnetly NAT table is not deleted even if public ip is assigned to NAT table. Add check to prevent deletion only if public ip is not assigned. Change-Id: I4855b21472d3f6bf541d07733b18592e9e677ce6 Acked-by: Pooja Kumari Signed-off-by: Mohammed Javid --- drivers/platform/msm/ipa/ipa_v2/ipa_nat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c index 56d699e3801c..dda4c0f36be4 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c @@ -784,7 +784,7 @@ int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del) return -EPERM; } - if (ipa_ctx->nat_mem.public_ip_addr) { + if (!ipa_ctx->nat_mem.public_ip_addr) { IPAERR_RL("Public IP addr not assigned and trying to delete\n"); return -EPERM; } From 41934b3bc98e535a55721e262bc5791e4199814c Mon Sep 17 00:00:00 2001 From: Gustavo Solaira Date: Thu, 30 Aug 2018 15:46:57 -0700 Subject: [PATCH 035/151] ARM: dts: msm: Invert interrupt polarity for K61 on mdm9650 CV2X Invert the interrupt to be on rising edge for K61 since the polarity has been reversed on the MCU firmware. 
Change-Id: Ibf75de531d8226731c8d49c39a36e559a155554b Signed-off-by: Gustavo Solaira --- arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi b/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi index 037ac98cdbd9..f0ee8b75c205 100644 --- a/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi +++ b/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi @@ -106,7 +106,7 @@ spi-max-frequency = <4800000>; reg = <0>; interrupt-parent = <&tlmm_pinmux>; - interrupts = <68 0>; + interrupts = <68 IRQ_TYPE_EDGE_RISING>; reset-gpio = <&tlmm_pinmux 89 GPIO_ACTIVE_LOW>; pinctrl-names = "active", "sleep"; pinctrl-0 = <&can_rst_on>; From d92b32df4979196d6ded6c130617b399a4f1ad36 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 19 Feb 2018 01:24:15 +0100 Subject: [PATCH 036/151] netfilter: ebtables: CONFIG_COMPAT: don't trust userland offsets We need to make sure the offsets are not out of range of the total size. Also check that they are in ascending order. The WARN_ON triggered by syzkaller (it sets panic_on_warn) is changed to also bail out, no point in continuing parsing. Briefly tested with simple ruleset of -A INPUT --limit 1/s' --log plus jump to custom chains using 32bit ebtables binary. Change-Id: Ic1b91b00521fb550f1774b916aa5b53c91940ed0 Reported-by: Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Git-commit: b71812168571fa55e44cdd0254471331b9c4c4c6 Signed-off-by: Dennis Cagle --- net/bridge/netfilter/ebtables.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index d9a8c05d995d..653d72979ee1 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -2019,7 +2019,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, if (match_kern) match_kern->match_size = ret; - WARN_ON(type == EBT_COMPAT_TARGET && size_left); + if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) + return -EINVAL; + match32 = (struct compat_ebt_entry_mwt *) buf; } @@ -2076,6 +2078,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, * * offsets are relative to beginning of struct ebt_entry (i.e., 0). */ + for (i = 0; i < 4 ; ++i) { + if (offsets[i] >= *total) + return -EINVAL; + if (i == 0) + continue; + if (offsets[i-1] > offsets[i]) + return -EINVAL; + } + for (i = 0, j = 1 ; j < 4 ; j++, i++) { struct compat_ebt_entry_mwt *match32; unsigned int size; From fbdb8ff26f0d2162a917ad249943c48dbfecff61 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 19 Feb 2018 01:24:15 +0100 Subject: [PATCH 037/151] netfilter: ebtables: CONFIG_COMPAT: don't trust userland offsets We need to make sure the offsets are not out of range of the total size. Also check that they are in ascending order. The WARN_ON triggered by syzkaller (it sets panic_on_warn) is changed to also bail out, no point in continuing parsing. Briefly tested with simple ruleset of -A INPUT --limit 1/s' --log plus jump to custom chains using 32bit ebtables binary. 
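As a standalone illustration (not part of this patch), the added sanity check can be written as a small helper with a hypothetical name: every offset must lie inside the total entry size, and the sequence must not decrease.

/* Illustrative sketch only, mirroring the loop added to size_entry_mwt(). */
#include <stdbool.h>

static bool offsets_sane(const unsigned int offsets[4], unsigned int total)
{
        int i;

        for (i = 0; i < 4; i++) {
                if (offsets[i] >= total)        /* must stay inside the entry */
                        return false;
                if (i > 0 && offsets[i - 1] > offsets[i])
                        return false;           /* must be ascending or equal */
        }
        return true;
}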
Change-Id: Ic1b91b00521fb550f1774b916aa5b53c91940ed0 Reported-by: Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Git-commit: b71812168571fa55e44cdd0254471331b9c4c4c6 Signed-off-by: Dennis Cagle --- net/bridge/netfilter/ebtables.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index d9a8c05d995d..653d72979ee1 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -2019,7 +2019,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, if (match_kern) match_kern->match_size = ret; - WARN_ON(type == EBT_COMPAT_TARGET && size_left); + if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) + return -EINVAL; + match32 = (struct compat_ebt_entry_mwt *) buf; } @@ -2076,6 +2078,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, * * offsets are relative to beginning of struct ebt_entry (i.e., 0). */ + for (i = 0; i < 4 ; ++i) { + if (offsets[i] >= *total) + return -EINVAL; + if (i == 0) + continue; + if (offsets[i-1] > offsets[i]) + return -EINVAL; + } + for (i = 0, j = 1 ; j < 4 ; j++, i++) { struct compat_ebt_entry_mwt *match32; unsigned int size; From a8725b82e41c9dc0fb6fccd1039e1c964da9d408 Mon Sep 17 00:00:00 2001 From: Aditya Bavanari Date: Tue, 19 Jun 2018 17:50:52 +0530 Subject: [PATCH 038/151] ASoC: msm: qdsp6v2: Fix rtac memory unmap issue in ASM driver During unmap of rtac block in ASM, mem_map_handle address is set to zero instead of the value. Set the map handle value to zero to avoid issue in freeing the ion memory. CRs-Fixed: 2254339 Change-Id: I6584be029d4c8dde235e722149c758df0db9916e Signed-off-by: Aditya Bavanari --- sound/soc/msm/qdsp6v2/q6asm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c index c986db20c2d1..6718a3c39de1 100644 --- a/sound/soc/msm/qdsp6v2/q6asm.c +++ b/sound/soc/msm/qdsp6v2/q6asm.c @@ -900,7 +900,7 @@ int q6asm_unmap_rtac_block(uint32_t *mem_map_handle) __func__, result2); result = result2; } else { - mem_map_handle = 0; + *mem_map_handle = 0; } result2 = q6asm_mmap_apr_dereg(); From 1d5ccca47c3bd55070747795f8ef77daa69e8def Mon Sep 17 00:00:00 2001 From: Kaustubh Pandey Date: Tue, 12 Jun 2018 10:09:53 +0530 Subject: [PATCH 039/151] net: core: null pointer derefernce in sockev_client_cb sockev_client_cb creates a netlink message and populates the nlmsg_data using the socket->sock information. If socket is closed, while the nlmsg_data is being populated, a null pointer dereference occurs. 
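A simplified sketch of the defensive pattern applied below (hypothetical types, not part of this patch): read sock->sk once into a local pointer, bail out if it is already NULL, and use only the local copy afterwards.

/* Illustrative sketch only. */
struct sock   { int sk_family; };
struct socket { struct sock *sk; };

static int client_cb(struct socket *sock)
{
        struct sock *sk;

        if (!sock)
                return 0;

        sk = sock->sk;          /* single snapshot of the pointer */
        if (!sk)
                return 0;       /* socket already closed, nothing to report */

        /* ... build the netlink message from sk->..., never from sock->sk ... */
        return 0;
}

The KASAN report below shows the crash this avoids.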
BUG: KASAN: null-ptr-deref in sockev_client_cb+0x1e4/0x310 Read of size 2 at addr 0000000000000010 by task syz-executor/9398 CPU: 6 PID: 9398 Comm: syz-executor Tainted: G W O 4.9.92+ #1 Call trace: [] sockev_client_cb+0x1e4/0x310 [] notifier_call_chain+0x94/0xe0 [] __blocking_notifier_call_chain+0x6c/0xb8 [] blocking_notifier_call_chain+0x40/0x50 [] sockev_notify net/socket.c:180 [inline] [] SYSC_listen net/socket.c:1446 [inline] [] SyS_listen+0x1e0/0x1f8 [] el0_svc_naked+0x24/0x28 CR's Fixed: 2251042 Change-Id: Iad9eb58cd05fcdc0b5cc1ed24de56b69abb532b4 Signed-off-by: Sharath Chandra Vurukala Signed-off-by: Tejaswi Tanikella Signed-off-by: Kaustubh Pandey Acked-by: Chinmay Agarwal --- net/core/sockev_nlmcast.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c index 22148bf76e0a..1e92c5632b97 100644 --- a/net/core/sockev_nlmcast.c +++ b/net/core/sockev_nlmcast.c @@ -69,14 +69,17 @@ static int sockev_client_cb(struct notifier_block *nb, struct nlmsghdr *nlh; struct sknlsockevmsg *smsg; struct socket *sock; + struct sock *sk; sock = (struct socket *)data; - if (socknlmsgsk == 0) + if (!socknlmsgsk || !sock) goto done; - if ((socknlmsgsk == NULL) || (sock == NULL) || (sock->sk == NULL)) + + sk = sock->sk; + if (!sk) goto done; - if (sock->sk->sk_family != AF_INET && sock->sk->sk_family != AF_INET6) + if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6) goto done; if (event != SOCKEV_BIND && event != SOCKEV_LISTEN) @@ -98,12 +101,11 @@ static int sockev_client_cb(struct notifier_block *nb, memset(smsg, 0, sizeof(struct sknlsockevmsg)); smsg->pid = current->pid; _sockev_event(event, smsg->event, sizeof(smsg->event)); - smsg->skfamily = sock->sk->sk_family; - smsg->skstate = sock->sk->sk_state; - smsg->skprotocol = sock->sk->sk_protocol; - smsg->sktype = sock->sk->sk_type; - smsg->skflags = sock->sk->sk_flags; - + smsg->skfamily = sk->sk_family; + smsg->skstate = sk->sk_state; + smsg->skprotocol = sk->sk_protocol; + smsg->sktype = sk->sk_type; + smsg->skflags = sk->sk_flags; nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL); done: return 0; From 2a4d2c1bc2d6e0fddf0cb32b9fb727692ae0a592 Mon Sep 17 00:00:00 2001 From: Mohammed Javid Date: Mon, 6 Aug 2018 12:58:30 +0530 Subject: [PATCH 040/151] msm: ipa: Validate routing rule id IPA driver expose routing rule id IOCTL's to user space. There is a chance of getting invalid routing rule-id. Validate it before committing it to IPA hardware. Change-Id: If80b94d3a055f9212d25aff9a57d1b45001ba586 Signed-off-by: Mohammed Javid --- drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 6 +++--- drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 4 ++++ drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 16 ++++++++++++++++ 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c index 3ba84463682d..cb9416e3ad8a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1099,8 +1099,8 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule, } if (rule->rule_id) { - if (rule->rule_id >= IPA_RULE_ID_MIN_VAL && - rule->rule_id <= IPA_RULE_ID_MAX_VAL) { + if (rule->rule_id < IPA_RULE_ID_MIN || + rule->rule_id >= IPA_RULE_ID_MAX) { IPAERR("invalid rule_id provided 0x%x\n" "rule_id 0x%x - 0x%x are auto generated\n", rule->rule_id, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 61e8d8a3dea1..7230e7a7a0d2 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -207,6 +207,10 @@ #define IPA_RULE_ID_MAX_VAL (0x1FF) #define IPA_RULE_ID_RULE_MISS (0x3FF) +#define IPA_RULE_ID_MIN (0x200) +#define IPA_RULE_ID_MAX ((IPA_RULE_ID_MIN << 1) - 1) + + #define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8 #define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \ (((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index 30a1546dcdb7..0ee3a37e3d9f 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -1084,6 +1084,20 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry) return 0; } +static int __ipa_rt_validate_rule_id(u16 rule_id) +{ + if (!rule_id) + return 0; + + if ((rule_id < IPA_RULE_ID_MIN) || + (rule_id >= IPA_RULE_ID_MAX)) { + IPAERR_RL("Invalid rule_id provided 0x%x\n", + rule_id); + return -EPERM; + } + + return 0; +} static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule, struct ipa3_hdr_entry **hdr, struct ipa3_hdr_proc_ctx_entry **proc_ctx) @@ -1198,6 +1212,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx)) goto error; + if (__ipa_rt_validate_rule_id(rule_id)) + goto error; tbl = __ipa_add_rt_tbl(ip, name); if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { From 7cde623ae9bbcb43b92f108090d224571411fe75 Mon Sep 17 00:00:00 2001 From: Venkataraman Nerellapalli Date: Thu, 28 Jun 2018 19:34:15 +0530 Subject: [PATCH 041/151] ARM: dts: msm: Enable use-default-batt-values for apq8009-dragon Enable use-default-batt-values to return the default battery temperature which is 250. Change-Id: I4c3b3180fb1135a46db6f03c1e52395ea76329c2 Signed-off-by: c_vnerel --- arch/arm/boot/dts/qcom/apq8009-dragon.dtsi | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/arm/boot/dts/qcom/apq8009-dragon.dtsi b/arch/arm/boot/dts/qcom/apq8009-dragon.dtsi index e1bf02e7b5d2..7588da210b5a 100644 --- a/arch/arm/boot/dts/qcom/apq8009-dragon.dtsi +++ b/arch/arm/boot/dts/qcom/apq8009-dragon.dtsi @@ -245,3 +245,10 @@ status = "ok"; qcom,disable-bms; }; + +&pm8916_chg { + status = "ok"; + qcom,charging-disabled; + qcom,use-default-batt-values; +}; + From c30a6d5a94ce811a2e4a474cfb586ff5a06f6d70 Mon Sep 17 00:00:00 2001 From: Gustavo Solaira Date: Thu, 6 Sep 2018 16:04:39 -0700 Subject: [PATCH 042/151] ARM: dts: msm: Enable reset via PM_RESIN_N for mdm9650 CV2X Enable PMIC stage 2 reset for mdm9650 CV2X when the reset line is held low for 2 seconds. This is used by the PCIe RC host to force reset the MDM if it crashes or becomes unresponsive. 
Change-Id: Ie99cb0842b895bf474dd7dab8f78811834820ee0 Signed-off-by: Gustavo Solaira --- arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi b/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi index f0ee8b75c205..034e83ad5953 100644 --- a/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi +++ b/arch/arm/boot/dts/qcom/mdm9650-cv2x.dtsi @@ -330,3 +330,18 @@ qcom,vadc-thermal-node; }; }; + +&pmd9650_pon { + interrupts = <0x0 0x8 0x0>, <0x0 0x8 0x1>; + interrupt-names = "kpdpwr", "resin"; + qcom,s3-src = "resin"; + + qcom,pon_2 { + qcom,pon-type = <1>; + qcom,support-reset = <1>; + qcom,s1-timer = <0>; + qcom,s2-timer = <2000>; + qcom,s2-type = <7>; + qcom,pull-up = <1>; + }; +}; From 511af0973209195cc64271f6e77009741a351ae7 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Thu, 19 Jul 2018 17:10:39 +0530 Subject: [PATCH 043/151] msm: kgsl: Add a property to find alignment of secure buffers Add a property to determine the hardware alignment constraint on secure buffers. XPUv2 and below have a minimum requirement of 1 MBytes alignment and hence driver should allocate memory with minimum alignment on size. Change-Id: Ie3ca5da489bc94ae57ddc6695e402463fd7a88c2 Signed-off-by: Sunil Khatri --- drivers/gpu/msm/kgsl.c | 22 ++++++++++++++++++++++ include/uapi/linux/msm_kgsl.h | 1 + 2 files changed, 23 insertions(+) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 510a5ef1d49d..c28009a33155 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -1348,6 +1348,28 @@ long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv, kgsl_context_put(context); break; } + case KGSL_PROP_SECURE_BUFFER_ALIGNMENT: + { + unsigned int align; + + if (param->sizebytes != sizeof(unsigned int)) { + result = -EINVAL; + break; + } + /* + * XPUv2 impose the constraint of 1MB memory alignment, + * on the other hand Hypervisor does not have such + * constraints. So driver should fulfill such + * requirements when allocating secure memory. + */ + align = MMU_FEATURE(&dev_priv->device->mmu, + KGSL_MMU_HYP_SECURE_ALLOC) ? PAGE_SIZE : SZ_1M; + + if (copy_to_user(param->value, &align, sizeof(align))) + result = -EFAULT; + + break; + } default: if (is_compat_task()) result = dev_priv->device->ftbl->getproperty_compat( diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h index a7fe50f13b1b..c8ca62174f49 100644 --- a/include/uapi/linux/msm_kgsl.h +++ b/include/uapi/linux/msm_kgsl.h @@ -311,6 +311,7 @@ enum kgsl_timestamp_type { #define KGSL_PROP_HIGHEST_BANK_BIT 0x17 #define KGSL_PROP_DEVICE_BITNESS 0x18 #define KGSL_PROP_DEVICE_QDSS_STM 0x19 +#define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23 struct kgsl_shadowprop { unsigned long gpuaddr; From f9b13f4d437951064a029842b032d4e49fdd6849 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Fri, 10 Aug 2018 11:46:58 +0530 Subject: [PATCH 044/151] msm: kgsl: Add a property to find if secure context is supported Add a property to determine if a target support secure context for use cases like CPZ. This property can be used by userspace application to create a secure context if its supported on the target. 
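A hypothetical user-space query of the two new properties could look like the sketch below; the /dev/kgsl-3d0 node name and the kgsl_device_getproperty ioctl layout are assumptions taken from the existing msm_kgsl UAPI rather than something introduced by this patch.

/* Illustrative sketch only; error handling abbreviated. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>

int main(void)
{
        unsigned int align = 0, secure_ctxt = 0;
        struct kgsl_device_getproperty prop;
        int fd = open("/dev/kgsl-3d0", O_RDWR);

        if (fd < 0)
                return 1;

        prop.type = KGSL_PROP_SECURE_BUFFER_ALIGNMENT;
        prop.value = &align;
        prop.sizebytes = sizeof(align);
        if (!ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop))
                printf("secure buffer alignment: %u bytes\n", align);

        prop.type = KGSL_PROP_SECURE_CTXT_SUPPORT;
        prop.value = &secure_ctxt;
        prop.sizebytes = sizeof(secure_ctxt);
        if (!ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop))
                printf("secure context supported: %u\n", secure_ctxt);

        return 0;
}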
Change-Id: I1ccc824378fb8fbd2cfbc7b811c6c3fdcd17803e Signed-off-by: Sunil Khatri --- drivers/gpu/msm/kgsl.c | 17 +++++++++++++++++ include/uapi/linux/msm_kgsl.h | 1 + 2 files changed, 18 insertions(+) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index c28009a33155..3d31622154d1 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -1370,6 +1370,23 @@ long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv, break; } + case KGSL_PROP_SECURE_CTXT_SUPPORT: + { + unsigned int secure_ctxt; + + if (param->sizebytes != sizeof(unsigned int)) { + result = -EINVAL; + break; + } + + secure_ctxt = dev_priv->device->mmu.secured ? 1 : 0; + + if (copy_to_user(param->value, &secure_ctxt, + sizeof(secure_ctxt))) + result = -EFAULT; + + break; + } default: if (is_compat_task()) result = dev_priv->device->ftbl->getproperty_compat( diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h index c8ca62174f49..bc1624659396 100644 --- a/include/uapi/linux/msm_kgsl.h +++ b/include/uapi/linux/msm_kgsl.h @@ -312,6 +312,7 @@ enum kgsl_timestamp_type { #define KGSL_PROP_DEVICE_BITNESS 0x18 #define KGSL_PROP_DEVICE_QDSS_STM 0x19 #define KGSL_PROP_SECURE_BUFFER_ALIGNMENT 0x23 +#define KGSL_PROP_SECURE_CTXT_SUPPORT 0x24 struct kgsl_shadowprop { unsigned long gpuaddr; From 2f9e79e2c32a44104c24e6c58689f053608879f7 Mon Sep 17 00:00:00 2001 From: Ch Ganesh Kumar Date: Mon, 20 Aug 2018 13:35:37 +0530 Subject: [PATCH 045/151] msm: mdss: Fix Gamma LUT bounds condition Validate the Gamma correction feature with all bound condition. This change corrects the Gamma LUT block bound condition. Change-Id: I3fc460b6a6e2e76f7c07b649e1db1e01ce208476 Signed-off-by: Ch Ganesh Kumar --- drivers/video/msm/mdss/mdss_compat_utils.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/video/msm/mdss/mdss_compat_utils.c b/drivers/video/msm/mdss/mdss_compat_utils.c index 7a93d292a6da..b54ef434988b 100644 --- a/drivers/video/msm/mdss/mdss_compat_utils.c +++ b/drivers/video/msm/mdss/mdss_compat_utils.c @@ -1337,10 +1337,10 @@ static int __from_user_pgc_lut_data_legacy( return -EFAULT; if (num_r_stages > GC_LUT_SEGMENTS || num_b_stages > GC_LUT_SEGMENTS - || num_r_stages > GC_LUT_SEGMENTS || !num_r_stages || !num_b_stages + || num_g_stages > GC_LUT_SEGMENTS || !num_r_stages || !num_b_stages || !num_g_stages) { pr_err("invalid number of stages r_stages %d b_stages %d g_stages %d\n", - num_r_stages, num_b_stages, num_r_stages); + num_r_stages, num_b_stages, num_g_stages); return -EFAULT; } From ccdf226017148d7beb0b200d600046deff7b55a0 Mon Sep 17 00:00:00 2001 From: Yunfei Zhang Date: Thu, 21 Jun 2018 12:13:25 +0800 Subject: [PATCH 046/151] ASoC: msm: qdsp6v2: use correct stream id of next session Gapless playback can't work because incorrect stream id was used. Fix it by using correct stream id of next session. 
Change-Id: I938d62d0d563b9c5940ea88c96d9c256595a9d3c Signed-off-by: Yunfei Zhang --- sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c index b7d0b663f890..1251f7a7bbd0 100644 --- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c @@ -2388,13 +2388,13 @@ static int msm_compr_trigger(struct snd_compr_stream *cstream, int cmd) case (Q6_SUBSYS_AVS2_7): rc = q6asm_stream_open_write_v3(ac, prtd->codec, bits_per_sample, - ac->stream_id, + stream_id, prtd->gapless_state.use_dsp_gapless_mode); break; case (Q6_SUBSYS_AVS2_8): rc = q6asm_stream_open_write_v4(ac, prtd->codec, bits_per_sample, - ac->stream_id, + stream_id, prtd->gapless_state.use_dsp_gapless_mode); break; case (Q6_SUBSYS_INVALID): From 0b7ebfb97ed1c214b7b83bd17f08d34c0f990946 Mon Sep 17 00:00:00 2001 From: Ronnie Sahlberg Date: Wed, 22 Aug 2018 12:19:24 +1000 Subject: [PATCH 047/151] cifs: check if SMB2 PDU size has been padded and suppress the warning [ Upstream commit e6c47dd0da1e3a484e778046fc10da0b20606a86 ] Some SMB2/3 servers, Win2016 but possibly others too, adds padding not only between PDUs in a compound but also to the final PDU. This padding extends the PDU to a multiple of 8 bytes. Check if the unexpected length looks like this might be the case and avoid triggering the log messages for : "SMB2 server sent bad RFC1001 len %d not %d\n" Signed-off-by: Ronnie Sahlberg Signed-off-by: Steve French Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb2misc.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index b35c1398d459..494438195a1d 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -182,6 +182,13 @@ smb2_check_message(char *buf, unsigned int length) if (clc_len == 4 + len + 1) return 0; + /* + * Some windows servers (win2016) will pad also the final + * PDU in a compound to 8 bytes. + */ + if (((clc_len + 7) & ~7) == len) + return 0; + /* * MacOS server pads after SMB2.1 write response with 3 bytes * of junk. Other servers match RFC1001 len to actual From 8224be03ade3f5c17ed2533f26eaf4ef41312425 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Tue, 21 Aug 2018 21:59:12 -0700 Subject: [PATCH 048/151] hfsplus: don't return 0 when fill_super() failed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 7464726cb5998846306ed0a7d6714afb2e37b25d ] syzbot is reporting NULL pointer dereference at mount_fs() [1]. This is because hfsplus_fill_super() is by error returning 0 when hfsplus_fill_super() detected invalid filesystem image, and mount_bdev() is returning NULL because dget(s->s_root) == NULL if s->s_root == NULL, and mount_fs() is accessing root->d_sb because IS_ERR(root) == false if root == NULL. Fix this by returning -EINVAL when hfsplus_fill_super() detected invalid filesystem image. [1] https://syzkaller.appspot.com/bug?id=21acb6850cecbc960c927229e597158cf35f33d0 Link: http://lkml.kernel.org/r/d83ce31a-874c-dd5b-f790-41405983a5be@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa Reported-by: syzbot Reviewed-by: Ernesto A. 
Fernández Reviewed-by: Andrew Morton Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/hfsplus/super.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 2de895fecfe6..f26b5dab708a 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -518,8 +518,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str); if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { hfs_find_exit(&fd); - if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) + if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) { + err = -EINVAL; goto out_put_root; + } inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id)); if (IS_ERR(inode)) { err = PTR_ERR(inode); From 5bbef604bfefc84feb7358df53e1e1a3e1866166 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ernesto=20A=2E=20Fern=C3=A1ndez?= Date: Thu, 23 Aug 2018 17:00:31 -0700 Subject: [PATCH 049/151] hfs: prevent crash on exit from failed search MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit dc2572791d3a41bab94400af2b6bca9d71ccd303 ] hfs_find_exit() expects fd->bnode to be NULL after a search has failed. hfs_brec_insert() may instead set it to an error-valued pointer. Fix this to prevent a crash. Link: http://lkml.kernel.org/r/53d9749a029c41b4016c495fc5838c9dba3afc52.1530294815.git.ernesto.mnd.fernandez@gmail.com Signed-off-by: Ernesto A. Fernández Cc: Anatoly Trosinenko Cc: Viacheslav Dubeyko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/hfs/brec.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c index 6fc766df0461..2a6f3c67cb3f 100644 --- a/fs/hfs/brec.c +++ b/fs/hfs/brec.c @@ -74,9 +74,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len) if (!fd->bnode) { if (!tree->root) hfs_btree_inc_height(tree); - fd->bnode = hfs_bnode_find(tree, tree->leaf_head); - if (IS_ERR(fd->bnode)) - return PTR_ERR(fd->bnode); + node = hfs_bnode_find(tree, tree->leaf_head); + if (IS_ERR(node)) + return PTR_ERR(node); + fd->bnode = node; fd->record = -1; } new_node = NULL; From 5d1346a118342d26e3b9ae03ee7f3b1bbf734b42 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Tue, 21 Aug 2018 22:00:58 -0700 Subject: [PATCH 050/151] fork: don't copy inconsistent signal handler state to child [ Upstream commit 06e62a46bbba20aa5286102016a04214bb446141 ] Before this change, if a multithreaded process forks while one of its threads is changing a signal handler using sigaction(), the memcpy() in copy_sighand() can race with the struct assignment in do_sigaction(). It isn't clear whether this can cause corruption of the userspace signal handler pointer, but it definitely can cause inconsistency between different fields of struct sigaction. Take the appropriate spinlock to avoid this. I have tested that this patch prevents inconsistency between sa_sigaction and sa_flags, which is possible before this patch. 
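A user-space analogue of the rule being enforced (hypothetical types, not part of this patch): whoever copies state that another thread updates under a lock must take that same lock for the copy, or the copy can mix old and new fields.

/* Illustrative sketch only; the mutex stands in for siglock. */
#include <pthread.h>
#include <string.h>

struct handler_state {
        pthread_mutex_t lock;
        struct { void *fn; unsigned long flags; } action[64];
};

static void copy_state(struct handler_state *dst, struct handler_state *src)
{
        pthread_mutex_lock(&src->lock);
        memcpy(dst->action, src->action, sizeof(dst->action));
        pthread_mutex_unlock(&src->lock);
}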
Link: http://lkml.kernel.org/r/20180702145108.73189-1-jannh@google.com Signed-off-by: Jann Horn Acked-by: Michal Hocko Reviewed-by: Andrew Morton Cc: Rik van Riel Cc: "Peter Zijlstra (Intel)" Cc: Kees Cook Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/fork.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/fork.c b/kernel/fork.c index f45e647f8e55..95338c74b361 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1014,7 +1014,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) if (!sig) return -ENOMEM; atomic_set(&sig->count, 1); + spin_lock_irq(¤t->sighand->siglock); memcpy(sig->action, current->sighand->action, sizeof(sig->action)); + spin_unlock_irq(¤t->sighand->siglock); return 0; } From c0aa3622ac3578163722733c018cb59267465c38 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 21 Aug 2018 21:59:34 -0700 Subject: [PATCH 051/151] reiserfs: change j_timestamp type to time64_t [ Upstream commit 8b73ce6a4bae4fe12bcb2c361c0da4183c2e1b6f ] This uses the deprecated time_t type but is write-only, and could be removed, but as Jeff explains, having a timestamp can be usefule for post-mortem analysis in crash dumps. In order to remove one of the last instances of time_t, this changes the type to time64_t, same as j_trans_start_time. Link: http://lkml.kernel.org/r/20180622133315.221210-1-arnd@arndb.de Signed-off-by: Arnd Bergmann Cc: Jan Kara Cc: Jeff Mahoney Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/reiserfs/reiserfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index 8fb8107710f7..6d03c1d87316 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -266,7 +266,7 @@ struct reiserfs_journal_list { struct mutex j_commit_mutex; unsigned int j_trans_id; - time_t j_timestamp; + time64_t j_timestamp; /* write-only but useful for crash dump analysis */ struct reiserfs_list_bitmap *j_list_bitmap; struct buffer_head *j_commit_bh; /* commit buffer head */ struct reiserfs_journal_cnode *j_realblock; From 35eb26e561e500e85562e3df773ee021fa31281b Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Tue, 21 Aug 2018 21:59:44 -0700 Subject: [PATCH 052/151] fat: validate ->i_start before using [ Upstream commit 0afa9626667c3659ef8bd82d42a11e39fedf235c ] On corrupted FATfs may have invalid ->i_start. To handle it, this checks ->i_start before using, and return proper error code. 
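The check added as fat_valid_entry() amounts to a half-open range test; as an aside (not part of this patch), clusters 0 and 1 are reserved on FAT, so a usable entry must satisfy FAT_START_ENT <= entry < max_cluster.

/* Illustrative standalone version of the new helper. */
#include <stdbool.h>

#define FAT_START_ENT 2

static bool valid_entry(int entry, int max_cluster)
{
        return entry >= FAT_START_ENT && entry < max_cluster;
}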
Link: http://lkml.kernel.org/r/87o9f8y1t5.fsf_-_@mail.parknet.co.jp Signed-off-by: OGAWA Hirofumi Reported-by: Anatoly Trosinenko Tested-by: Anatoly Trosinenko Cc: Alan Cox Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/fat/cache.c | 19 ++++++++++++------- fs/fat/fat.h | 5 +++++ fs/fat/fatent.c | 6 +++--- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/fs/fat/cache.c b/fs/fat/cache.c index 91ad9e1c9441..4c634b365516 100644 --- a/fs/fat/cache.c +++ b/fs/fat/cache.c @@ -226,7 +226,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus) int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) { struct super_block *sb = inode->i_sb; - const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits; + struct msdos_sb_info *sbi = MSDOS_SB(sb); + const int limit = sb->s_maxbytes >> sbi->cluster_bits; struct fat_entry fatent; struct fat_cache_id cid; int nr; @@ -235,6 +236,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) *fclus = 0; *dclus = MSDOS_I(inode)->i_start; + if (!fat_valid_entry(sbi, *dclus)) { + fat_fs_error_ratelimit(sb, + "%s: invalid start cluster (i_pos %lld, start %08x)", + __func__, MSDOS_I(inode)->i_pos, *dclus); + return -EIO; + } if (cluster == 0) return 0; @@ -251,9 +258,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) /* prevent the infinite loop of cluster chain */ if (*fclus > limit) { fat_fs_error_ratelimit(sb, - "%s: detected the cluster chain loop" - " (i_pos %lld)", __func__, - MSDOS_I(inode)->i_pos); + "%s: detected the cluster chain loop (i_pos %lld)", + __func__, MSDOS_I(inode)->i_pos); nr = -EIO; goto out; } @@ -263,9 +269,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus) goto out; else if (nr == FAT_ENT_FREE) { fat_fs_error_ratelimit(sb, - "%s: invalid cluster chain (i_pos %lld)", - __func__, - MSDOS_I(inode)->i_pos); + "%s: invalid cluster chain (i_pos %lld)", + __func__, MSDOS_I(inode)->i_pos); nr = -EIO; goto out; } else if (nr == FAT_ENT_EOF) { diff --git a/fs/fat/fat.h b/fs/fat/fat.h index e0c4ba39a377..add94f15d5be 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -347,6 +347,11 @@ static inline void fatent_brelse(struct fat_entry *fatent) fatent->fat_inode = NULL; } +static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry) +{ + return FAT_START_ENT <= entry && entry < sbi->max_cluster; +} + extern void fat_ent_access_init(struct super_block *sb); extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry); diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index 260705c58062..08b3db146888 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -26,7 +26,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry, { struct msdos_sb_info *sbi = MSDOS_SB(sb); int bytes = entry + (entry >> 1); - WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); + WARN_ON(!fat_valid_entry(sbi, entry)); *offset = bytes & (sb->s_blocksize - 1); *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); } @@ -36,7 +36,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry, { struct msdos_sb_info *sbi = MSDOS_SB(sb); int bytes = (entry << sbi->fatent_shift); - WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry); + WARN_ON(!fat_valid_entry(sbi, entry)); *offset = bytes & (sb->s_blocksize - 1); *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits); } @@ 
-356,7 +356,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry) int err, offset; sector_t blocknr; - if (entry < FAT_START_ENT || sbi->max_cluster <= entry) { + if (!fat_valid_entry(sbi, entry)) { fatent_brelse(fatent); fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry); return -EIO; From dd36e88857c85edee2dd9c16bfbf3bdd6827c4ef Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Aug 2018 12:30:38 -0700 Subject: [PATCH 053/151] scripts: modpost: check memory allocation results [ Upstream commit 1f3aa9002dc6a0d59a4b599b4fc8f01cf43ef014 ] Fix missing error check for memory allocation functions in scripts/mod/modpost.c. Fixes kernel bugzilla #200319: https://bugzilla.kernel.org/show_bug.cgi?id=200319 Signed-off-by: Randy Dunlap Cc: Yuexing Wang Signed-off-by: Masahiro Yamada Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- scripts/mod/modpost.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index a8a54640ba5f..9c06b5d62e90 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -647,7 +647,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info, if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER) break; if (symname[0] == '.') { - char *munged = strdup(symname); + char *munged = NOFAIL(strdup(symname)); munged[0] = '_'; munged[1] = toupper(munged[1]); symname = munged; @@ -1248,7 +1248,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr, static char *sec2annotation(const char *s) { if (match(s, init_exit_sections)) { - char *p = malloc(20); + char *p = NOFAIL(malloc(20)); char *r = p; *p++ = '_'; @@ -1268,7 +1268,7 @@ static char *sec2annotation(const char *s) strcat(p, " "); return r; } else { - return strdup(""); + return NOFAIL(strdup("")); } } @@ -1826,7 +1826,7 @@ void buf_write(struct buffer *buf, const char *s, int len) { if (buf->size - buf->pos < len) { buf->size += len + SZ; - buf->p = realloc(buf->p, buf->size); + buf->p = NOFAIL(realloc(buf->p, buf->size)); } strncpy(buf->p + buf->pos, s, len); buf->pos += len; From 06d14c2d92d73f5a843706ae1b643fb9dafcd60f Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Fri, 17 Aug 2018 15:46:57 -0700 Subject: [PATCH 054/151] mm/fadvise.c: fix signed overflow UBSAN complaint [ Upstream commit a718e28f538441a3b6612da9ff226973376cdf0f ] Signed integer overflow is undefined according to the C standard. The overflow in ksys_fadvise64_64() is deliberate, but since it is signed overflow, UBSAN complains: UBSAN: Undefined behaviour in mm/fadvise.c:76:10 signed integer overflow: 4 + 9223372036854775805 cannot be represented in type 'long long int' Use unsigned types to do math. Unsigned overflow is defined so UBSAN will not complain about it. This patch doesn't change generated code. 
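A small standalone program (not part of this patch) shows the same arithmetic with the values from the UBSAN report: the sum is done in an unsigned type, where wrap-around is defined behaviour and can be detected with a comparison instead of being undefined.

/* Illustrative sketch only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t offset = 4;
        int64_t len = 9223372036854775805LL;    /* values from the report */

        /* Adding these as signed 64-bit integers may overflow, which is
         * undefined behaviour; the unsigned sum is well defined.
         */
        uint64_t endbyte = (uint64_t)offset + (uint64_t)len;

        if (len == 0 || endbyte < (uint64_t)len)
                printf("wrapped: treat as 'as much as possible'\n");
        else
                printf("endbyte = %llu\n", (unsigned long long)endbyte);
        return 0;
}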
[akpm@linux-foundation.org: add comment explaining the casts] Link: http://lkml.kernel.org/r/20180629184453.7614-1-aryabinin@virtuozzo.com Signed-off-by: Andrey Ryabinin Reported-by: Reviewed-by: Andrew Morton Cc: Alexander Potapenko Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- mm/fadvise.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/mm/fadvise.c b/mm/fadvise.c index 3bcfd81db45e..ebc997752d1c 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -66,8 +66,12 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) goto out; } - /* Careful about overflows. Len == 0 means "as much as possible" */ - endbyte = offset + len; + /* + * Careful about overflows. Len == 0 means "as much as possible". Use + * unsigned math because signed overflows are undefined and UBSan + * complains. + */ + endbyte = (u64)offset + (u64)len; if (!len || endbyte < len) endbyte = -1; else From 4b351b942bfb3c2b464a3f7826bfa42a854dab25 Mon Sep 17 00:00:00 2001 From: Tan Hu Date: Wed, 25 Jul 2018 15:23:07 +0800 Subject: [PATCH 055/151] ipvs: fix race between ip_vs_conn_new() and ip_vs_del_dest() [ Upstream commit a53b42c11815d2357e31a9403ae3950517525894 ] We came across infinite loop in ipvs when using ipvs in docker env. When ipvs receives new packets and cannot find an ipvs connection, it will create a new connection, then if the dest is unavailable (i.e. IP_VS_DEST_F_AVAILABLE), the packet will be dropped sliently. But if the dropped packet is the first packet of this connection, the connection control timer never has a chance to start and the ipvs connection cannot be released. This will lead to memory leak, or infinite loop in cleanup_net() when net namespace is released like this: ip_vs_conn_net_cleanup at ffffffffa0a9f31a [ip_vs] __ip_vs_cleanup at ffffffffa0a9f60a [ip_vs] ops_exit_list at ffffffff81567a49 cleanup_net at ffffffff81568b40 process_one_work at ffffffff810a851b worker_thread at ffffffff810a9356 kthread at ffffffff810b0b6f ret_from_fork at ffffffff81697a18 race condition: CPU1 CPU2 ip_vs_in() ip_vs_conn_new() ip_vs_del_dest() __ip_vs_unlink_dest() ~IP_VS_DEST_F_AVAILABLE cp->dest && !IP_VS_DEST_F_AVAILABLE __ip_vs_conn_put ... cleanup_net ---> infinite looping Fix this by checking whether the timer already started. Signed-off-by: Tan Hu Reviewed-by: Jiang Biao Acked-by: Julian Anastasov Acked-by: Simon Horman Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/netfilter/ipvs/ip_vs_core.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 3a2fa9c044f8..8425269c97c4 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1692,13 +1692,20 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { /* the destination server is not available */ - if (sysctl_expire_nodest_conn(ipvs)) { + __u32 flags = cp->flags; + + /* when timer already started, silently drop the packet.*/ + if (timer_pending(&cp->timer)) + __ip_vs_conn_put(cp); + else + ip_vs_conn_put(cp); + + if (sysctl_expire_nodest_conn(ipvs) && + !(flags & IP_VS_CONN_F_ONE_PACKET)) { /* try to expire the connection immediately */ ip_vs_conn_expire_now(cp); } - /* don't restart its timer, and silently - drop the packet. 
*/ - __ip_vs_conn_put(cp); + return NF_DROP; } From d03b202c592d344a924fd42dacb301f575d3c312 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Fri, 3 Aug 2018 20:59:51 -0700 Subject: [PATCH 056/151] mfd: sm501: Set coherent_dma_mask when creating subdevices [ Upstream commit 2f606da78230f09cf1a71fde6ee91d0c710fa2b2 ] Instantiating the sm501 OHCI subdevice results in a kernel warning. sm501-usb sm501-usb: SM501 OHCI sm501-usb sm501-usb: new USB bus registered, assigned bus number 1 WARNING: CPU: 0 PID: 1 at ./include/linux/dma-mapping.h:516 ohci_init+0x194/0x2d8 Modules linked in: CPU: 0 PID: 1 Comm: swapper Tainted: G W 4.18.0-rc7-00178-g0b5b1f9a78b5 #1 PC is at ohci_init+0x194/0x2d8 PR is at ohci_init+0x168/0x2d8 PC : 8c27844c SP : 8f81dd94 SR : 40008001 TEA : 29613060 R0 : 00000000 R1 : 00000000 R2 : 00000000 R3 : 00000202 R4 : 8fa98b88 R5 : 8c277e68 R6 : 00000000 R7 : 00000000 R8 : 8f965814 R9 : 8c388100 R10 : 8fa98800 R11 : 8fa98928 R12 : 8c48302c R13 : 8fa98920 R14 : 8c48302c MACH: 00000096 MACL: 0000017c GBR : 00000000 PR : 8c278420 Call trace: [<(ptrval)>] usb_add_hcd+0x1e8/0x6ec [<(ptrval)>] _dev_info+0x0/0x54 [<(ptrval)>] arch_local_save_flags+0x0/0x8 [<(ptrval)>] arch_local_irq_restore+0x0/0x24 [<(ptrval)>] ohci_hcd_sm501_drv_probe+0x114/0x2d8 ... Initialize coherent_dma_mask when creating SM501 subdevices to fix the problem. Fixes: b6d6454fdb66f ("mfd: SM501 core driver") Signed-off-by: Guenter Roeck Signed-off-by: Lee Jones Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/sm501.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 6ce6e6200359..c9cbbca92e53 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -714,6 +714,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name, smdev->pdev.name = name; smdev->pdev.id = sm->pdev_id; smdev->pdev.dev.parent = sm->dev; + smdev->pdev.dev.coherent_dma_mask = 0xffffffff; if (res_count) { smdev->pdev.resource = (struct resource *)(smdev+1); From 022e1fb14900c6cc6889e06169833ff1244b0c1f Mon Sep 17 00:00:00 2001 From: Aleh Filipovich Date: Fri, 10 Aug 2018 22:07:25 +0200 Subject: [PATCH 057/151] platform/x86: asus-nb-wmi: Add keymap entry for lid flip action on UX360 [ Upstream commit 880b29ac107d15644bf4da228376ba3cd6af6d71 ] Add entry to WMI keymap for lid flip event on Asus UX360. On Asus Zenbook ux360 flipping lid from/to tablet mode triggers keyscan code 0xfa which cannot be handled and results in kernel log message "Unknown key fa pressed". 
Signed-off-by: Aleh Filipovich Signed-off-by: Andy Shevchenko Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/asus-nb-wmi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 5ea4c5a72a66..f13b5b95c00f 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -365,6 +365,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0xC4, { KEY_KBDILLUMUP } }, { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } }, { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */ + { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */ { KE_END, 0}, }; From 47e5fde58130789eb891f0985a4507d8a28b7e26 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Tue, 17 Jul 2018 19:14:45 -0700 Subject: [PATCH 058/151] net/9p: fix error path of p9_virtio_probe [ Upstream commit 92aef4675d5b1b55404e1532379e343bed0e5cf2 ] Currently when virtio_find_single_vq fails, we go through del_vqs which throws a warning (Trying to free already-free IRQ). Skip del_vqs if vq allocation failed. Link: http://lkml.kernel.org/r/20180524101021.49880-1-jean-philippe.brucker@arm.com Signed-off-by: Jean-Philippe Brucker Reviewed-by: Greg Kurz Cc: Eric Van Hensbergen Cc: Ron Minnich Cc: Latchesar Ionkov Signed-off-by: Andrew Morton Signed-off-by: Dominique Martinet Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/9p/trans_virtio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 130180f67ae3..1956efa5fae8 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -537,7 +537,7 @@ static int p9_virtio_probe(struct virtio_device *vdev) chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); if (IS_ERR(chan->vq)) { err = PTR_ERR(chan->vq); - goto out_free_vq; + goto out_free_chan; } chan->vq->vdev->priv = chan; spin_lock_init(&chan->lock); @@ -590,6 +590,7 @@ static int p9_virtio_probe(struct virtio_device *vdev) kfree(tag); out_free_vq: vdev->config->del_vqs(vdev); +out_free_chan: kfree(chan); fail: return err; From f0d26e4807275d45e9e01b35b5071f3824107f29 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 8 Aug 2018 14:57:24 +0300 Subject: [PATCH 059/151] powerpc: Fix size calculation using resource_size() [ Upstream commit c42d3be0c06f0c1c416054022aa535c08a1f9b39 ] The problem is the the calculation should be "end - start + 1" but the plus one is missing in this calculation. Fixes: 8626816e905e ("powerpc: add support for MPIC message register API") Signed-off-by: Dan Carpenter Reviewed-by: Tyrel Datwyler Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/sysdev/mpic_msgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index 7bdf3cc741e4..cfc3b9720763 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev) /* IO map the message register block. 
*/ of_address_to_resource(np, 0, &rsrc); - msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start); + msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc)); if (!msgr_block_addr) { dev_err(&dev->dev, "Failed to iomap MPIC message registers"); return -EFAULT; From 4d94096ff923339931c4464332b0c87b4731e44e Mon Sep 17 00:00:00 2001 From: Stefan Haberland Date: Wed, 25 Jul 2018 14:00:47 +0200 Subject: [PATCH 060/151] s390/dasd: fix hanging offline processing due to canceled worker [ Upstream commit 669f3765b755fd8739ab46ce3a9c6292ce8b3d2a ] During offline processing two worker threads are canceled without freeing the device reference which leads to a hanging offline process. Reviewed-by: Jan Hoeppner Signed-off-by: Stefan Haberland Signed-off-by: Martin Schwidefsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/s390/block/dasd_eckd.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 3a3e1dfcb032..ff1ab6da8cff 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2066,8 +2066,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device) static int dasd_eckd_online_to_ready(struct dasd_device *device) { - cancel_work_sync(&device->reload_device); - cancel_work_sync(&device->kick_validate); + if (cancel_work_sync(&device->reload_device)) + dasd_put_device(device); + if (cancel_work_sync(&device->kick_validate)) + dasd_put_device(device); + return 0; }; From bd724a4874e9b841690b80858b837bcdd48aca87 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 8 Aug 2018 17:29:09 +0300 Subject: [PATCH 061/151] scsi: aic94xx: fix an error code in aic94xx_init() [ Upstream commit 0756c57bce3d26da2592d834d8910b6887021701 ] We accidentally return success instead of -ENOMEM on this error path. Fixes: 2908d778ab3e ("[SCSI] aic94xx: new driver") Signed-off-by: Dan Carpenter Reviewed-by: Johannes Thumshirn Reviewed-by: John Garry Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/aic94xx/aic94xx_init.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index c56741fc4b99..f2d5dc164c00 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -1047,8 +1047,10 @@ static int __init aic94xx_init(void) aic94xx_transport_template = sas_domain_attach_transport(&aic94xx_transport_functions); - if (!aic94xx_transport_template) + if (!aic94xx_transport_template) { + err = -ENOMEM; goto out_destroy_caches; + } err = pci_register_driver(&aic94xx_pci_driver); if (err) From 801edd7519194dac6c21d6d4730b7840cf78730f Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Fri, 3 Aug 2018 16:38:44 +0200 Subject: [PATCH 062/151] PCI: mvebu: Fix I/O space end address calculation [ Upstream commit dfd0309fd7b30a5baffaf47b2fccb88b46d64d69 ] pcie->realio.end should be the address of last byte of the area, therefore using resource_size() of another resource is not correct, we must substract 1 to get the address of the last byte. 
Fixes: 11be65472a427 ("PCI: mvebu: Adapt to the new device tree layout") Signed-off-by: Thomas Petazzoni Signed-off-by: Lorenzo Pieralisi Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/host/pci-mvebu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index b1315e197ffb..b7463ebfb2d9 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -940,7 +940,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev) pcie->realio.start = PCIBIOS_MIN_IO; pcie->realio.end = min_t(resource_size_t, IO_SPACE_LIMIT, - resource_size(&pcie->io)); + resource_size(&pcie->io) - 1); } else pcie->realio = pcie->io; From dc5b9912a7058c1f5665224287be8f34be841e31 Mon Sep 17 00:00:00 2001 From: John Pittman Date: Mon, 6 Aug 2018 15:53:12 -0400 Subject: [PATCH 063/151] dm kcopyd: avoid softlockup in run_complete_job [ Upstream commit 784c9a29e99eb40b842c29ecf1cc3a79e00fb629 ] It was reported that softlockups occur when using dm-snapshot ontop of slow (rbd) storage. E.g.: [ 4047.990647] watchdog: BUG: soft lockup - CPU#10 stuck for 22s! [kworker/10:23:26177] ... [ 4048.034151] Workqueue: kcopyd do_work [dm_mod] [ 4048.034156] RIP: 0010:copy_callback+0x41/0x160 [dm_snapshot] ... [ 4048.034190] Call Trace: [ 4048.034196] ? __chunk_is_tracked+0x70/0x70 [dm_snapshot] [ 4048.034200] run_complete_job+0x5f/0xb0 [dm_mod] [ 4048.034205] process_jobs+0x91/0x220 [dm_mod] [ 4048.034210] ? kcopyd_put_pages+0x40/0x40 [dm_mod] [ 4048.034214] do_work+0x46/0xa0 [dm_mod] [ 4048.034219] process_one_work+0x171/0x370 [ 4048.034221] worker_thread+0x1fc/0x3f0 [ 4048.034224] kthread+0xf8/0x130 [ 4048.034226] ? max_active_store+0x80/0x80 [ 4048.034227] ? kthread_bind+0x10/0x10 [ 4048.034231] ret_from_fork+0x35/0x40 [ 4048.034233] Kernel panic - not syncing: softlockup: hung tasks Fix this by calling cond_resched() after run_complete_job()'s callout to the dm_kcopyd_notify_fn (which is dm-snap.c:copy_callback in the above trace). Signed-off-by: John Pittman Signed-off-by: Mike Snitzer Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-kcopyd.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 3a7cade5e27d..77833b65e01e 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -454,6 +454,8 @@ static int run_complete_job(struct kcopyd_job *job) if (atomic_dec_and_test(&kc->nr_jobs)) wake_up(&kc->destroyq); + cond_resched(); + return 0; } From c2ff51938984a18eb7028e5449a2065046452e19 Mon Sep 17 00:00:00 2001 From: Ian Abbott Date: Mon, 6 Aug 2018 11:05:13 +0100 Subject: [PATCH 064/151] staging: comedi: ni_mio_common: fix subdevice flags for PFI subdevice [ Upstream commit e083926b3e269d4064825dcf2ad50c636fddf8cf ] The PFI subdevice flags indicate that the subdevice is readable and writeable, but that is only true for the supported "M-series" boards, not the older "E-series" boards. Only set the SDF_READABLE and SDF_WRITABLE subdevice flags for the M-series boards. These two flags are mainly for informational purposes. 
Signed-off-by: Ian Abbott Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/comedi/drivers/ni_mio_common.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index 04f7a3bd3ddc..0c36f5557abc 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -5601,11 +5601,11 @@ static int ni_E_init(struct comedi_device *dev, /* Digital I/O (PFI) subdevice */ s = &dev->subdevices[NI_PFI_DIO_SUBDEV]; s->type = COMEDI_SUBD_DIO; - s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; s->maxdata = 1; if (devpriv->is_m_series) { s->n_chan = 16; s->insn_bits = ni_pfi_insn_bits; + s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL; ni_writew(dev, s->state, M_Offset_PFI_DO); for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) { @@ -5614,6 +5614,7 @@ static int ni_E_init(struct comedi_device *dev, } } else { s->n_chan = 10; + s->subdev_flags = SDF_INTERNAL; } s->insn_config = ni_pfi_insn_config; From d4d534751b43ca82d7ec6146ee064b232918cfa2 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Tue, 7 Aug 2018 11:15:39 -0300 Subject: [PATCH 065/151] selftests/powerpc: Kill child processes on SIGINT [ Upstream commit 7c27a26e1ed5a7dd709aa19685d2c98f64e1cf0c ] There are some powerpc selftests, as tm/tm-unavailable, that run for a long period (>120 seconds), and if it is interrupted, as pressing CRTL-C (SIGINT), the foreground process (harness) dies but the child process and threads continue to execute (with PPID = 1 now) in background. In this case, you'd think the whole test exited, but there are remaining threads and processes being executed in background. Sometimes these zombies processes are doing annoying things, as consuming the whole CPU or dumping things to STDOUT. This patch fixes this problem by attaching an empty signal handler to SIGINT in the harness process. This handler will interrupt (EINTR) the parent process waitpid() call, letting the code to follow through the normal flow, which will kill all the processes in the child process group. This patch also fixes a typo. 
Signed-off-by: Breno Leitao Signed-off-by: Gustavo Romero Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/powerpc/harness.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c index d1ed7bab65a5..fa0683118865 100644 --- a/tools/testing/selftests/powerpc/harness.c +++ b/tools/testing/selftests/powerpc/harness.c @@ -81,13 +81,13 @@ int run_test(int (test_function)(void), char *name) return status; } -static void alarm_handler(int signum) +static void sig_handler(int signum) { - /* Jut wake us up from waitpid */ + /* Just wake us up from waitpid */ } -static struct sigaction alarm_action = { - .sa_handler = alarm_handler, +static struct sigaction sig_action = { + .sa_handler = sig_handler, }; int test_harness(int (test_function)(void), char *name) @@ -97,8 +97,14 @@ int test_harness(int (test_function)(void), char *name) test_start(name); test_set_git_version(GIT_VERSION); - if (sigaction(SIGALRM, &alarm_action, NULL)) { - perror("sigaction"); + if (sigaction(SIGINT, &sig_action, NULL)) { + perror("sigaction (sigint)"); + test_error(name); + return 1; + } + + if (sigaction(SIGALRM, &sig_action, NULL)) { + perror("sigaction (sigalrm)"); test_error(name); return 1; } From bf638d3079a53843464d84aeeab9f1f5092f8843 Mon Sep 17 00:00:00 2001 From: Steve French Date: Wed, 1 Aug 2018 00:56:12 -0500 Subject: [PATCH 066/151] smb3: fix reset of bytes read and written stats [ Upstream commit c281bc0c7412308c7ec0888904f7c99353da4796 ] echo 0 > /proc/fs/cifs/Stats is supposed to reset the stats but there were four (see example below) that were not reset (bytes read and witten, total vfs ops and max ops at one time). ... 0 session 0 share reconnects Total vfs operations: 100 maximum at one time: 2 1) \\localhost\test SMBs: 0 Bytes read: 502092 Bytes written: 31457286 TreeConnects: 0 total 0 failed TreeDisconnects: 0 total 0 failed ... This patch fixes cifs_stats_proc_write to properly reset those four. 
Signed-off-by: Steve French Reviewed-by: Aurelien Aptel Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/cifs/cifs_debug.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index dab67ad53a1a..1b3a00a1eb8a 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -271,6 +271,10 @@ static ssize_t cifs_stats_proc_write(struct file *file, atomic_set(&totBufAllocCount, 0); atomic_set(&totSmBufAllocCount, 0); #endif /* CONFIG_CIFS_STATS2 */ + spin_lock(&GlobalMid_Lock); + GlobalMaxActiveXid = 0; + GlobalCurrentXid = 0; + spin_unlock(&GlobalMid_Lock); spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp1, &cifs_tcp_ses_list) { server = list_entry(tmp1, struct TCP_Server_Info, @@ -283,6 +287,10 @@ static ssize_t cifs_stats_proc_write(struct file *file, struct cifs_tcon, tcon_list); atomic_set(&tcon->num_smbs_sent, 0); + spin_lock(&tcon->stat_lock); + tcon->bytes_read = 0; + tcon->bytes_written = 0; + spin_unlock(&tcon->stat_lock); if (server->ops->clear_stats) server->ops->clear_stats(tcon); } From 5a16326173c935c28e20ede6fdfb6b0668d1c8bd Mon Sep 17 00:00:00 2001 From: Steve French Date: Mon, 23 Jul 2018 09:15:18 -0500 Subject: [PATCH 067/151] SMB3: Number of requests sent should be displayed for SMB3 not just CIFS [ Upstream commit 289131e1f1e6ad8c661ec05e176b8f0915672059 ] For SMB2/SMB3 the number of requests sent was not displayed in /proc/fs/cifs/Stats unless CONFIG_CIFS_STATS2 was enabled (only number of failed requests displayed). As with earlier dialects, we should be displaying these counters if CONFIG_CIFS_STATS is enabled. They are important for debugging. e.g. when you cat /proc/fs/cifs/Stats (before the patch) Resources in use CIFS Session: 1 Share (unique mount targets): 2 SMB Request/Response Buffer: 1 Pool size: 5 SMB Small Req/Resp Buffer: 1 Pool size: 30 Operations (MIDs): 0 0 session 0 share reconnects Total vfs operations: 690 maximum at one time: 2 1) \\localhost\test SMBs: 975 Negotiates: 0 sent 0 failed SessionSetups: 0 sent 0 failed Logoffs: 0 sent 0 failed TreeConnects: 0 sent 0 failed TreeDisconnects: 0 sent 0 failed Creates: 0 sent 2 failed Closes: 0 sent 0 failed Flushes: 0 sent 0 failed Reads: 0 sent 0 failed Writes: 0 sent 0 failed Locks: 0 sent 0 failed IOCTLs: 0 sent 1 failed Cancels: 0 sent 0 failed Echos: 0 sent 0 failed QueryDirectories: 0 sent 63 failed Signed-off-by: Steve French Reviewed-by: Aurelien Aptel Reviewed-by: Pavel Shilovsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb2pdu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index b58ba8748e73..fc652288491e 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -301,7 +301,7 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon, smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon); if (tcon != NULL) { -#ifdef CONFIG_CIFS_STATS2 +#ifdef CONFIG_CIFS_STATS uint16_t com_code = le16_to_cpu(smb2_command); cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]); #endif From 5fcb40fd186b30b72de7bf42d84e44b62aa1b4f8 Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Wed, 4 Jul 2018 23:27:02 +0530 Subject: [PATCH 068/151] powerpc/pseries: Avoid using the size greater than RTAS_ERROR_LOG_MAX. [ Upstream commit 74e96bf44f430cf7a01de19ba6cf49b361cdfd6e ] The global mce data buffer that used to copy rtas error log is of 2048 (RTAS_ERROR_LOG_MAX) bytes in size. 
Before the copy we read extended_log_length from rtas error log header, then use max of extended_log_length and RTAS_ERROR_LOG_MAX as a size of data to be copied. Ideally the platform (phyp) will never send extended error log with size > 2048. But if that happens, then we have a risk of buffer overrun and corruption. Fix this by using min_t instead. Fixes: d368514c3097 ("powerpc: Fix corruption when grabbing FWNMI data") Reported-by: Michal Suchanek Signed-off-by: Mahesh Salgaonkar Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/pseries/ras.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index c85c76dc4400..a4dcfb24eb26 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -309,7 +309,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) int len, error_log_length; error_log_length = 8 + rtas_error_extended_log_length(h); - len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX); + len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX); memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX); memcpy(global_mce_data_buf, h, len); errhdr = (struct rtas_error_log *)global_mce_data_buf; From cb17dca7e62d4ef811d17c72d257383de44692ed Mon Sep 17 00:00:00 2001 From: Misono Tomohiro Date: Tue, 31 Jul 2018 16:20:21 +0900 Subject: [PATCH 069/151] btrfs: replace: Reset on-disk dev stats value after replace [ Upstream commit 1e7e1f9e3aba00c9b9c323bfeeddafe69ff21ff6 ] on-disk devs stats value is updated in btrfs_run_dev_stats(), which is called during commit transaction, if device->dev_stats_ccnt is not zero. Since current replace operation does not touch dev_stats_ccnt, on-disk dev stats value is not updated. Therefore "btrfs device stats" may return old device's value after umount/mount (Example: See "btrfs ins dump-t -t DEV $DEV" after btrfs/100 finish). Fix this by just incrementing dev_stats_ccnt in btrfs_dev_replace_finishing() when replace is succeeded and this will update the values. Signed-off-by: Misono Tomohiro Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/dev-replace.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 6f662b34ba0e..a57c7eb292c8 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -583,6 +583,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, btrfs_rm_dev_replace_unblocked(fs_info); + /* + * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will + * update on-disk dev stats value during commit transaction + */ + atomic_inc(&tgt_device->dev_stats_ccnt); + /* * this is again a consistent state where no dev_replace procedure * is running, the target device is part of the filesystem, the From 56f338b39513de39679cf0c2da704d4803c9a0d5 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 3 Jul 2018 17:10:07 +0800 Subject: [PATCH 070/151] btrfs: relocation: Only remove reloc rb_trees if reloc control has been initialized [ Upstream commit 389305b2aa68723c754f88d9dbd268a400e10664 ] Invalid reloc tree can cause kernel NULL pointer dereference when btrfs does some cleanup of the reloc roots. It turns out that fs_info::reloc_ctl can be NULL in btrfs_recover_relocation() as we allocate relocation control after all reloc roots have been verified. 
So when we hit: note, we haven't called set_reloc_control() thus fs_info::reloc_ctl is still NULL. Link: https://bugzilla.kernel.org/show_bug.cgi?id=199833 Reported-by: Xu Wen Signed-off-by: Qu Wenruo Tested-by: Gu Jinxiang Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/relocation.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 74257d6436ad..31860a71341f 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1311,18 +1311,19 @@ static void __del_reloc_root(struct btrfs_root *root) struct mapping_node *node = NULL; struct reloc_control *rc = root->fs_info->reloc_ctl; - spin_lock(&rc->reloc_root_tree.lock); - rb_node = tree_search(&rc->reloc_root_tree.rb_root, - root->node->start); - if (rb_node) { - node = rb_entry(rb_node, struct mapping_node, rb_node); - rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); + if (rc) { + spin_lock(&rc->reloc_root_tree.lock); + rb_node = tree_search(&rc->reloc_root_tree.rb_root, + root->node->start); + if (rb_node) { + node = rb_entry(rb_node, struct mapping_node, rb_node); + rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root); + } + spin_unlock(&rc->reloc_root_tree.lock); + if (!node) + return; + BUG_ON((struct btrfs_root *)node->data != root); } - spin_unlock(&rc->reloc_root_tree.lock); - - if (!node) - return; - BUG_ON((struct btrfs_root *)node->data != root); spin_lock(&root->fs_info->trans_lock); list_del_init(&root->root_list); From 174b32e22a7bcedf76b35cce85569bdd70ee1934 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 22 Jun 2018 12:35:00 +0800 Subject: [PATCH 071/151] btrfs: Don't remove block group that still has pinned down bytes [ Upstream commit 43794446548730ac8461be30bbe47d5d027d1d16 ] [BUG] Under certain KVM load and LTP tests, it is possible to hit the following calltrace if quota is enabled: BTRFS critical (device vda2): unable to find logical 8820195328 length 4096 BTRFS critical (device vda2): unable to find logical 8820195328 length 4096 WARNING: CPU: 0 PID: 49 at ../block/blk-core.c:172 blk_status_to_errno+0x1a/0x30 CPU: 0 PID: 49 Comm: kworker/u2:1 Not tainted 4.12.14-15-default #1 SLE15 (unreleased) Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.0.0-prebuilt.qemu-project.org 04/01/2014 Workqueue: btrfs-endio-write btrfs_endio_write_helper [btrfs] task: ffff9f827b340bc0 task.stack: ffffb4f8c0304000 RIP: 0010:blk_status_to_errno+0x1a/0x30 Call Trace: submit_extent_page+0x191/0x270 [btrfs] ? btrfs_create_repair_bio+0x130/0x130 [btrfs] __do_readpage+0x2d2/0x810 [btrfs] ? btrfs_create_repair_bio+0x130/0x130 [btrfs] ? run_one_async_done+0xc0/0xc0 [btrfs] __extent_read_full_page+0xe7/0x100 [btrfs] ? run_one_async_done+0xc0/0xc0 [btrfs] read_extent_buffer_pages+0x1ab/0x2d0 [btrfs] ? run_one_async_done+0xc0/0xc0 [btrfs] btree_read_extent_buffer_pages+0x94/0xf0 [btrfs] read_tree_block+0x31/0x60 [btrfs] read_block_for_search.isra.35+0xf0/0x2e0 [btrfs] btrfs_search_slot+0x46b/0xa00 [btrfs] ? kmem_cache_alloc+0x1a8/0x510 ? btrfs_get_token_32+0x5b/0x120 [btrfs] find_parent_nodes+0x11d/0xeb0 [btrfs] ? leaf_space_used+0xb8/0xd0 [btrfs] ? btrfs_leaf_free_space+0x49/0x90 [btrfs] ? 
btrfs_find_all_roots_safe+0x93/0x100 [btrfs] btrfs_find_all_roots_safe+0x93/0x100 [btrfs] btrfs_find_all_roots+0x45/0x60 [btrfs] btrfs_qgroup_trace_extent_post+0x20/0x40 [btrfs] btrfs_add_delayed_data_ref+0x1a3/0x1d0 [btrfs] btrfs_alloc_reserved_file_extent+0x38/0x40 [btrfs] insert_reserved_file_extent.constprop.71+0x289/0x2e0 [btrfs] btrfs_finish_ordered_io+0x2f4/0x7f0 [btrfs] ? pick_next_task_fair+0x2cd/0x530 ? __switch_to+0x92/0x4b0 btrfs_worker_helper+0x81/0x300 [btrfs] process_one_work+0x1da/0x3f0 worker_thread+0x2b/0x3f0 ? process_one_work+0x3f0/0x3f0 kthread+0x11a/0x130 ? kthread_create_on_node+0x40/0x40 ret_from_fork+0x35/0x40 BTRFS critical (device vda2): unable to find logical 8820195328 length 16384 BTRFS: error (device vda2) in btrfs_finish_ordered_io:3023: errno=-5 IO failure BTRFS info (device vda2): forced readonly BTRFS error (device vda2): pending csums is 2887680 [CAUSE] It's caused by race with block group auto removal: - There is a meta block group X, which has only one tree block The tree block belongs to fs tree 257. - In current transaction, some operation modified fs tree 257 The tree block gets COWed, so the block group X is empty, and marked as unused, queued to be deleted. - Some workload (like fsync) wakes up cleaner_kthread() Which will call btrfs_delete_unused_bgs() to remove unused block groups. So block group X along its chunk map get removed. - Some delalloc work finished for fs tree 257 Quota needs to get the original reference of the extent, which will read tree blocks of commit root of 257. Then since the chunk map gets removed, the above warning gets triggered. [FIX] Just let btrfs_delete_unused_bgs() skip block group which still has pinned bytes. However there is a minor side effect: currently we only queue empty blocks at update_block_group(), and such empty block group with pinned bytes won't go through update_block_group() again, such block group won't be removed, until it gets new extent allocated and removed. Signed-off-by: Qu Wenruo Reviewed-by: Filipe Manana Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/extent-tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b2937b645b62..06b7df488c50 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -9487,7 +9487,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) /* Don't want to race with allocators so take the groups_sem */ down_write(&space_info->groups_sem); spin_lock(&block_group->lock); - if (block_group->reserved || + if (block_group->reserved || block_group->pinned || btrfs_block_group_used(&block_group->item) || block_group->ro) { /* From fa2d7df55faafa55afc744bbfc01d75bace457cf Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Mon, 23 Jul 2018 14:25:31 -0700 Subject: [PATCH 072/151] debugobjects: Make stack check warning more informative commit fc91a3c4c27acdca0bc13af6fbb68c35cfd519f2 upstream. While debugging an issue debugobject tracking warned about an annotation issue of an object on stack. It turned out that the issue was due to the object in concern being on a different stack which was due to another issue. Thomas suggested to print the pointers and the location of the stack for the currently running task. This helped to figure out that the object was on the wrong stack. As this is general useful information for debugging similar issues, make the error message more informative by printing the pointers. 
[ tglx: Massaged changelog ] Signed-off-by: Joel Fernandes (Google) Signed-off-by: Thomas Gleixner Acked-by: Waiman Long Acked-by: Yang Shi Cc: kernel-team@android.com Cc: Arnd Bergmann Cc: astrachan@google.com Link: https://lkml.kernel.org/r/20180723212531.202328-1-joel@joelfernandes.org Signed-off-by: Greg Kroah-Hartman --- lib/debugobjects.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 547f7f923dbc..a26328ec39f1 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -295,9 +295,12 @@ static void debug_object_is_on_stack(void *addr, int onstack) limit++; if (is_on_stack) - pr_warn("object is on stack, but not annotated\n"); + pr_warn("object %p is on stack %p, but NOT annotated.\n", addr, + task_stack_page(current)); else - pr_warn("object is not on stack, but annotated\n"); + pr_warn("object %p is NOT on stack %p, but annotated.\n", addr, + task_stack_page(current)); + WARN_ON(1); } From 2daf1a7495833dad8ac2875906b471a0d55e6238 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 28 Aug 2018 12:59:10 -0700 Subject: [PATCH 073/151] kbuild: make missing $DEPMOD a Warning instead of an Error commit 914b087ff9e0e9a399a4927fa30793064afc0178 upstream. When $DEPMOD is not found, only print a warning instead of exiting with an error message and error status: Warning: 'make modules_install' requires /sbin/depmod. Please install it. This is probably in the kmod package. Change the Error to a Warning because "not all build hosts for cross compiling Linux are Linux systems and are able to provide a working port of depmod, especially at the file patch /sbin/depmod." I.e., "make modules_install" may be used to copy/install the loadable modules files to a target directory on a build system and then transferred to an embedded device where /sbin/depmod is run instead of it being run on the build system. Fixes: 934193a654c1 ("kbuild: verify that $DEPMOD is installed") Signed-off-by: Randy Dunlap Reported-by: H. Nikolaus Schaller Cc: stable@vger.kernel.org Cc: Lucas De Marchi Cc: Lucas De Marchi Cc: Michal Marek Cc: Jessica Yu Cc: Chih-Wei Huang Signed-off-by: Masahiro Yamada Signed-off-by: Maxim Zhukov Signed-off-by: Greg Kroah-Hartman --- scripts/depmod.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/depmod.sh b/scripts/depmod.sh index ea1e96921e3b..baedaef53ca0 100755 --- a/scripts/depmod.sh +++ b/scripts/depmod.sh @@ -15,9 +15,9 @@ if ! test -r System.map ; then fi if [ -z $(command -v $DEPMOD) ]; then - echo "'make modules_install' requires $DEPMOD. Please install it." >&2 + echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2 echo "This is probably in the kmod package." >&2 - exit 1 + exit 0 fi # older versions of depmod don't support -P From 9f0547e071f848a665056e18c70cc2741a96ca76 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Tue, 4 Sep 2018 15:24:04 +0000 Subject: [PATCH 074/151] irda: Fix memory leak caused by repeated binds of irda socket The irda_bind() function allocates memory for self->ias_obj without checking to see if the socket is already bound. A userspace process could repeatedly bind the socket, have each new object added into the LM-IAS database, and lose the reference to the old object assigned to the socket to exhaust memory resources. This patch errors out of the bind operation when self->ias_obj is already assigned. 
CVE-2018-6554 Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Tyler Hicks Reviewed-by: Seth Arnold Reviewed-by: Stefan Bader Signed-off-by: Greg Kroah-Hartman --- net/irda/af_irda.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 556e3b0f77ba..70a1fd69d4de 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -785,6 +785,13 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return -EINVAL; lock_sock(sk); + + /* Ensure that the socket is not already bound */ + if (self->ias_obj) { + err = -EINVAL; + goto out; + } + #ifdef CONFIG_IRDA_ULTRA /* Special care for Ultra sockets */ if ((sk->sk_type == SOCK_DGRAM) && From f24049f47194791d0393450b4f5cebea9f0b5e89 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Tue, 4 Sep 2018 15:24:05 +0000 Subject: [PATCH 075/151] irda: Only insert new objects into the global database via setsockopt The irda_setsockopt() function conditionally allocates memory for a new self->ias_object or, in some cases, reuses the existing self->ias_object. Existing objects were incorrectly reinserted into the LM_IAS database which corrupted the doubly linked list used for the hashbin implementation of the LM_IAS database. When combined with a memory leak in irda_bind(), this issue could be leveraged to create a use-after-free vulnerability in the hashbin list. This patch fixes the issue by only inserting newly allocated objects into the database. CVE-2018-6555 Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Tyler Hicks Reviewed-by: Seth Arnold Reviewed-by: Stefan Bader Signed-off-by: Greg Kroah-Hartman --- net/irda/af_irda.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 70a1fd69d4de..1e6a240465a9 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -2051,7 +2051,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, err = -EINVAL; goto out; } - irias_insert_object(ias_obj); + + /* Only insert newly allocated objects */ + if (free_ias) + irias_insert_object(ias_obj); + kfree(ias_opt); break; case IRLMP_IAS_DEL: From 566f4e263c0d13b2d34199fa1f4c337453369d19 Mon Sep 17 00:00:00 2001 From: Govindarajulu Varadarajan Date: Mon, 30 Jul 2018 09:56:54 -0700 Subject: [PATCH 076/151] enic: do not call enic_change_mtu in enic_probe commit cb5c6568867325f9905e80c96531d963bec8e5ea upstream. In commit ab123fe071c9 ("enic: handle mtu change for vf properly") ASSERT_RTNL() is added to _enic_change_mtu() to prevent it from being called without rtnl held. enic_probe() calls enic_change_mtu() without rtnl held. At this point netdev is not registered yet. Remove call to enic_change_mtu and assign the mtu to netdev->mtu. Fixes: ab123fe071c9 ("enic: handle mtu change for vf properly") Signed-off-by: Govindarajulu Varadarajan Signed-off-by: David S. 
Miller Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/cisco/enic/enic_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index c6775de7b4f2..04eb59e6b826 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2496,7 +2496,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ enic->port_mtu = enic->config.mtu; - (void)enic_change_mtu(netdev, enic->port_mtu); err = enic_set_mac_addr(netdev, enic->mac_addr); if (err) { @@ -2545,6 +2544,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->features |= NETIF_F_HIGHDMA; netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->mtu = enic->port_mtu; err = register_netdev(netdev); if (err) { From dd6ff2938d7be1a4eecdd7ba9af466959c95df3b Mon Sep 17 00:00:00 2001 From: Chas Williams Date: Thu, 6 Sep 2018 11:10:41 -0400 Subject: [PATCH 077/151] Fixes: Commit 86af955d02bb ("mm: numa: avoid waiting on freed migrated pages") Commit 86af955d02bb ("mm: numa: avoid waiting on freed migrated pages") was an incomplete backport of the upstream commit. It is necessary to always reset page_nid before attempting any early exit. The original commit conflicted due to lack of commit 82b0f8c39a38 ("mm: join struct fault_env and vm_fault") in 4.9 so it wasn't a clean application, and the change must have just gotten lost in the noise. Signed-off-by: Chas Williams Signed-off-by: Greg Kroah-Hartman --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 690d172436c4..bc402f39ac48 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1329,12 +1329,12 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Migration could have started since the pmd_trans_migrating check */ if (!page_locked) { + page_nid = -1; if (!get_page_unless_zero(page)) goto out_unlock; spin_unlock(ptl); wait_on_page_locked(page); put_page(page); - page_nid = -1; goto out; } From cb4009e1c6baf715a241ddc508ccf5c5f456f60e Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 6 Aug 2018 07:14:51 -0500 Subject: [PATCH 078/151] ASoC: wm8994: Fix missing break in switch commit ad0eaee6195db1db1749dd46b9e6f4466793d178 upstream. Add missing break statement in order to prevent the code from falling through to the default case. Addresses-Coverity-ID: 115050 ("Missing break in switch") Reported-by: Valdis Kletnieks Signed-off-by: Gustavo A. R. Silva Acked-by: Charles Keepax Signed-off-by: Mark Brown Cc: stable@vger.kernel.org [Gustavo: Backported to 3.16..4.18 - Remove code comment removal] Signed-off-by: Gustavo A. R. Silva Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/wm8994.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index ba7d8e95ba78..4a879b14a2fc 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -2427,6 +2427,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai, snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_2, WM8994_OPCLK_ENA, 0); } + break; default: return -EINVAL; From 933ce26bda5c1caa2e3cd77f0a6d93ce53f9a63a Mon Sep 17 00:00:00 2001 From: Shubhrajyoti Datta Date: Mon, 3 Sep 2018 15:11:11 +0530 Subject: [PATCH 079/151] i2c: xiic: Make the start and the byte count write atomic commit ae7304c3ea28a3ba47a7a8312c76c654ef24967e upstream. 
Disable interrupts while configuring the transfer and enable them back. We have below as the programming sequence 1. start and slave address 2. byte count and stop In some customer platform there was a lot of interrupts between 1 and 2 and after slave address (around 7 clock cyles) if 2 is not executed then the transaction is nacked. To fix this case make the 2 writes atomic. Signed-off-by: Shubhrajyoti Datta Signed-off-by: Michal Simek [wsa: added a newline for better readability] Signed-off-by: Wolfram Sang Cc: stable@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-xiic.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index cc65ea0b818f..2490f80a2d98 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -500,6 +500,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) { u8 rx_watermark; struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; + unsigned long flags; /* Clear and enable Rx full interrupt. */ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK); @@ -515,6 +516,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) rx_watermark = IIC_RX_FIFO_DEPTH; xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); + local_irq_save(flags); if (!(msg->flags & I2C_M_NOSTART)) /* write the address */ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, @@ -525,6 +527,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c) xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0)); + local_irq_restore(flags); + if (i2c->nmsgs == 1) /* very last, enable bus not busy as well */ xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK); From 689d21030d47833a73923c94b059a68099b55797 Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Wed, 9 Aug 2017 18:28:32 +0530 Subject: [PATCH 080/151] cfq: Give a chance for arming slice idle timer in case of group_idle commit b3193bc0dca9bb69c8ba1ec1a318105c76eb4172 upstream. In below scenario blkio cgroup does not work as per their assigned weights :- 1. When the underlying device is nonrotational with a single HW queue with depth of >= CFQ_HW_QUEUE_MIN 2. When the use case is forming two blkio cgroups cg1(weight 1000) & cg2(wight 100) and two processes(file1 and file2) doing sync IO in their respective blkio cgroups. For above usecase result of fio (without this patch):- file1: (groupid=0, jobs=1): err= 0: pid=685: Thu Jan 1 19:41:49 1970 write: IOPS=1315, BW=41.1MiB/s (43.1MB/s)(1024MiB/24906msec) <...> file2: (groupid=0, jobs=1): err= 0: pid=686: Thu Jan 1 19:41:49 1970 write: IOPS=1295, BW=40.5MiB/s (42.5MB/s)(1024MiB/25293msec) <...> // both the process BW is equal even though they belong to diff. cgroups with weight of 1000(cg1) and 100(cg2) In above case (for non rotational NCQ devices), as soon as the request from cg1 is completed and even though it is provided with higher set_slice=10, because of CFQ algorithm when the driver tries to fetch the request, CFQ expires this group without providing any idle time nor weight priority and schedules another cfq group (in this case cg2). And thus both cfq groups(cg1 & cg2) keep alternating to get the disk time and hence loses the cgroup weight based scheduling. Below patch gives a chance to cfq algorithm (cfq_arm_slice_timer) to arm the slice timer in case group_idle is enabled. In case if group_idle is also not required (including for nonrotational NCQ drives), we need to explicitly set group_idle = 0 from sysfs for such cases. 
With this patch result of fio(for above usecase) :- file1: (groupid=0, jobs=1): err= 0: pid=690: Thu Jan 1 00:06:08 1970 write: IOPS=1706, BW=53.3MiB/s (55.9MB/s)(1024MiB/19197msec) <..> file2: (groupid=0, jobs=1): err= 0: pid=691: Thu Jan 1 00:06:08 1970 write: IOPS=1043, BW=32.6MiB/s (34.2MB/s)(1024MiB/31401msec) <..> // In this processes BW is as per their respective cgroups weight. Signed-off-by: Ritesh Harjani Signed-off-by: Jens Axboe Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- block/cfq-iosched.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 5da8e6e9ab4b..a5f8f7922725 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2728,7 +2728,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) * for devices that support queuing, otherwise we still have a problem * with sync vs async workloads. */ - if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) + if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag && + !cfqd->cfq_group_idle) return; WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); From 3fb2459d90e34356f47fd6810f1ee1e1ed21b446 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 9 May 2017 09:39:59 +0200 Subject: [PATCH 081/151] kthread: Fix use-after-free if kthread fork fails commit 4d6501dce079c1eb6bf0b1d8f528a5e81770109e upstream. If a kthread forks (e.g. usermodehelper since commit 1da5c46fa965) but fails in copy_process() between calling dup_task_struct() and setting p->set_child_tid, then the value of p->set_child_tid will be inherited from the parent and get prematurely freed by free_kthread_struct(). kthread() - worker_thread() - process_one_work() | - call_usermodehelper_exec_work() | - kernel_thread() | - _do_fork() | - copy_process() | - dup_task_struct() | - arch_dup_task_struct() | - tsk->set_child_tid = current->set_child_tid // implied | - ... | - goto bad_fork_* | - ... | - free_task(tsk) | - free_kthread_struct(tsk) | - kfree(tsk->set_child_tid) - ... - schedule() - __schedule() - wq_worker_sleeping() - kthread_data(task)->flags // UAF The problem started showing up with commit 1da5c46fa965 since it reused ->set_child_tid for the kthread worker data. A better long-term solution might be to get rid of the ->set_child_tid abuse. The comment in set_kthread_struct() also looks slightly wrong. Debugged-by: Jamie Iles Fixes: 1da5c46fa965 ("kthread: Make struct kthread kmalloc'ed") Signed-off-by: Vegard Nossum Acked-by: Oleg Nesterov Cc: Peter Zijlstra Cc: Greg Kroah-Hartman Cc: Andy Lutomirski Cc: Frederic Weisbecker Cc: Jamie Iles Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20170509073959.17858-1-vegard.nossum@oracle.com Signed-off-by: Thomas Gleixner Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- kernel/fork.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index 95338c74b361..384394beb701 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1243,6 +1243,18 @@ static struct task_struct *copy_process(unsigned long clone_flags, if (!p) goto fork_out; + /* + * This _must_ happen before we call free_task(), i.e. before we jump + * to any of the bad_fork_* labels. This is to avoid freeing + * p->set_child_tid which is (ab)used as a kthread's data pointer for + * kernel threads (PF_KTHREAD). + */ + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; + /* + * Clear TID on mm_release()? + */ + p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? 
child_tidptr : NULL; + ftrace_graph_init_task(p); rt_mutex_init_task(p); @@ -1406,11 +1418,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, goto bad_fork_cleanup_io; } - p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; - /* - * Clear TID on mm_release()? - */ - p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; #ifdef CONFIG_BLOCK p->plug = NULL; #endif From 7ec63987f0fb2359a6130b7d75fb3cfe5121b688 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Mon, 29 May 2017 09:22:07 +0200 Subject: [PATCH 082/151] kthread: fix boot hang (regression) on MIPS/OpenRISC commit b0f5a8f32e8bbdaae1abb8abe2d3cbafaba57e08 upstream. This fixes a regression in commit 4d6501dce079 where I didn't notice that MIPS and OpenRISC were reinitialising p->{set,clear}_child_tid to NULL after our initialisation in copy_process(). We can simply get rid of the arch-specific initialisation here since it is now always done in copy_process() before hitting copy_thread{,_tls}(). Review notes: - As far as I can tell, copy_process() is the only user of copy_thread_tls(), which is the only caller of copy_thread() for architectures that don't implement copy_thread_tls(). - After this patch, there is no arch-specific code touching p->set_child_tid or p->clear_child_tid whatsoever. - It may look like MIPS/OpenRISC wanted to always have these fields be NULL, but that's not true, as copy_process() would unconditionally set them again _after_ calling copy_thread_tls() before commit 4d6501dce079. Fixes: 4d6501dce079c1eb6bf0b1d8f528a5e81770109e ("kthread: Fix use-after-free if kthread fork fails") Reported-by: Guenter Roeck Tested-by: Guenter Roeck # MIPS only Acked-by: Stafford Horne Acked-by: Oleg Nesterov Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: Jonas Bonn Cc: Stefan Kristiansson Cc: openrisc@lists.librecores.org Cc: Jamie Iles Cc: Thomas Gleixner Signed-off-by: Vegard Nossum Signed-off-by: Linus Torvalds Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/process.c | 1 - arch/openrisc/kernel/process.c | 2 -- 2 files changed, 3 deletions(-) diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 7d09efd25b56..750e679fbbdc 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -87,7 +87,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs, *regs = current_pt_regs(); unsigned long childksp; - p->set_child_tid = p->clear_child_tid = NULL; childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 386af258591d..62d9bb2dbd82 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -152,8 +152,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp, top_of_kernel_stack = sp; - p->set_child_tid = p->clear_child_tid = NULL; - /* Locate userspace context on stack... */ sp -= STACK_FRAME_OVERHEAD; /* redzone */ sp -= sizeof(struct pt_regs); From 0ee00fd88e4eefdb5b6a3d7577e543675483b126 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 5 Jun 2017 15:30:16 +0800 Subject: [PATCH 083/151] staging: rt5208: Fix a sleep-in-atomic bug in xd_copy_page commit 498c4b4e9c23855d17ecc2a108d949bb68020481 upstream. 
The driver may sleep under a spin lock, and the function call path is: rtsx_exclusive_enter_ss (acquire the lock by spin_lock) rtsx_enter_ss rtsx_power_off_card xd_cleanup_work xd_delay_write xd_finish_write xd_copy_page wait_timeout schedule_timeout --> may sleep To fix it, "wait_timeout" is replaced with mdelay in xd_copy_page. Signed-off-by: Jia-Ju Bai Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rts5208/xd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c index 0d029fe92b40..0526dc783d64 100644 --- a/drivers/staging/rts5208/xd.c +++ b/drivers/staging/rts5208/xd.c @@ -1095,7 +1095,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk, reg = 0; rtsx_read_register(chip, XD_CTL, ®); if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) { - wait_timeout(100); + mdelay(100); if (detect_card_cd(chip, XD_CARD) != STATUS_SUCCESS) { From 45845e2f817e6b1dfa00a70af92dbd8d42795f33 Mon Sep 17 00:00:00 2001 From: Daniel Micay Date: Mon, 5 Jun 2017 21:52:34 -0700 Subject: [PATCH 084/151] staging/rts5208: Fix read overflow in memcpy commit 88a5b39b69ab1828fd4130e2baadd184109cea69 upstream. Noticed by FORTIFY_SOURCE, this swaps memcpy() for strncpy() to zero-value fill the end of the buffer instead of over-reading a string from .rodata. Signed-off-by: Daniel Micay [kees: wrote commit log] Signed-off-by: Kees Cook Cc: Greg Kroah-Hartman Cc: Wayne Porter Signed-off-by: Amit Pundir --- drivers/staging/rts5208/rtsx_scsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c index 0615a7e77576..34cf9dc13acd 100644 --- a/drivers/staging/rts5208/rtsx_scsi.c +++ b/drivers/staging/rts5208/rtsx_scsi.c @@ -539,7 +539,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip) if (sendbytes > 8) { memcpy(buf, inquiry_buf, 8); - memcpy(buf + 8, inquiry_string, sendbytes - 8); + strncpy(buf + 8, inquiry_string, sendbytes - 8); if (pro_formatter_flag) { /* Additional Length */ buf[4] = 0x33; From 209c27b04ccc679bd7da0b84c2144733ce79c8a3 Mon Sep 17 00:00:00 2001 From: Johan Hedberg Date: Sat, 4 Aug 2018 23:40:26 +0300 Subject: [PATCH 085/151] Bluetooth: h5: Fix missing dependency on BT_HCIUART_SERDEV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 6c3711ec64fd23a9abc8aaf59a9429569a6282df ] This driver was recently updated to use serdev, so add the appropriate dependency. Without this one can get compiler warnings like this if CONFIG_SERIAL_DEV_BUS is not enabled: CC [M] drivers/bluetooth/hci_h5.o drivers/bluetooth/hci_h5.c:934:36: warning: ‘h5_serdev_driver’ defined but not used [-Wunused-variable] static struct serdev_device_driver h5_serdev_driver = { ^~~~~~~~~~~~~~~~ Signed-off-by: Johan Hedberg Signed-off-by: Marcel Holtmann Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/bluetooth/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 4547dc238fc7..d59123d39e87 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -85,6 +85,7 @@ config BT_HCIUART_LL config BT_HCIUART_3WIRE bool "Three-wire UART (H5) protocol support" depends on BT_HCIUART + depends on BT_HCIUART_SERDEV help The HCI Three-wire UART Transport Layer makes it possible to user the Bluetooth HCI over a serial port interface. 
The HCI From cb8ef134ed5833f872fce2407dd814cf5ed0274d Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Thu, 2 Aug 2018 12:12:20 -0500 Subject: [PATCH 086/151] scsi: target: fix __transport_register_session locking [ Upstream commit 6a64f6e1591322beb8ce16e952a53582caf2a15c ] When __transport_register_session is called from transport_register_session irqs will already have been disabled, so we do not want the unlock irq call to enable them until the higher level has done the final spin_unlock_irqrestore/ spin_unlock_irq. This has __transport_register_session use the save/restore call. Signed-off-by: Mike Christie Reviewed-by: Bart Van Assche Reviewed-by: Christoph Hellwig Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_transport.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 26afe1c74ef4..96f13d713f18 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -323,6 +323,7 @@ void __transport_register_session( void *fabric_sess_ptr) { unsigned char buf[PR_REG_ISID_LEN]; + unsigned long flags; se_sess->se_tpg = se_tpg; se_sess->fabric_sess_ptr = fabric_sess_ptr; @@ -345,7 +346,7 @@ void __transport_register_session( } kref_get(&se_nacl->acl_kref); - spin_lock_irq(&se_nacl->nacl_sess_lock); + spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); /* * The se_nacl->nacl_sess pointer will be set to the * last active I_T Nexus for each struct se_node_acl. @@ -354,7 +355,7 @@ void __transport_register_session( list_add_tail(&se_sess->sess_acl_list, &se_nacl->acl_sess_list); - spin_unlock_irq(&se_nacl->nacl_sess_lock); + spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); } list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); From f2675e34bd585f3187b9becaa24a259f7361928b Mon Sep 17 00:00:00 2001 From: BingJing Chang Date: Wed, 1 Aug 2018 17:08:36 +0800 Subject: [PATCH 087/151] md/raid5: fix data corruption of replacements after originals dropped [ Upstream commit d63e2fc804c46e50eee825c5d3a7228e07048b47 ] During raid5 replacement, the stripes can be marked with R5_NeedReplace flag. Data can be read from being-replaced devices and written to replacing spares without reading all other devices. (It's 'replace' mode. s.replacing = 1) If a being-replaced device is dropped, the replacement progress will be interrupted and resumed with pure recovery mode. However, existing stripes before being interrupted cannot read from the dropped device anymore. It prints lots of WARN_ON messages. And it results in data corruption because existing stripes write problematic data into its replacement device and update the progress. \# Erase disks (1MB + 2GB) dd if=/dev/zero of=/dev/sda bs=1MB count=2049 dd if=/dev/zero of=/dev/sdb bs=1MB count=2049 dd if=/dev/zero of=/dev/sdc bs=1MB count=2049 dd if=/dev/zero of=/dev/sdd bs=1MB count=2049 mdadm -C /dev/md0 -amd -R -l5 -n3 -x0 /dev/sd[abc] -z 2097152 \# Ensure array stores non-zero data dd if=/root/data_4GB.iso of=/dev/md0 bs=1MB \# Start replacement mdadm /dev/md0 -a /dev/sdd mdadm /dev/md0 --replace /dev/sda Then, Hot-plug out /dev/sda during recovery, and wait for recovery done. echo check > /sys/block/md0/md/sync_action cat /sys/block/md0/md/mismatch_cnt # it will be greater than 0. Soon after you hot-plug out /dev/sda, you will see many WARN_ON messages. The replacement recovery will be interrupted shortly. 
After the recovery finishes, it will result in data corruption. Actually, it's just an unhandled case of replacement. In commit (md/raid5: fix interaction of 'replace' and 'recovery'.), if a NeedReplace device is not UPTODATE then that is an error, the commit just simply print WARN_ON but also mark these corrupted stripes with R5_WantReplace. (it means it's ready for writes.) To fix this case, we can leverage 'sync and replace' mode mentioned in commit <9a3e1101b827> (md/raid5: detect and handle replacements during recovery.). We can add logics to detect and use 'sync and replace' mode for these stripes. Reported-by: Alex Chen Reviewed-by: Alex Wu Reviewed-by: Chung-Chiang Cheng Signed-off-by: BingJing Chang Signed-off-by: Shaohua Li Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/md/raid5.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index cf178f475131..9f442b9418e5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3703,6 +3703,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) s->failed++; if (rdev && !test_bit(Faulty, &rdev->flags)) do_recovery = 1; + else if (!rdev) { + rdev = rcu_dereference( + conf->disks[i].replacement); + if (rdev && !test_bit(Faulty, &rdev->flags)) + do_recovery = 1; + } } } if (test_bit(STRIPE_SYNCING, &sh->state)) { From 9b48a687f1764d5fa301209927626c221ab01909 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 2 Aug 2018 11:24:47 +0300 Subject: [PATCH 088/151] uio: potential double frees if __uio_register_device() fails [ Upstream commit f019f07ecf6a6b8bd6d7853bce70925d90af02d1 ] The uio_unregister_device() function assumes that if "info->uio_dev" is non-NULL that means "info" is fully allocated. Setting info->uio_de has to be the last thing in the function. In the current code, if request_threaded_irq() fails then we return with info->uio_dev set to non-NULL but info is not fully allocated and it can lead to double frees. Fixes: beafc54c4e2f ("UIO: Add the User IO core code") Signed-off-by: Dan Carpenter Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 60fa6278fbce..86055f663a01 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -832,8 +832,6 @@ int __uio_register_device(struct module *owner, if (ret) goto err_uio_dev_add_attributes; - info->uio_dev = idev; - if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { ret = devm_request_irq(idev->dev, info->irq, uio_interrupt, info->irq_flags, info->name, idev); @@ -841,6 +839,7 @@ int __uio_register_device(struct module *owner, goto err_request_irq; } + info->uio_dev = idev; return 0; err_request_irq: From 4d9f6cff68d4e54ee8ec8fa65f7962537af431fa Mon Sep 17 00:00:00 2001 From: Anton Vasilyev Date: Fri, 27 Jul 2018 16:39:31 +0300 Subject: [PATCH 089/151] tty: rocket: Fix possible buffer overwrite on register_PCI [ Upstream commit 0419056ec8fd01ddf5460d2dba0491aad22657dd ] If number of isa and pci boards exceed NUM_BOARDS on the path rp_init()->init_PCI()->register_PCI() then buffer overwrite occurs in register_PCI() on assign rcktpt_io_addr[i]. The patch adds check on upper bound for index of registered board in register_PCI. Found by Linux Driver Verification project (linuxtesting.org). 
Signed-off-by: Anton Vasilyev Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/tty/rocket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index 383c4c796637..347fe7ace24c 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -1928,7 +1928,7 @@ static __init int register_PCI(int i, struct pci_dev *dev) ByteIO_t UPCIRingInd = 0; if (!dev || !pci_match_id(rocket_pci_ids, dev) || - pci_enable_device(dev)) + pci_enable_device(dev) || i >= NUM_BOARDS) return 0; rcktpt_io_addr[i] = pci_resource_start(dev, 0); From 9e4dac32a9b8983a47260dcc364ef516153aee73 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Mon, 2 Jul 2018 04:21:18 -0400 Subject: [PATCH 090/151] macintosh/via-pmu: Add missing mmio accessors [ Upstream commit 576d5290d678a651b9f36050fc1717e0573aca13 ] Add missing in_8() accessors to init_pmu() and pmu_sr_intr(). This fixes several sparse warnings: drivers/macintosh/via-pmu.c:536:29: warning: dereference of noderef expression drivers/macintosh/via-pmu.c:537:33: warning: dereference of noderef expression drivers/macintosh/via-pmu.c:1455:17: warning: dereference of noderef expression drivers/macintosh/via-pmu.c:1456:69: warning: dereference of noderef expression Tested-by: Stan Johnson Signed-off-by: Finn Thain Reviewed-by: Geert Uytterhoeven Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/macintosh/via-pmu.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index dee88e59f0d3..d7b90ed7c717 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -527,8 +527,9 @@ init_pmu(void) int timeout; struct adb_request req; - out_8(&via[B], via[B] | TREQ); /* negate TREQ */ - out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */ + /* Negate TREQ. Set TACK to input and TREQ to output. */ + out_8(&via[B], in_8(&via[B]) | TREQ); + out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK); pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); timeout = 100000; @@ -1450,8 +1451,8 @@ pmu_sr_intr(void) struct adb_request *req; int bite = 0; - if (via[B] & TREQ) { - printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]); + if (in_8(&via[B]) & TREQ) { + printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B])); out_8(&via[IFR], SR_INT); return NULL; } From 412d0c7892c4c3da023f0de916ae9a99206e3920 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Thu, 26 Jul 2018 15:59:48 +0200 Subject: [PATCH 091/151] ath10k: prevent active scans on potential unusable channels [ Upstream commit 3f259111583801013cb605bb4414aa529adccf1c ] The QCA4019 hw1.0 firmware 10.4-3.2.1-00050 and 10.4-3.5.3-00053 (and most likely all other) seem to ignore the WMI_CHAN_FLAG_DFS flag during the scan. This results in transmission (probe requests) on channels which are not "available" for transmissions. Since the firmware is closed source and nothing can be done from our side to fix the problem in it, the driver has to work around this problem. The WMI_CHAN_FLAG_PASSIVE seems to be interpreted by the firmware to not scan actively on a channel unless an AP was detected on it. Simple probe requests will then be transmitted by the STA on the channel. ath10k must therefore also use this flag when it queues a radar channel for scanning. 
This should reduce the chance of an active scan when the channel might be "unusable" for transmissions. Fixes: e8a50f8ba44b ("ath10k: introduce DFS implementation") Signed-off-by: Sven Eckelmann Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath10k/mac.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 9573c8e4df41..e74991db25ce 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1728,6 +1728,13 @@ static int ath10k_update_channel_list(struct ath10k *ar) passive = channel->flags & IEEE80211_CHAN_NO_IR; ch->passive = passive; + /* the firmware is ignoring the "radar" flag of the + * channel and is scanning actively using Probe Requests + * on "Radar detection"/DFS channels which are not + * marked as "available" + */ + ch->passive |= ch->chan_radar; + ch->freq = channel->center_freq; ch->min_power = 0; ch->max_power = channel->max_power * 2; From 220146d5c8fa5b79db3a6ceda2f71df1c5790c7c Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 27 Jul 2018 18:23:19 -0700 Subject: [PATCH 092/151] MIPS: Fix ISA virt/bus conversion for non-zero PHYS_OFFSET [ Upstream commit 0494d7ffdcebc6935410ea0719b24ab626675351 ] isa_virt_to_bus() & isa_bus_to_virt() claim to treat ISA bus addresses as being identical to physical addresses, but they fail to do so in the presence of a non-zero PHYS_OFFSET. Correct this by having them use virt_to_phys() & phys_to_virt(), which consolidates the calculations to one place & ensures that ISA bus addresses do indeed match physical addresses. Signed-off-by: Paul Burton Patchwork: https://patchwork.linux-mips.org/patch/20047/ Cc: James Hogan Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: Vladimir Kondratiev Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mips/include/asm/io.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index fdb3c978436d..b6e49c4cd5d2 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address) /* * ISA I/O bus memory addresses are 1:1 with the physical address. */ -static inline unsigned long isa_virt_to_bus(volatile void * address) +static inline unsigned long isa_virt_to_bus(volatile void *address) { - return (unsigned long)address - PAGE_OFFSET; + return virt_to_phys(address); } -static inline void * isa_bus_to_virt(unsigned long address) +static inline void *isa_bus_to_virt(unsigned long address) { - return (void *)(address + PAGE_OFFSET); + return phys_to_virt(address); } #define isa_page_to_bus page_to_phys From a9ab273365d023de8296aa4b08a424a8b2ef8d9c Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 2 Jul 2018 12:01:53 -0700 Subject: [PATCH 093/151] ata: libahci: Correct setting of DEVSLP register [ Upstream commit 2dbb3ec29a6c069035857a2fc4c24e80e5dfe3cc ] We have seen that on some platforms, SATA device never show any DEVSLP residency. This prevent power gating of SATA IP, which prevent system to transition to low power mode in systems with SLP_S0 aka modern standby systems. The PHY logic is off only in DEVSLP not in slumber. 
Reference: https://www.intel.com/content/dam/www/public/us/en/documents/datasheets /332995-skylake-i-o-platform-datasheet-volume-1.pdf Section 28.7.6.1 Here driver is trying to do read-modify-write the devslp register. But not resetting the bits for which this driver will modify values (DITO, MDAT and DETO). So simply reset those bits before updating to new values. Signed-off-by: Srinivas Pandruvada Reviewed-by: Rafael J. Wysocki Reviewed-by: Hans de Goede Signed-off-by: Tejun Heo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/ata/libahci.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 794448ce2fc0..5469a81aa6d3 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -2052,6 +2052,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) deto = 20; } + /* Make dito, mdat, deto bits to 0s */ + devslp &= ~GENMASK_ULL(24, 2); devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) | (mdat << PORT_DEVSLP_MDAT_OFFSET) | (deto << PORT_DEVSLP_DETO_OFFSET) | From 9e8f26c2d5ab2471c2531204dcba88ce9eaacf28 Mon Sep 17 00:00:00 2001 From: Anton Vasilyev Date: Fri, 27 Jul 2018 16:51:57 +0300 Subject: [PATCH 094/151] scsi: 3ware: fix return 0 on the error path of probe [ Upstream commit 4dc98c1995482262e70e83ef029135247fafe0f2 ] tw_probe() returns 0 in case of fail of tw_initialize_device_extension(), pci_resource_start() or tw_reset_sequence() and releases resources. twl_probe() returns 0 in case of fail of twl_initialize_device_extension(), pci_iomap() and twl_reset_sequence(). twa_probe() returns 0 in case of fail of tw_initialize_device_extension(), ioremap() and twa_reset_sequence(). The patch adds retval initialization for these cases. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Anton Vasilyev Acked-by: Adam Radford Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/3w-9xxx.c | 6 +++++- drivers/scsi/3w-sas.c | 3 +++ drivers/scsi/3w-xxxx.c | 2 ++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 632295144766..944c44cc2cb9 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2057,6 +2057,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) if (twa_initialize_device_extension(tw_dev)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension"); + retval = -ENOMEM; goto out_free_device_extension; } @@ -2079,6 +2080,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) tw_dev->base_addr = ioremap(mem_addr, mem_len); if (!tw_dev->base_addr) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); + retval = -ENOMEM; goto out_release_mem_region; } @@ -2086,8 +2088,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) TW_DISABLE_INTERRUPTS(tw_dev); /* Initialize the card */ - if (twa_reset_sequence(tw_dev, 0)) + if (twa_reset_sequence(tw_dev, 0)) { + retval = -ENOMEM; goto out_iounmap; + } /* Set host specific parameters */ if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) || diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 2ee2e543ab73..e853e5c3608e 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -1613,6 +1613,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) if (twl_initialize_device_extension(tw_dev)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension"); + retval = -ENOMEM; goto out_free_device_extension; } @@ -1627,6 +1628,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) tw_dev->base_addr = pci_iomap(pdev, 1, 0); if (!tw_dev->base_addr) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap"); + retval = -ENOMEM; goto out_release_mem_region; } @@ -1636,6 +1638,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) /* Initialize the card */ if (twl_reset_sequence(tw_dev, 0)) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe"); + retval = -ENOMEM; goto out_iounmap; } diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index c1e1051f94cc..f09db8f428c2 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -2291,6 +2291,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) if (tw_initialize_device_extension(tw_dev)) { printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension."); + retval = -ENOMEM; goto out_free_device_extension; } @@ -2305,6 +2306,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) tw_dev->base_addr = pci_resource_start(pdev, 0); if (!tw_dev->base_addr) { printk(KERN_WARNING "3w-xxxx: Failed to get io address."); + retval = -ENOMEM; goto out_release_mem_region; } From 128b37430eb5113155e0db556cb903aa4220fdf3 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Mon, 30 Jul 2018 13:57:41 +0200 Subject: [PATCH 095/151] Bluetooth: hidp: Fix handling of strncpy for hid->name information MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit b3cadaa485f0c20add1644a5c877b0765b285c0c ] This fixes two issues with setting hid->name information. 
CC net/bluetooth/hidp/core.o In function ‘hidp_setup_hid’, inlined from ‘hidp_session_dev_init’ at net/bluetooth/hidp/core.c:815:9, inlined from ‘hidp_session_new’ at net/bluetooth/hidp/core.c:953:8, inlined from ‘hidp_connection_add’ at net/bluetooth/hidp/core.c:1366:8: net/bluetooth/hidp/core.c:778:2: warning: ‘strncpy’ output may be truncated copying 127 bytes from a string of length 127 [-Wstringop-truncation] strncpy(hid->name, req->name, sizeof(req->name) - 1); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CC net/bluetooth/hidp/core.o net/bluetooth/hidp/core.c: In function ‘hidp_setup_hid’: net/bluetooth/hidp/core.c:778:38: warning: argument to ‘sizeof’ in ‘strncpy’ call is the same expression as the source; did you mean to use the size of the destination? [-Wsizeof-pointer-memaccess] strncpy(hid->name, req->name, sizeof(req->name)); ^ Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/bluetooth/hidp/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 4f41b245ce5b..1c669fa3e61a 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -763,7 +763,7 @@ static int hidp_setup_hid(struct hidp_session *session, hid->version = req->version; hid->country = req->country; - strncpy(hid->name, req->name, sizeof(req->name) - 1); + strncpy(hid->name, req->name, sizeof(hid->name)); snprintf(hid->phys, sizeof(hid->phys), "%pMR", &l2cap_pi(session->ctrl_sock->sk)->chan->src); From 04924815bdc2a86e2a94b254397744712187352e Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 25 Jul 2018 17:48:01 +0200 Subject: [PATCH 096/151] x86/mm: Remove in_nmi() warning from vmalloc_fault() [ Upstream commit 6863ea0cda8725072522cd78bda332d9a0b73150 ] It is perfectly okay to take page-faults, especially on the vmalloc area while executing an NMI handler. Remove the warning. Signed-off-by: Joerg Roedel Signed-off-by: Thomas Gleixner Tested-by: David H. Gutteridge Cc: "H . Peter Anvin" Cc: linux-mm@kvack.org Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Dave Hansen Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Peter Zijlstra Cc: Borislav Petkov Cc: Jiri Kosina Cc: Boris Ostrovsky Cc: Brian Gerst Cc: David Laight Cc: Denys Vlasenko Cc: Eduardo Valentin Cc: Greg KH Cc: Will Deacon Cc: aliguori@amazon.com Cc: daniel.gruss@iaik.tugraz.at Cc: hughd@google.com Cc: keescook@google.com Cc: Andrea Arcangeli Cc: Waiman Long Cc: Pavel Machek Cc: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: joro@8bytes.org Link: https://lkml.kernel.org/r/1532533683-5988-2-git-send-email-joro@8bytes.org Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/fault.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6fa245ae52c5..899b2f94f9c2 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -271,8 +271,6 @@ static noinline int vmalloc_fault(unsigned long address) if (!(address >= VMALLOC_START && address < VMALLOC_END)) return -1; - WARN_ON_ONCE(in_nmi()); - /* * Synchronize this task's top level page-table * with the 'reference' page table. 
From d8596403ebd873c7c47229939a423f8d431a96be Mon Sep 17 00:00:00 2001 From: Anton Vasilyev Date: Mon, 23 Jul 2018 19:53:30 +0300 Subject: [PATCH 097/151] gpio: ml-ioh: Fix buffer underwrite on probe error path [ Upstream commit 4bf4eed44bfe288f459496eaf38089502ef91a79 ] If ioh_gpio_probe() fails on devm_irq_alloc_descs() then chip may point to any element of chip_save array, so reverse iteration from pointer chip may become chip_save[-1] and gpiochip_remove() will operate with wrong memory. The patch fix the error path of ioh_gpio_probe() to correctly bypass chip_save array. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Anton Vasilyev Signed-off-by: Linus Walleij Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpio/gpio-ml-ioh.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c index 5536108aa9db..fe21734bbe5c 100644 --- a/drivers/gpio/gpio-ml-ioh.c +++ b/drivers/gpio/gpio-ml-ioh.c @@ -495,9 +495,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev, chip = chip_save; err_gpiochip_add: + chip = chip_save; while (--i >= 0) { - chip--; gpiochip_remove(&chip->gpio); + chip++; } kfree(chip_save); From 351bce359e6e92cac585b0f8a8bcb96ccee8dfcf Mon Sep 17 00:00:00 2001 From: Yelena Krivosheev Date: Wed, 18 Jul 2018 18:10:51 +0200 Subject: [PATCH 098/151] net: mvneta: fix mtu change on port without link [ Upstream commit 8466baf788ec3e18836bd9c91ba0b1a07af25878 ] It is incorrect to enable TX/RX queues (call by mvneta_port_up()) for port without link. Indeed MTU change for interface without link causes TX queues to stuck. Fixes: c5aff18204da ("net: mvneta: driver for Marvell Armada 370/XP network unit") Signed-off-by: Yelena Krivosheev [gregory.clement: adding Fixes tags and rewording commit log] Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/marvell/mvneta.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b6ac97636a08..6212177781d5 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2477,7 +2477,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) } mvneta_start_dev(pp); - mvneta_port_up(pp); netdev_update_features(dev); From 2541d77bf0c8733ea276f9fb798f850a494a8560 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 27 Jul 2018 15:26:55 +0300 Subject: [PATCH 099/151] net: dcb: For wild-card lookups, use priority -1, not 0 [ Upstream commit 08193d1a893c802c4b807e4d522865061f4e9f4f ] The function dcb_app_lookup walks the list of specified DCB APP entries, looking for one that matches a given criteria: ifindex, selector, protocol ID and optionally also priority. The "don't care" value for priority is set to 0, because that priority has not been allowed under CEE regime, which predates the IEEE standardization. Under IEEE, 0 is a valid priority number. But because dcb_app_lookup considers zero a wild card, attempts to add an APP entry with priority 0 fail when other entries exist for a given ifindex / selector / PID triplet. Fix by changing the wild-card value to -1. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/dcb/dcbnl.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 93ea80196f0e..db9d0c194243 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1728,7 +1728,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app, if (itr->app.selector == app->selector && itr->app.protocol == app->protocol && itr->ifindex == ifindex && - (!prio || itr->app.priority == prio)) + ((prio == -1) || itr->app.priority == prio)) return itr; } @@ -1763,7 +1763,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) u8 prio = 0; spin_lock_bh(&dcb_lock); - if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) + itr = dcb_app_lookup(app, dev->ifindex, -1); + if (itr) prio = itr->app.priority; spin_unlock_bh(&dcb_lock); @@ -1791,7 +1792,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) spin_lock_bh(&dcb_lock); /* Search for existing match and replace */ - if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { + itr = dcb_app_lookup(new, dev->ifindex, -1); + if (itr) { if (new->priority) itr->app.priority = new->priority; else { @@ -1824,7 +1826,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) u8 prio = 0; spin_lock_bh(&dcb_lock); - if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) + itr = dcb_app_lookup(app, dev->ifindex, -1); + if (itr) prio |= 1 << itr->app.priority; spin_unlock_bh(&dcb_lock); From 845a0a1b29dc4af57e6ed7beb8d943bfd7e74617 Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Wed, 25 Jul 2018 22:46:29 -0300 Subject: [PATCH 100/151] partitions/aix: append null character to print data from disk [ Upstream commit d43fdae7bac2def8c4314b5a49822cb7f08a45f1 ] Even if properly initialized, the lvname array (i.e., strings) is read from disk, and might contain corrupt data (e.g., lack the null terminating character for strings). So, make sure the partition name string used in pr_warn() has the null terminating character. Fixes: 6ceea22bbbc8 ("partitions: add aix lvm partition support files") Suggested-by: Daniel J. Axtens Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- block/partitions/aix.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/block/partitions/aix.c b/block/partitions/aix.c index f3ed7b2d89bf..fa74698e12a6 100644 --- a/block/partitions/aix.c +++ b/block/partitions/aix.c @@ -281,10 +281,14 @@ int aix_partition(struct parsed_partitions *state) next_lp_ix += 1; } for (i = 0; i < state->limit; i += 1) - if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) + if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) { + char tmp[sizeof(n[i].name) + 1]; // null char + + snprintf(tmp, sizeof(tmp), "%s", n[i].name); pr_warn("partition %s (%u pp's found) is " "not contiguous\n", - n[i].name, lvip[i].pps_found); + tmp, lvip[i].pps_found); + } kfree(pvd); } kfree(n); From c919a682712e802b82b81d6fa50ca1bcf31f5ac6 Mon Sep 17 00:00:00 2001 From: Mauricio Faria de Oliveira Date: Wed, 25 Jul 2018 22:46:28 -0300 Subject: [PATCH 101/151] partitions/aix: fix usage of uninitialized lv_info and lvname structures [ Upstream commit 14cb2c8a6c5dae57ee3e2da10fa3db2b9087e39e ] The if-block that sets a successful return value in aix_partition() uses 'lvip[].pps_per_lv' and 'n[].name' potentially uninitialized. 
For example, if 'numlvs' is zero or alloc_lvn() fails, neither is initialized, but are used anyway if alloc_pvd() succeeds after it. So, make the alloc_pvd() call conditional on their initialization. This has been hit when attaching an apparently corrupted/stressed AIX LUN, misleading the kernel to pr_warn() invalid data and hang. [...] partition (null) (11 pp's found) is not contiguous [...] partition (null) (2 pp's found) is not contiguous [...] partition (null) (3 pp's found) is not contiguous [...] partition (null) (64 pp's found) is not contiguous Fixes: 6ceea22bbbc8 ("partitions: add aix lvm partition support files") Signed-off-by: Mauricio Faria de Oliveira Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- block/partitions/aix.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/block/partitions/aix.c b/block/partitions/aix.c index fa74698e12a6..8e7d358e0226 100644 --- a/block/partitions/aix.c +++ b/block/partitions/aix.c @@ -177,7 +177,7 @@ int aix_partition(struct parsed_partitions *state) u32 vgda_sector = 0; u32 vgda_len = 0; int numlvs = 0; - struct pvd *pvd; + struct pvd *pvd = NULL; struct lv_info { unsigned short pps_per_lv; unsigned short pps_found; @@ -231,10 +231,11 @@ int aix_partition(struct parsed_partitions *state) if (lvip[i].pps_per_lv) foundlvs += 1; } + /* pvd loops depend on n[].name and lvip[].pps_per_lv */ + pvd = alloc_pvd(state, vgda_sector + 17); } put_dev_sector(sect); } - pvd = alloc_pvd(state, vgda_sector + 17); if (pvd) { int numpps = be16_to_cpu(pvd->pp_count); int psn_part1 = be32_to_cpu(pvd->psn_part1); From 7092eb81a06292f8284ef60d8a99137cae20e9b4 Mon Sep 17 00:00:00 2001 From: Zumeng Chen Date: Wed, 4 Jul 2018 12:35:29 +0800 Subject: [PATCH 102/151] mfd: ti_am335x_tscadc: Fix struct clk memory leak [ Upstream commit c2b1509c77a99a0dcea0a9051ca743cb88385f50 ] Use devm_elk_get() to let Linux manage struct clk memory to avoid the following memory leakage report: unreferenced object 0xdd75efc0 (size 64): comm "systemd-udevd", pid 186, jiffies 4294945126 (age 1195.750s) hex dump (first 32 bytes): 61 64 63 5f 74 73 63 5f 66 63 6b 00 00 00 00 00 adc_tsc_fck..... 00 00 00 00 92 03 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [] kmemleak_alloc+0x40/0x74 [] __kmalloc_track_caller+0x198/0x388 [] kstrdup+0x40/0x5c [] kstrdup_const+0x30/0x3c [] __clk_create_clk+0x60/0xac [] clk_get_sys+0x74/0x144 [] clk_get+0x5c/0x68 [] ti_tscadc_probe+0x260/0x468 [ti_am335x_tscadc] [] platform_drv_probe+0x60/0xac [] driver_probe_device+0x214/0x2dc [] __driver_attach+0x94/0xc0 [] bus_for_each_dev+0x90/0xa0 [] driver_attach+0x28/0x30 [] bus_add_driver+0x184/0x1ec [] driver_register+0xb0/0xf0 [] __platform_driver_register+0x40/0x54 Signed-off-by: Zumeng Chen Signed-off-by: Lee Jones Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/ti_am335x_tscadc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index d877e777cce6..5726bf4e6c61 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -227,14 +227,13 @@ static int ti_tscadc_probe(struct platform_device *pdev) * The TSC_ADC_SS controller design assumes the OCP clock is * at least 6x faster than the ADC clock. 
*/ - clk = clk_get(&pdev->dev, "adc_tsc_fck"); + clk = devm_clk_get(&pdev->dev, "adc_tsc_fck"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "failed to get TSC fck\n"); err = PTR_ERR(clk); goto err_disable_clk; } clock_rate = clk_get_rate(clk); - clk_put(clk); tscadc->clk_div = clock_rate / ADC_CLK; /* TSCADC_CLKDIV needs to be configured to the value minus 1 */ From ae3ab8fd4b056dc1827167d90cc4eb6bcafb6933 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Sat, 23 Jun 2018 11:25:19 +0800 Subject: [PATCH 103/151] f2fs: fix to do sanity check with {sit,nat}_ver_bitmap_bytesize [ Upstream commit c77ec61ca0a49544ca81881cc5d5529858f7e196 ] This patch adds to do sanity check with {sit,nat}_ver_bitmap_bytesize during mount, in order to avoid accessing across cache boundary with this abnormal bitmap size. - Overview buffer overrun in build_sit_info() when mounting a crafted f2fs image - Reproduce - Kernel message [ 548.580867] F2FS-fs (loop0): Invalid log blocks per segment (8201) [ 548.580877] F2FS-fs (loop0): Can't find valid F2FS filesystem in 1th superblock [ 548.584979] ================================================================== [ 548.586568] BUG: KASAN: use-after-free in kmemdup+0x36/0x50 [ 548.587715] Read of size 64 at addr ffff8801e9c265ff by task mount/1295 [ 548.589428] CPU: 1 PID: 1295 Comm: mount Not tainted 4.18.0-rc1+ #4 [ 548.589432] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 [ 548.589438] Call Trace: [ 548.589474] dump_stack+0x7b/0xb5 [ 548.589487] print_address_description+0x70/0x290 [ 548.589492] kasan_report+0x291/0x390 [ 548.589496] ? kmemdup+0x36/0x50 [ 548.589509] check_memory_region+0x139/0x190 [ 548.589514] memcpy+0x23/0x50 [ 548.589518] kmemdup+0x36/0x50 [ 548.589545] f2fs_build_segment_manager+0x8fa/0x3410 [ 548.589551] ? __asan_loadN+0xf/0x20 [ 548.589560] ? f2fs_sanity_check_ckpt+0x1be/0x240 [ 548.589566] ? f2fs_flush_sit_entries+0x10c0/0x10c0 [ 548.589587] ? __put_user_ns+0x40/0x40 [ 548.589604] ? find_next_bit+0x57/0x90 [ 548.589610] f2fs_fill_super+0x194b/0x2b40 [ 548.589617] ? f2fs_commit_super+0x1b0/0x1b0 [ 548.589637] ? set_blocksize+0x90/0x140 [ 548.589651] mount_bdev+0x1c5/0x210 [ 548.589655] ? f2fs_commit_super+0x1b0/0x1b0 [ 548.589667] f2fs_mount+0x15/0x20 [ 548.589672] mount_fs+0x60/0x1a0 [ 548.589683] ? alloc_vfsmnt+0x309/0x360 [ 548.589688] vfs_kern_mount+0x6b/0x1a0 [ 548.589699] do_mount+0x34a/0x18c0 [ 548.589710] ? lockref_put_or_lock+0xcf/0x160 [ 548.589716] ? copy_mount_string+0x20/0x20 [ 548.589728] ? memcg_kmem_put_cache+0x1b/0xa0 [ 548.589734] ? kasan_check_write+0x14/0x20 [ 548.589740] ? _copy_from_user+0x6a/0x90 [ 548.589744] ? 
memdup_user+0x42/0x60 [ 548.589750] ksys_mount+0x83/0xd0 [ 548.589755] __x64_sys_mount+0x67/0x80 [ 548.589781] do_syscall_64+0x78/0x170 [ 548.589797] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 548.589820] RIP: 0033:0x7f76fc331b9a [ 548.589821] Code: 48 8b 0d 01 c3 2b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 49 89 ca b8 a5 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d ce c2 2b 00 f7 d8 64 89 01 48 [ 548.589880] RSP: 002b:00007ffd4f0a0e48 EFLAGS: 00000206 ORIG_RAX: 00000000000000a5 [ 548.589890] RAX: ffffffffffffffda RBX: 000000000146c030 RCX: 00007f76fc331b9a [ 548.589892] RDX: 000000000146c210 RSI: 000000000146df30 RDI: 0000000001474ec0 [ 548.589895] RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000013 [ 548.589897] R10: 00000000c0ed0000 R11: 0000000000000206 R12: 0000000001474ec0 [ 548.589900] R13: 000000000146c210 R14: 0000000000000000 R15: 0000000000000003 [ 548.590242] The buggy address belongs to the page: [ 548.591243] page:ffffea0007a70980 count:0 mapcount:0 mapping:0000000000000000 index:0x0 [ 548.592886] flags: 0x2ffff0000000000() [ 548.593665] raw: 02ffff0000000000 dead000000000100 dead000000000200 0000000000000000 [ 548.595258] raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000 [ 548.603713] page dumped because: kasan: bad access detected [ 548.605203] Memory state around the buggy address: [ 548.606198] ffff8801e9c26480: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff [ 548.607676] ffff8801e9c26500: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff [ 548.609157] >ffff8801e9c26580: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff [ 548.610629] ^ [ 548.612088] ffff8801e9c26600: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff [ 548.613674] ffff8801e9c26680: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff [ 548.615141] ================================================================== [ 548.616613] Disabling lock debugging due to kernel taint [ 548.622871] WARNING: CPU: 1 PID: 1295 at mm/page_alloc.c:4065 __alloc_pages_slowpath+0xe4a/0x1420 [ 548.622878] Modules linked in: snd_hda_codec_generic snd_hda_intel snd_hda_codec snd_hwdep snd_hda_core snd_pcm snd_timer snd mac_hid i2c_piix4 soundcore ib_iser rdma_cm iw_cm ib_cm ib_core iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx raid1 raid0 multipath linear 8139too crct10dif_pclmul crc32_pclmul qxl drm_kms_helper syscopyarea aesni_intel sysfillrect sysimgblt fb_sys_fops ttm drm aes_x86_64 crypto_simd cryptd 8139cp glue_helper mii pata_acpi floppy [ 548.623217] CPU: 1 PID: 1295 Comm: mount Tainted: G B 4.18.0-rc1+ #4 [ 548.623219] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014 [ 548.623226] RIP: 0010:__alloc_pages_slowpath+0xe4a/0x1420 [ 548.623227] Code: ff ff 01 89 85 c8 fe ff ff e9 91 fc ff ff 41 89 c5 e9 5c fc ff ff 0f 0b 89 f8 25 ff ff f7 ff 89 85 8c fe ff ff e9 d5 f2 ff ff <0f> 0b e9 65 f2 ff ff 65 8b 05 38 81 d2 47 f6 c4 01 74 1c 65 48 8b [ 548.623281] RSP: 0018:ffff8801f28c7678 EFLAGS: 00010246 [ 548.623284] RAX: 0000000000000000 RBX: 00000000006040c0 RCX: ffffffffb82f73b7 [ 548.623287] RDX: 1ffff1003e518eeb RSI: 000000000000000c RDI: 0000000000000000 [ 548.623290] RBP: ffff8801f28c7880 R08: 0000000000000000 R09: ffffed0047fff2c5 [ 548.623292] R10: 0000000000000001 R11: ffffed0047fff2c4 R12: ffff8801e88de040 [ 548.623295] R13: 00000000006040c0 R14: 000000000000000c R15: ffff8801f28c7938 [ 548.623299] FS: 00007f76fca51840(0000) 
GS:ffff8801f6f00000(0000) knlGS:0000000000000000 [ 548.623302] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 548.623304] CR2: 00007f19b9171760 CR3: 00000001ed952000 CR4: 00000000000006e0 [ 548.623317] Call Trace: [ 548.623325] ? kasan_check_read+0x11/0x20 [ 548.623330] ? __zone_watermark_ok+0x92/0x240 [ 548.623336] ? get_page_from_freelist+0x1c3/0x1d90 [ 548.623347] ? _raw_spin_lock_irqsave+0x2a/0x60 [ 548.623353] ? warn_alloc+0x250/0x250 [ 548.623358] ? save_stack+0x46/0xd0 [ 548.623361] ? kasan_kmalloc+0xad/0xe0 [ 548.623366] ? __isolate_free_page+0x2a0/0x2a0 [ 548.623370] ? mount_fs+0x60/0x1a0 [ 548.623374] ? vfs_kern_mount+0x6b/0x1a0 [ 548.623378] ? do_mount+0x34a/0x18c0 [ 548.623383] ? ksys_mount+0x83/0xd0 [ 548.623387] ? __x64_sys_mount+0x67/0x80 [ 548.623391] ? do_syscall_64+0x78/0x170 [ 548.623396] ? entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 548.623401] __alloc_pages_nodemask+0x3c5/0x400 [ 548.623407] ? __alloc_pages_slowpath+0x1420/0x1420 [ 548.623412] ? __mutex_lock_slowpath+0x20/0x20 [ 548.623417] ? kvmalloc_node+0x31/0x80 [ 548.623424] alloc_pages_current+0x75/0x110 [ 548.623436] kmalloc_order+0x24/0x60 [ 548.623442] kmalloc_order_trace+0x24/0xb0 [ 548.623448] __kmalloc_track_caller+0x207/0x220 [ 548.623455] ? f2fs_build_node_manager+0x399/0xbb0 [ 548.623460] kmemdup+0x20/0x50 [ 548.623465] f2fs_build_node_manager+0x399/0xbb0 [ 548.623470] f2fs_fill_super+0x195e/0x2b40 [ 548.623477] ? f2fs_commit_super+0x1b0/0x1b0 [ 548.623481] ? set_blocksize+0x90/0x140 [ 548.623486] mount_bdev+0x1c5/0x210 [ 548.623489] ? f2fs_commit_super+0x1b0/0x1b0 [ 548.623495] f2fs_mount+0x15/0x20 [ 548.623498] mount_fs+0x60/0x1a0 [ 548.623503] ? alloc_vfsmnt+0x309/0x360 [ 548.623508] vfs_kern_mount+0x6b/0x1a0 [ 548.623513] do_mount+0x34a/0x18c0 [ 548.623518] ? lockref_put_or_lock+0xcf/0x160 [ 548.623523] ? copy_mount_string+0x20/0x20 [ 548.623528] ? memcg_kmem_put_cache+0x1b/0xa0 [ 548.623533] ? kasan_check_write+0x14/0x20 [ 548.623537] ? _copy_from_user+0x6a/0x90 [ 548.623542] ? memdup_user+0x42/0x60 [ 548.623547] ksys_mount+0x83/0xd0 [ 548.623552] __x64_sys_mount+0x67/0x80 [ 548.623557] do_syscall_64+0x78/0x170 [ 548.623562] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 548.623566] RIP: 0033:0x7f76fc331b9a [ 548.623567] Code: 48 8b 0d 01 c3 2b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 49 89 ca b8 a5 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d ce c2 2b 00 f7 d8 64 89 01 48 [ 548.623632] RSP: 002b:00007ffd4f0a0e48 EFLAGS: 00000206 ORIG_RAX: 00000000000000a5 [ 548.623636] RAX: ffffffffffffffda RBX: 000000000146c030 RCX: 00007f76fc331b9a [ 548.623639] RDX: 000000000146c210 RSI: 000000000146df30 RDI: 0000000001474ec0 [ 548.623641] RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000013 [ 548.623643] R10: 00000000c0ed0000 R11: 0000000000000206 R12: 0000000001474ec0 [ 548.623646] R13: 000000000146c210 R14: 0000000000000000 R15: 0000000000000003 [ 548.623650] ---[ end trace 4ce02f25ff7d3df5 ]--- [ 548.623656] F2FS-fs (loop0): Failed to initialize F2FS node manager [ 548.627936] F2FS-fs (loop0): Invalid log blocks per segment (8201) [ 548.627940] F2FS-fs (loop0): Can't find valid F2FS filesystem in 1th superblock [ 548.635835] F2FS-fs (loop0): Failed to initialize F2FS node manager - Location https://elixir.bootlin.com/linux/v4.18-rc1/source/fs/f2fs/segment.c#L3578 sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL); Buffer overrun happens when doing memcpy. I suspect there is missing (inconsistent) checks on bitmap_size. 
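As a rough worked example of the intended check (illustrative numbers, not taken from the report): with segment_count_sit = 4 and log_blocks_per_seg = 9 (512 blocks per segment), one copy of the SIT spans (4 / 2) << 9 = 1024 blocks, so the only acceptable sit_ver_bitmap_bytesize is 1024 / 8 = 128 bytes; a checkpoint advertising any other value should make the mount fail before the kmemdup() quoted above is ever reached.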
Reported by Wen Xu (wen.xu@gatech.edu) from SSLab, Gatech. Reported-by: Wen Xu Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/f2fs/super.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 8d275fad465d..5a8aa54fa489 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -933,12 +933,17 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi) struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned int main_segs, blocks_per_seg; + unsigned int sit_segs, nat_segs; + unsigned int sit_bitmap_size, nat_bitmap_size; + unsigned int log_blocks_per_seg; int i; total = le32_to_cpu(raw_super->segment_count); fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); - fsmeta += le32_to_cpu(raw_super->segment_count_sit); - fsmeta += le32_to_cpu(raw_super->segment_count_nat); + sit_segs = le32_to_cpu(raw_super->segment_count_sit); + fsmeta += sit_segs; + nat_segs = le32_to_cpu(raw_super->segment_count_nat); + fsmeta += nat_segs; fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); fsmeta += le32_to_cpu(raw_super->segment_count_ssa); @@ -959,6 +964,18 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi) return 1; } + sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); + nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); + log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); + + if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 || + nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) { + f2fs_msg(sbi->sb, KERN_ERR, + "Wrong bitmap size: sit: %u, nat:%u", + sit_bitmap_size, nat_bitmap_size); + return 1; + } + if (unlikely(f2fs_cp_error(sbi))) { f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); return 1; From 84562954bd421824b98d91f6f6115262f6c87a01 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 25 Nov 2016 18:46:09 +0000 Subject: [PATCH 104/151] MIPS: WARN_ON invalid DMA cache maintenance, not BUG_ON [ Upstream commit d4da0e97baea8768b3d66ccef3967bebd50dfc3b ] If a driver causes DMA cache maintenance with a zero length then we currently BUG and kill the kernel. As this is a scenario that we may well be able to recover from, WARN & return in the condition instead. 
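For readers unfamiliar with the idiom relied on below (standard kernel behaviour, not something this patch adds): WARN_ON(cond) evaluates to cond, so a single statement can both emit the backtrace and gate the early return, roughly:

    if (WARN_ON(size == 0))
            return;         /* recoverable: skip the cache op instead of crashing via BUG_ON() */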
Signed-off-by: Paul Burton Acked-by: Florian Fainelli Patchwork: https://patchwork.linux-mips.org/patch/14623/ Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mips/mm/c-r4k.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index fbcd8674ff1d..af126fda630c 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -703,7 +703,8 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end) static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) { /* Catch bad driver code */ - BUG_ON(size == 0); + if (WARN_ON(size == 0)) + return; preempt_disable(); if (cpu_has_inclusive_pcaches) { @@ -736,7 +737,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) { /* Catch bad driver code */ - BUG_ON(size == 0); + if (WARN_ON(size == 0)) + return; preempt_disable(); if (cpu_has_inclusive_pcaches) { From 449fab4df70f6d43f64f8bd2afafdf9c62c4bbc2 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Thu, 3 May 2018 17:30:07 +0300 Subject: [PATCH 105/151] xhci: Fix use-after-free in xhci_free_virt_device commit 44a182b9d17765514fa2b1cc911e4e65134eef93 upstream. KASAN found a use-after-free in xhci_free_virt_device+0x33b/0x38e where xhci_free_virt_device() sets slot id to 0 if udev exists: if (dev->udev && dev->udev->slot_id) dev->udev->slot_id = 0; dev->udev will be true even if udev is freed because dev->udev is not set to NULL. set dev->udev pointer to NULL in xhci_free_dev() The original patch went to stable so this fix needs to be applied there as well. Fixes: a400efe455f7 ("xhci: zero usb device slot_id member when disabling and freeing a xhci slot") Cc: Reported-by: Guenter Roeck Reviewed-by: Guenter Roeck Tested-by: Guenter Roeck Signed-off-by: Mathias Nyman Signed-off-by: Matthias Kaehlcke Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f77b89e83c3e..628e66b39a58 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3630,6 +3630,9 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) } spin_lock_irqsave(&xhci->lock, flags); + + virt_dev->udev = NULL; + /* Don't disable the slot if the host controller is dead. */ state = readl(&xhci->op_regs->status); if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || From a4508e0383daf5a97aa8c0bbcbfc3914d6948010 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 5 Oct 2017 02:50:07 -0700 Subject: [PATCH 106/151] netfilter: x_tables: avoid stack-out-of-bounds read in xt_copy_counters_from_user commit e466af75c074e76107ae1cd5a2823e9c61894ffb upstream. syzkaller reports an out of bound read in strlcpy(), triggered by xt_copy_counters_from_user() Fix this by using memcpy(), then forcing a zero byte at the last position of the destination, as Florian did for the non COMPAT code. 
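The underlying pattern, sketched with generic names (an editor's illustration, not the exact kernel code): strlcpy() takes strlen() of the source, so it can read past the end of a userspace-supplied buffer that is not NUL-terminated, whereas a bounded memcpy() plus an explicit terminator never reads beyond the fixed-size field being copied:

    char name[XT_TABLE_MAXNAMELEN];

    /* src is a same-sized, possibly unterminated field copied from userspace */
    memcpy(name, src, sizeof(name) - 1);
    name[sizeof(name) - 1] = '\0';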
Fixes: d7591f0c41ce ("netfilter: x_tables: introduce and use xt_copy_counters_from_user") Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Acked-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Cc: Greg Hackmann Signed-off-by: Greg Kroah-Hartman --- net/netfilter/x_tables.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index ede118a0d6b0..97c37cf56019 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -877,7 +877,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) return ERR_PTR(-EFAULT); - strlcpy(info->name, compat_tmp.name, sizeof(info->name)); + memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); info->num_counters = compat_tmp.num_counters; user += sizeof(compat_tmp); } else @@ -890,9 +890,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, if (copy_from_user(info, user, sizeof(*info)) != 0) return ERR_PTR(-EFAULT); - info->name[sizeof(info->name) - 1] = '\0'; user += sizeof(*info); } + info->name[sizeof(info->name) - 1] = '\0'; size = sizeof(struct xt_counters); size *= info->num_counters; From 2b8f74c8f0a4aab0a20b9e77fdc3d17e8f2405dd Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 12 Sep 2018 23:57:48 -1000 Subject: [PATCH 107/151] mm: get rid of vmacache_flush_all() entirely commit 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2 upstream. Jann Horn points out that the vmacache_flush_all() function is not only potentially expensive, it's buggy too. It also happens to be entirely unnecessary, because the sequence number overflow case can be avoided by simply making the sequence number be 64-bit. That doesn't even grow the data structures in question, because the other adjacent fields are already 64-bit. So simplify the whole thing by just making the sequence number overflow case go away entirely, which gets rid of all the complications and makes the code faster too. Win-win. 
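Back-of-the-envelope numbers (an illustration, not from the commit): a 32-bit seqnum wraps after 2^32, roughly 4.3 billion invalidations, which a long-lived mmap-heavy workload can plausibly reach, so the overflow path had to exist; a 64-bit counter bumped even a billion times per second would take about 2^64 / 10^9 seconds, on the order of 580 years, to wrap, which is why the overflow handling can simply be deleted.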
[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics also just goes away entirely with this ] Reported-by: Jann Horn Suggested-by: Will Deacon Acked-by: Davidlohr Bueso Cc: Oleg Nesterov Cc: stable@kernel.org Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- include/linux/mm_types.h | 2 +- include/linux/sched.h | 2 +- include/linux/vmacache.h | 5 ----- mm/debug.c | 4 ++-- mm/vmacache.c | 36 ------------------------------------ 5 files changed, 4 insertions(+), 45 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6e0b286649f1..3ffdb0fcb4dc 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -345,7 +345,7 @@ struct kioctx_table; struct mm_struct { struct vm_area_struct *mmap; /* list of VMAs */ struct rb_root mm_rb; - u32 vmacache_seqnum; /* per-thread vmacache */ + u64 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, diff --git a/include/linux/sched.h b/include/linux/sched.h index ab10455e5b02..4e41e629afb1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1315,7 +1315,7 @@ struct task_struct { unsigned brk_randomized:1; #endif /* per-thread vma caching */ - u32 vmacache_seqnum; + u64 vmacache_seqnum; struct vm_area_struct *vmacache[VMACACHE_SIZE]; #if defined(SPLIT_RSS_COUNTING) struct task_rss_stat rss_stat; diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index c3fa0fd43949..4f58ff2dacd6 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h @@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk) memset(tsk->vmacache, 0, sizeof(tsk->vmacache)); } -extern void vmacache_flush_all(struct mm_struct *mm); extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma); extern struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr); @@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, static inline void vmacache_invalidate(struct mm_struct *mm) { mm->vmacache_seqnum++; - - /* deal with overflows */ - if (unlikely(mm->vmacache_seqnum == 0)) - vmacache_flush_all(mm); } #endif /* __LINUX_VMACACHE_H */ diff --git a/mm/debug.c b/mm/debug.c index 5ce45c9a29b5..fbf16575c512 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -166,7 +166,7 @@ EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { - pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n" + pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %p\n" #endif @@ -196,7 +196,7 @@ void dump_mm(const struct mm_struct *mm) #endif "%s", /* This is here to hold the comma */ - mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, + mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif diff --git a/mm/vmacache.c b/mm/vmacache.c index 9f25af825dec..e6e6e92d0d72 100644 --- a/mm/vmacache.c +++ b/mm/vmacache.c @@ -5,42 +5,6 @@ #include #include -/* - * Flush vma caches for threads that share a given mm. - * - * The operation is safe because the caller holds the mmap_sem - * exclusively and other threads accessing the vma cache will - * have mmap_sem held at least for read, so no extra locking - * is required to maintain the vma cache. - */ -void vmacache_flush_all(struct mm_struct *mm) -{ - struct task_struct *g, *p; - - /* - * Single threaded tasks need not iterate the entire - * list of process. 
We can avoid the flushing as well - * since the mm's seqnum was increased and don't have - * to worry about other threads' seqnum. Current's - * flush will occur upon the next lookup. - */ - if (atomic_read(&mm->mm_users) == 1) - return; - - rcu_read_lock(); - for_each_process_thread(g, p) { - /* - * Only flush the vmacache pointers as the - * mm seqnum is already set and curr's will - * be set upon invalidation when the next - * lookup is done. - */ - if (mm == p->mm) - vmacache_flush(p); - } - rcu_read_unlock(); -} - /* * This task may be accessing a foreign mm via (for example) * get_user_pages()->find_vma(). The vmacache is task-local and this From 4601c4744202d6017bd9833896f5d4e8df630641 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 25 Jul 2018 23:00:48 +0200 Subject: [PATCH 108/151] ALSA: msnd: Fix the default sample sizes [ Upstream commit 7c500f9ea139d0c9b80fdea5a9c911db3166ea54 ] The default sample sizes set by msnd driver are bogus; it sets ALSA PCM format, not the actual bit width. Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/isa/msnd/msnd_pinnacle.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c index cf70dba80124..741714332365 100644 --- a/sound/isa/msnd/msnd_pinnacle.c +++ b/sound/isa/msnd/msnd_pinnacle.c @@ -82,10 +82,10 @@ static void set_default_audio_parameters(struct snd_msnd *chip) { - chip->play_sample_size = DEFSAMPLESIZE; + chip->play_sample_size = snd_pcm_format_width(DEFSAMPLESIZE); chip->play_sample_rate = DEFSAMPLERATE; chip->play_channels = DEFCHANNELS; - chip->capture_sample_size = DEFSAMPLESIZE; + chip->capture_sample_size = snd_pcm_format_width(DEFSAMPLESIZE); chip->capture_sample_rate = DEFSAMPLERATE; chip->capture_channels = DEFCHANNELS; } From b741bcfe2b58a78627535fad91bfc6e55cc270a0 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 25 Jul 2018 23:00:46 +0200 Subject: [PATCH 109/151] ALSA: usb-audio: Fix multiple definitions in AU0828_DEVICE() macro [ Upstream commit bd1cd0eb2ce9141100628d476ead4de485501b29 ] AU0828_DEVICE() macro in quirks-table.h uses USB_DEVICE_VENDOR_SPEC() for expanding idVendor and idProduct fields. However, the latter macro adds also match_flags and bInterfaceClass, which are different from the values AU0828_DEVICE() macro sets after that. For fixing them, just expand idVendor and idProduct fields manually in AU0828_DEVICE(). 
This fixes sparse warnings like: sound/usb/quirks-table.h:2892:1: warning: Initializer entry defined twice Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/usb/quirks-table.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index adec087725d1..e86fecaa26ec 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2910,7 +2910,8 @@ YAMAHA_DEVICE(0x7010, "UB99"), */ #define AU0828_DEVICE(vid, pid, vname, pname) { \ - USB_DEVICE_VENDOR_SPEC(vid, pid), \ + .idVendor = vid, \ + .idProduct = pid, \ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ USB_DEVICE_ID_MATCH_INT_CLASS | \ USB_DEVICE_ID_MATCH_INT_SUBCLASS, \ From 0d46dece53d25de1f0113077525224156bb6cf3e Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 25 Jul 2018 16:54:33 +0800 Subject: [PATCH 110/151] xfrm: fix 'passing zero to ERR_PTR()' warning [ Upstream commit 934ffce1343f22ed5e2d0bd6da4440f4848074de ] Fix a static code checker warning: net/xfrm/xfrm_policy.c:1836 xfrm_resolve_and_create_bundle() warn: passing zero to 'ERR_PTR' xfrm_tmpl_resolve return 0 just means no xdst found, return NULL instead of passing zero to ERR_PTR. Fixes: d809ec895505 ("xfrm: do not assume that template resolving always returns xfrms") Signed-off-by: YueHaibing Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/xfrm/xfrm_policy.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 2f5b3dd702dc..c6a4ce894b40 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1809,7 +1809,10 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, /* Try to instantiate a bundle */ err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); if (err <= 0) { - if (err != 0 && err != -EAGAIN) + if (err == 0) + return NULL; + + if (err != -EAGAIN) XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); return ERR_PTR(err); } From 3916385fc4812d4b75b1f07ed042ba67f3934911 Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Wed, 25 Jul 2018 18:45:08 +0100 Subject: [PATCH 111/151] gfs2: Special-case rindex for gfs2_grow [ Upstream commit 776125785a87ff05d49938bd5b9f336f2a05bff6 ] To speed up the common case of appending to a file, gfs2_write_alloc_required presumes that writing beyond the end of a file will always require additional blocks to be allocated. This assumption is incorrect for preallocates files, but there are no negative consequences as long as *some* space is still left on the filesystem. One special file that always has some space preallocated beyond the end of the file is the rindex: when growing a filesystem, gfs2_grow adds one or more new resource groups and appends records describing those resource groups to the rindex; the preallocated space ensures that this is always possible. However, when a filesystem is completely full, gfs2_write_alloc_required will indicate that an additional allocation is required, and appending the next record to the rindex will fail even though space for that record has already been preallocated. To fix that, skip the incorrect optimization in gfs2_write_alloc_required, but for the rindex only. Other writes to preallocated space beyond the end of the file are still allowed to fail on completely full filesystems. 
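In other words (editor's paraphrase of the one-line change below, with the intent spelled out as a comment):

    /* Appending past EOF normally implies a fresh allocation, but the
     * rindex always has space preallocated past EOF, so exempt it. */
    if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
            return 1;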
Signed-off-by: Andreas Gruenbacher Reviewed-by: Bob Peterson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/gfs2/bmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index f0b945ab853e..2f29c2a81448 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1476,7 +1476,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift; lblock = offset >> shift; lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; - if (lblock_stop > end_of_file) + if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex)) return 1; size = (lblock_stop - lblock) << shift; From 40c8637843d8cb2799cb23679854916dee890ae9 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 20 Jul 2018 13:58:22 +0200 Subject: [PATCH 112/151] MIPS: ath79: fix system restart [ Upstream commit f8a7bfe1cb2c1ebfa07775c9c8ac0ad3ba8e5ff5 ] This patch disables irq on reboot to fix hang issues that were observed due to pending interrupts. Signed-off-by: Felix Fietkau Signed-off-by: John Crispin Signed-off-by: Paul Burton Patchwork: https://patchwork.linux-mips.org/patch/19913/ Cc: James Hogan Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mips/ath79/setup.c | 1 + arch/mips/include/asm/mach-ath79/ath79.h | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 64807a4809d0..2b3b66780152 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -40,6 +40,7 @@ static char ath79_sys_type[ATH79_SYS_TYPE_LEN]; static void ath79_restart(char *command) { + local_irq_disable(); ath79_device_reset_set(AR71XX_RESET_FULL_CHIP); for (;;) if (cpu_wait) diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h index 1557934aaca9..39a10a136f53 100644 --- a/arch/mips/include/asm/mach-ath79/ath79.h +++ b/arch/mips/include/asm/mach-ath79/ath79.h @@ -132,6 +132,7 @@ static inline u32 ath79_pll_rr(unsigned reg) static inline void ath79_reset_wr(unsigned reg, u32 val) { __raw_writel(val, ath79_reset_base + reg); + (void) __raw_readl(ath79_reset_base + reg); /* flush */ } static inline u32 ath79_reset_rr(unsigned reg) From 955a815d168c45af6731acd8bcc2f57771c9fc5f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 24 Jul 2018 11:29:01 -0700 Subject: [PATCH 113/151] mtd/maps: fix solutionengine.c printk format warnings [ Upstream commit 1d25e3eeed1d987404e2d2e451eebac8c15cecc1 ] Fix 2 printk format warnings (this driver is currently only used by arch/sh/) by using "%pap" instead of "%lx". 
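A short note on the convention (hedged illustration, made-up variable): %pa/%pap print a phys_addr_t/resource_size_t at whatever width the configuration uses, but they take a pointer to the value rather than the value itself, which is why the hunk below passes &soleng_flash_map.phys:

    resource_size_t flash_start = 0x1fc00000;   /* example address */
    printk(KERN_NOTICE "Flash at %pap\n", &flash_start);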
Fixes these build warnings: ../drivers/mtd/maps/solutionengine.c: In function 'init_soleng_maps': ../include/linux/kern_levels.h:5:18: warning: format '%lx' expects argument of type 'long unsigned int', but argument 2 has type 'resource_size_t' {aka 'unsigned int'} [-Wformat=] ../drivers/mtd/maps/solutionengine.c:62:54: note: format string is defined here printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n", ~~~~^ %08x ../include/linux/kern_levels.h:5:18: warning: format '%lx' expects argument of type 'long unsigned int', but argument 3 has type 'resource_size_t' {aka 'unsigned int'} [-Wformat=] ../drivers/mtd/maps/solutionengine.c:62:72: note: format string is defined here printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n", ~~~~^ %08x Cc: David Woodhouse Cc: Brian Norris Cc: Boris Brezillon Cc: Marek Vasut Cc: Richard Weinberger Cc: linux-mtd@lists.infradead.org Cc: Yoshinori Sato Cc: Rich Felker Cc: linux-sh@vger.kernel.org Cc: Sergei Shtylyov Signed-off-by: Randy Dunlap Signed-off-by: Boris Brezillon Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/maps/solutionengine.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c index bb580bc16445..c07f21b20463 100644 --- a/drivers/mtd/maps/solutionengine.c +++ b/drivers/mtd/maps/solutionengine.c @@ -59,9 +59,9 @@ static int __init init_soleng_maps(void) return -ENXIO; } } - printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n", - soleng_flash_map.phys & 0x1fffffff, - soleng_eprom_map.phys & 0x1fffffff); + printk(KERN_NOTICE "Solution Engine: Flash at 0x%pap, EPROM at 0x%pap\n", + &soleng_flash_map.phys, + &soleng_eprom_map.phys); flash_mtd->owner = THIS_MODULE; eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map); From bae63ef69feaa70d33dcddf65b717b3733386ec8 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 24 Jul 2018 19:11:28 +0200 Subject: [PATCH 114/151] fbdev: omapfb: off by one in omapfb_register_client() [ Upstream commit 5ec1ec35b2979b59d0b33381e7c9aac17e159d16 ] The omapfb_register_client[] array has OMAPFB_PLANE_NUM elements so the > should be >= or we are one element beyond the end of the array. Fixes: 8b08cf2b64f5 ("OMAP: add TI OMAP framebuffer driver") Signed-off-by: Dan Carpenter Cc: Imre Deak Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/omap/omapfb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index d8d028d98711..42cf6381e449 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -982,7 +982,7 @@ int omapfb_register_client(struct omapfb_notifier_block *omapfb_nb, { int r; - if ((unsigned)omapfb_nb->plane_idx > OMAPFB_PLANE_NUM) + if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM) return -EINVAL; if (!notifier_inited) { From f697031f57b570697e77dda8d8178274d4ef694d Mon Sep 17 00:00:00 2001 From: Anton Vasilyev Date: Tue, 24 Jul 2018 19:11:27 +0200 Subject: [PATCH 115/151] video: goldfishfb: fix memory leak on driver remove [ Upstream commit 5958fde72d04e7b8c6de3669d1f794a90997e3eb ] goldfish_fb_probe() allocates memory for fb, but goldfish_fb_remove() does not have deallocation of fb, which leads to memory leak on probe/remove. The patch adds deallocation into goldfish_fb_remove(). 
Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Anton Vasilyev Cc: Aleksandar Markovic Cc: Miodrag Dinic Cc: Goran Ferenc Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/goldfishfb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c index 7f6c9e6cfc6c..14a93cb21310 100644 --- a/drivers/video/fbdev/goldfishfb.c +++ b/drivers/video/fbdev/goldfishfb.c @@ -301,6 +301,7 @@ static int goldfish_fb_remove(struct platform_device *pdev) dma_free_coherent(&pdev->dev, framesize, (void *)fb->fb.screen_base, fb->fb.fix.smem_start); iounmap(fb->reg_base); + kfree(fb); return 0; } From a4e5e18d2c04747057af01ec70ad3c7b045991bd Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 24 Jul 2018 19:11:27 +0200 Subject: [PATCH 116/151] fbdev/via: fix defined but not used warning [ Upstream commit b6566b47a67e07fdca44cf51abb14e2fbe17d3eb ] Fix a build warning in viafbdev.c when CONFIG_PROC_FS is not enabled by marking the unused function as __maybe_unused. ../drivers/video/fbdev/via/viafbdev.c:1471:12: warning: 'viafb_sup_odev_proc_show' defined but not used [-Wunused-function] Signed-off-by: Randy Dunlap Cc: Florian Tobias Schandinat Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/via/viafbdev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c index 325c43c6ff97..34bafcd904d2 100644 --- a/drivers/video/fbdev/via/viafbdev.c +++ b/drivers/video/fbdev/via/viafbdev.c @@ -19,6 +19,7 @@ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ +#include #include #include #include @@ -1468,7 +1469,7 @@ static const struct file_operations viafb_vt1636_proc_fops = { #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */ -static int viafb_sup_odev_proc_show(struct seq_file *m, void *v) +static int __maybe_unused viafb_sup_odev_proc_show(struct seq_file *m, void *v) { via_odev_to_seq(m, supported_odev_map[ viaparinfo->shared->chip_info.gfx_chip_name]); From 2a227c0acd512fb90acec358a0858ec87cd377ca Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 10 Jul 2018 19:28:14 +0530 Subject: [PATCH 117/151] perf powerpc: Fix callchain ip filtering when return address is in a register [ Upstream commit 9068533e4f470daf2b0f29c71d865990acd8826e ] For powerpc64, perf will filter out the second entry in the callchain, i.e. the LR value, if the return address of the function corresponding to the probed location has already been saved on its caller's stack. The state of the return address is determined using debug information. At any point within a function, if the return address is already saved somewhere, a DWARF expression can tell us about its location. If the return address in still in LR only, no DWARF expression would exist. Typically, the instructions in a function's prologue first copy the LR value to R0 and then pushes R0 on to the stack. If LR has already been copied to R0 but R0 is yet to be pushed to the stack, we can still get a DWARF expression that says that the return address is in R0. This is indicating that getting a DWARF expression for the return address does not guarantee the fact that it has already been saved on the stack. This can be observed on a powerpc64le system running Fedora 27 as shown below. # objdump -d /usr/lib64/libc-2.26.so | less ... 
000000000015af20 : 15af20: 0b 00 4c 3c addis r2,r12,11 15af24: e0 c1 42 38 addi r2,r2,-15904 15af28: a6 02 08 7c mflr r0 15af2c: f0 ff c1 fb std r30,-16(r1) 15af30: f8 ff e1 fb std r31,-8(r1) 15af34: 78 1b 7f 7c mr r31,r3 15af38: 78 23 83 7c mr r3,r4 15af3c: 78 2b be 7c mr r30,r5 15af40: 10 00 01 f8 std r0,16(r1) 15af44: c1 ff 21 f8 stdu r1,-64(r1) 15af48: 28 00 81 f8 std r4,40(r1) ... # readelf --debug-dump=frames-interp /usr/lib64/libc-2.26.so | less ... 00027024 0000000000000024 00027028 FDE cie=00000000 pc=000000000015af20..000000000015af88 LOC CFA r30 r31 ra 000000000015af20 r1+0 u u u 000000000015af34 r1+0 c-16 c-8 r0 000000000015af48 r1+64 c-16 c-8 c+16 000000000015af5c r1+0 c-16 c-8 c+16 000000000015af78 r1+0 u u ... # perf probe -x /usr/lib64/libc-2.26.so -a inet_pton+0x18 # perf record -e probe_libc:inet_pton -g ping -6 -c 1 ::1 # perf script Before: ping 2829 [005] 512917.460174: probe_libc:inet_pton: (7fff7e2baf38) 7fff7e2baf38 __GI___inet_pton+0x18 (/usr/lib64/libc-2.26.so) 7fff7e2705b4 getaddrinfo+0x164 (/usr/lib64/libc-2.26.so) 12f152d70 _init+0xbfc (/usr/bin/ping) 7fff7e1836a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fff7e183898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) After: ping 2829 [005] 512917.460174: probe_libc:inet_pton: (7fff7e2baf38) 7fff7e2baf38 __GI___inet_pton+0x18 (/usr/lib64/libc-2.26.so) 7fff7e26fa54 gaih_inet.constprop.7+0xf44 (/usr/lib64/libc-2.26.so) 7fff7e2705b4 getaddrinfo+0x164 (/usr/lib64/libc-2.26.so) 12f152d70 _init+0xbfc (/usr/bin/ping) 7fff7e1836a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fff7e183898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) Reported-by: Ravi Bangoria Signed-off-by: Sandipan Das Cc: Jiri Olsa Cc: Maynard Johnson Cc: Naveen N. Rao Cc: Ravi Bangoria Cc: Sukadev Bhattiprolu Link: http://lkml.kernel.org/r/66e848a7bdf2d43b39210a705ff6d828a0865661.1530724939.git.sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/perf/arch/powerpc/util/skip-callchain-idx.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index 8e24f39f9158..b070893bfcfe 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -58,9 +58,13 @@ static int check_return_reg(int ra_regno, Dwarf_Frame *frame) } /* - * Check if return address is on the stack. + * Check if return address is on the stack. If return address + * is in a register (typically R0), it is yet to be saved on + * the stack. 
*/ - if (nops != 0 || ops != NULL) + if ((nops != 0 || ops != NULL) && + !(nops == 1 && ops[0].atom == DW_OP_regx && + ops[0].number2 == 0 && ops[0].offset == 0)) return 0; /* From 026d9d4baae9af3fccb0285206c4c99a001aab19 Mon Sep 17 00:00:00 2001 From: Fredrik Noring Date: Tue, 24 Jul 2018 19:11:24 +0200 Subject: [PATCH 118/151] fbdev: Distinguish between interlaced and progressive modes [ Upstream commit 1ba0a59cea41ea05fda92daaf2a2958a2246b9cf ] I discovered the problem when developing a frame buffer driver for the PlayStation 2 (not yet merged), using the following video modes for the PlayStation 3 in drivers/video/fbdev/ps3fb.c: }, { /* 1080if */ "1080if", 50, 1920, 1080, 13468, 148, 484, 36, 4, 88, 5, FB_SYNC_BROADCAST, FB_VMODE_INTERLACED }, { /* 1080pf */ "1080pf", 50, 1920, 1080, 6734, 148, 484, 36, 4, 88, 5, FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED }, In ps3fb_probe, the mode_option module parameter is used with fb_find_mode but it can only select the interlaced variant of 1920x1080 since the loop matching the modes does not take the difference between interlaced and progressive modes into account. In short, without the patch, progressive 1920x1080 cannot be chosen as a mode_option parameter since fb_find_mode (falsely) thinks interlace is a perfect match. Signed-off-by: Fredrik Noring Cc: "Maciej W. Rozycki" [b.zolnierkie: updated patch description] Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/core/modedb.c | 41 ++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c index 388f7971494b..620d9ec664e1 100644 --- a/drivers/video/fbdev/core/modedb.c +++ b/drivers/video/fbdev/core/modedb.c @@ -533,7 +533,7 @@ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info, * * Valid mode specifiers for @mode_option: * - * x[M][R][-][@][i][m] or + * x[M][R][-][@][i][p][m] or * [-][@] * * with , , and decimal numbers and @@ -542,10 +542,10 @@ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info, * If 'M' is present after yres (and before refresh/bpp if present), * the function will compute the timings using VESA(tm) Coordinated * Video Timings (CVT). If 'R' is present after 'M', will compute with - * reduced blanking (for flatpanels). If 'i' is present, compute - * interlaced mode. If 'm' is present, add margins equal to 1.8% - * of xres rounded down to 8 pixels, and 1.8% of yres. The char - * 'i' and 'm' must be after 'M' and 'R'. Example: + * reduced blanking (for flatpanels). If 'i' or 'p' are present, compute + * interlaced or progressive mode. If 'm' is present, add margins equal + * to 1.8% of xres rounded down to 8 pixels, and 1.8% of yres. The chars + * 'i', 'p' and 'm' must be after 'M' and 'R'. Example: * * 1024x768MR-8@60m - Reduced blank with margins at 60Hz. 
* @@ -586,7 +586,8 @@ int fb_find_mode(struct fb_var_screeninfo *var, unsigned int namelen = strlen(name); int res_specified = 0, bpp_specified = 0, refresh_specified = 0; unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0; - int yres_specified = 0, cvt = 0, rb = 0, interlace = 0; + int yres_specified = 0, cvt = 0, rb = 0; + int interlace_specified = 0, interlace = 0; int margins = 0; u32 best, diff, tdiff; @@ -637,9 +638,17 @@ int fb_find_mode(struct fb_var_screeninfo *var, if (!cvt) margins = 1; break; + case 'p': + if (!cvt) { + interlace = 0; + interlace_specified = 1; + } + break; case 'i': - if (!cvt) + if (!cvt) { interlace = 1; + interlace_specified = 1; + } break; default: goto done; @@ -708,11 +717,21 @@ int fb_find_mode(struct fb_var_screeninfo *var, if ((name_matches(db[i], name, namelen) || (res_specified && res_matches(db[i], xres, yres))) && !fb_try_mode(var, info, &db[i], bpp)) { - if (refresh_specified && db[i].refresh == refresh) - return 1; + const int db_interlace = (db[i].vmode & + FB_VMODE_INTERLACED ? 1 : 0); + int score = abs(db[i].refresh - refresh); + + if (interlace_specified) + score += abs(db_interlace - interlace); + + if (!interlace_specified || + db_interlace == interlace) + if (refresh_specified && + db[i].refresh == refresh) + return 1; - if (abs(db[i].refresh - refresh) < diff) { - diff = abs(db[i].refresh - refresh); + if (score < diff) { + diff = score; best = i; } } From 8eb6f3c1e90879b7407890acedebafce5e01ede1 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Tue, 10 Jul 2018 19:28:13 +0530 Subject: [PATCH 119/151] perf powerpc: Fix callchain ip filtering [ Upstream commit c715fcfda5a08edabaa15508742be926b7ee51db ] For powerpc64, redundant entries in the callchain are filtered out by determining the state of the return address and the stack frame using DWARF debug information. For making these filtering decisions we must analyze the debug information for the location corresponding to the program counter value, i.e. the first entry in the callchain, and not the LR value; otherwise, perf may filter out either the second or the third entry in the callchain incorrectly. This can be observed on a powerpc64le system running Fedora 27 as shown below. Case 1 - Attaching a probe at inet_pton+0x8 (binary offset 0x15af28). Return address is still in LR and a new stack frame is not yet allocated. The LR value, i.e. the second entry, should not be filtered out. # objdump -d /usr/lib64/libc-2.26.so | less ... 000000000010eb10 : ... 10fa48: 78 bb e4 7e mr r4,r23 10fa4c: 0a 00 60 38 li r3,10 10fa50: d9 b4 04 48 bl 15af28 10fa54: 00 00 00 60 nop 10fa58: ac f4 ff 4b b 10ef04 ... 0000000000110450 : ... 1105a8: 54 00 ff 38 addi r7,r31,84 1105ac: 58 00 df 38 addi r6,r31,88 1105b0: 69 e5 ff 4b bl 10eb18 1105b4: 78 1b 71 7c mr r17,r3 1105b8: 50 01 7f e8 ld r3,336(r31) ... 000000000015af20 : 15af20: 0b 00 4c 3c addis r2,r12,11 15af24: e0 c1 42 38 addi r2,r2,-15904 15af28: a6 02 08 7c mflr r0 15af2c: f0 ff c1 fb std r30,-16(r1) 15af30: f8 ff e1 fb std r31,-8(r1) ... 
# perf probe -x /usr/lib64/libc-2.26.so -a inet_pton+0x8 # perf record -e probe_libc:inet_pton -g ping -6 -c 1 ::1 # perf script Before: ping 4507 [002] 514985.546540: probe_libc:inet_pton: (7fffa7dbaf28) 7fffa7dbaf28 __GI___inet_pton+0x8 (/usr/lib64/libc-2.26.so) 7fffa7d705b4 getaddrinfo+0x164 (/usr/lib64/libc-2.26.so) 13fb52d70 _init+0xbfc (/usr/bin/ping) 7fffa7c836a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fffa7c83898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) After: ping 4507 [002] 514985.546540: probe_libc:inet_pton: (7fffa7dbaf28) 7fffa7dbaf28 __GI___inet_pton+0x8 (/usr/lib64/libc-2.26.so) 7fffa7d6fa54 gaih_inet.constprop.7+0xf44 (/usr/lib64/libc-2.26.so) 7fffa7d705b4 getaddrinfo+0x164 (/usr/lib64/libc-2.26.so) 13fb52d70 _init+0xbfc (/usr/bin/ping) 7fffa7c836a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fffa7c83898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) Case 2 - Attaching a probe at _int_malloc+0x180 (binary offset 0x9cf10). Return address in still in LR and a new stack frame has already been allocated but not used. The caller's caller, i.e. the third entry, is invalid and should be filtered out and not the second one. # objdump -d /usr/lib64/libc-2.26.so | less ... 000000000009cd90 <_int_malloc>: 9cd90: 17 00 4c 3c addis r2,r12,23 9cd94: 70 a3 42 38 addi r2,r2,-23696 9cd98: 26 00 80 7d mfcr r12 9cd9c: f8 ff e1 fb std r31,-8(r1) 9cda0: 17 00 e4 3b addi r31,r4,23 9cda4: d8 ff 61 fb std r27,-40(r1) 9cda8: 78 23 9b 7c mr r27,r4 9cdac: 1f 00 bf 2b cmpldi cr7,r31,31 9cdb0: f0 ff c1 fb std r30,-16(r1) 9cdb4: b0 ff c1 fa std r22,-80(r1) 9cdb8: 78 1b 7e 7c mr r30,r3 9cdbc: 08 00 81 91 stw r12,8(r1) 9cdc0: 11 ff 21 f8 stdu r1,-240(r1) 9cdc4: 4c 01 9d 41 bgt cr7,9cf10 <_int_malloc+0x180> 9cdc8: 20 00 a4 2b cmpldi cr7,r4,32 ... 9cf08: 00 00 00 60 nop 9cf0c: 00 00 42 60 ori r2,r2,0 9cf10: e4 06 ff 7b rldicr r31,r31,0,59 9cf14: 40 f8 a4 7f cmpld cr7,r4,r31 9cf18: 68 05 9d 41 bgt cr7,9d480 <_int_malloc+0x6f0> ... 000000000009e3c0 : ... 9e420: 40 02 80 38 li r4,576 9e424: 78 fb e3 7f mr r3,r31 9e428: 71 e9 ff 4b bl 9cd98 <_int_malloc+0x8> 9e42c: 00 00 a3 2f cmpdi cr7,r3,0 9e430: 78 1b 7e 7c mr r30,r3 ... 000000000009f7a0 <__libc_malloc>: ... 9f8f8: 00 00 89 2f cmpwi cr7,r9,0 9f8fc: 1c ff 9e 40 bne cr7,9f818 <__libc_malloc+0x78> 9f900: c9 ea ff 4b bl 9e3c8 9f904: 00 00 00 60 nop 9f908: e8 90 22 e9 ld r9,-28440(r2) ... 
# perf probe -x /usr/lib64/libc-2.26.so -a _int_malloc+0x180 # perf record -e probe_libc:_int_malloc -g ./test-malloc # perf script Before: test-malloc 6554 [009] 515975.797403: probe_libc:_int_malloc: (7fffa6e6cf10) 7fffa6e6cf10 _int_malloc+0x180 (/usr/lib64/libc-2.26.so) 7fffa6dd0000 [unknown] (/usr/lib64/libc-2.26.so) 7fffa6e6f904 malloc+0x164 (/usr/lib64/libc-2.26.so) 7fffa6e6f9fc malloc+0x25c (/usr/lib64/libc-2.26.so) 100006b4 main+0x38 (/home/testuser/test-malloc) 7fffa6df36a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fffa6df3898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) After: test-malloc 6554 [009] 515975.797403: probe_libc:_int_malloc: (7fffa6e6cf10) 7fffa6e6cf10 _int_malloc+0x180 (/usr/lib64/libc-2.26.so) 7fffa6e6e42c tcache_init.part.4+0x6c (/usr/lib64/libc-2.26.so) 7fffa6e6f904 malloc+0x164 (/usr/lib64/libc-2.26.so) 7fffa6e6f9fc malloc+0x25c (/usr/lib64/libc-2.26.so) 100006b4 main+0x38 (/home/sandipan/test-malloc) 7fffa6df36a0 generic_start_main.isra.0+0x140 (/usr/lib64/libc-2.26.so) 7fffa6df3898 __libc_start_main+0xb8 (/usr/lib64/libc-2.26.so) 0 [unknown] ([unknown]) Signed-off-by: Sandipan Das Cc: Jiri Olsa Cc: Maynard Johnson Cc: Naveen N. Rao Cc: Ravi Bangoria Cc: Sukadev Bhattiprolu Fixes: a60335ba3298 ("perf tools powerpc: Adjust callchain based on DWARF debug info") Link: http://lkml.kernel.org/r/24bb726d91ed173aebc972ec3f41a2ef2249434e.1530724939.git.sandipan@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/perf/arch/powerpc/util/skip-callchain-idx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index b070893bfcfe..d03992ee217d 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -237,7 +237,7 @@ int arch_skip_callchain_idx(struct machine *machine, struct thread *thread, if (!chain || chain->nr < 3) return skip_slot; - ip = chain->ips[2]; + ip = chain->ips[1]; thread__find_addr_location(thread, machine, PERF_RECORD_MISC_USER, MAP__FUNCTION, ip, &al); From 98cc98203b9f457aab3c928e6c98d3c80ff36460 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 1 May 2018 00:55:44 +1000 Subject: [PATCH 120/151] powerpc/powernv: opal_put_chars partial write fix [ Upstream commit bd90284cc6c1c9e8e48c8eadd0c79574fcce0b81 ] The intention here is to consume and discard the remaining buffer upon error. This works if there has not been a previous partial write. If there has been, then total_len is no longer total number of bytes to copy. total_len is always "bytes left to copy", so it should be added to written bytes. This code may not be exercised any more if partial writes will not be hit, but this is a small bugfix before a larger change. 
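(Illustration, not part of the patch: a standalone toy model of the accounting described above, with made-up names. Here "remaining" plays the role of total_len, i.e. bytes still to copy; on a fatal error the leftover bytes are discarded but must still be counted on top of what was already written, otherwise an earlier partial write is lost from the total.)

#include <stdio.h>

/* Toy backend: each call consumes at most 4 bytes, the third call fails. */
static int backend_write(int remaining, int call)
{
	if (call == 3)
		return -1;			/* fatal error: discard the rest */
	return remaining < 4 ? remaining : 4;
}

int main(void)
{
	int total = 10, remaining = total, written = 0, call = 0, rc;

	while (remaining > 0) {
		rc = backend_write(remaining, ++call);
		if (rc < 0) {
			/* bug: written  = remaining;  would report 2 bytes  */
			/* fix: written += remaining;  reports all 10 bytes  */
			written += remaining;
			break;
		}
		written += rc;
		remaining -= rc;
	}
	printf("consumed %d of %d bytes\n", written, total);
	return 0;
}

With the buggy assignment the toy program reports 2 of 10 bytes consumed because the two earlier partial writes are dropped; with the accumulation it reports 10 of 10, which is what the caller expects when the remainder is deliberately discarded.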
Reviewed-by: Benjamin Herrenschmidt Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/platforms/powernv/opal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index d019b081df9d..ff3e1fe4936b 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -452,7 +452,7 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) /* Closed or other error drop */ if (rc != OPAL_SUCCESS && rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) { - written = total_len; + written += total_len; break; } if (rc == OPAL_SUCCESS) { From 67f5abb4933bc4f96f467615fd312cea0a414cc5 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Tue, 10 Jul 2018 16:48:27 +0530 Subject: [PATCH 121/151] mac80211: restrict delayed tailroom needed decrement [ Upstream commit 133bf90dbb8b873286f8ec2e81ba26e863114b8c ] As explained in ieee80211_delayed_tailroom_dec(), during roam, keys of the old AP will be destroyed and new keys will be installed. Deletion of the old key causes crypto_tx_tailroom_needed_cnt to go from 1 to 0 and the new key installation causes a transition from 0 to 1. Whenever crypto_tx_tailroom_needed_cnt transitions from 0 to 1, we invoke synchronize_net(); the reason for doing this is to avoid a race in the TX path as explained in increment_tailroom_need_count(). This synchronize_net() operation can be slow and can affect the station roam time. To avoid this, decrementing the crypto_tx_tailroom_needed_cnt is delayed for a while so that upon installation of new key the transition would be from 1 to 2 instead of 0 to 1 and thereby improving the roam time. This is all correct for a STA iftype, but deferring the tailroom_needed decrement for other iftypes may be unnecessary. For example, let's consider the case of a 4-addr client connecting to an AP for which AP_VLAN interface is also created, let the initial value for tailroom_needed on the AP be 1. * 4-addr client connects to the AP (AP: tailroom_needed = 1) * AP will clear old keys, delay decrement of tailroom_needed count * AP_VLAN is created, it takes the tailroom count from master (AP_VLAN: tailroom_needed = 1, AP: tailroom_needed = 1) * Install new key for the station, assume key is plumbed in the HW, there won't be any change in tailroom_needed count on AP iface * Delayed decrement of tailroom_needed count on AP (AP: tailroom_needed = 0, AP_VLAN: tailroom_needed = 1) Because of the delayed decrement on AP iface, tailroom_needed count goes out of sync between AP(master iface) and AP_VLAN(slave iface) and there would be unnecessary tailroom created for the packets going through AP_VLAN iface. Also, WARN_ONs were observed while trying to bring down the AP_VLAN interface: (warn_slowpath_common) (warn_slowpath_null+0x18/0x20) (warn_slowpath_null) (ieee80211_free_keys+0x114/0x1e4) (ieee80211_free_keys) (ieee80211_del_virtual_monitor+0x51c/0x850) (ieee80211_del_virtual_monitor) (ieee80211_stop+0x30/0x3c) (ieee80211_stop) (__dev_close_many+0x94/0xb8) (__dev_close_many) (dev_close_many+0x5c/0xc8) Restricting delayed decrement to station interface alone fixes the problem and it makes sense to do so because delayed decrement is done to improve roam time which is applicable only for client devices. 
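(Illustration, not part of the patch: a crude standalone model of the counter behaviour described above. Every identifier is invented and the delayed work is collapsed into a single function call. For a station the decrement for the old key is deferred, so installing the new key moves the counter 1 -> 2 instead of 0 -> 1, which is the transition that would require synchronize_net(); for AP/AP_VLAN the decrement happens immediately so master and slave counts stay in sync.)

enum iftype { IFTYPE_STATION, IFTYPE_AP, IFTYPE_AP_VLAN };

struct iface {
	enum iftype type;
	int tailroom_needed;	/* stands in for crypto_tx_tailroom_needed_cnt */
};

static void rekey(struct iface *i)
{
	int delay = (i->type == IFTYPE_STATION);

	if (!delay)
		i->tailroom_needed--;	/* old key accounted for right away     */
	i->tailroom_needed++;		/* new key installed                    */
	if (delay)
		i->tailroom_needed--;	/* deferred release of the old key      */
}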
Signed-off-by: Manikanta Pubbisetty Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/mac80211/cfg.c | 2 +- net/mac80211/key.c | 24 +++++++++++++++--------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d061814d1ae1..9d224151f639 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -276,7 +276,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, goto out_unlock; } - ieee80211_key_free(key, true); + ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION); ret = 0; out_unlock: diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 375ff902a142..6bd74d2556ae 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -515,11 +515,15 @@ int ieee80211_key_link(struct ieee80211_key *key, { struct ieee80211_local *local = sdata->local; struct ieee80211_key *old_key; - int idx, ret; - bool pairwise; - - pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; - idx = key->conf.keyidx; + int idx = key->conf.keyidx; + bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; + /* + * We want to delay tailroom updates only for station - in that + * case it helps roaming speed, but in other cases it hurts and + * can cause warnings to appear. + */ + bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION; + int ret; mutex_lock(&sdata->local->key_mtx); @@ -547,14 +551,14 @@ int ieee80211_key_link(struct ieee80211_key *key, increment_tailroom_need_count(sdata); ieee80211_key_replace(sdata, sta, pairwise, old_key, key); - ieee80211_key_destroy(old_key, true); + ieee80211_key_destroy(old_key, delay_tailroom); ieee80211_debugfs_key_add(key); if (!local->wowlan) { ret = ieee80211_key_enable_hw_accel(key); if (ret) - ieee80211_key_free(key, true); + ieee80211_key_free(key, delay_tailroom); } else { ret = 0; } @@ -705,7 +709,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); - __ieee80211_key_destroy(key, true); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); } for (i = 0; i < NUM_DEFAULT_KEYS; i++) { @@ -715,7 +720,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local, ieee80211_key_replace(key->sdata, key->sta, key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, key, NULL); - __ieee80211_key_destroy(key, true); + __ieee80211_key_destroy(key, key->sdata->vif.type == + NL80211_IFTYPE_STATION); } mutex_unlock(&local->key_mtx); From 1437dd3977d1d16e2c274dcf69456ba05abe9c38 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:48 +0200 Subject: [PATCH 122/151] s390/qeth: fix race in used-buffer accounting [ Upstream commit a702349a4099cd5a7bab0904689d8e0bf8dcd622 ] By updating q->used_buffers only _after_ do_QDIO() has completed, there is a potential race against the buffer's TX completion. In the unlikely case that the TX completion path wins, qeth_qdio_output_handler() would decrement the counter before qeth_flush_buffers() even incremented it. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_core_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 906e8014ebb9..595c140cc79c 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3489,13 +3489,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, qdio_flags = QDIO_FLAG_SYNC_OUTPUT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; + atomic_add(count, &queue->used_buffers); + rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count); if (queue->card->options.performance_stats) queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - queue->card->perf_stats.outbound_do_qdio_start_time; - atomic_add(count, &queue->used_buffers); if (rc) { queue->card->stats.tx_errors += count; /* ignore temporary SIGA errors without busy condition */ From 9cac5f329dcc8bf89685c9def4cf5bc61c6423f7 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Jul 2018 12:43:49 +0200 Subject: [PATCH 123/151] s390/qeth: reset layer2 attribute on layer switch [ Upstream commit 70551dc46ffa3555a0b5f3545b0cd87ab67fd002 ] After the subdriver's remove() routine has completed, the card's layer mode is undetermined again. Reflect this in the layer2 field. If qeth_dev_layer2_store() hits an error after remove() was called, the card _always_ requires a setup(), even if the previous layer mode is requested again. But qeth_dev_layer2_store() bails out early if the requested layer mode still matches the current one. So unless we reset the layer2 field, re-probing the card back to its previous mode is currently not possible. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/s390/net/qeth_core_sys.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 9d5f746faf72..9eeabfe30747 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -456,6 +456,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, if (card->discipline) { card->discipline->remove(card->gdev); qeth_core_free_discipline(card); + card->options.layer2 = -1; } rc = qeth_core_load_discipline(card, newdis); From 31f0ff10ccc3a0692625c94b3b77c9e82739e185 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 6 Jul 2018 20:53:09 -0700 Subject: [PATCH 124/151] platform/x86: toshiba_acpi: Fix defined but not used build warnings [ Upstream commit c2e2a618eb7104e18fdcf739d4d911563812a81c ] Fix a build warning in toshiba_acpi.c when CONFIG_PROC_FS is not enabled by marking the unused function as __maybe_unused. 
../drivers/platform/x86/toshiba_acpi.c:1685:12: warning: 'version_proc_show' defined but not used [-Wunused-function] Signed-off-by: Randy Dunlap Cc: Azael Avalos Cc: platform-driver-x86@vger.kernel.org Cc: Andy Shevchenko Signed-off-by: Darren Hart (VMware) Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/toshiba_acpi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index ab6151f05420..c0b64e571a5e 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -41,6 +41,7 @@ #define TOSHIBA_ACPI_VERSION "0.20" #define PROC_INTERFACE_VERSION 1 +#include #include #include #include @@ -1233,7 +1234,7 @@ static const struct file_operations keys_proc_fops = { .write = keys_proc_write, }; -static int version_proc_show(struct seq_file *m, void *v) +static int __maybe_unused version_proc_show(struct seq_file *m, void *v) { seq_printf(m, "driver: %s\n", TOSHIBA_ACPI_VERSION); seq_printf(m, "proc_interface: %d\n", PROC_INTERFACE_VERSION); From e6858a99540555179f7ed1027e9307563bada64b Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 30 Aug 2018 08:35:19 +0300 Subject: [PATCH 125/151] RDMA/cma: Protect cma dev list with lock commit 954a8e3aea87e896e320cf648c1a5bbe47de443e upstream. When AF_IB addresses are used during rdma_resolve_addr() a lock is not held. A cma device can get removed while list traversal is in progress which may lead to crash. ie CPU0 CPU1 ==== ==== rdma_resolve_addr() cma_resolve_ib_dev() list_for_each() cma_remove_one() cur_dev->device mutex_lock(&lock) list_del(); mutex_unlock(&lock); cma_process_remove(); Therefore, hold a lock while traversing the list which avoids such situation. Cc: # 3.10 Fixes: f17df3b0dede ("RDMA/cma: Add support for AF_IB to rdma_resolve_addr()") Signed-off-by: Parav Pandit Reviewed-by: Daniel Jurgens Signed-off-by: Leon Romanovsky Reviewed-by: Dennis Dalessandro Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/cma.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index c5364ae3b57a..9ba24ed2845f 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -434,6 +434,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) dgid = (union ib_gid *) &addr->sib_addr; pkey = ntohs(addr->sib_pkey); + mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB) continue; @@ -455,18 +456,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) cma_dev = cur_dev; sgid = gid; id_priv->id.port_num = p; + goto found; } } } } - - if (!cma_dev) - return -ENODEV; + mutex_unlock(&lock); + return -ENODEV; found: cma_attach_to_dev(id_priv, cma_dev); - addr = (struct sockaddr_ib *) cma_src_addr(id_priv); - memcpy(&addr->sib_addr, &sgid, sizeof sgid); + mutex_unlock(&lock); + addr = (struct sockaddr_ib *)cma_src_addr(id_priv); + memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); return 0; } From 36a9916189b46b78dbf4fa542739b464444f0537 Mon Sep 17 00:00:00 2001 From: Bin Yang Date: Wed, 12 Sep 2018 03:36:34 +0000 Subject: [PATCH 126/151] pstore: Fix incorrect persistent ram buffer mapping commit 831b624df1b420c8f9281ed1307a8db23afb72df upstream. persistent_ram_vmap() returns the page start vaddr. 
persistent_ram_iomap() supports non-page-aligned mapping. persistent_ram_buffer_map() always adds offset-in-page to the vaddr returned from these two functions, which causes incorrect mapping of non-page-aligned persistent ram buffer. By default ftrace_size is 4096 and max_ftrace_cnt is nr_cpu_ids. Without this patch, the zone_sz in ramoops_init_przs() is 4096/nr_cpu_ids which might not be page aligned. If the offset-in-page > 2048, the vaddr will be in next page. If the next page is not mapped, it will cause kernel panic: [ 0.074231] BUG: unable to handle kernel paging request at ffffa19e0081b000 ... [ 0.075000] RIP: 0010:persistent_ram_new+0x1f8/0x39f ... [ 0.075000] Call Trace: [ 0.075000] ramoops_init_przs.part.10.constprop.15+0x105/0x260 [ 0.075000] ramoops_probe+0x232/0x3a0 [ 0.075000] platform_drv_probe+0x3e/0xa0 [ 0.075000] driver_probe_device+0x2cd/0x400 [ 0.075000] __driver_attach+0xe4/0x110 [ 0.075000] ? driver_probe_device+0x400/0x400 [ 0.075000] bus_for_each_dev+0x70/0xa0 [ 0.075000] driver_attach+0x1e/0x20 [ 0.075000] bus_add_driver+0x159/0x230 [ 0.075000] ? do_early_param+0x95/0x95 [ 0.075000] driver_register+0x70/0xc0 [ 0.075000] ? init_pstore_fs+0x4d/0x4d [ 0.075000] __platform_driver_register+0x36/0x40 [ 0.075000] ramoops_init+0x12f/0x131 [ 0.075000] do_one_initcall+0x4d/0x12c [ 0.075000] ? do_early_param+0x95/0x95 [ 0.075000] kernel_init_freeable+0x19b/0x222 [ 0.075000] ? rest_init+0xbb/0xbb [ 0.075000] kernel_init+0xe/0xfc [ 0.075000] ret_from_fork+0x3a/0x50 Signed-off-by: Bin Yang [kees: add comments describing the mapping differences, updated commit log] Fixes: 24c3d2f342ed ("staging: android: persistent_ram: Make it possible to use memory outside of bootmem") Cc: stable@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: Greg Kroah-Hartman --- fs/pstore/ram_core.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c index 57b165ba849c..288ce45f613e 100644 --- a/fs/pstore/ram_core.c +++ b/fs/pstore/ram_core.c @@ -414,7 +414,12 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size, vaddr = vmap(pages, page_count, VM_MAP, prot); kfree(pages); - return vaddr; + /* + * Since vmap() uses page granularity, we must add the offset + * into the page here, to get the byte granularity address + * into the mapping to represent the actual "start" location. + */ + return vaddr + offset_in_page(start); } static void *persistent_ram_iomap(phys_addr_t start, size_t size, @@ -436,6 +441,11 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size, else va = ioremap_wc(start, size); + /* + * Since request_mem_region() and ioremap() are byte-granularity + * there is no need handle anything special like we do when the + * vmap() case in persistent_ram_vmap() above. + */ return va; } @@ -456,7 +466,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, return -ENOMEM; } - prz->buffer = prz->vaddr + offset_in_page(start); + prz->buffer = prz->vaddr; prz->buffer_size = size - sizeof(struct persistent_ram_buffer); return 0; @@ -503,7 +513,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz) if (prz->vaddr) { if (pfn_valid(prz->paddr >> PAGE_SHIFT)) { - vunmap(prz->vaddr); + /* We must vunmap() at page-granularity. 
*/ + vunmap(prz->vaddr - offset_in_page(prz->paddr)); } else { iounmap(prz->vaddr); release_mem_region(prz->paddr, prz->size); From bd4971c341a348fd456780b7c058079de9492d18 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Fri, 7 Sep 2018 14:21:30 +0200 Subject: [PATCH 127/151] xen/netfront: fix waiting for xenbus state change commit 8edfe2e992b75aee3da9316e9697c531194c2f53 upstream. Commit 822fb18a82aba ("xen-netfront: wait xenbus state change when load module manually") added a new wait queue to wait on for a state change when the module is loaded manually. Unfortunately there is no wakeup anywhere to stop that waiting. Instead of introducing a new wait queue rename the existing module_unload_q to module_wq and use it for both purposes (loading and unloading). As any state change of the backend might be intended to stop waiting do the wake_up_all() in any case when netback_changed() is called. Fixes: 822fb18a82aba ("xen-netfront: wait xenbus state change when load module manually") Cc: #4.18 Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/xen-netfront.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index b780c059cc03..3bbfb09af65f 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -85,8 +85,7 @@ struct netfront_cb { /* IRQ name is queue name with "-tx" or "-rx" appended */ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) -static DECLARE_WAIT_QUEUE_HEAD(module_load_q); -static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); +static DECLARE_WAIT_QUEUE_HEAD(module_wq); struct netfront_stats { u64 rx_packets; @@ -1360,11 +1359,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); xenbus_switch_state(dev, XenbusStateInitialising); - wait_event(module_load_q, - xenbus_read_driver_state(dev->otherend) != - XenbusStateClosed && - xenbus_read_driver_state(dev->otherend) != - XenbusStateUnknown); + wait_event(module_wq, + xenbus_read_driver_state(dev->otherend) != + XenbusStateClosed && + xenbus_read_driver_state(dev->otherend) != + XenbusStateUnknown); return netdev; exit: @@ -2068,15 +2067,14 @@ static void netback_changed(struct xenbus_device *dev, dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); + wake_up_all(&module_wq); + switch (backend_state) { case XenbusStateInitialising: case XenbusStateInitialised: case XenbusStateReconfiguring: case XenbusStateReconfigured: - break; - case XenbusStateUnknown: - wake_up_all(&module_unload_q); break; case XenbusStateInitWait: @@ -2092,12 +2090,10 @@ static void netback_changed(struct xenbus_device *dev, break; case XenbusStateClosed: - wake_up_all(&module_unload_q); if (dev->state == XenbusStateClosed) break; /* Missed the backend's CLOSING state -- fallthrough */ case XenbusStateClosing: - wake_up_all(&module_unload_q); xenbus_frontend_closed(dev); break; } @@ -2321,14 +2317,14 @@ static int xennet_remove(struct xenbus_device *dev) if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { xenbus_switch_state(dev, XenbusStateClosing); - wait_event(module_unload_q, + wait_event(module_wq, xenbus_read_driver_state(dev->otherend) == XenbusStateClosing || xenbus_read_driver_state(dev->otherend) == XenbusStateUnknown); xenbus_switch_state(dev, XenbusStateClosed); - wait_event(module_unload_q, + wait_event(module_wq, xenbus_read_driver_state(dev->otherend) == XenbusStateClosed || 
xenbus_read_driver_state(dev->otherend) == From 9778987cf346862c59d6b43666e1c6795682b79d Mon Sep 17 00:00:00 2001 From: Aaron Knister Date: Fri, 24 Aug 2018 08:42:46 -0400 Subject: [PATCH 128/151] IB/ipoib: Avoid a race condition between start_xmit and cm_rep_handler commit 816e846c2eb9129a3e0afa5f920c8bbc71efecaa upstream. Inside of start_xmit() the call to check if the connection is up and the queueing of the packets for later transmission is not atomic which leaves a window where cm_rep_handler can run, set the connection up, dequeue pending packets and leave the subsequently queued packets by start_xmit() sitting on neigh->queue until they're dropped when the connection is torn down. This only applies to connected mode. These dropped packets can really upset TCP, for example, and cause multi-minute delays in transmission for open connections. Here's the code in start_xmit where we check to see if the connection is up: if (ipoib_cm_get(neigh)) { if (ipoib_cm_up(neigh)) { ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); goto unref; } } The race occurs if cm_rep_handler execution occurs after the above connection check (specifically if it gets to the point where it acquires priv->lock to dequeue pending skb's) but before the below code snippet in start_xmit where packets are queued. if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { push_pseudo_header(skb, phdr->hwaddr); spin_lock_irqsave(&priv->lock, flags); __skb_queue_tail(&neigh->queue, skb); spin_unlock_irqrestore(&priv->lock, flags); } else { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); } The patch acquires the netif tx lock in cm_rep_handler for the section where it sets the connection up and dequeues and retransmits deferred skb's. Fixes: 839fcaba355a ("IPoIB: Connected mode experimental support") Cc: stable@vger.kernel.org Signed-off-by: Aaron Knister Tested-by: Ira Weiny Reviewed-by: Ira Weiny Signed-off-by: Jason Gunthorpe Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index d88f8dbe62d9..be5833e0306c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -997,12 +997,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even skb_queue_head_init(&skqueue); + netif_tx_lock_bh(p->dev); spin_lock_irq(&priv->lock); set_bit(IPOIB_FLAG_OPER_UP, &p->flags); if (p->neigh) while ((skb = __skb_dequeue(&p->neigh->queue))) __skb_queue_tail(&skqueue, skb); spin_unlock_irq(&priv->lock); + netif_tx_unlock_bh(p->dev); while ((skb = __skb_dequeue(&skqueue))) { skb->dev = p->dev; From 9ca1bf5eeff31769dd4a2d5ccbb808368a37cfd7 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Fri, 10 Aug 2018 23:06:07 +0000 Subject: [PATCH 129/151] Tools: hv: Fix a bug in the key delete code commit 86503bd35dec0ce363e9fdbf5299927422ed3899 upstream. Fix a bug in the key delete code - the num_records range from 0 to num_records-1. Signed-off-by: K. Y. 
Srinivasan Reported-by: David Binderman Cc: Reviewed-by: Michael Kelley Signed-off-by: Greg Kroah-Hartman --- tools/hv/hv_kvp_daemon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index f660d3f69ce7..ac32a9234dd2 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -289,7 +289,7 @@ static int kvp_key_delete(int pool, const char *key, int key_size) * Found a match; just move the remaining * entries up. */ - if (i == num_records) { + if (i == (num_records - 1)) { kvp_file_info[pool].num_records--; kvp_update_file(pool); return 0; From 15567156db7acc1845296fdff669353f711c8e49 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Tue, 4 Sep 2018 17:35:16 +0300 Subject: [PATCH 130/151] usb: Don't die twice if PCI xhci host is not responding in resume commit f3dc41c5d22b2ca14a0802a65d8cdc33a3882d4e upstream. usb_hc_died() should only be called once, and with the primary HCD as parameter. It will mark both primary and secondary hcd's dead. Remove the extra call to usb_cd_died with the shared hcd as parameter. Fixes: ff9d78b36f76 ("USB: Set usb_hcd->state and flags for shared roothubs") Signed-off-by: Mathias Nyman Cc: stable Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hcd-pci.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index a4c0b855faeb..ca2bd6ee5500 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -530,8 +530,6 @@ static int resume_common(struct device *dev, int event) event == PM_EVENT_RESTORE); if (retval) { dev_err(dev, "PCI post-resume error %d!\n", retval); - if (hcd->shared_hcd) - usb_hc_died(hcd->shared_hcd); usb_hc_died(hcd); } } From fbc35884e1cadd0da82e5ff8a95235964bc1ca93 Mon Sep 17 00:00:00 2001 From: Tim Anderson Date: Thu, 9 Aug 2018 14:55:34 -0700 Subject: [PATCH 131/151] USB: Add quirk to support DJI CineSSD commit f45681f9becaa65111ed0a691ccf080a0cd5feb8 upstream. This device does not correctly handle the LPM operations. Also, the device cannot handle ATA pass-through commands and locks up when attempted while running in super speed. This patch adds the equivalent quirk logic as found in uas. 
Signed-off-by: Tim Anderson Acked-by: Alan Stern Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 3 +++ drivers/usb/storage/scsiglue.c | 9 +++++++++ drivers/usb/storage/unusual_devs.h | 7 +++++++ 3 files changed, 19 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index e005b0307b84..ab99f1dc827f 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -249,6 +249,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x2040, 0x7200), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* DJI CineSSD */ + { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index 0e400f382f3a..12b082082102 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -341,6 +341,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb, return 0; } + if ((us->fflags & US_FL_NO_ATA_1X) && + (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) { + memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB, + sizeof(usb_stor_sense_invalidCDB)); + srb->result = SAM_STAT_CHECK_CONDITION; + done(srb); + return 0; + } + /* enqueue the command and wake up the control thread */ srb->scsi_done = done; us->srb = srb; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 2b6f7e5e52c3..1f0b2c8db161 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2188,6 +2188,13 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100, "Micro Mini 1GB", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), +/* Reported-by: Tim Anderson */ +UNUSUAL_DEV( 0x2ca3, 0x0031, 0x0000, 0x9999, + "DJI", + "CineSSD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* * Nick Bowler * SCSI stack spams (otherwise harmless) error messages. From da7a5b5401b57641efc6efad2c20bead86ab4f08 Mon Sep 17 00:00:00 2001 From: Mathias Nyman Date: Mon, 3 Sep 2018 15:44:16 +0300 Subject: [PATCH 132/151] usb: Avoid use-after-free by flushing endpoints early in usb_set_interface() commit f9a5b4f58b280c1d26255376713c132f93837621 upstream. The steps taken by usb core to set a new interface is very different from what is done on the xHC host side. xHC hardware will do everything in one go. One command is used to set up new endpoints, free old endpoints, check bandwidth, and run the new endpoints. All this is done by xHC when usb core asks the hcd to check for available bandwidth. At this point usb core has not yet flushed the old endpoints, which will cause use-after-free issues in xhci driver as queued URBs are cancelled on a re-allocated endpoint. To resolve this add a call to usb_disable_interface() which will flush the endpoints before calling usb_hcd_alloc_bandwidth() Additional checks in xhci driver will also be implemented to gracefully handle stale URB cancel on freed and re-allocated endpoints Cc: Reported-by: Sudip Mukherjee Signed-off-by: Mathias Nyman Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/message.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 251b44300b38..f8eb72d350ac 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1282,6 +1282,11 @@ void usb_enable_interface(struct usb_device *dev, * is submitted that needs that bandwidth. 
Some other operating systems * allocate bandwidth early, when a configuration is chosen. * + * xHCI reserves bandwidth and configures the alternate setting in + * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting + * may be disabled. Drivers cannot rely on any particular alternate + * setting being in effect after a failure. + * * This call is synchronous, and may not be used in an interrupt context. * Also, drivers must not change altsettings while urbs are scheduled for * endpoints in that interface; all such urbs must first be completed @@ -1317,6 +1322,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate) alternate); return -EINVAL; } + /* + * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth, + * including freeing dropped endpoint ring buffers. + * Make sure the interface endpoints are flushed before that + */ + usb_disable_interface(dev, iface, false); /* Make sure we have enough bandwidth for this alternate interface. * Remove the current alt setting and add the new alt setting. From c601d90f7a9eb2b547bd215f83741ab75facdb35 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 1 Sep 2018 17:23:47 +0800 Subject: [PATCH 133/151] usb: host: u132-hcd: Fix a sleep-in-atomic-context bug in u132_get_frame() commit 6d4f268fa132742fe96dad22307c68d237356d88 upstream. i_usX2Y_subs_startup in usbusx2yaudio.c is a completion handler function for the USB driver. So it should not sleep, but it is can sleep according to the function call paths (from bottom to top) in Linux-4.16. [FUNC] msleep drivers/usb/host/u132-hcd.c, 2558: msleep in u132_get_frame drivers/usb/core/hcd.c, 2231: [FUNC_PTR]u132_get_frame in usb_hcd_get_frame_number drivers/usb/core/usb.c, 822: usb_hcd_get_frame_number in usb_get_current_frame_number sound/usb/usx2y/usbusx2yaudio.c, 303: usb_get_current_frame_number in i_usX2Y_urb_complete sound/usb/usx2y/usbusx2yaudio.c, 366: i_usX2Y_urb_complete in i_usX2Y_subs_startup Note that [FUNC_PTR] means a function pointer call is used. To fix this bug, msleep() is replaced with mdelay(). This bug is found by my static analysis tool DSAC. Signed-off-by: Jia-Ju Bai Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/u132-hcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index c0671750671f..ab5128755672 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c @@ -2569,7 +2569,7 @@ static int u132_get_frame(struct usb_hcd *hcd) } else { int frame = 0; dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n"); - msleep(100); + mdelay(100); return frame; } } From a9cdf4ce5cb8ccda956400c636829df67cb59ad6 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 21 Aug 2018 11:59:52 +0200 Subject: [PATCH 134/151] USB: serial: io_ti: fix array underflow in completion handler commit 691a03cfe8ca483f9c48153b869d354e4ae3abef upstream. As reported by Dan Carpenter, a malicious USB device could set port_number to a negative value and we would underflow the port array in the interrupt completion handler. As these devices only have one or two ports, fix this by making sure we only consider the seventh bit when determining the port number (and ignore bits 0xb0 which are typically set to 0x30). 
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable Reported-by: Dan Carpenter Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/io_ti.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h index 1bd67b24f916..bc9ff5ebd67c 100644 --- a/drivers/usb/serial/io_ti.h +++ b/drivers/usb/serial/io_ti.h @@ -178,7 +178,7 @@ struct ump_interrupt { } __attribute__((packed)); -#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3) +#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01) #define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) #define TIUMP_INTERRUPT_CODE_LSR 0x03 #define TIUMP_INTERRUPT_CODE_MSR 0x04 From 2fd95e988e49f0a969314f907556965352e3bb8e Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 1 Sep 2018 16:25:08 +0800 Subject: [PATCH 135/151] usb: misc: uss720: Fix two sleep-in-atomic-context bugs commit bc8acc214d3f1cafebcbcd101a695bbac716595d upstream. async_complete() in uss720.c is a completion handler function for the USB driver. So it should not sleep, but it is can sleep according to the function call paths (from bottom to top) in Linux-4.16. [FUNC] set_1284_register(GFP_KERNEL) drivers/usb/misc/uss720.c, 372: set_1284_register in parport_uss720_frob_control drivers/parport/ieee1284.c, 560: [FUNC_PTR]parport_uss720_frob_control in parport_ieee1284_ack_data_avail drivers/parport/ieee1284.c, 577: parport_ieee1284_ack_data_avail in parport_ieee1284_interrupt ./include/linux/parport.h, 474: parport_ieee1284_interrupt in parport_generic_irq drivers/usb/misc/uss720.c, 116: parport_generic_irq in async_complete [FUNC] get_1284_register(GFP_KERNEL) drivers/usb/misc/uss720.c, 382: get_1284_register in parport_uss720_read_status drivers/parport/ieee1284.c, 555: [FUNC_PTR]parport_uss720_read_status in parport_ieee1284_ack_data_avail drivers/parport/ieee1284.c, 577: parport_ieee1284_ack_data_avail in parport_ieee1284_interrupt ./include/linux/parport.h, 474: parport_ieee1284_interrupt in parport_generic_irq drivers/usb/misc/uss720.c, 116: parport_generic_irq in async_complete Note that [FUNC_PTR] means a function pointer call is used. To fix these bugs, GFP_KERNEL is replaced with GFP_ATOMIC. These bugs are found by my static analysis tool DSAC. Signed-off-by: Jia-Ju Bai Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/uss720.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 3cb05eb5f1df..9bec15f28d56 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -392,7 +392,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch mask &= 0x0f; val &= 0x0f; d = (priv->reg[1] & (~mask)) ^ val; - if (set_1284_register(pp, 2, d, GFP_KERNEL)) + if (set_1284_register(pp, 2, d, GFP_ATOMIC)) return 0; priv->reg[1] = d; return d & 0xf; @@ -402,7 +402,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp) { unsigned char ret; - if (get_1284_register(pp, 1, &ret, GFP_KERNEL)) + if (get_1284_register(pp, 1, &ret, GFP_ATOMIC)) return 0; return ret & 0xf8; } From a89c7b5a8fb9b17a994d4f74160dcd4dfd1bd6fc Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 15 Aug 2018 21:44:25 +0100 Subject: [PATCH 136/151] USB: yurex: Fix buffer over-read in yurex_write() commit 7e10f14ebface44a48275c8d6dc1caae3668d5a9 upstream. If the written data starts with a digit, yurex_write() tries to parse it as an integer using simple_strtoull(). 
This requires a null- terminator, and currently there's no guarantee that there is one. (The sample program at https://github.com/NeoCat/YUREX-driver-for-Linux/blob/master/sample/yurex_clock.pl writes an integer without a null terminator. It seems like it must have worked by chance!) Always add a null byte after the written data. Enlarge the buffer to allow for this. Cc: stable@vger.kernel.org Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/yurex.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 10f93ca826d1..83eaccfa9ee5 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -437,13 +437,13 @@ static ssize_t yurex_write(struct file *file, const char *user_buffer, size_t co { struct usb_yurex *dev; int i, set = 0, retval = 0; - char buffer[16]; + char buffer[16 + 1]; char *data = buffer; unsigned long long c, c2 = 0; signed long timeout = 0; DEFINE_WAIT(wait); - count = min(sizeof(buffer), count); + count = min(sizeof(buffer) - 1, count); dev = file->private_data; /* verify that we actually have some data to write */ @@ -462,6 +462,7 @@ static ssize_t yurex_write(struct file *file, const char *user_buffer, size_t co retval = -EFAULT; goto error; } + buffer[count] = 0; memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE); switch (buffer[0]) { From 6f3e0e8ac116f2a88e2d21d4e66e1aaa8bc59e75 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 1 Sep 2018 16:12:10 +0800 Subject: [PATCH 137/151] usb: cdc-wdm: Fix a sleep-in-atomic-context bug in service_outstanding_interrupt() commit 6e22e3af7bb3a7b9dc53cb4687659f6e63fca427 upstream. wdm_in_callback() is a completion handler function for the USB driver. So it should not sleep. But it calls service_outstanding_interrupt(), which calls usb_submit_urb() with GFP_KERNEL. To fix this bug, GFP_KERNEL is replaced with GFP_ATOMIC. This bug is found by my static analysis tool DSAC. Signed-off-by: Jia-Ju Bai Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/usb/class/cdc-wdm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index df0878c4810c..8e073c9739e9 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -452,7 +452,7 @@ static int clear_wdm_read_flag(struct wdm_device *desc) set_bit(WDM_RESPONDING, &desc->flags); spin_unlock_irq(&desc->iuspin); - rv = usb_submit_urb(desc->response, GFP_KERNEL); + rv = usb_submit_urb(desc->response, GFP_ATOMIC); spin_lock_irq(&desc->iuspin); if (rv) { dev_err(&desc->intf->dev, From cece1875d935c58d9dd6a2733be852948d5e1b6d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 6 Sep 2018 12:47:51 +0300 Subject: [PATCH 138/151] cifs: prevent integer overflow in nxt_dir_entry() commit 8ad8aa353524d89fa2e09522f3078166ff78ec42 upstream. The "old_entry + le32_to_cpu(pDirInfo->NextEntryOffset)" can wrap around so I have added a check for integer overflow. 
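(Illustration, not part of the patch: a standalone sketch of the wrap-around check on made-up 64-bit addresses. If adding the attacker-controlled offset moves the cursor backwards, the addition overflowed and the entry is rejected, which is the same idea as the check added in the diff below.)

#include <stdint.h>
#include <stdio.h>

static int advance(uint64_t entry, uint32_t next_offset, uint64_t end_of_smb)
{
	uint64_t next = entry + next_offset;

	if (next < entry || next >= end_of_smb) {
		printf("invalid offset %u\n", (unsigned int)next_offset);
		return -1;		/* malformed directory entry */
	}
	return 0;
}

int main(void)
{
	uint64_t buf = 0xffffffffffff0000ull;	/* pretend SMB buffer address */

	advance(buf, 0x200, buf + 0x1000);	/* in range, accepted          */
	advance(buf, 0xffffff00u, buf + 0x1000);/* wraps past 2^64, rejected   */
	return 0;
}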
Reported-by: Dr Silvio Cesare of InfoSect Reviewed-by: Ronnie Sahlberg Reviewed-by: Aurelien Aptel Signed-off-by: Dan Carpenter Signed-off-by: Steve French CC: Stable Signed-off-by: Greg Kroah-Hartman --- fs/cifs/readdir.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 404b084614aa..707d8cf95348 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -366,8 +366,15 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level) new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + pfData->FileNameLength; - } else - new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset); + } else { + u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset); + + if (old_entry + next_offset < old_entry) { + cifs_dbg(VFS, "invalid offset %u\n", next_offset); + return NULL; + } + new_entry = old_entry + next_offset; + } cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry); /* validate that new_entry is not past end of SMB */ if (new_entry >= end_of_smb) { From 34f39bdb040b3927ff843ca78d911d05617d8967 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 6 Sep 2018 12:48:22 +0300 Subject: [PATCH 139/151] CIFS: fix wrapping bugs in num_entries() commit 56446f218af1133c802dad8e9e116f07f381846c upstream. The problem is that "entryptr + next_offset" and "entryptr + len + size" can wrap. I ended up changing the type of "entryptr" because it makes the math easier when we don't have to do so much casting. Signed-off-by: Dan Carpenter Signed-off-by: Steve French Reviewed-by: Aurelien Aptel Reviewed-by: Pavel Shilovsky CC: Stable Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb2pdu.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index fc652288491e..2667de60b2cb 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2188,33 +2188,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size) int len; unsigned int entrycount = 0; unsigned int next_offset = 0; - FILE_DIRECTORY_INFO *entryptr; + char *entryptr; + FILE_DIRECTORY_INFO *dir_info; if (bufstart == NULL) return 0; - entryptr = (FILE_DIRECTORY_INFO *)bufstart; + entryptr = bufstart; while (1) { - entryptr = (FILE_DIRECTORY_INFO *) - ((char *)entryptr + next_offset); - - if ((char *)entryptr + size > end_of_buf) { + if (entryptr + next_offset < entryptr || + entryptr + next_offset > end_of_buf || + entryptr + next_offset + size > end_of_buf) { cifs_dbg(VFS, "malformed search entry would overflow\n"); break; } - len = le32_to_cpu(entryptr->FileNameLength); - if ((char *)entryptr + len + size > end_of_buf) { + entryptr = entryptr + next_offset; + dir_info = (FILE_DIRECTORY_INFO *)entryptr; + + len = le32_to_cpu(dir_info->FileNameLength); + if (entryptr + len < entryptr || + entryptr + len > end_of_buf || + entryptr + len + size > end_of_buf) { cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n", end_of_buf); break; } - *lastentry = (char *)entryptr; + *lastentry = entryptr; entrycount++; - next_offset = le32_to_cpu(entryptr->NextEntryOffset); + next_offset = le32_to_cpu(dir_info->NextEntryOffset); if (!next_offset) break; } From 1a92b397cc527facb363617e5711702aeceb371e Mon Sep 17 00:00:00 2001 From: "Maciej W. 
Rozycki" Date: Tue, 15 May 2018 23:32:45 +0100 Subject: [PATCH 140/151] binfmt_elf: Respect error return from `regset->active' [ Upstream commit 2f819db565e82e5f73cd42b39925098986693378 ] The regset API documented in defines -ENODEV as the result of the `->active' handler to be used where the feature requested is not available on the hardware found. However code handling core file note generation in `fill_thread_core_info' interpretes any non-zero result from the `->active' handler as the regset requested being active. Consequently processing continues (and hopefully gracefully fails later on) rather than being abandoned right away for the regset requested. Fix the problem then by making the code proceed only if a positive result is returned from the `->active' handler. Signed-off-by: Maciej W. Rozycki Signed-off-by: Paul Burton Fixes: 4206d3aa1978 ("elf core dump: notes user_regset") Patchwork: https://patchwork.linux-mips.org/patch/19332/ Cc: Alexander Viro Cc: James Hogan Cc: Ralf Baechle Cc: linux-fsdevel@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/binfmt_elf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index e39fe28f1ea0..c3b57886b5bc 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1552,7 +1552,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, const struct user_regset *regset = &view->regsets[i]; do_thread_regset_writeback(t->task, regset); if (regset->core_note_type && regset->get && - (!regset->active || regset->active(t->task, regset))) { + (!regset->active || regset->active(t->task, regset) > 0)) { int ret; size_t size = regset->n * regset->size; void *data = kmalloc(size, GFP_KERNEL); From 2e866a2750fb683bd6d6b9422856b4f10be6659b Mon Sep 17 00:00:00 2001 From: Ronny Chevalier Date: Wed, 11 Jul 2018 14:39:37 +0200 Subject: [PATCH 141/151] audit: fix use-after-free in audit_add_watch [ Upstream commit baa2a4fdd525c8c4b0f704d20457195b29437839 ] audit_add_watch stores locally krule->watch without taking a reference on watch. Then, it calls audit_add_to_parent, and uses the watch stored locally. Unfortunately, it is possible that audit_add_to_parent updates krule->watch. When it happens, it also drops a reference of watch which could free the watch. How to reproduce (with KASAN enabled): auditctl -w /etc/passwd -F success=0 -k test_passwd auditctl -w /etc/passwd -F success=1 -k test_passwd2 The second call to auditctl triggers the use-after-free, because audit_to_parent updates krule->watch to use a previous existing watch and drops the reference to the newly created watch. To fix the issue, we grab a reference of watch and we release it at the end of the function. Signed-off-by: Ronny Chevalier Reviewed-by: Richard Guy Briggs Signed-off-by: Paul Moore Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/audit_watch.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index d39e6e3f1e0a..23484712654e 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -414,6 +414,13 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) struct path parent_path; int h, ret = 0; + /* + * When we will be calling audit_add_to_parent, krule->watch might have + * been updated and watch might have been freed. + * So we need to keep a reference of watch. 
+ */ + audit_get_watch(watch); + mutex_unlock(&audit_filter_mutex); /* Avoid calling path_lookup under audit_filter_mutex. */ @@ -422,8 +429,10 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) /* caller expects mutex locked */ mutex_lock(&audit_filter_mutex); - if (ret) + if (ret) { + audit_put_watch(watch); return ret; + } /* either find an old parent or attach a new one */ parent = audit_find_parent(parent_path.dentry->d_inode); @@ -444,6 +453,7 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list) *list = &audit_inode_hash[h]; error: path_put(&parent_path); + audit_put_watch(watch); return ret; } From 0bcaafbf8a2a3d583a737d88a672a868a44b2902 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Sat, 7 Jul 2018 05:37:22 +0200 Subject: [PATCH 142/151] mtdchar: fix overflows in adjustment of `count` [ Upstream commit 6c6bc9ea84d0008024606bf5ba10519e20d851bf ] The first checks in mtdchar_read() and mtdchar_write() attempt to limit `count` such that `*ppos + count <= mtd->size`. However, they ignore the possibility of `*ppos > mtd->size`, allowing the calculation of `count` to wrap around. `mtdchar_lseek()` prevents seeking beyond mtd->size, but the pread/pwrite syscalls bypass this. I haven't found any codepath on which this actually causes dangerous behavior, but it seems like a sensible change anyway. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Jann Horn Signed-off-by: Boris Brezillon Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/mtdchar.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 53563955931b..77f8f598344b 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -190,8 +190,12 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count, pr_debug("MTD_read\n"); - if (*ppos + count > mtd->size) - count = mtd->size - *ppos; + if (*ppos + count > mtd->size) { + if (*ppos < mtd->size) + count = mtd->size - *ppos; + else + count = 0; + } if (!count) return 0; @@ -276,7 +280,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c pr_debug("MTD_write\n"); - if (*ppos == mtd->size) + if (*ppos >= mtd->size) return -ENOSPC; if (*ppos + count > mtd->size) From ebb9196a1ed43830a4790386483d272fb840e75e Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Mon, 16 Jul 2018 08:26:36 -0700 Subject: [PATCH 143/151] MIPS: loongson64: cs5536: Fix PCI_OHCI_INT_REG reads [ Upstream commit cd87668d601f622e0ebcfea4f78d116d5f572f4d ] The PCI_OHCI_INT_REG case in pci_ohci_read_reg() contains the following if statement: if ((lo & 0x00000f00) == CS5536_USB_INTR) CS5536_USB_INTR expands to the constant 11, which gives us the following condition which can never evaluate true: if ((lo & 0xf00) == 11) At least when using GCC 8.1.0 this falls foul of the tautoligcal-compare warning, and since the code is built with the -Werror flag the build fails. Fix this by shifting lo right by 8 bits in order to match the corresponding PCI_OHCI_INT_REG case in pci_ohci_write_reg(). 
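A toy program makes the problem with the original test easy to see: the field selected by the 0x00000f00 mask occupies bits 11:8 of the register, so the raw masked value can never equal a small constant like 11; the value has to be shifted down before the comparison. The shift and constant below are illustrative only, not the real CS5536 register definitions:

#include <stdio.h>

#define USB_SHIFT 8     /* hypothetical field position */
#define USB_INTR  11    /* hypothetical routing value */

int main(void)
{
        unsigned int lo = USB_INTR << USB_SHIFT;        /* field holds 11 */

        if ((lo & 0x00000f00) == USB_INTR)              /* compares 0xb00 with 11 */
                printf("old test: matched\n");
        else
                printf("old test: can never match\n");

        if (((lo >> USB_SHIFT) & 0xf) == USB_INTR)      /* shift first, then compare */
                printf("new test: matched\n");
        return 0;
}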
Signed-off-by: Paul Burton Patchwork: https://patchwork.linux-mips.org/patch/19861/ Cc: Huacai Chen Cc: James Hogan Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mips/loongson/common/cs5536/cs5536_ohci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/loongson/common/cs5536/cs5536_ohci.c b/arch/mips/loongson/common/cs5536/cs5536_ohci.c index f7c905e50dc4..92dc6bafc127 100644 --- a/arch/mips/loongson/common/cs5536/cs5536_ohci.c +++ b/arch/mips/loongson/common/cs5536/cs5536_ohci.c @@ -138,7 +138,7 @@ u32 pci_ohci_read_reg(int reg) break; case PCI_OHCI_INT_REG: _rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo); - if ((lo & 0x00000f00) == CS5536_USB_INTR) + if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR) conf_data = 1; break; default: From 073b26b9b5de681008db7cdf099506f66b945b7f Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Thu, 12 Jul 2018 11:28:24 +0200 Subject: [PATCH 144/151] ARM: hisi: handle of_iomap and fix missing of_node_put [ Upstream commit d396cb185c0337aae5664b250cdd9a73f6eb1503 ] Relying on an unchecked of_iomap() which can return NULL is problematic here, an explicit check seems mandatory. Also the call to of_find_compatible_node() returns a device node with refcount incremented therefor an explicit of_node_put() is needed here. Signed-off-by: Nicholas Mc Guire Fixes: commit 22bae4290457 ("ARM: hi3xxx: add hotplug support") Signed-off-by: Wei Xu Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-hisi/hotplug.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/arch/arm/mach-hisi/hotplug.c b/arch/arm/mach-hisi/hotplug.c index 84e6919f68c7..6385abcd321e 100644 --- a/arch/arm/mach-hisi/hotplug.c +++ b/arch/arm/mach-hisi/hotplug.c @@ -145,13 +145,20 @@ static int hi3xxx_hotplug_init(void) struct device_node *node; node = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl"); - if (node) { - ctrl_base = of_iomap(node, 0); - id = HI3620_CTRL; - return 0; + if (!node) { + id = ERROR_CTRL; + return -ENOENT; } - id = ERROR_CTRL; - return -ENOENT; + + ctrl_base = of_iomap(node, 0); + of_node_put(node); + if (!ctrl_base) { + id = ERROR_CTRL; + return -ENOMEM; + } + + id = HI3620_CTRL; + return 0; } void hi3xxx_set_cpu(int cpu, bool enable) From 03ace9d6d931aa3de6746652dc9cd1f853d3c978 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Thu, 12 Jul 2018 11:28:23 +0200 Subject: [PATCH 145/151] ARM: hisi: check of_iomap and fix missing of_node_put [ Upstream commit 81646a3d39ef14749301374a3a0b8311384cd412 ] of_find_compatible_node() returns a device node with refcount incremented and thus needs an explicit of_node_put(). Further relying on an unchecked of_iomap() which can return NULL is problematic here, after all ctrl_base is critical enough for hix5hd2_set_cpu() to call BUG() if not available so a check seems mandated here. 
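The shape of the fix generalises beyond the OF API: a lookup that returns a counted reference must be released once the derived resource has been obtained, and that derived resource must itself be checked before anything relies on it. A rough userspace analogue, with a file standing in for the device node and a parsed value standing in for the mapping (all names are invented):

#include <stdio.h>

static int init_ctrl(const char *path, unsigned long *ctrl_base)
{
        FILE *f = fopen(path, "r");     /* stand-in for of_find_compatible_node() */
        int ok;

        if (!f)
                return -1;              /* lookup failed: nothing to release */

        ok = (fscanf(f, "%lx", ctrl_base) == 1);        /* stand-in for of_iomap() */
        fclose(f);                      /* stand-in for of_node_put(): drop it unconditionally */

        if (!ok)
                return -1;              /* derived resource invalid: fail, don't use it */
        return 0;
}

int main(void)
{
        unsigned long base;

        if (init_ctrl("ctrl_base.txt", &base))
                fprintf(stderr, "init_ctrl failed\n");
        else
                printf("ctrl_base = 0x%lx\n", base);
        return 0;
}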
Signed-off-by: Nicholas Mc Guire 0002 Fixes: commit 06cc5c1d4d73 ("ARM: hisi: enable hix5hd2 SoC") Signed-off-by: Wei Xu Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-hisi/hotplug.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/arm/mach-hisi/hotplug.c b/arch/arm/mach-hisi/hotplug.c index 6385abcd321e..f31b4d99b5fb 100644 --- a/arch/arm/mach-hisi/hotplug.c +++ b/arch/arm/mach-hisi/hotplug.c @@ -177,11 +177,15 @@ static bool hix5hd2_hotplug_init(void) struct device_node *np; np = of_find_compatible_node(NULL, NULL, "hisilicon,cpuctrl"); - if (np) { - ctrl_base = of_iomap(np, 0); - return true; - } - return false; + if (!np) + return false; + + ctrl_base = of_iomap(np, 0); + of_node_put(np); + if (!ctrl_base) + return false; + + return true; } void hix5hd2_set_cpu(int cpu, bool enable) From 4d50d942bdd8bb750ce01fae8cf875431a4d479f Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 12 Jul 2018 22:29:55 +0100 Subject: [PATCH 146/151] parport: sunbpp: fix error return code [ Upstream commit faa1a47388b33623e4d504c23569188907b039a0 ] Return an error code on failure. Change leading spaces to tab on the first if. Problem found using Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Sudip Mukherjee Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/parport/parport_sunbpp.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c index dffd6d0bd15b..634a243734e3 100644 --- a/drivers/parport/parport_sunbpp.c +++ b/drivers/parport/parport_sunbpp.c @@ -286,12 +286,16 @@ static int bpp_probe(struct platform_device *op) ops = kmemdup(&parport_sunbpp_ops, sizeof(struct parport_operations), GFP_KERNEL); - if (!ops) + if (!ops) { + err = -ENOMEM; goto out_unmap; + } dprintk(("register_port\n")); - if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) + if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) { + err = -ENOMEM; goto out_free_ops; + } p->size = size; p->dev = &op->dev; From 4a8fd0459d2c910b4d086e52990c8989bcbf5147 Mon Sep 17 00:00:00 2001 From: Zhouyang Jia Date: Tue, 12 Jun 2018 12:40:03 +0800 Subject: [PATCH 147/151] rtc: bq4802: add error handling for devm_ioremap [ Upstream commit 7874b919866ba91bac253fa219d3d4c82bb944df ] When devm_ioremap fails, the lack of error-handling code may cause unexpected results. This patch adds error-handling code after calling devm_ioremap. 
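Both probe-path fixes in this stretch of the series come down to the same discipline: every failure branch in a goto-style unwind must set a meaningful error code before jumping, otherwise the function returns stale (often zero) status with half-initialised state. A compact, self-contained sketch of the pattern, using plain allocations as stand-ins for ioremap() and port registration (all names are hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_priv {
        void *regs;
        void *ops;
        void *port;
};

static int probe(struct dev_priv *p)
{
        int err;

        p->regs = malloc(64);           /* stand-in for devm_ioremap() */
        if (!p->regs) {
                err = -ENOMEM;          /* set the code, don't fall through silently */
                goto out;
        }

        p->ops = malloc(32);            /* stand-in for kmemdup() of the ops */
        if (!p->ops) {
                err = -ENOMEM;
                goto out_unmap;
        }

        p->port = malloc(16);           /* stand-in for parport_register_port() */
        if (!p->port) {
                err = -ENOMEM;
                goto out_free_ops;
        }
        return 0;

out_free_ops:
        free(p->ops);
out_unmap:
        free(p->regs);
out:
        return err;
}

int main(void)
{
        struct dev_priv p = { 0 };
        int ret = probe(&p);

        printf("probe returned %d\n", ret);
        if (!ret) {                     /* release what a successful probe would keep */
                free(p.port);
                free(p.ops);
                free(p.regs);
        }
        return ret ? 1 : 0;
}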
Signed-off-by: Zhouyang Jia Signed-off-by: Alexandre Belloni Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/rtc/rtc-bq4802.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c index fc0ff87aa5df..f53198207e93 100644 --- a/drivers/rtc/rtc-bq4802.c +++ b/drivers/rtc/rtc-bq4802.c @@ -164,6 +164,10 @@ static int bq4802_probe(struct platform_device *pdev) } else if (p->r->flags & IORESOURCE_MEM) { p->regs = devm_ioremap(&pdev->dev, p->r->start, resource_size(p->r)); + if (!p->regs){ + err = -ENOMEM; + goto out; + } p->read = bq4802_read_mem; p->write = bq4802_write_mem; } else { From 32ab7310c02164a0c7d47fbe99f07dca88ee624d Mon Sep 17 00:00:00 2001 From: Timo Wischer Date: Tue, 10 Jul 2018 17:28:45 +0200 Subject: [PATCH 148/151] ALSA: pcm: Fix snd_interval_refine first/last with open min/max [ Upstream commit ff2d6acdf6f13d9f8fdcd890844c6d7535ac1f10 ] Without this commit the following intervals [x y), (x y) were be replaced to (y-1 y) by snd_interval_refine_last(). This was also done if y-1 is part of the previous interval. With this changes it will be replaced with [y-1 y) in case of y-1 is part of the previous interval. A similar behavior will be used for snd_interval_refine_first(). This commit adapts the changes for alsa-lib of commit 9bb985c ("pcm: snd_interval_refine_first/last: exclude value only if also excluded before") Signed-off-by: Timo Wischer Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- sound/core/pcm_lib.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 8f04ccc44eee..25d4d20138fe 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -714,27 +714,33 @@ EXPORT_SYMBOL(snd_interval_refine); static int snd_interval_refine_first(struct snd_interval *i) { + const unsigned int last_max = i->max; + if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (snd_interval_single(i)) return 0; i->max = i->min; - i->openmax = i->openmin; - if (i->openmax) + if (i->openmin) i->max++; + /* only exclude max value if also excluded before refine */ + i->openmax = (i->openmax && i->max >= last_max); return 1; } static int snd_interval_refine_last(struct snd_interval *i) { + const unsigned int last_min = i->min; + if (snd_BUG_ON(snd_interval_empty(i))) return -EINVAL; if (snd_interval_single(i)) return 0; i->min = i->max; - i->openmin = i->openmax; - if (i->openmin) + if (i->openmax) i->min--; + /* only exclude min value if also excluded before refine */ + i->openmin = (i->openmin && i->min <= last_min); return 1; } From cf9bbea6229e8aca683c3a59d43fc926dc19163a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 4 Jul 2018 12:38:09 +0300 Subject: [PATCH 149/151] drm/panel: type promotion bug in s6e8aa0_read_mtp_id() [ Upstream commit cd0e0ca69109d025b1a1b6609f70682db62138b0 ] The ARRAY_SIZE() macro is type size_t. If s6e8aa0_dcs_read() returns a negative error code, then "ret < ARRAY_SIZE(id)" is false because the negative error code is type promoted to a high positive value. 
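The promotion trap is easy to reproduce in a few lines of ordinary C: sizeof, and therefore ARRAY_SIZE(), has type size_t, so a negative int on the left-hand side of the comparison is converted to a huge unsigned value and the short-read test silently passes. A minimal demonstration, with the error value chosen arbitrarily:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        unsigned char id[3] = { 0xAA, 0xBB, 0xCC };
        int ret = -5;                           /* pretend the read returned an error */

        if (ret < ARRAY_SIZE(id))               /* int promoted to size_t: false! */
                printf("naive check: caught the error\n");
        else
                printf("naive check: error slipped through (ret=%d)\n", ret);

        if (ret < 0 || (size_t)ret < ARRAY_SIZE(id))    /* check the sign first */
                printf("fixed check: caught the error\n");
        return 0;
}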
Fixes: 02051ca06371 ("drm/panel: add S6E8AA0 driver") Signed-off-by: Dan Carpenter Reviewed-by: Andrzej Hajda Signed-off-by: Thierry Reding Link: https://patchwork.freedesktop.org/patch/msgid/20180704093807.s3lqsb2v6dg2k43d@kili.mountain Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/panel/panel-s6e8aa0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c index b5217fe37f02..0e46b6762cf0 100644 --- a/drivers/gpu/drm/panel/panel-s6e8aa0.c +++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c @@ -835,7 +835,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx) int ret, i; ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id)); - if (ret < ARRAY_SIZE(id) || id[0] == 0x00) { + if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) { dev_err(ctx->dev, "read id failed\n"); ctx->error = -EIO; return; From 66688f6191c9299ccb8ccf3e1dc99cc59b194587 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 21 Aug 2018 11:59:53 +0200 Subject: [PATCH 150/151] USB: serial: ti_usb_3410_5052: fix array underflow in completion handler commit 5dfdd24eb3d39d815bc952ae98128e967c9bba49 upstream. Similarly to a recently reported bug in io_ti, a malicious USB device could set port_number to a negative value and we would underflow the port array in the interrupt completion handler. As these devices only have one or two ports, fix this by making sure we only consider the seventh bit when determining the port number (and ignore bits 0xb0 which are typically set to 0x30). Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: stable Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/ti_usb_3410_5052.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index 4a2423e84d55..d0ca4e4abb68 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h @@ -223,7 +223,7 @@ struct ti_interrupt { } __attribute__((packed)); /* Interrupt codes */ -#define TI_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3) +#define TI_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01) #define TI_GET_FUNC_FROM_CODE(c) ((c) & 0x0f) #define TI_CODE_HARDWARE_ERROR 0xFF #define TI_CODE_DATA_ERROR 0x03 From 921b2fed6a79439ef1609ef4af0ada5cccb3555c Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 26 Sep 2018 08:33:59 +0200 Subject: [PATCH 151/151] Linux 3.18.123 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7f8ee7afb801..7943b4c59499 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 18 -SUBLEVEL = 122 +SUBLEVEL = 123 EXTRAVERSION = NAME = Diseased Newt
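As a closing illustration for the ti_usb_3410_5052 change: an index derived from a device-supplied status byte has to be constrained to the valid range before it touches the port array, because nothing stops a malicious or confused device from sending a value that lands before the array's start. The status codes fed in below are invented to exercise the two macros and are not taken from the device protocol:

#include <stdio.h>

#define NUM_PORTS 2

/* old style: assumes the upper nibble is always sane, underflows otherwise */
#define PORT_FROM_CODE_UNSAFE(c)        (((c) >> 4) - 3)
/* fixed style: look only at the single bit that can legitimately vary */
#define PORT_FROM_CODE_SAFE(c)          (((c) >> 6) & 0x01)

static const char *ports[NUM_PORTS] = { "port0", "port1" };

static void handle_status(unsigned char code)
{
        int unsafe = PORT_FROM_CODE_UNSAFE(code);
        int safe = PORT_FROM_CODE_SAFE(code);

        printf("code 0x%02x -> unsafe index %d, safe index %d (%s)\n",
               code, unsafe, safe, ports[safe]);

        if (unsafe < 0 || unsafe >= NUM_PORTS)
                printf("  unsafe index would have read outside ports[]\n");
}

int main(void)
{
        handle_status(0x30);    /* hypothetical well-behaved code for port 0 */
        handle_status(0x40);    /* hypothetical well-behaved code for port 1 */
        handle_status(0x00);    /* malicious/buggy device: unsafe index is -3 */
        return 0;
}

The first macro reproduces the arithmetic the fix replaces; with a zero code byte it yields -3, which used as an array index would read memory before ports[], while the masked form can only ever produce 0 or 1.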