From 92d021e4f08d8b735b720f220a4e95aa103a21a4 Mon Sep 17 00:00:00 2001
From: Quantummech2000
Date: Tue, 23 Oct 2018 14:48:21 +0000
Subject: [PATCH] :gear: Hotplug: Fix Thunderplug.c

Signed-off-by: Quantummech2000
---
 arch/arm64/hotplug/thunderplug.c | 1105 +++++++++++++++---------------
 1 file changed, 537 insertions(+), 568 deletions(-)

diff --git a/arch/arm64/hotplug/thunderplug.c b/arch/arm64/hotplug/thunderplug.c
index e4de3be5ec31..2ae827124626 100644
--- a/arch/arm64/hotplug/thunderplug.c
+++ b/arch/arm64/hotplug/thunderplug.c
@@ -22,80 +22,39 @@
 #include
 #include
 #include
-#include
-#include "thunderplug.h"
+#ifdef CONFIG_STATE_NOTIFIER
+#include
+#endif
 
-#define DEBUG 0
+#define DEBUG			0
 
-#define THUNDERPLUG "thunderplug"
+#define THUNDERPLUG		"thunderplug"
 
-#ifdef CONFIG_SCHED_HMP
-#define DRIVER_VERSION 5
-#else
-#define DRIVER_VERSION 3
-#define DRIVER_SUBVER 0
-#endif
+#define DRIVER_VERSION		5
+#define DRIVER_SUBVER		4
 
-#define DEFAULT_CPU_LOAD_THRESHOLD (65)
-#define MIN_CPU_LOAD_THRESHOLD (10)
+#define DEFAULT_CPU_LOAD_THRESHOLD	(90)
 
-#ifdef CONFIG_USES_MALI_MP2_GPU
-#define GPU_HOTPLUG_ENABLED (0)
-#define DEFAULT_MIN_GPU_LOAD_THRESHOLD (65)
-#endif
+#define HOTPLUG_ENABLED		(0)
+#define STARTDELAY		1000
 
-#define HOTPLUG_ENABLED (0)
-#define DEFAULT_HOTPLUG_STYLE HOTPLUG_SCHED
-#define DEFAULT_SCHED_MODE BALANCED
+#define DEF_SAMPLING_MS		(20)
+#define MIN_SAMLING_MS		(10)
+#define MIN_CPU_UP_TIME		(300)
 
-#define DEF_SAMPLING_MS (500)
-#define MIN_SAMLING_MS (50)
-#define MIN_CPU_UP_TIME (750)
-#define TOUCH_BOOST_ENABLED (0)
+#define DEFAULT_BOOST_LOCK_DUR	500 * 1000L
+#define DEFAULT_NR_CPUS_BOOSTED	2
+#define MIN_INPUT_INTERVAL	150 * 1000L
 
 static bool isSuspended = false;
 
-static int suspend_cpu_num = 2, resume_cpu_num = (NR_CPUS -1);
-static int endurance_level = 0;
-static int core_limit = NR_CPUS;
-
 static int now[8], last_time[8];
-
-static int sampling_time = DEF_SAMPLING_MS;
-static int load_threshold = DEFAULT_CPU_LOAD_THRESHOLD;
-static int stop_boost = 0;
-
-#ifdef CONFIG_USES_MALI_MP2_GPU
-extern void gpu_enable(int num);
-extern u32 _mali_ukk_utilization_gp_pp(void);
-extern u32 mali_pp_scheduler_get_num_cores_enabled(void);
-#endif
-
 struct cpufreq_policy old_policy[NR_CPUS];
-
-#ifdef CONFIG_SCHED_HMP
-static int tplug_hp_style = DEFAULT_HOTPLUG_STYLE;
-static int tplug_sched_mode = DEFAULT_SCHED_MODE;
-#else
-static int tplug_hp_enabled = HOTPLUG_ENABLED;
-#endif
-static int touch_boost_enabled = TOUCH_BOOST_ENABLED;
-
-#ifdef CONFIG_USES_MALI_MP2_GPU
-static int gpu_hotplug_enabled = GPU_HOTPLUG_ENABLED;
-static int gpu_min_load_threshold = DEFAULT_MIN_GPU_LOAD_THRESHOLD;
-#endif
-
 static struct workqueue_struct *tplug_wq;
 static struct delayed_work tplug_work;
 
-static struct workqueue_struct *tplug_boost_wq;
-static struct delayed_work tplug_boost;
-
-static struct workqueue_struct *tplug_resume_wq;
-static struct delayed_work tplug_resume_work;
-
 static unsigned int last_load[8] = { 0 };
+static u64 last_boost_time;
 
 struct cpu_load_data {
 	u64 prev_cpu_idle;
@@ -107,58 +66,59 @@ struct cpu_load_data {
 	cpumask_var_t related_cpus;
 };
 
-static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
+static struct thunder_param_struct {
+	unsigned int cpus_boosted;
+	unsigned int target_cpus;
+	u64 boost_lock_dur;
+	u64 last_input;
+	int hotplug_suspend;
+	unsigned int sampling_time;
+	int suspend_cpu_num;
+	int resume_cpu_num;
+	int max_core_online;
+	int min_core_online;
+	int tplug_hp_enabled;
+	int load_threshold;
+	struct work_struct up_work;
+	struct notifier_block thunder_state_notif;
+} thunder_param = {
+	.cpus_boosted = DEFAULT_NR_CPUS_BOOSTED,
+	.boost_lock_dur = DEFAULT_BOOST_LOCK_DUR,
+	.suspend_cpu_num = 3,
+	.resume_cpu_num = (NR_CPUS -1),
+	.max_core_online = NR_CPUS,
+	.min_core_online = 1,
+	.sampling_time = DEF_SAMPLING_MS,
+	.load_threshold = DEFAULT_CPU_LOAD_THRESHOLD,
+	.tplug_hp_enabled = HOTPLUG_ENABLED,
+	.hotplug_suspend = 0,
+};
+
+static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
 
-/* Two Endurance Levels for Octa Cores,
- * Two for Quad Cores and
- * One for Dual
- */
 static inline void offline_cpus(void)
 {
 	unsigned int cpu;
-	switch(endurance_level) {
-	case 1:
-		if(suspend_cpu_num > NR_CPUS / 2 )
-			suspend_cpu_num = NR_CPUS / 2;
-		break;
-	case 2:
-		if( NR_CPUS >=4 && suspend_cpu_num > NR_CPUS / 4)
-			suspend_cpu_num = NR_CPUS / 4;
-		break;
-	default:
-		break;
-	}
-	for(cpu = NR_CPUS - 1; cpu > (suspend_cpu_num - 1); cpu--) {
+
+	for (cpu = NR_CPUS - 1; cpu >
+			(thunder_param.suspend_cpu_num - 1); cpu--) {
 		if (cpu_online(cpu))
 			cpu_down(cpu);
 	}
-	pr_info("%s: %d cpus were offlined\n", THUNDERPLUG, (NR_CPUS - suspend_cpu_num));
+	pr_info("%s: %d cpus were offlined\n",
+		THUNDERPLUG,
+		(NR_CPUS - thunder_param.suspend_cpu_num));
 }
 
 static inline void cpus_online_all(void)
 {
 	unsigned int cpu;
-	switch(endurance_level) {
-	case 1:
-		if(resume_cpu_num > (NR_CPUS / 2) - 1 || resume_cpu_num == 1)
-			resume_cpu_num = ((NR_CPUS / 2) - 1);
-		break;
-	case 2:
-		if( NR_CPUS >= 4 && resume_cpu_num > ((NR_CPUS / 4) - 1))
-			resume_cpu_num = ((NR_CPUS / 4) - 1);
-		break;
-	case 0:
-		resume_cpu_num = (NR_CPUS - 1);
-		break;
-	default:
-		break;
-	}
 
-	if(DEBUG)
-		pr_info("%s: resume_cpu_num = %d\n",THUNDERPLUG, resume_cpu_num);
+	if (DEBUG)
+		pr_info("%s: resume_cpu_num = %d\n",THUNDERPLUG,
			thunder_param.resume_cpu_num);
 
-	for (cpu = 1; cpu <= resume_cpu_num; cpu++) {
+	for (cpu = 1; cpu <= thunder_param.resume_cpu_num; cpu++) {
 		if (cpu_is_offline(cpu))
 			cpu_up(cpu);
 	}
@@ -166,251 +126,216 @@ static inline void cpus_online_all(void)
 	pr_info("%s: all cpus were onlined\n", THUNDERPLUG);
 }
 
-static void __ref tplug_boost_work_fn(struct work_struct *work)
-{
-	struct cpufreq_policy policy;
-	int cpu, ret;
-	for(cpu = 1; cpu < NR_CPUS; cpu++) {
-#ifdef CONFIG_SCHED_HMP
-		if(tplug_hp_style == 1)
-#else
-		if(tplug_hp_enabled == 1)
-#endif
-			if(cpu_is_offline(cpu))
-				cpu_up(cpu);
-		ret = cpufreq_get_policy(&policy, cpu);
-		if (ret)
-			continue;
-		old_policy[cpu] = policy;
-		policy.min = policy.max;
-		cpufreq_update_policy(cpu);
-	}
-	if(stop_boost == 0)
-		queue_delayed_work_on(0, tplug_boost_wq, &tplug_boost,
-			msecs_to_jiffies(10));
-}
-
-static void tplug_input_event(struct input_handle *handle, unsigned int type,
-				unsigned int code, int value)
+static ssize_t thunderplug_hotplug_suspend_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
 {
-	if (type == EV_KEY && code == BTN_TOUCH) {
-		if(DEBUG)
-			pr_info("%s : type = %d, code = %d, value = %d\n", THUNDERPLUG, type, code, value);
-		if(value == 0) {
-			stop_boost = 1;
-			if(DEBUG)
-				pr_info("%s: stopping boost\n", THUNDERPLUG);
-		}
-		else {
-			stop_boost = 0;
-			if(DEBUG)
-				pr_info("%s: starting boost\n", THUNDERPLUG);
-		}
-	}
-#ifdef CONFIG_SCHED_HMP
-	if ((type == EV_KEY) && (code == BTN_TOUCH) && (value == 1)
-		&& touch_boost_enabled == 1)
-#else
-	if ((type == EV_KEY) && (code == BTN_TOUCH) && (value == 1)
-		&& touch_boost_enabled == 1)
-#endif
-	{
-		if(DEBUG)
-			pr_info("%s : touch boost\n", THUNDERPLUG);
-		queue_delayed_work_on(0, tplug_boost_wq, &tplug_boost,
-			msecs_to_jiffies(0));
-	}
+	return sprintf(buf, "%d", thunder_param.hotplug_suspend);
 }
 
-static int tplug_input_connect(struct input_handler *handler,
-		struct input_dev *dev, const struct input_device_id *id)
+static ssize_t thunderplug_hotplug_suspend_store(struct kobject *kobj,
+			struct kobj_attribute *attr,
+			const char *buf, size_t count)
 {
-	struct input_handle *handle;
-	int error;
-
-	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
-	if (!handle)
-		return -ENOMEM;
-
-	handle->dev = dev;
-	handle->handler = handler;
-	handle->name = "cpufreq";
-
-	error = input_register_handle(handle);
-	if (error)
-		goto err2;
-
-	error = input_open_device(handle);
-	if (error)
-		goto err1;
+	int val;
 
-	return 0;
-err1:
-	input_unregister_handle(handle);
-err2:
-	kfree(handle);
-	return error;
-}
+	sscanf(buf, "%d", &val);
 
-static void tplug_input_disconnect(struct input_handle *handle)
-{
-	input_close_device(handle);
-	input_unregister_handle(handle);
-	kfree(handle);
+	switch(val) {
+	case 0:
+	case 1:
+		thunder_param.hotplug_suspend = val;
+		break;
+	default:
+		pr_info("%s: invalid value! set 0 or 1 here.\n",
			THUNDERPLUG);
+		thunder_param.hotplug_suspend = 0;
+		break;
+	}
+	return count;
 }
 
-static const struct input_device_id tplug_ids[] = {
-	{ .driver_info = 1 },
-	{ },
-};
-
-static struct input_handler tplug_input_handler = {
-	.event		= tplug_input_event,
-	.connect	= tplug_input_connect,
-	.disconnect	= tplug_input_disconnect,
-	.name		= "tplug_handler",
-	.id_table	= tplug_ids,
-};
-
-static ssize_t thunderplug_suspend_cpus_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static ssize_t thunderplug_suspend_cpus_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d", suspend_cpu_num);
+	return sprintf(buf, "%d", thunder_param.suspend_cpu_num);
 }
 
-static ssize_t thunderplug_suspend_cpus_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t thunderplug_suspend_cpus_store(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
 {
 	int val;
+
 	sscanf(buf, "%d", &val);
-	if(val < 1 || val > NR_CPUS)
+
+	if (val < 1 || val > NR_CPUS)
 		pr_info("%s: suspend cpus off-limits\n", THUNDERPLUG);
 	else
-		suspend_cpu_num = val;
+		thunder_param.suspend_cpu_num = val;
 
 	return count;
 }
 
-static ssize_t thunderplug_endurance_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static ssize_t thunderplug_max_core_online_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d", endurance_level);
+	return sprintf(buf, "%d", thunder_param.max_core_online);
 }
 
-static ssize_t __ref thunderplug_endurance_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t __ref thunderplug_max_core_online_store(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
 {
 	int val;
+
 	sscanf(buf, "%d", &val);
-#ifdef CONFIG_SCHED_HMP
-	if(tplug_hp_style==1) {
-#else
-	if(tplug_hp_enabled) {
-#endif
+
 	switch(val) {
-	case 0:
-	case 1:
-	case 2:
-		if(endurance_level!=val &&
-		   !(endurance_level > 1 && NR_CPUS < 4)) {
-			endurance_level = val;
-			offline_cpus();
-			cpus_online_all();
-		}
-		break;
-	default:
-		pr_info("%s: invalid endurance level\n", THUNDERPLUG);
-		break;
-	}
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+	case 6:
+	case 7:
+	case 8:
+		if (thunder_param.tplug_hp_enabled &&
			thunder_param.max_core_online != val) {
+			thunder_param.max_core_online = val;
+			offline_cpus();
+			cpus_online_all();
+		}
+		break;
+	default:
+		pr_info("%s: invalid max_core value\n",
			THUNDERPLUG);
+		break;
 	}
-	else
-		pr_info("%s: per-core hotplug style is disabled, ignoring endurance mode values\n", THUNDERPLUG);
-
 	return count;
 }
 
-static ssize_t thunderplug_sampling_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static ssize_t thunderplug_min_core_online_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d", sampling_time);
+	return sprintf(buf, "%d", thunder_param.min_core_online);
 }
 
-static ssize_t __ref thunderplug_sampling_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t __ref thunderplug_min_core_online_store(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
 {
 	int val;
+
 	sscanf(buf, "%d", &val);
-	if(val > MIN_SAMLING_MS)
-		sampling_time = val;
+
+	switch(val) {
+	case 1:
+	case 2:
+	case 3:
+	case 4:
+	case 5:
+	case 6:
+	case 7:
+	case 8:
+		if (thunder_param.tplug_hp_enabled &&
			thunder_param.min_core_online != val) {
+			thunder_param.min_core_online = val;
+			offline_cpus();
+			cpus_online_all();
+		}
+		break;
+	default:
+		pr_info("%s: invalid min_core value\n",
			THUNDERPLUG);
+		break;
+	}
 	return count;
 }
 
-static ssize_t thunderplug_tb_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static ssize_t thunderplug_sampling_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d", touch_boost_enabled);
+	return sprintf(buf, "%d", thunder_param.sampling_time);
 }
 
-static ssize_t __ref thunderplug_tb_enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t thunderplug_sampling_store(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
 {
 	int val;
+
 	sscanf(buf, "%d", &val);
-	switch(val)
-	{
-	case 0:
-	case 1:
-		touch_boost_enabled = val;
-		break;
-	default:
-		pr_info("%s : invalid choice\n", THUNDERPLUG);
-		break;
-	}
+
+	if (val >= MIN_SAMLING_MS)
+		thunder_param.sampling_time = val;
 	return count;
 }
 
-static ssize_t thunderplug_load_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static ssize_t thunderplug_load_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d", load_threshold);
+	return sprintf(buf, "%d", thunder_param.load_threshold);
 }
 
-static ssize_t __ref thunderplug_load_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t thunderplug_load_store(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
 {
 	int val;
+
 	sscanf(buf, "%d", &val);
-	if(val > 10)
-		load_threshold = val;
+
+	if (val > 10)
+		thunder_param.load_threshold = val;
 	return count;
 }
 
-#ifdef CONFIG_USES_MALI_MP2_GPU
-static ssize_t thunderplug_gpu_load_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static ssize_t thunderplug_boost_lock_duration_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d", gpu_min_load_threshold);
+	return sprintf(buf, "%llu\n",
		div_u64(thunder_param.boost_lock_dur, 1000));
 }
 
-static ssize_t __ref thunderplug_gpu_load_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t thunderplug_boost_lock_duration_store(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
 {
-	int val;
-	sscanf(buf, "%d", &val);
-	if(val > 10)
-		gpu_min_load_threshold = val;
+	int ret;
+	u64 val;
+
+	ret = sscanf(buf, "%llu", &val);
+	if (ret != 1)
+		return -EINVAL;
+
+	thunder_param.boost_lock_dur = val * 1000;
 	return count;
 }
 
-static unsigned int get_gpu_load(void) {
-	int util, load;
-	util = (int) _mali_ukk_utilization_gp_pp();
-	load = (util * 100 ) / 256;
-	return load;
+static ssize_t thunderplug_cpus_boosted_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", thunder_param.cpus_boosted);
 }
 
-static int get_gpu_cores_enabled(void) {
-	int cores;
-	cores = (int) mali_pp_scheduler_get_num_cores_enabled();
-	return cores;
-}
+static ssize_t thunderplug_cpus_boosted_store(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
+{
+	int ret;
+	unsigned int val;
 
-static void enable_gpu_cores(int num) {
-	gpu_enable(num);
+	ret = sscanf(buf, "%u", &val);
+	if (ret != 1 || val < 1 || val > CONFIG_NR_CPUS)
+		return -EINVAL;
+
+	thunder_param.cpus_boosted = val;
+
+	return count;
 }
-#endif
 
 static unsigned int get_curr_load(unsigned int cpu)
 {
@@ -437,52 +362,22 @@ static unsigned int get_curr_load(unsigned int cpu)
 		return 0;
 
 	cur_load = 100 * (wall_time - idle_time) / wall_time;
-	return cur_load;
-}
-
-static void thunderplug_suspend(void)
-{
-	offline_cpus();
-
-	pr_info("%s: suspend\n", THUNDERPLUG);
-}
-
-static void __ref thunderplug_resume(void)
-{
-	cpus_online_all();
-
-	pr_info("%s: resume\n", THUNDERPLUG);
-}
 
-static void __cpuinit tplug_resume_work_fn(struct work_struct *work)
-{
-	thunderplug_resume();
+	return cur_load;
 }
 
 static void __cpuinit tplug_work_fn(struct work_struct *work)
 {
 	int i;
 	unsigned int load[8], avg_load[8];
+	unsigned int nr_cpu_online;
+	u64 time_now;
 
-	switch(endurance_level)
-	{
-	case 0:
-		core_limit = NR_CPUS;
-		break;
-	case 1:
-		core_limit = NR_CPUS / 2;
-		break;
-	case 2:
-		core_limit = NR_CPUS / 4;
-		break;
-	default:
-		core_limit = NR_CPUS;
-		break;
-	}
+	if (!thunder_param.tplug_hp_enabled)
+		return;
 
-	for(i = 0 ; i < core_limit; i++)
-	{
-		if(cpu_online(i))
+	for (i = 0 ; i < thunder_param.max_core_online - 1; i++) {
+		if (cpu_online(i))
 			load[i] = get_curr_load(i);
 		else
 			load[i] = 0;
@@ -491,348 +386,422 @@ static void __cpuinit tplug_work_fn(struct work_struct *work)
 		last_load[i] = load[i];
 	}
 
-	for(i = 0 ; i < core_limit; i++)
-	{
-		if(cpu_online(i) && avg_load[i] > load_threshold && cpu_is_offline(i+1))
-		{
-			if(DEBUG)
-				pr_info("%s : bringing back cpu%d\n", THUNDERPLUG,i);
-			if(!((i+1) > 7)) {
-				last_time[i+1] = ktime_to_ms(ktime_get());
-				cpu_up(i+1);
-			}
-		}
-		else if(cpu_online(i) && avg_load[i] < load_threshold && cpu_online(i+1))
-		{
-			if(DEBUG)
-				pr_info("%s : offlining cpu%d\n", THUNDERPLUG,i);
-			if(!(i+1)==0) {
-				now[i+1] = ktime_to_ms(ktime_get());
-				if((now[i+1] - last_time[i+1]) > MIN_CPU_UP_TIME)
-					cpu_down(i+1);
+	for (i = 0 ; i < thunder_param.max_core_online - 1; i++) {
+		if (cpu_online(i) && avg_load[i] >
				thunder_param.load_threshold &&
				cpu_is_offline(i + 1)) {
+			if (DEBUG)
+				pr_info("%s : bringing back cpu%d\n",
					THUNDERPLUG,i);
+			if (!((i + 1) > 7)) {
+				last_time[i + 1] = ktime_to_ms(ktime_get());
+				cpu_up(i + 1);
 			}
-		}
-	}
-#ifdef CONFIG_USES_MALI_MP2_GPU
-	if(gpu_hotplug_enabled) {
-		if(DEBUG)
-			pr_info("%s: current gpu load %d\n", THUNDERPLUG, get_gpu_load());
-		if(get_gpu_load() > gpu_min_load_threshold) {
-			if(get_gpu_cores_enabled() < 2) {
-				enable_gpu_cores(2);
-				if(DEBUG)
-					pr_info("%s: gpu1 onlined\n", THUNDERPLUG);
+		} else if (cpu_online(i) && avg_load[i] <
				thunder_param.load_threshold &&
				cpu_online(i + 1)) {
+			if (DEBUG)
+				pr_info("%s : offlining cpu%d\n",
					THUNDERPLUG,i);
+			/* count online cores */
+			nr_cpu_online = num_online_cpus();
+
+			if (nr_cpu_online > thunder_param.min_core_online) {
+				/*
+				 * check if core touch boosted
+				 * before cpu_down
+				 */
+				time_now = ktime_to_us(ktime_get());
+				if (nr_cpu_online <=
					thunder_param.cpus_boosted &&
					(time_now -
					thunder_param.last_input <
					thunder_param.boost_lock_dur))
+					goto reschedule;
+
+				if (!(i + 1) == 0) {
+					now[i + 1] = ktime_to_ms(ktime_get());
+					if ((now[i + 1] - last_time[i + 1]) >
						MIN_CPU_UP_TIME)
+						cpu_down(i + 1);
+				}
 			}
 		}
-		else {
-			if(get_gpu_cores_enabled() > 1) {
-				enable_gpu_cores(1);
-				if(DEBUG)
-					pr_info("%s: gpu1 offlined\n", THUNDERPLUG);
-			}
-		}
-	}
-#endif
-
-#ifdef CONFIG_SCHED_HMP
-	if(tplug_hp_style == 1 && !isSuspended)
-#else
-	if(tplug_hp_enabled != 0 && !isSuspended)
-#endif
-		queue_delayed_work_on(0, tplug_wq, &tplug_work,
-			msecs_to_jiffies(sampling_time));
-	else {
-		if(!isSuspended)
-			cpus_online_all();
-		else
-			thunderplug_suspend();
 	}
+reschedule:
+	queue_delayed_work_on(0, tplug_wq, &tplug_work,
		msecs_to_jiffies(thunder_param.sampling_time));
 }
 
-static void tplug_es_suspend_work(struct early_suspend *p) {
-	isSuspended = true;
-	pr_info("thunderplug : suspend called\n");
+#ifdef CONFIG_STATE_NOTIFIER
+static void __ref thunderplug_suspend(void)
+{
+	if (isSuspended == false) {
+		isSuspended = true;
+		cancel_delayed_work_sync(&tplug_work);
+		offline_cpus();
+		pr_info("%s: suspend\n", THUNDERPLUG);
+	}
 }
 
-static void tplug_es_resume_work(struct early_suspend *p) {
-	isSuspended = false;
-#ifdef CONFIG_SCHED_HMP
-	if(tplug_hp_style==1)
-#else
-	if(tplug_hp_enabled)
-#endif
-		queue_delayed_work_on(0, tplug_wq, &tplug_work,
-			msecs_to_jiffies(sampling_time));
-	else
-		queue_delayed_work_on(0, tplug_resume_wq, &tplug_resume_work,
-			msecs_to_jiffies(10));
-	pr_info("thunderplug : resume called\n");
-}
-
-/* Thunderplug load balancer */
-#ifdef CONFIG_SCHED_HMP
-
-static void set_sched_profile(int mode) {
-	switch(mode) {
-	case 1:
-		/* Balanced */
-		sched_set_boost(DISABLED);
-		break;
-	case 2:
-		/* Turbo */
-		sched_set_boost(ENABLED);
-		break;
-	default:
-		pr_info("%s: Invalid mode\n", THUNDERPLUG);
-		break;
+static void __ref thunderplug_resume(void)
+{
+	if (isSuspended == true) {
+		isSuspended = false;
+		cpus_online_all();
+		pr_info("%s: resume\n", THUNDERPLUG);
+		queue_delayed_work_on(0, tplug_wq, &tplug_work,
			msecs_to_jiffies(thunder_param.sampling_time));
 	}
 }
 
-static ssize_t thunderplug_sched_mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static int state_notifier_callback(struct notifier_block *this,
			unsigned long event, void *data)
 {
-	return sprintf(buf, "%d", tplug_sched_mode);
+	if (!thunder_param.hotplug_suspend)
+		return NOTIFY_OK;
+
+	if (!thunder_param.tplug_hp_enabled)
+		return NOTIFY_OK;
+
+	switch (event) {
+	case STATE_NOTIFIER_ACTIVE:
+		thunderplug_resume();
+		break;
+	case STATE_NOTIFIER_SUSPEND:
+		thunderplug_suspend();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
 }
+#endif
 
-static ssize_t __ref thunderplug_sched_mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static void __ref cpu_up_work(struct work_struct *work)
 {
-	int val;
-	sscanf(buf, "%d", &val);
-	set_sched_profile(val);
-	tplug_sched_mode = val;
-	return count;
+	int cpu;
+	unsigned int target = thunder_param.target_cpus;
+
+	for_each_cpu_not(cpu, cpu_online_mask) {
+		if (target <= num_online_cpus())
+			break;
+		if (cpu == 0)
+			continue;
+		cpu_up(cpu);
+	}
 }
 
-static ssize_t thunderplug_hp_style_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static void online_cpu(unsigned int target)
 {
-	return sprintf(buf, "%d", tplug_hp_style);
+	unsigned int online_cpus;
+
+	online_cpus = num_online_cpus();
+
+	/*
+	 * Do not online more CPUs if max_cpus_online reached
+	 * and cancel online task if target already achieved.
+	 */
+	if (target <= online_cpus ||
		online_cpus >= thunder_param.max_core_online)
+		return;
+
+	thunder_param.target_cpus = target;
+	queue_work_on(0, tplug_wq, &thunder_param.up_work);
 }
 
-static ssize_t __ref thunderplug_hp_style_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static void thunder_input_event(struct input_handle *handle, unsigned int type,
+				unsigned int code, int value)
 {
-	int val, last_val;
-	sscanf(buf, "%d", &val);
-	last_val = tplug_hp_style;
-	switch(val)
-	{
-	case HOTPLUG_PERCORE:
-	case HOTPLUG_SCHED:
-		tplug_hp_style = val;
-		break;
-	default:
-		pr_info("%s : invalid choice\n", THUNDERPLUG);
-		break;
-	}
+	u64 time_now;
 
-	if(tplug_hp_style == HOTPLUG_PERCORE && tplug_hp_style != last_val) {
-		pr_info("%s: Switching to Per-core hotplug model\n", THUNDERPLUG);
-		sched_set_boost(DISABLED);
-		queue_delayed_work_on(0, tplug_wq, &tplug_work,
-			msecs_to_jiffies(sampling_time));
-	}
-	else if(tplug_hp_style==2) {
-		pr_info("%s: Switching to sched based hotplug model\n", THUNDERPLUG);
-		set_sched_profile(tplug_sched_mode);
-	}
+	if (isSuspended == true)
+		return;
+	if (!thunder_param.tplug_hp_enabled)
+		return;
 
-	return count;
-}
+	time_now = ktime_to_us(ktime_get());
+	thunder_param.last_input = time_now;
+	if (time_now - last_boost_time < MIN_INPUT_INTERVAL)
+		return;
 
-static struct kobj_attribute thunderplug_hp_style_attribute =
-	__ATTR(hotplug_style,
-		0664,
-		thunderplug_hp_style_show, thunderplug_hp_style_store);
+	if (num_online_cpus() >= thunder_param.cpus_boosted ||
		thunder_param.cpus_boosted <=
		thunder_param.min_core_online)
+		return;
 
-static struct kobj_attribute thunderplug_mode_attribute =
-	__ATTR(sched_mode,
-		0664,
-		thunderplug_sched_mode_show, thunderplug_sched_mode_store);
+	online_cpu(thunder_param.cpus_boosted);
+	last_boost_time = ktime_to_us(ktime_get());
+}
 
-#else
-static ssize_t thunderplug_hp_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static int thunder_input_connect(struct input_handler *handler,
			struct input_dev *dev,
			const struct input_device_id *id)
 {
-	return sprintf(buf, "%d", tplug_hp_enabled);
+	struct input_handle *handle;
+	int err;
+
+	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = handler->name;
+
+	err = input_register_handle(handle);
+	if (err)
+		goto err_register;
+
+	err = input_open_device(handle);
+	if (err)
+		goto err_open;
+
+	return 0;
+err_open:
+	input_unregister_handle(handle);
+err_register:
+	kfree(handle);
+	return err;
 }
 
-static ssize_t __ref thunderplug_hp_enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static void thunder_input_disconnect(struct input_handle *handle)
 {
-	int val;
-	int last_val = tplug_hp_enabled;
-	sscanf(buf, "%d", &val);
-	switch(val)
-	{
-	case 0:
-	case 1:
-		tplug_hp_enabled = val;
-		break;
-	default:
-		pr_info("%s : invalid choice\n", THUNDERPLUG);
-		break;
-	}
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
 
-	if(tplug_hp_enabled == 1 && tplug_hp_enabled != last_val)
-		queue_delayed_work_on(0, tplug_wq, &tplug_work,
-			msecs_to_jiffies(sampling_time));
+static const struct input_device_id thunder_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
+	}, /* multi-touch touchscreen */
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+	}, /* touchpad */
+	{ },
+};
 
-	return count;
-}
+static struct input_handler thunder_input_handler = {
+	.event		= thunder_input_event,
+	.connect	= thunder_input_connect,
+	.disconnect	= thunder_input_disconnect,
+	.name		= THUNDERPLUG,
+	.id_table	= thunder_ids,
+};
 
-#ifdef CONFIG_USES_MALI_MP2_GPU
-static ssize_t thunderplug_gpu_hp_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static ssize_t thunderplug_hp_enabled_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%d", gpu_hotplug_enabled);
+	return sprintf(buf, "%d", thunder_param.tplug_hp_enabled);
 }
 
-static ssize_t __ref thunderplug_gpu_hp_enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
+static ssize_t __ref thunderplug_hp_enabled_store(struct kobject *kobj,
			struct kobj_attribute *attr,
			const char *buf, size_t count)
 {
-	int val;
+	int val, last_val;
+	int ret = 0, cpu;
+
 	sscanf(buf, "%d", &val);
-	switch(val)
-	{
+
+	last_val = thunder_param.tplug_hp_enabled;
+	switch(val) {
 	case 0:
 	case 1:
-		gpu_hotplug_enabled = val;
-		break;
+		thunder_param.tplug_hp_enabled = val;
+		break;
 	default:
 		pr_info("%s : invalid choice\n", THUNDERPLUG);
-		break;
+		break;
+	}
+
+	if (thunder_param.tplug_hp_enabled == 1 && !last_val) {
+		pr_info("%s : Starting hotplug driver\n", THUNDERPLUG);
+		tplug_wq = alloc_workqueue("tplug",
				WQ_HIGHPRI | WQ_FREEZABLE, 0);
+		if (!tplug_wq) {
+			pr_err("%s: Failed to allocate hotplug workqueue\n",
				__FUNCTION__);
+			thunder_param.tplug_hp_enabled = 0;
+			return 0;
+		}
+		ret = input_register_handler(&thunder_input_handler);
+		if (ret) {
+			pr_err("%s: Failed to register input handler: %d\n",
				THUNDERPLUG, ret);
+			return 0;
+		}
+		INIT_DELAYED_WORK(&tplug_work, tplug_work_fn);
+		INIT_WORK(&thunder_param.up_work, cpu_up_work);
+		queue_delayed_work_on(0, tplug_wq, &tplug_work,
			msecs_to_jiffies(thunder_param.sampling_time));
+	} else if (thunder_param.tplug_hp_enabled == 1 && last_val == 1) {
+		pr_info("%s : Already Working\n", THUNDERPLUG);
+	} else if (thunder_param.tplug_hp_enabled == 0 && last_val == 0) {
+		pr_info("%s : Already Offline\n", THUNDERPLUG);
+	} else {
+		if (last_val) {
+			input_unregister_handler(&thunder_input_handler);
+			flush_workqueue(tplug_wq);
+			cancel_work_sync(&thunder_param.up_work);
+			cancel_delayed_work_sync(&tplug_work);
+			destroy_workqueue(tplug_wq);
+
+			/* Put all sibling cores to sleep */
+			for_each_online_cpu(cpu) {
+				if (cpu == 0)
+					continue;
+				cpu_down(cpu);
+			}
+			pr_info("%s : Stopping hotplug driver\n", THUNDERPLUG);
+		}
 	}
 
 	return count;
 }
-#endif
 
 static struct kobj_attribute thunderplug_hp_enabled_attribute =
-	__ATTR(hotplug_enabled,
-		0664,
-		thunderplug_hp_enabled_show, thunderplug_hp_enabled_store);
-
-#ifdef CONFIG_USES_MALI_MP2_GPU
-static struct kobj_attribute thunderplug_gpu_hp_enabled_attribute =
-	__ATTR(gpu_hotplug_enabled,
-		0664,
-		thunderplug_gpu_hp_enabled_show, thunderplug_gpu_hp_enabled_store);
-#endif // MALI_MP2_GPU
-
-#endif //SCHED_HMP
+	__ATTR(hotplug_enabled,
		0644, thunderplug_hp_enabled_show,
		thunderplug_hp_enabled_store);
 
-static ssize_t thunderplug_ver_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+static ssize_t thunderplug_ver_show(struct kobject *kobj,
			struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "ThunderPlug %u.%u", DRIVER_VERSION, DRIVER_SUBVER);
+	return sprintf(buf, "ThunderPlug %u.%u", DRIVER_VERSION,
		DRIVER_SUBVER);
 }
 
 static struct kobj_attribute thunderplug_ver_attribute =
-	__ATTR(version,
-		0444,
-		thunderplug_ver_show, NULL);
+	__ATTR(version,
		0444, thunderplug_ver_show, NULL);
+
+static struct kobj_attribute thunderplug_hotplug_suspend_attribute =
+	__ATTR(hotplug_suspend,
		0644, thunderplug_hotplug_suspend_show,
		thunderplug_hotplug_suspend_store);
 
 static struct kobj_attribute thunderplug_suspend_cpus_attribute =
-	__ATTR(suspend_cpus,
-		0664,
-		thunderplug_suspend_cpus_show, thunderplug_suspend_cpus_store);
+	__ATTR(suspend_cpus,
		0644, thunderplug_suspend_cpus_show,
		thunderplug_suspend_cpus_store);
+
+static struct kobj_attribute thunderplug_max_core_online_attribute =
+	__ATTR(max_core_online,
		0644, thunderplug_max_core_online_show,
		thunderplug_max_core_online_store);
 
-static struct kobj_attribute thunderplug_endurance_attribute =
-	__ATTR(endurance_level,
-		0664,
-		thunderplug_endurance_show, thunderplug_endurance_store);
+static struct kobj_attribute thunderplug_min_core_online_attribute =
+	__ATTR(min_core_online,
		0644, thunderplug_min_core_online_show,
		thunderplug_min_core_online_store);
 
 static struct kobj_attribute thunderplug_sampling_attribute =
-	__ATTR(sampling_rate,
-		0664,
-		thunderplug_sampling_show, thunderplug_sampling_store);
+	__ATTR(sampling_rate,
		0644, thunderplug_sampling_show,
		thunderplug_sampling_store);
 
 static struct kobj_attribute thunderplug_load_attribute =
-	__ATTR(load_threshold,
-		0664,
-		thunderplug_load_show, thunderplug_load_store);
-
-#ifdef CONFIG_USES_MALI_MP2_GPU
-static struct kobj_attribute thunderplug_gpu_load_attribute =
-	__ATTR(gpu_load_threshold,
-		0664,
-		thunderplug_gpu_load_show, thunderplug_gpu_load_store);
-#endif
-
-static struct kobj_attribute thunderplug_tb_enabled_attribute =
-	__ATTR(touch_boost,
-		0664,
-		thunderplug_tb_enabled_show, thunderplug_tb_enabled_store);
-
-static struct attribute *thunderplug_attrs[] =
-	{
-		&thunderplug_ver_attribute.attr,
-		&thunderplug_suspend_cpus_attribute.attr,
-		&thunderplug_endurance_attribute.attr,
-		&thunderplug_sampling_attribute.attr,
-		&thunderplug_load_attribute.attr,
-#ifdef CONFIG_SCHED_HMP
-		&thunderplug_mode_attribute.attr,
-		&thunderplug_hp_style_attribute.attr,
-#else
-		&thunderplug_hp_enabled_attribute.attr,
-#endif
-		&thunderplug_tb_enabled_attribute.attr,
-#ifdef CONFIG_USES_MALI_MP2_GPU
-		&thunderplug_gpu_load_attribute.attr,
-		&thunderplug_gpu_hp_enabled_attribute.attr,
-#endif
-		NULL,
-	};
+	__ATTR(load_threshold,
		0644, thunderplug_load_show,
		thunderplug_load_store);
+
+static struct kobj_attribute thunderplug_boost_lock_duration_attribute =
+	__ATTR(boost_lock_duration,
		0644, thunderplug_boost_lock_duration_show,
		thunderplug_boost_lock_duration_store);
+
+static struct kobj_attribute thunderplug_cpus_boosted_attribute =
+	__ATTR(cpus_boosted,
		0644, thunderplug_cpus_boosted_show,
		thunderplug_cpus_boosted_store);
+
+static struct attribute *thunderplug_attrs[] = {
+	&thunderplug_ver_attribute.attr,
+	&thunderplug_hotplug_suspend_attribute.attr,
+	&thunderplug_suspend_cpus_attribute.attr,
+	&thunderplug_max_core_online_attribute.attr,
+	&thunderplug_min_core_online_attribute.attr,
+	&thunderplug_sampling_attribute.attr,
+	&thunderplug_load_attribute.attr,
+	&thunderplug_hp_enabled_attribute.attr,
+	&thunderplug_boost_lock_duration_attribute.attr,
+	&thunderplug_cpus_boosted_attribute.attr,
+	NULL,
+};
 
 static struct attribute_group thunderplug_attr_group =
-	{
-		.attrs = thunderplug_attrs,
-	};
+{
+	.attrs = thunderplug_attrs,
+};
 
 static struct kobject *thunderplug_kobj;
 
 static int __init thunderplug_init(void)
 {
-	int ret = 0;
-	int sysfs_result;
-	printk(KERN_DEBUG "[%s]\n",__func__);
-
-	register_early_suspend(&tplug_early_suspend_handler);
-
-	thunderplug_kobj = kobject_create_and_add("thunderplug", kernel_kobj);
+	int ret = 0;
+	int sysfs_result;
 
-	if (!thunderplug_kobj) {
-		pr_err("%s Interface create failed!\n",
-			__FUNCTION__);
-		return -ENOMEM;
-	}
+	printk(KERN_DEBUG "[%s]\n",__func__);
 
-	sysfs_result = sysfs_create_group(thunderplug_kobj, &thunderplug_attr_group);
-
-	if (sysfs_result) {
-		pr_info("%s sysfs create failed!\n", __FUNCTION__);
-		kobject_put(thunderplug_kobj);
-	}
+	thunderplug_kobj = kobject_create_and_add("thunderplug", kernel_kobj);
+	if (!thunderplug_kobj) {
+		pr_err("%s Interface create failed!\n",
			__FUNCTION__);
+		return -ENOMEM;
+	}
 
-	pr_info("%s : registering input boost", THUNDERPLUG);
-	ret = input_register_handler(&tplug_input_handler);
-	if (ret) {
-		pr_err("%s: Failed to register input handler: %d\n",
-			THUNDERPLUG, ret);
-	}
+	sysfs_result = sysfs_create_group(thunderplug_kobj,
			&thunderplug_attr_group);
+	if (sysfs_result) {
+		pr_info("%s sysfs create failed!\n", __FUNCTION__);
+		kobject_put(thunderplug_kobj);
+	}
 
+	if (thunder_param.tplug_hp_enabled) {
 		tplug_wq = alloc_workqueue("tplug",
-				WQ_HIGHPRI | WQ_UNBOUND, 1);
+				WQ_HIGHPRI | WQ_FREEZABLE, 0);
+		if (!tplug_wq) {
+			pr_err("%s: Failed to allocate hotplug workqueue\n",
				__FUNCTION__);
+			ret = -ENOMEM;
+			goto err_out;
+		}
+		INIT_DELAYED_WORK(&tplug_work, tplug_work_fn);
+		queue_delayed_work_on(0, tplug_wq, &tplug_work,
			msecs_to_jiffies(STARTDELAY));
+	}
 
-	tplug_resume_wq = alloc_workqueue("tplug_resume",
-				WQ_HIGHPRI | WQ_UNBOUND, 1);
+#ifdef CONFIG_STATE_NOTIFIER
+	thunder_param.thunder_state_notif.notifier_call =
		state_notifier_callback;
+	if (state_register_client(&thunder_param.thunder_state_notif)) {
+		pr_err("%s: Failed to register State notifier callback\n",
			__func__);
+		goto err_out;
+	}
+#endif
 
-	tplug_boost_wq = alloc_workqueue("tplug_boost",
-				WQ_HIGHPRI | WQ_UNBOUND, 1);
+	pr_info("%s: init\n", THUNDERPLUG);
 
-	INIT_DELAYED_WORK(&tplug_work, tplug_work_fn);
-	INIT_DELAYED_WORK(&tplug_resume_work, tplug_resume_work_fn);
-	INIT_DELAYED_WORK(&tplug_boost, tplug_boost_work_fn);
-	queue_delayed_work_on(0, tplug_wq, &tplug_work,
-		msecs_to_jiffies(10));
+	return ret;
 
-	pr_info("%s: init\n", THUNDERPLUG);
+err_out:
+	thunder_param.tplug_hp_enabled = 0;
+	destroy_workqueue(tplug_wq);
 
-	return ret;
+	return ret;
 }
 
 MODULE_LICENSE("GPL and additional rights");