
/drivers/soc/qcom/mako_hotplug.c

https://gitlab.com/Hache/Singularity-DeYuS
/*
 * Copyright (c) 2013-2015, Francisco Franco <franciscofranco.1990@gmail.com>.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Simple no bullshit hot[un]plug driver for SMP
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/jiffies.h>

#define MAKO_HOTPLUG "mako_hotplug"

#define DEFAULT_HOTPLUG_ENABLED 0
#define DEFAULT_LOAD_THRESHOLD 80
#define DEFAULT_HIGH_LOAD_COUNTER 10
#define DEFAULT_MAX_LOAD_COUNTER 20
#define DEFAULT_CPUFREQ_UNPLUG_LIMIT 1800000
#define DEFAULT_MIN_TIME_CPU_ONLINE 1
#define DEFAULT_TIMER 1

#define MIN_CPU_UP_US (200 * USEC_PER_MSEC)
#define NUM_POSSIBLE_CPUS num_possible_cpus()
#define HIGH_LOAD (95)
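
/*
 * Timing sketch (illustrative, assumes CONFIG_HZ=100): the sampling work
 * below is requeued with msecs_to_jiffies(timer * HZ), i.e. timer * HZ is
 * treated as milliseconds. With timer = 1 that is one sample every 100 ms
 * (~10 samples per second); timer = 5 would sample once every 500 ms. On
 * kernels built with a different HZ the period scales accordingly.
 */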
struct cpu_stats {
	unsigned int counter;
	u64 timestamp;
} hotplug_stats = {
	.counter = 0,
	.timestamp = 0,
};
struct hotplug_tunables {
	/*
	 * whether mako_hotplug is enabled or not
	 */
	unsigned int enabled;

	/*
	 * system load threshold used to decide when to online or offline
	 * cores, from 0 to 100
	 */
	unsigned int load_threshold;

	/*
	 * counter to filter online/offline calls. The load needs to be above
	 * load_threshold for high_load_counter consecutive samples for the
	 * cores to go online, otherwise they stay offline
	 */
	unsigned int high_load_counter;

	/*
	 * max value the sample counter is allowed to reach. The higher the
	 * value the longer it takes the driver to offline cores after a
	 * period of high and continuous load
	 */
	unsigned int max_load_counter;

	/*
	 * if the current cpu freq is above this limit, don't offline the
	 * cores for a couple of extra samples
	 */
	unsigned int cpufreq_unplug_limit;

	/*
	 * minimum time a core stays online to avoid too many online/offline
	 * calls, in units of 200 ms (MIN_CPU_UP_US)
	 */
	unsigned int min_time_cpu_online;

	/*
	 * sample period multiplier. The delay between samples is timer * HZ
	 * milliseconds, so the default of 1 equals roughly 10 samples per
	 * second at HZ=100. The higher the value the fewer samples per
	 * second the driver takes
	 */
	unsigned int timer;
} tunables;
static struct workqueue_struct *wq;
static struct delayed_work decide_hotplug;

static inline void cpus_online_work(void)
{
	unsigned int cpu;

	for (cpu = 2; cpu < 4; cpu++) {
		if (cpu_is_offline(cpu))
			cpu_up(cpu);
	}

	pr_info("%s: all cpus were onlined\n", MAKO_HOTPLUG);
}

static inline void cpus_offline_work(void)
{
	unsigned int cpu;

	for (cpu = 3; cpu > 1; cpu--) {
		if (cpu_online(cpu))
			cpu_down(cpu);
	}

	pr_info("%s: all cpus were offlined\n", MAKO_HOTPLUG);
}
static inline bool cpus_cpufreq_work(void)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
	struct hotplug_tunables *t = &tunables;
	unsigned int current_freq = 0;
	unsigned int cpu;

	if (policy) {
		unsigned int min = policy->min;

		/* cpufreq_cpu_get() took a reference, drop it again */
		cpufreq_cpu_put(policy);

		if (min >= t->cpufreq_unplug_limit)
			return false;
	}

	/* average the current frequency of cpu2 and cpu3 */
	for (cpu = 2; cpu < 4; cpu++)
		current_freq += cpufreq_quick_get(cpu);

	current_freq >>= 1;

	return current_freq >= t->cpufreq_unplug_limit;
}
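
/*
 * Illustrative numbers for the check above: with cpufreq_unplug_limit at
 * the default 1800000 kHz, cpu2 at 1500000 and cpu3 at 2100000 average to
 * (1500000 + 2100000) / 2 = 1800000 >= limit, so offlining is postponed;
 * 1200000 and 1600000 average to 1400000 and the offline proceeds.
 */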
static void cpu_revive(unsigned int load)
{
	struct hotplug_tunables *t = &tunables;
	unsigned int counter_hysteria = 3;

	if (unlikely(nr_running() >= 10))
		goto online_all;

	/*
	 * we should care about a very high load spike and online the
	 * cpus in question. If the device is under stress for at least 300ms
	 * online all cores, no questions asked. 300ms here equals three samples
	 */
	if (load >= HIGH_LOAD && hotplug_stats.counter >= counter_hysteria)
		goto online_all;
	else if (hotplug_stats.counter < t->high_load_counter)
		return;

online_all:
	cpus_online_work();
	hotplug_stats.timestamp = ktime_to_us(ktime_get());
}
static void cpu_smash(unsigned int load)
{
	struct hotplug_tunables *t = &tunables;
	u64 extra_time = MIN_CPU_UP_US;

	if (hotplug_stats.counter >= t->high_load_counter)
		return;

	/*
	 * offline the cpu only if its freq is lower than
	 * cpufreq_unplug_limit. Else update the timestamp to now and
	 * postpone the cpu offline process for at least a few more samples
	 */
	if (cpus_cpufreq_work())
		hotplug_stats.timestamp = ktime_to_us(ktime_get());

	/*
	 * Let's not unplug this cpu unless it's been online for longer than
	 * min_time_cpu_online * 200ms (MIN_CPU_UP_US) to avoid consecutive
	 * ups and downs when the load is varying close to the threshold
	 * point.
	 */
	if (t->min_time_cpu_online > 1)
		extra_time = t->min_time_cpu_online * MIN_CPU_UP_US;

	if (ktime_to_us(ktime_get()) < hotplug_stats.timestamp + extra_time)
		return;

	/*
	 * If current load is higher than our threshold we can skip offlining
	 * on this sample
	 */
	if (load >= t->load_threshold)
		return;

	cpus_offline_work();

	/*
	 * reset the counter yo
	 */
	hotplug_stats.counter = 0;
}
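
/*
 * Example of the guard above (defaults, ~100 ms samples): a core onlined
 * at t=0 records hotplug_stats.timestamp = 0 and cannot go back offline
 * before t = 200 ms (MIN_CPU_UP_US); with min_time_cpu_online = 5 the
 * window stretches to 5 * 200 ms = 1 s.
 */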
static void __ref decide_hotplug_func(struct work_struct *work)
{
	struct hotplug_tunables *t = &tunables;
	unsigned int cur_load = 0;
	unsigned int cpu;
	unsigned int online_cpus = num_online_cpus();

	if (!t->enabled)
		goto reschedule;

	/*
	 * reschedule early when the user doesn't want more than 2 cores online
	 */
	if (unlikely(t->load_threshold == 100 && online_cpus == 2))
		goto reschedule;

	/*
	 * reschedule early when users desire to run with all cores online
	 */
	if (unlikely(!t->load_threshold &&
			online_cpus == NUM_POSSIBLE_CPUS)) {
		goto reschedule;
	}

	/* average the load of cpu0 and cpu1 */
	for (cpu = 0; cpu < 2; cpu++)
		cur_load += cpufreq_quick_get_util(cpu);

	cur_load >>= 1;

	if (cur_load >= t->load_threshold) {
		if (hotplug_stats.counter < t->max_load_counter)
			++hotplug_stats.counter;

		if (online_cpus <= 2)
			cpu_revive(cur_load);
	} else {
		if (hotplug_stats.counter)
			--hotplug_stats.counter;

		if (online_cpus > 2)
			cpu_smash(cur_load);
	}

	queue_delayed_work(wq, &decide_hotplug,
			msecs_to_jiffies(t->timer * HZ));

	return;

reschedule:
	/*
	 * This reschedule is specially for cases where the user wants to
	 * run either dual-core or quad-core permanently - for that reason
	 * we don't need to run this work every 100ms, but rather just
	 * once every 2 seconds
	 */
	queue_delayed_work(wq, &decide_hotplug, HZ * 2);
}
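
/*
 * Worked example of the hysteresis above (defaults: load_threshold = 80,
 * high_load_counter = 10, ~100 ms samples): cpu2/cpu3 come online only
 * after the cpu0/cpu1 average load has been >= 80 for 10 consecutive
 * samples (~1 s), or sooner if a spike of load >= 95 (HIGH_LOAD) holds
 * for 3 samples; they go back offline once the counter decays below 10
 * and the minimum-online-time guard in cpu_smash() has expired.
 */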
/*
 * Sysfs get/set entries start
 */
static ssize_t make_hotplug_enabled_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hotplug_tunables *t = &tunables;

	return snprintf(buf, PAGE_SIZE, "%u\n", t->enabled);
}

static ssize_t make_hotplug_enabled_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct hotplug_tunables *t = &tunables;
	int ret;
	unsigned long new_val;

	ret = kstrtoul(buf, 0, &new_val);
	if (ret < 0)
		return ret;

	t->enabled = new_val > 1 ? 1 : new_val;

	return size;
}
static ssize_t load_threshold_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hotplug_tunables *t = &tunables;

	return snprintf(buf, PAGE_SIZE, "%u\n", t->load_threshold);
}

static ssize_t load_threshold_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct hotplug_tunables *t = &tunables;
	int ret;
	unsigned long new_val;

	ret = kstrtoul(buf, 0, &new_val);
	if (ret < 0)
		return ret;

	t->load_threshold = new_val > 100 ? 100 : new_val;

	return size;
}

static ssize_t high_load_counter_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hotplug_tunables *t = &tunables;

	return snprintf(buf, PAGE_SIZE, "%u\n", t->high_load_counter);
}

static ssize_t high_load_counter_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct hotplug_tunables *t = &tunables;
	int ret;
	unsigned long new_val;

	ret = kstrtoul(buf, 0, &new_val);
	if (ret < 0)
		return ret;

	t->high_load_counter = new_val > 50 ? 50 : new_val;

	return size;
}

static ssize_t max_load_counter_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hotplug_tunables *t = &tunables;

	return snprintf(buf, PAGE_SIZE, "%u\n", t->max_load_counter);
}

static ssize_t max_load_counter_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct hotplug_tunables *t = &tunables;
	int ret;
	unsigned long new_val;

	ret = kstrtoul(buf, 0, &new_val);
	if (ret < 0)
		return ret;

	t->max_load_counter = new_val > 50 ? 50 : new_val;

	return size;
}
static ssize_t cpufreq_unplug_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hotplug_tunables *t = &tunables;

	return snprintf(buf, PAGE_SIZE, "%u\n", t->cpufreq_unplug_limit);
}

static ssize_t cpufreq_unplug_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct hotplug_tunables *t = &tunables;
	int ret;
	unsigned long new_val;

	ret = kstrtoul(buf, 0, &new_val);
	if (ret < 0)
		return ret;

	/*
	 * new_val is an unsigned long, so comparing it against ULONG_MAX
	 * was dead code; clamp to UINT_MAX to avoid silent truncation when
	 * assigning to the unsigned int tunable
	 */
	t->cpufreq_unplug_limit = new_val > UINT_MAX ? UINT_MAX : new_val;

	return size;
}
static ssize_t min_time_cpu_online_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hotplug_tunables *t = &tunables;

	return snprintf(buf, PAGE_SIZE, "%u\n", t->min_time_cpu_online);
}

static ssize_t min_time_cpu_online_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct hotplug_tunables *t = &tunables;
	int ret;
	unsigned long new_val;

	ret = kstrtoul(buf, 0, &new_val);
	if (ret < 0)
		return ret;

	t->min_time_cpu_online = new_val > 10 ? 10 : new_val;

	return size;
}

static ssize_t timer_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct hotplug_tunables *t = &tunables;

	return snprintf(buf, PAGE_SIZE, "%u\n", t->timer);
}

static ssize_t timer_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	struct hotplug_tunables *t = &tunables;
	int ret;
	unsigned long new_val;

	ret = kstrtoul(buf, 0, &new_val);
	if (ret < 0)
		return ret;

	t->timer = new_val > 100 ? 100 : new_val;

	return size;
}
static DEVICE_ATTR(enabled, 0664, make_hotplug_enabled_show,
		make_hotplug_enabled_store);
static DEVICE_ATTR(load_threshold, 0664, load_threshold_show,
		load_threshold_store);
static DEVICE_ATTR(high_load_counter, 0664, high_load_counter_show,
		high_load_counter_store);
static DEVICE_ATTR(max_load_counter, 0664, max_load_counter_show,
		max_load_counter_store);
static DEVICE_ATTR(cpufreq_unplug_limit, 0664, cpufreq_unplug_limit_show,
		cpufreq_unplug_limit_store);
static DEVICE_ATTR(min_time_cpu_online, 0664, min_time_cpu_online_show,
		min_time_cpu_online_store);
static DEVICE_ATTR(timer, 0664, timer_show, timer_store);

static struct attribute *mako_hotplug_control_attributes[] = {
	&dev_attr_enabled.attr,
	&dev_attr_load_threshold.attr,
	&dev_attr_high_load_counter.attr,
	&dev_attr_max_load_counter.attr,
	&dev_attr_cpufreq_unplug_limit.attr,
	&dev_attr_min_time_cpu_online.attr,
	&dev_attr_timer.attr,
	NULL
};

static struct attribute_group mako_hotplug_control_group = {
	.attrs = mako_hotplug_control_attributes,
};

static struct miscdevice mako_hotplug_control_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "mako_hotplug_control",
};
/*
 * Sysfs get/set entries end
 */
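
/*
 * Userspace usage sketch (the path assumes the usual misc class layout,
 * /sys/class/misc/<name>/ - adjust if your tree places it elsewhere):
 *
 *   echo 1 > /sys/class/misc/mako_hotplug_control/enabled
 *   echo 70 > /sys/class/misc/mako_hotplug_control/load_threshold
 *   cat /sys/class/misc/mako_hotplug_control/timer
 */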
static int mako_hotplug_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct hotplug_tunables *t = &tunables;

	wq = alloc_workqueue("mako_hotplug_workqueue",
			WQ_FREEZABLE | WQ_UNBOUND, 1);
	if (!wq) {
		ret = -ENOMEM;
		goto err;
	}

	t->enabled = DEFAULT_HOTPLUG_ENABLED;
	t->load_threshold = DEFAULT_LOAD_THRESHOLD;
	t->high_load_counter = DEFAULT_HIGH_LOAD_COUNTER;
	t->max_load_counter = DEFAULT_MAX_LOAD_COUNTER;
	t->cpufreq_unplug_limit = DEFAULT_CPUFREQ_UNPLUG_LIMIT;
	t->min_time_cpu_online = DEFAULT_MIN_TIME_CPU_ONLINE;
	t->timer = DEFAULT_TIMER;

	ret = misc_register(&mako_hotplug_control_device);
	if (ret) {
		ret = -EINVAL;
		goto err_wq;
	}

	ret = sysfs_create_group(&mako_hotplug_control_device.this_device->kobj,
			&mako_hotplug_control_group);
	if (ret) {
		ret = -EINVAL;
		goto err_misc;
	}

	INIT_DELAYED_WORK(&decide_hotplug, decide_hotplug_func);
	queue_delayed_work(wq, &decide_hotplug, HZ * 30);

	return 0;

	/* unwind in reverse order so a failed probe leaves nothing behind */
err_misc:
	misc_deregister(&mako_hotplug_control_device);
err_wq:
	destroy_workqueue(wq);
err:
	return ret;
}
static struct platform_device mako_hotplug_device = {
	.name = MAKO_HOTPLUG,
	.id = -1,
};

static int mako_hotplug_remove(struct platform_device *pdev)
{
	/* stop the self-requeueing sampling work before the wq goes away */
	cancel_delayed_work_sync(&decide_hotplug);
	destroy_workqueue(wq);

	sysfs_remove_group(&mako_hotplug_control_device.this_device->kobj,
			&mako_hotplug_control_group);
	misc_deregister(&mako_hotplug_control_device);

	return 0;
}

static struct platform_driver mako_hotplug_driver = {
	.probe = mako_hotplug_probe,
	.remove = mako_hotplug_remove,
	.driver = {
		.name = MAKO_HOTPLUG,
		.owner = THIS_MODULE,
	},
};
static int __init mako_hotplug_init(void)
{
	int ret;

	ret = platform_driver_register(&mako_hotplug_driver);
	if (ret)
		return ret;

	ret = platform_device_register(&mako_hotplug_device);
	if (ret) {
		/* don't leave the driver registered if the device failed */
		platform_driver_unregister(&mako_hotplug_driver);
		return ret;
	}

	pr_info("%s: init\n", MAKO_HOTPLUG);

	return 0;
}

static void __exit mako_hotplug_exit(void)
{
	platform_device_unregister(&mako_hotplug_device);
	platform_driver_unregister(&mako_hotplug_driver);
}

late_initcall(mako_hotplug_init);
module_exit(mako_hotplug_exit);