
/arch/powerpc/platforms/pseries/ras.c

https://github.com/othane/linux
/*
 * Copyright (C) 2001 Dave Engebretsen IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
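
/*
 * RAS (Reliability, Availability, Serviceability) support for pSeries:
 * handlers for the EPOW and internal-error event interrupts, plus machine
 * check and system reset exceptions reported through FWNMI.
 */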
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/fs.h>
#include <linux/reboot.h>

#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/firmware.h>

#include "pseries.h"
static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);

static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_PER_CPU(__u64, mce_data_buf);

static int ras_check_exception_token;

#define EPOW_SENSOR_TOKEN 9
#define EPOW_SENSOR_INDEX 0

/* EPOW events counter variable */
static int num_epow_events;

static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
/*
 * Initialize handlers for the set of interrupts caused by hardware errors
 * and power system events.
 */
static int __init init_ras_IRQ(void)
{
        struct device_node *np;

        ras_check_exception_token = rtas_token("check-exception");

        /* Internal Errors */
        np = of_find_node_by_path("/event-sources/internal-errors");
        if (np != NULL) {
                request_event_sources_irqs(np, ras_error_interrupt,
                                           "RAS_ERROR");
                of_node_put(np);
        }

        /* EPOW Events */
        np = of_find_node_by_path("/event-sources/epow-events");
        if (np != NULL) {
                request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW");
                of_node_put(np);
        }

        return 0;
}
machine_subsys_initcall(pseries, init_ras_IRQ);
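
/* event_modifier values used with the EPOW_SYSTEM_SHUTDOWN action code */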
#define EPOW_SHUTDOWN_NORMAL 1
#define EPOW_SHUTDOWN_ON_UPS 2
#define EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS 3
#define EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH 4

static void handle_system_shutdown(char event_modifier)
{
        switch (event_modifier) {
        case EPOW_SHUTDOWN_NORMAL:
                pr_emerg("Power off requested\n");
                orderly_poweroff(true);
                break;

        case EPOW_SHUTDOWN_ON_UPS:
                pr_emerg("Loss of system power detected. System is running on"
                         " UPS/battery. Check RTAS error log for details\n");
                orderly_poweroff(true);
                break;

        case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
                pr_emerg("Loss of system critical functions detected. Check"
                         " RTAS error log for details\n");
                orderly_poweroff(true);
                break;

        case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH:
                pr_emerg("High ambient temperature detected. Check RTAS"
                         " error log for details\n");
                orderly_poweroff(true);
                break;

        default:
                pr_err("Unknown power/cooling shutdown event (modifier = %d)\n",
                       event_modifier);
        }
}
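
/* Layout of the EPOW section payload in the RTAS error log */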
struct epow_errorlog {
        unsigned char sensor_value;
        unsigned char event_modifier;
        unsigned char extended_modifier;
        unsigned char reserved;
        unsigned char platform_reason;
};
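
/* EPOW action codes, carried in the low four bits of sensor_value */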
#define EPOW_RESET 0
#define EPOW_WARN_COOLING 1
#define EPOW_WARN_POWER 2
#define EPOW_SYSTEM_SHUTDOWN 3
#define EPOW_SYSTEM_HALT 4
#define EPOW_MAIN_ENCLOSURE 5
#define EPOW_POWER_OFF 7
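
/*
 * Decode the EPOW section of an RTAS error log and take the action the
 * event calls for, from logging a warning up to powering the system off.
 */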
static void rtas_parse_epow_errlog(struct rtas_error_log *log)
{
        struct pseries_errorlog *pseries_log;
        struct epow_errorlog *epow_log;
        char action_code;
        char modifier;

        pseries_log = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_EPOW);
        if (pseries_log == NULL)
                return;

        epow_log = (struct epow_errorlog *)pseries_log->data;
        action_code = epow_log->sensor_value & 0xF;     /* bottom 4 bits */
        modifier = epow_log->event_modifier & 0xF;      /* bottom 4 bits */

        switch (action_code) {
        case EPOW_RESET:
                if (num_epow_events) {
                        pr_info("Non-critical power/cooling issue cleared\n");
                        num_epow_events--;
                }
                break;

        case EPOW_WARN_COOLING:
                pr_info("Non-critical cooling issue detected. Check RTAS error"
                        " log for details\n");
                break;

        case EPOW_WARN_POWER:
                pr_info("Non-critical power issue detected. Check RTAS error"
                        " log for details\n");
                break;

        case EPOW_SYSTEM_SHUTDOWN:
                handle_system_shutdown(epow_log->event_modifier);
                break;

        case EPOW_SYSTEM_HALT:
                pr_emerg("Critical power/cooling issue detected. Check RTAS"
                         " error log for details. Powering off.\n");
                orderly_poweroff(true);
                break;

        case EPOW_MAIN_ENCLOSURE:
        case EPOW_POWER_OFF:
                pr_emerg("System about to lose power. Check RTAS error log"
                         " for details. Powering off immediately.\n");
                emergency_sync();
                kernel_power_off();
                break;

        default:
                pr_err("Unknown power/cooling event (action code = %d)\n",
                       action_code);
        }

        /* Increment epow events counter variable */
        if (action_code != EPOW_RESET)
                num_epow_events++;
}
/* Handle environmental and power warning (EPOW) interrupts. */
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
{
        int status;
        int state;
        int critical;

        status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
                                      &state);
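
        /*
         * Sensor states above 3 indicate the more serious EPOW events, so
         * the check-exception call below is flagged as time critical.
         */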
        if (state > 3)
                critical = 1;   /* Time Critical */
        else
                critical = 0;

        spin_lock(&ras_log_buf_lock);

        status = rtas_call(ras_check_exception_token, 6, 1, NULL,
                           RTAS_VECTOR_EXTERNAL_INTERRUPT,
                           virq_to_hw(irq),
                           RTAS_EPOW_WARNING,
                           critical, __pa(&ras_log_buf),
                           rtas_get_error_log_max());

        log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);

        rtas_parse_epow_errlog((struct rtas_error_log *)ras_log_buf);

        spin_unlock(&ras_log_buf_lock);
        return IRQ_HANDLED;
}
/*
 * Handle hardware error interrupts.
 *
 * RTAS check-exception is called to collect data on the exception. If
 * the error is deemed recoverable, we log a warning and return.
 * For nonrecoverable errors, an error is logged and we stop all processing
 * as quickly as possible in order to prevent propagation of the failure.
 */
static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
{
        struct rtas_error_log *rtas_elog;
        int status;
        int fatal;

        spin_lock(&ras_log_buf_lock);

        status = rtas_call(ras_check_exception_token, 6, 1, NULL,
                           RTAS_VECTOR_EXTERNAL_INTERRUPT,
                           virq_to_hw(irq),
                           RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
                           __pa(&ras_log_buf),
                           rtas_get_error_log_max());

        rtas_elog = (struct rtas_error_log *)ras_log_buf;
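
        /*
         * A successful check-exception call that reports a severity of
         * ERROR_SYNC or worse is treated as fatal; anything else is logged
         * as a recoverable error.
         */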
        if (status == 0 &&
            rtas_error_severity(rtas_elog) >= RTAS_SEVERITY_ERROR_SYNC)
                fatal = 1;
        else
                fatal = 0;

        /* format and print the extended information */
        log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);

        if (fatal) {
                pr_emerg("Fatal hardware error detected. Check RTAS error"
                         " log for details. Powering off immediately\n");
                emergency_sync();
                kernel_power_off();
        } else {
                pr_err("Recoverable hardware error detected\n");
        }

        spin_unlock(&ras_log_buf_lock);
        return IRQ_HANDLED;
}
/*
 * Some versions of FWNMI place the buffer inside the 4kB page starting at
 * 0x7000. Other versions place it inside the rtas buffer. We check both.
 */
#define VALID_FWNMI_BUFFER(A) \
        ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
        (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
/*
 * Get the error information for errors coming through the
 * FWNMI vectors. The pt_regs' r3 will be updated to reflect
 * the actual r3 if possible, and a ptr to the error log entry
 * will be returned if found.
 *
 * If the RTAS error is not of the extended type, then we put it in a per
 * cpu 64bit buffer. If it is the extended type we use global_mce_data_buf.
 *
 * The global_mce_data_buf has no locks or protection around it; if a
 * second machine check comes in, or a system reset is done, before we
 * have logged the error, then we will get corruption in the error log.
 * This is preferable to holding off on calling ibm,nmi-interlock, which
 * would result in us checkstopping if a second machine check did come in.
 */
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
        unsigned long *savep;
        struct rtas_error_log *h, *errhdr = NULL;

        /* Mask top two bits */
        regs->gpr[3] &= ~(0x3UL << 62);

        if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
                printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
                return NULL;
        }

        savep = __va(regs->gpr[3]);
        regs->gpr[3] = savep[0];        /* restore original r3 */

        /* If it isn't an extended log we can use the per cpu 64bit buffer */
        h = (struct rtas_error_log *)&savep[1];
        if (!rtas_error_extended(h)) {
                memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
                errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
        } else {
                int len, error_log_length;

                /*
                 * Clamp the copy to the size of global_mce_data_buf so an
                 * oversized extended log cannot overflow it.
                 */
                error_log_length = 8 + rtas_error_extended_log_length(h);
                len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
                memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
                memcpy(global_mce_data_buf, h, len);
                errhdr = (struct rtas_error_log *)global_mce_data_buf;
        }

        return errhdr;
}
/* Call this when done with the data returned by FWNMI_get_errinfo.
 * It will release the saved data area for other CPUs in the
 * partition to receive FWNMI errors.
 */
static void fwnmi_release_errinfo(void)
{
        int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
        if (ret != 0)
                printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret);
}
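
/*
 * System reset exceptions are also delivered through FWNMI when it is
 * active. Fetch and then release the error information so other CPUs in
 * the partition can receive FWNMI errors, and return 0 so the caller
 * still performs the reset.
 */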
int pSeries_system_reset_exception(struct pt_regs *regs)
{
        if (fwnmi_active) {
                struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
                if (errhdr) {
                        /* XXX Should look at FWNMI information */
                }
                fwnmi_release_errinfo();
        }
        return 0; /* need to perform reset */
}
/*
 * See if we can recover from a machine check exception.
 * This is only called on power4 (or above) and only via
 * the Firmware Non-Maskable Interrupts (fwnmi) handler
 * which provides the error analysis for us.
 *
 * Return 1 if corrected (or delivered a signal).
 * Return 0 if there is nothing we can do.
 */
static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
{
        int recovered = 0;
        int disposition = rtas_error_disposition(err);

        if (!(regs->msr & MSR_RI)) {
                /* If MSR_RI isn't set, we cannot recover */
                recovered = 0;
        } else if (disposition == RTAS_DISP_FULLY_RECOVERED) {
                /* Platform corrected itself */
                recovered = 1;
        } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
                /* Platform corrected itself but could be degraded */
                printk(KERN_ERR "MCE: limited recovery, system may "
                       "be degraded\n");
                recovered = 1;
        } else if (user_mode(regs) && !is_global_init(current) &&
                   rtas_error_severity(err) == RTAS_SEVERITY_ERROR_SYNC) {
                /*
                 * If we received a synchronous error when in userspace
                 * kill the task. Firmware may report details of the fail
                 * asynchronously, so we can't rely on the target and type
                 * fields being valid here.
                 */
                printk(KERN_ERR "MCE: uncorrectable error, killing task "
                       "%s:%d\n", current->comm, current->pid);
                _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
                recovered = 1;
        }

        log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);

        return recovered;
}
/*
 * Handle a machine check.
 *
 * Note that on Power 4 and beyond Firmware Non-Maskable Interrupts (fwnmi)
 * should be present. If so the handler which called us tells us if the
 * error was recovered (never true if RI=0).
 *
 * On hardware prior to Power 4 these exceptions were asynchronous which
 * means we can't tell exactly where it occurred and so we can't recover.
 */
int pSeries_machine_check_exception(struct pt_regs *regs)
{
        struct rtas_error_log *errp;

        if (fwnmi_active) {
                errp = fwnmi_get_errinfo(regs);
                fwnmi_release_errinfo();
                if (errp && recover_mce(regs, errp))
                        return 1;
        }

        return 0;
}