
/drivers/s390/char/monreader.c

https://github.com/kipill-nn/Kernel-for-Mega
/*
 * drivers/s390/char/monreader.c
 *
 * Character device driver for reading z/VM *MONITOR service records.
 *
 * Copyright IBM Corp. 2004, 2008
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "monreader"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <net/iucv/iucv.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/extmem.h>


#define MON_COLLECT_SAMPLE 0x80
#define MON_COLLECT_EVENT  0x40
#define MON_SERVICE "*MONITOR"
#define MON_IN_USE 0x01
#define MON_MSGLIM 255

static char mon_dcss_name[9] = "MONDCSS\0";

struct mon_msg {
        u32 pos;
        u32 mca_offset;
        struct iucv_message msg;
        char msglim_reached;
        char replied_msglim;
};
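/*
 * Per-connection state: one IUCV path to *MONITOR and a circular array of
 * MON_MSGLIM message slots.  write_index is advanced by the IUCV
 * message_pending callback, read_index as messages are read and replied to.
 */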
struct mon_private {
        struct iucv_path *path;
        struct mon_msg *msg_array[MON_MSGLIM];
        unsigned int write_index;
        unsigned int read_index;
        atomic_t msglim_count;
        atomic_t read_ready;
        atomic_t iucv_connected;
        atomic_t iucv_severed;
};
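/*
 * Module-global state: the MON_IN_USE bit enforces a single reader, the
 * DCSS start/end addresses bound the monitor data area, and the two wait
 * queues are used for connection setup and for blocking reads.
 */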
static unsigned long mon_in_use = 0;

static unsigned long mon_dcss_start;
static unsigned long mon_dcss_end;

static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);

static u8 user_data_connect[16] = {
        /* Version code, must be 0x01 for shared mode */
        0x01,
        /* what to collect */
        MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
        /* DCSS name in EBCDIC, 8 bytes padded with blanks */
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};

static u8 user_data_sever[16] = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};


/******************************************************************************
 *                             helper functions                              *
 *****************************************************************************/
/*
 * Create the 8 bytes EBCDIC DCSS segment name from
 * an ASCII name, incl. padding
 */
static void dcss_mkname(char *ascii_name, char *ebcdic_name)
{
        int i;

        for (i = 0; i < 8; i++) {
                if (ascii_name[i] == '\0')
                        break;
                ebcdic_name[i] = toupper(ascii_name[i]);
        };
        for (; i < 8; i++)
                ebcdic_name[i] = ' ';
        ASCEBC(ebcdic_name, 8);
}
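/*
 * Messages arrive on a path opened with IUCV_IPRMDATA, so the 8 bytes in
 * msg.rmmsg hold the start and end addresses of the monitor control area
 * (MCA) inside the DCSS.  The MCA is a sequence of 12-byte entries; bytes
 * 4-7 and 8-11 of an entry give the start and end addresses of the monitor
 * records it describes.
 */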
static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
{
        return *(u32 *) &monmsg->msg.rmmsg;
}

static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
{
        return *(u32 *) &monmsg->msg.rmmsg[4];
}

static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
{
        return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
}

static inline u32 mon_mca_size(struct mon_msg *monmsg)
{
        return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
}

static inline u32 mon_rec_start(struct mon_msg *monmsg)
{
        return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
}

static inline u32 mon_rec_end(struct mon_msg *monmsg)
{
        return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
}
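/*
 * Validate the current MCA entry before any data is copied to user space:
 * the record and MCA ranges must lie inside the loaded DCSS, the MCA size
 * must be a multiple of 12, and the type bytes must not all be zero.
 */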
static int mon_check_mca(struct mon_msg *monmsg)
{
        if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
            (mon_rec_start(monmsg) < mon_dcss_start) ||
            (mon_rec_end(monmsg) > mon_dcss_end) ||
            (mon_mca_type(monmsg, 0) == 0) ||
            (mon_mca_size(monmsg) % 12 != 0) ||
            (mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
            (mon_mca_end(monmsg) > mon_dcss_end) ||
            (mon_mca_start(monmsg) < mon_dcss_start) ||
            ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
                return -EINVAL;
        return 0;
}
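/*
 * Reply to *MONITOR so the data area described by this message can be
 * reused.  Unless the message limit was reached when the message arrived,
 * the slot is reset and read_index/read_ready are advanced here; otherwise
 * the slot is only marked as replied and recycled in mon_next_message().
 */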
static int mon_send_reply(struct mon_msg *monmsg,
                          struct mon_private *monpriv)
{
        int rc;

        rc = iucv_message_reply(monpriv->path, &monmsg->msg,
                                IUCV_IPRMDATA, NULL, 0);
        atomic_dec(&monpriv->msglim_count);
        if (likely(!monmsg->msglim_reached)) {
                monmsg->pos = 0;
                monmsg->mca_offset = 0;
                monpriv->read_index = (monpriv->read_index + 1) %
                                      MON_MSGLIM;
                atomic_dec(&monpriv->read_ready);
        } else
                monmsg->replied_msglim = 1;
        if (rc) {
                pr_err("Reading monitor data failed with rc=%i\n", rc);
                return -EIO;
        }
        return 0;
}

static void mon_free_mem(struct mon_private *monpriv)
{
        int i;

        for (i = 0; i < MON_MSGLIM; i++)
                if (monpriv->msg_array[i])
                        kfree(monpriv->msg_array[i]);
        kfree(monpriv);
}

static struct mon_private *mon_alloc_mem(void)
{
        int i;
        struct mon_private *monpriv;

        monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
        if (!monpriv)
                return NULL;
        for (i = 0; i < MON_MSGLIM; i++) {
                monpriv->msg_array[i] = kzalloc(sizeof(struct mon_msg),
                                                GFP_KERNEL);
                if (!monpriv->msg_array[i]) {
                        mon_free_mem(monpriv);
                        return NULL;
                }
        }
        return monpriv;
}

static inline void mon_next_mca(struct mon_msg *monmsg)
{
        if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
                return;
        monmsg->mca_offset += 12;
        monmsg->pos = 0;
}
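/*
 * Return the next message to read: NULL if nothing is ready, the message
 * at read_index in the normal case, or -EOVERFLOW exactly once for a slot
 * that was already replied to because the message limit had been reached.
 */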
static struct mon_msg *mon_next_message(struct mon_private *monpriv)
{
        struct mon_msg *monmsg;

        if (!atomic_read(&monpriv->read_ready))
                return NULL;
        monmsg = monpriv->msg_array[monpriv->read_index];
        if (unlikely(monmsg->replied_msglim)) {
                monmsg->replied_msglim = 0;
                monmsg->msglim_reached = 0;
                monmsg->pos = 0;
                monmsg->mca_offset = 0;
                monpriv->read_index = (monpriv->read_index + 1) %
                                      MON_MSGLIM;
                atomic_dec(&monpriv->read_ready);
                return ERR_PTR(-EOVERFLOW);
        }
        return monmsg;
}


/******************************************************************************
 *                               IUCV handler                                *
 *****************************************************************************/
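/*
 * IUCV callbacks: path_complete marks the connection as established,
 * path_severed severs the local end and wakes up any waiters, and
 * message_pending stores the message descriptor in the next write slot
 * and wakes up readers.
 */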
static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
        struct mon_private *monpriv = path->private;

        atomic_set(&monpriv->iucv_connected, 1);
        wake_up(&mon_conn_wait_queue);
}

static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
        struct mon_private *monpriv = path->private;

        pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
               ipuser[0]);
        iucv_path_sever(path, NULL);
        atomic_set(&monpriv->iucv_severed, 1);
        wake_up(&mon_conn_wait_queue);
        wake_up_interruptible(&mon_read_wait_queue);
}

static void mon_iucv_message_pending(struct iucv_path *path,
                                     struct iucv_message *msg)
{
        struct mon_private *monpriv = path->private;

        memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
               msg, sizeof(*msg));
        if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
                pr_warning("The read queue for monitor data is full\n");
                monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
        }
        monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
        atomic_inc(&monpriv->read_ready);
        wake_up_interruptible(&mon_read_wait_queue);
}

static struct iucv_handler monreader_iucv_handler = {
        .path_complete   = mon_iucv_path_complete,
        .path_severed    = mon_iucv_path_severed,
        .message_pending = mon_iucv_message_pending,
};


/******************************************************************************
 *                              file operations                              *
 *****************************************************************************/
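/*
 * open(): enforce a single concurrent reader via MON_IN_USE, allocate the
 * message ring, connect an IUCV path to *MONITOR with up to MON_MSGLIM
 * outstanding messages, and block until the connection is confirmed or
 * severed.
 */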
static int mon_open(struct inode *inode, struct file *filp)
{
        struct mon_private *monpriv;
        int rc;

        /*
         * only one user allowed
         */
        lock_kernel();
        rc = -EBUSY;
        if (test_and_set_bit(MON_IN_USE, &mon_in_use))
                goto out;

        rc = -ENOMEM;
        monpriv = mon_alloc_mem();
        if (!monpriv)
                goto out_use;

        /*
         * Connect to *MONITOR service
         */
        monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
        if (!monpriv->path)
                goto out_priv;
        rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
                               MON_SERVICE, NULL, user_data_connect, monpriv);
        if (rc) {
                pr_err("Connecting to the z/VM *MONITOR system service "
                       "failed with rc=%i\n", rc);
                rc = -EIO;
                goto out_path;
        }

        /*
         * Wait for connection confirmation
         */
        wait_event(mon_conn_wait_queue,
                   atomic_read(&monpriv->iucv_connected) ||
                   atomic_read(&monpriv->iucv_severed));
        if (atomic_read(&monpriv->iucv_severed)) {
                atomic_set(&monpriv->iucv_severed, 0);
                atomic_set(&monpriv->iucv_connected, 0);
                rc = -EIO;
                goto out_path;
        }
        filp->private_data = monpriv;
        unlock_kernel();
        return nonseekable_open(inode, filp);

out_path:
        kfree(monpriv->path);
out_priv:
        mon_free_mem(monpriv);
out_use:
        clear_bit(MON_IN_USE, &mon_in_use);
out:
        unlock_kernel();
        return rc;
}

static int mon_close(struct inode *inode, struct file *filp)
{
        int rc, i;
        struct mon_private *monpriv = filp->private_data;

        /*
         * Close IUCV connection and unregister
         */
        rc = iucv_path_sever(monpriv->path, user_data_sever);
        if (rc)
                pr_warning("Disconnecting the z/VM *MONITOR system service "
                           "failed with rc=%i\n", rc);

        atomic_set(&monpriv->iucv_severed, 0);
        atomic_set(&monpriv->iucv_connected, 0);
        atomic_set(&monpriv->read_ready, 0);
        atomic_set(&monpriv->msglim_count, 0);
        monpriv->write_index = 0;
        monpriv->read_index = 0;

        for (i = 0; i < MON_MSGLIM; i++)
                kfree(monpriv->msg_array[i]);
        kfree(monpriv);
        clear_bit(MON_IN_USE, &mon_in_use);
        return 0;
}
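/*
 * read(): each message describes one monitor control area in the DCSS.
 * A read first returns the 12-byte monitor control element at the current
 * MCA offset, then the monitor records it points to, and finally replies
 * to *MONITOR so the buffer can be reused.  With O_NONBLOCK, -EAGAIN is
 * returned when no message is ready.
 */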
static ssize_t mon_read(struct file *filp, char __user *data,
                        size_t count, loff_t *ppos)
{
        struct mon_private *monpriv = filp->private_data;
        struct mon_msg *monmsg;
        int ret;
        u32 mce_start;

        monmsg = mon_next_message(monpriv);
        if (IS_ERR(monmsg))
                return PTR_ERR(monmsg);

        if (!monmsg) {
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
                ret = wait_event_interruptible(mon_read_wait_queue,
                                        atomic_read(&monpriv->read_ready) ||
                                        atomic_read(&monpriv->iucv_severed));
                if (ret)
                        return ret;
                if (unlikely(atomic_read(&monpriv->iucv_severed)))
                        return -EIO;
                monmsg = monpriv->msg_array[monpriv->read_index];
        }

        if (!monmsg->pos)
                monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
        if (mon_check_mca(monmsg))
                goto reply;

        /* read monitor control element (12 bytes) first */
        mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
        if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
                count = min(count, (size_t) mce_start + 12 - monmsg->pos);
                ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
                                   count);
                if (ret)
                        return -EFAULT;
                monmsg->pos += count;
                if (monmsg->pos == mce_start + 12)
                        monmsg->pos = mon_rec_start(monmsg);
                goto out_copy;
        }

        /* read records */
        if (monmsg->pos <= mon_rec_end(monmsg)) {
                count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
                                   + 1);
                ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
                                   count);
                if (ret)
                        return -EFAULT;
                monmsg->pos += count;
                if (monmsg->pos > mon_rec_end(monmsg))
                        mon_next_mca(monmsg);
                goto out_copy;
        }
reply:
        ret = mon_send_reply(monmsg, monpriv);
        return ret;
out_copy:
        *ppos += count;
        return count;
}

static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
{
        struct mon_private *monpriv = filp->private_data;

        poll_wait(filp, &mon_read_wait_queue, p);
        if (unlikely(atomic_read(&monpriv->iucv_severed)))
                return POLLERR;
        if (atomic_read(&monpriv->read_ready))
                return POLLIN | POLLRDNORM;
        return 0;
}

static const struct file_operations mon_fops = {
        .owner   = THIS_MODULE,
        .open    = &mon_open,
        .release = &mon_close,
        .read    = &mon_read,
        .poll    = &mon_poll,
};

static struct miscdevice mon_dev = {
        .name  = "monreader",
        .fops  = &mon_fops,
        .minor = MISC_DYNAMIC_MINOR,
};


/******************************************************************************
 *                              module init/exit                             *
 *****************************************************************************/
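/*
 * init: the driver is only usable under z/VM.  Register the IUCV handler,
 * verify that the configured DCSS is of type SC, load it shared to obtain
 * its address range, store its EBCDIC name in the connection parameters,
 * and register the misc device.
 */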
static int __init mon_init(void)
{
        int rc;

        if (!MACHINE_IS_VM) {
                pr_err("The z/VM *MONITOR record device driver cannot be "
                       "loaded without z/VM\n");
                return -ENODEV;
        }

        /*
         * Register with IUCV and connect to *MONITOR service
         */
        rc = iucv_register(&monreader_iucv_handler, 1);
        if (rc) {
                pr_err("The z/VM *MONITOR record device driver failed to "
                       "register with IUCV\n");
                return rc;
        }

        rc = segment_type(mon_dcss_name);
        if (rc < 0) {
                segment_warning(rc, mon_dcss_name);
                goto out_iucv;
        }
        if (rc != SEG_TYPE_SC) {
                pr_err("The specified *MONITOR DCSS %s does not have the "
                       "required type SC\n", mon_dcss_name);
                rc = -EINVAL;
                goto out_iucv;
        }

        rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
                          &mon_dcss_start, &mon_dcss_end);
        if (rc < 0) {
                segment_warning(rc, mon_dcss_name);
                rc = -EINVAL;
                goto out_iucv;
        }
        dcss_mkname(mon_dcss_name, &user_data_connect[8]);

        rc = misc_register(&mon_dev);
        if (rc < 0)
                goto out;
        return 0;

out:
        segment_unload(mon_dcss_name);
out_iucv:
        iucv_unregister(&monreader_iucv_handler, 1);
        return rc;
}

static void __exit mon_exit(void)
{
        segment_unload(mon_dcss_name);
        WARN_ON(misc_deregister(&mon_dev) != 0);
        iucv_unregister(&monreader_iucv_handler, 1);
        return;
}

module_init(mon_init);
module_exit(mon_exit);

module_param_string(mondcss, mon_dcss_name, 9, 0444);
MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
                 "service, max. 8 chars. Default is MONDCSS");

MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
MODULE_DESCRIPTION("Character device driver for reading z/VM "
                   "monitor service records.");
MODULE_LICENSE("GPL");