PageRenderTime 62ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 0ms

/sound/core/seq/seq_clientmgr.c

https://bitbucket.org/abioy/linux
C | 2587 lines | 1845 code | 407 blank | 335 comment | 443 complexity | 97f18edf8b2ca43fb7205d9da84547d0 MD5 | raw file
Possible License(s): CC-BY-SA-3.0, GPL-2.0, LGPL-2.0, AGPL-1.0
  1. /*
  2. * ALSA sequencer Client Manager
  3. * Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
  4. * Jaroslav Kysela <perex@perex.cz>
  5. * Takashi Iwai <tiwai@suse.de>
  6. *
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. #include <linux/init.h>
  24. #include <linux/slab.h>
  25. #include <sound/core.h>
  26. #include <sound/minors.h>
  27. #include <linux/kmod.h>
  28. #include <sound/seq_kernel.h>
  29. #include "seq_clientmgr.h"
  30. #include "seq_memory.h"
  31. #include "seq_queue.h"
  32. #include "seq_timer.h"
  33. #include "seq_info.h"
  34. #include "seq_system.h"
  35. #include <sound/seq_device.h>
  36. #ifdef CONFIG_COMPAT
  37. #include <linux/compat.h>
  38. #endif
  39. /* Client Manager
  40. * this module handles the connections of userland and kernel clients
  41. *
  42. */
  43. /*
  44. * There are four ranges of client numbers (last two shared):
  45. * 0..15: global clients
  46. * 16..127: statically allocated client numbers for cards 0..27
  47. * 128..191: dynamically allocated client numbers for cards 28..31
  48. * 128..191: dynamically allocated client numbers for applications
  49. */
  50. /* number of kernel non-card clients */
  51. #define SNDRV_SEQ_GLOBAL_CLIENTS 16
  52. /* clients per cards, for static clients */
  53. #define SNDRV_SEQ_CLIENTS_PER_CARD 4
  54. /* dynamically allocated client numbers (both kernel drivers and user space) */
  55. #define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN 128
  56. #define SNDRV_SEQ_LFLG_INPUT 0x0001
  57. #define SNDRV_SEQ_LFLG_OUTPUT 0x0002
  58. #define SNDRV_SEQ_LFLG_OPEN (SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)
  59. static DEFINE_SPINLOCK(clients_lock);
  60. static DEFINE_MUTEX(register_mutex);
  61. /*
  62. * client table
  63. */
  64. static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
  65. static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
  66. static struct snd_seq_usage client_usage;
  67. /*
  68. * prototypes
  69. */
  70. static int bounce_error_event(struct snd_seq_client *client,
  71. struct snd_seq_event *event,
  72. int err, int atomic, int hop);
  73. static int snd_seq_deliver_single_event(struct snd_seq_client *client,
  74. struct snd_seq_event *event,
  75. int filter, int atomic, int hop);
  76. /*
  77. */
/*
 * Switch the address limit to the kernel data segment and return the
 * previous segment so it can later be restored via snd_leave_user().
 * Used around helpers that expect __user pointers when the data
 * actually lives in kernel space.
 */
static inline mm_segment_t snd_enter_user(void)
{
	mm_segment_t fs = get_fs();
	set_fs(get_ds());
	return fs;
}
/* Restore the address limit previously saved by snd_enter_user(). */
static inline void snd_leave_user(mm_segment_t fs)
{
	set_fs(fs);
}
  88. /*
  89. */
  90. static inline unsigned short snd_seq_file_flags(struct file *file)
  91. {
  92. switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
  93. case FMODE_WRITE:
  94. return SNDRV_SEQ_LFLG_OUTPUT;
  95. case FMODE_READ:
  96. return SNDRV_SEQ_LFLG_INPUT;
  97. default:
  98. return SNDRV_SEQ_LFLG_OPEN;
  99. }
  100. }
/* return non-zero if the client's output event pool has cells allocated */
static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
{
	return snd_seq_total_cells(client->pool) > 0;
}
  105. /* return pointer to client structure for specified id */
  106. static struct snd_seq_client *clientptr(int clientid)
  107. {
  108. if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
  109. snd_printd("Seq: oops. Trying to get pointer to client %d\n",
  110. clientid);
  111. return NULL;
  112. }
  113. return clienttab[clientid];
  114. }
/*
 * Resolve a client id to its structure and take a use-lock reference
 * on it; release with snd_seq_client_unlock().
 *
 * If the client is absent and we are not in interrupt context, try to
 * demand-load the matching kernel module (a global sequencer client
 * driver, or the owning card's driver plus its sequencer devices) and
 * retry the lookup once.  Returns NULL when the id is invalid, the
 * slot is reserved for deletion (clienttablock), or no client exists.
 */
struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
{
	unsigned long flags;
	struct snd_seq_client *client;

	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
		snd_printd("Seq: oops. Trying to get pointer to client %d\n",
			   clientid);
		return NULL;
	}
	spin_lock_irqsave(&clients_lock, flags);
	client = clientptr(clientid);
	if (client)
		goto __lock;
	if (clienttablock[clientid]) {
		/* slot is reserved while its former client is being freed */
		spin_unlock_irqrestore(&clients_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_MODULES
	if (!in_interrupt()) {
		/* remember requests so each module is loaded at most once */
		static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
		static char card_requested[SNDRV_CARDS];
		if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
			int idx;

			if (!client_requested[clientid]) {
				client_requested[clientid] = 1;
				/* only ids listed in the seq_client_load
				 * module parameter are auto-loaded */
				for (idx = 0; idx < 15; idx++) {
					if (seq_client_load[idx] < 0)
						break;
					if (seq_client_load[idx] == clientid) {
						request_module("snd-seq-client-%i",
							       clientid);
						break;
					}
				}
			}
		} else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) {
			/* static per-card range: load the card driver, then
			 * the sequencer devices it provides */
			int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
				SNDRV_SEQ_CLIENTS_PER_CARD;
			if (card < snd_ecards_limit) {
				if (! card_requested[card]) {
					card_requested[card] = 1;
					snd_request_card(card);
				}
				snd_seq_device_load_drivers();
			}
		}
		/* retry the lookup after the module load */
		spin_lock_irqsave(&clients_lock, flags);
		client = clientptr(clientid);
		if (client)
			goto __lock;
		spin_unlock_irqrestore(&clients_lock, flags);
	}
#endif
	return NULL;

      __lock:
	snd_use_lock_use(&client->use_lock);
	spin_unlock_irqrestore(&clients_lock, flags);
	return client;
}
  175. static void usage_alloc(struct snd_seq_usage *res, int num)
  176. {
  177. res->cur += num;
  178. if (res->cur > res->peak)
  179. res->peak = res->cur;
  180. }
/* account for @num released items (the peak value is kept as a statistic) */
static void usage_free(struct snd_seq_usage *res, int num)
{
	res->cur -= num;
}
  185. /* initialise data structures */
  186. int __init client_init_data(void)
  187. {
  188. /* zap out the client table */
  189. memset(&clienttablock, 0, sizeof(clienttablock));
  190. memset(&clienttab, 0, sizeof(clienttab));
  191. return 0;
  192. }
/*
 * Allocate and register a new client structure.
 *
 * @client_index: requested client number, or a negative value to pick
 *	any free slot in the dynamic range starting at
 *	SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN
 * @poolsize: number of cells for the client's event pool
 *
 * Returns the new client (type NO_CLIENT until the caller finishes
 * setup) or NULL on allocation failure or when no slot is available.
 */
static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
{
	unsigned long flags;
	int c;
	struct snd_seq_client *client;

	/* init client data */
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL)
		return NULL;
	client->pool = snd_seq_pool_new(poolsize);
	if (client->pool == NULL) {
		kfree(client);
		return NULL;
	}
	client->type = NO_CLIENT;
	snd_use_lock_init(&client->use_lock);
	rwlock_init(&client->ports_lock);
	mutex_init(&client->ports_mutex);
	INIT_LIST_HEAD(&client->ports_list_head);

	/* find free slot in the client table */
	spin_lock_irqsave(&clients_lock, flags);
	if (client_index < 0) {
		/* dynamic allocation: scan for a free, unreserved slot */
		for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN;
		     c < SNDRV_SEQ_MAX_CLIENTS;
		     c++) {
			if (clienttab[c] || clienttablock[c])
				continue;
			clienttab[client->number = c] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	} else {
		/* fixed allocation: take the requested slot if available */
		if (clienttab[client_index] == NULL && !clienttablock[client_index]) {
			clienttab[client->number = client_index] = client;
			spin_unlock_irqrestore(&clients_lock, flags);
			return client;
		}
	}
	spin_unlock_irqrestore(&clients_lock, flags);
	snd_seq_pool_delete(&client->pool);
	kfree(client);
	return NULL;	/* no free slot found or busy, return failure code */
}
/*
 * Tear down a client's ports and queue membership, then unhook it
 * from the client table.  The slot is marked busy via clienttablock
 * while concurrent users drain (snd_use_lock_sync), so the client
 * number cannot be re-allocated mid-teardown.
 */
static int seq_free_client1(struct snd_seq_client *client)
{
	unsigned long flags;

	if (!client)
		return 0;
	snd_seq_delete_all_ports(client);
	snd_seq_queue_client_leave(client->number);
	/* remove from the table but keep the slot reserved */
	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 1;
	clienttab[client->number] = NULL;
	spin_unlock_irqrestore(&clients_lock, flags);
	/* wait until every outstanding use-lock reference is released */
	snd_use_lock_sync(&client->use_lock);
	snd_seq_queue_client_termination(client->number);
	if (client->pool)
		snd_seq_pool_delete(&client->pool);
	/* release the slot reservation */
	spin_lock_irqsave(&clients_lock, flags);
	clienttablock[client->number] = 0;
	spin_unlock_irqrestore(&clients_lock, flags);
	return 0;
}
/*
 * Free a user or kernel client under register_mutex and announce the
 * exit through the system client.  Freeing an unregistered client
 * (NO_CLIENT) or one with a bogus type only logs a message.
 */
static void seq_free_client(struct snd_seq_client * client)
{
	mutex_lock(&register_mutex);
	switch (client->type) {
	case NO_CLIENT:
		snd_printk(KERN_WARNING "Seq: Trying to free unused client %d\n",
			   client->number);
		break;
	case USER_CLIENT:
	case KERNEL_CLIENT:
		seq_free_client1(client);
		usage_free(&client_usage, 1);
		break;
	default:
		snd_printk(KERN_ERR "Seq: Trying to free client %d with undefined type = %d\n",
			   client->number, client->type);
	}
	mutex_unlock(&register_mutex);

	/* broadcast the client-exit event to interested listeners */
	snd_seq_system_client_ev_client_exit(client->number);
}
  276. /* -------------------------------------------------------- */
  277. /* create a user client */
/*
 * open callback for the sequencer device: create a new user client.
 *
 * Allocates a dynamic client number, sets accept_input/accept_output
 * from the file mode, and creates the input FIFO for readable opens.
 * The client pointer is stored in file->private_data for the other
 * file operations.
 */
static int snd_seq_open(struct inode *inode, struct file *file)
{
	int c, mode;			/* client id */
	struct snd_seq_client *client;
	struct snd_seq_user_client *user;

	if (mutex_lock_interruptible(&register_mutex))
		return -ERESTARTSYS;
	client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
	if (client == NULL) {
		mutex_unlock(&register_mutex);
		return -ENOMEM;	/* failure code */
	}

	mode = snd_seq_file_flags(file);
	if (mode & SNDRV_SEQ_LFLG_INPUT)
		client->accept_input = 1;
	if (mode & SNDRV_SEQ_LFLG_OUTPUT)
		client->accept_output = 1;

	user = &client->data.user;
	user->fifo = NULL;
	user->fifo_pool_size = 0;

	if (mode & SNDRV_SEQ_LFLG_INPUT) {
		/* readable: incoming events are buffered in a FIFO */
		user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
		user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
		if (user->fifo == NULL) {
			seq_free_client1(client);
			kfree(client);
			mutex_unlock(&register_mutex);
			return -ENOMEM;
		}
	}

	usage_alloc(&client_usage, 1);
	client->type = USER_CLIENT;
	mutex_unlock(&register_mutex);

	c = client->number;
	file->private_data = client;

	/* fill client data */
	user->file = file;
	sprintf(client->name, "Client-%d", c);

	/* make others aware this new client */
	snd_seq_system_client_ev_client_start(c);

	return 0;
}
/* delete a user client */
/* release callback: undo everything snd_seq_open() set up */
static int snd_seq_release(struct inode *inode, struct file *file)
{
	struct snd_seq_client *client = file->private_data;

	if (client) {
		seq_free_client(client);
		/* the FIFO exists only for clients opened for reading */
		if (client->data.user.fifo)
			snd_seq_fifo_delete(&client->data.user.fifo);
		kfree(client);
	}

	return 0;
}
/* handle client read() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOSPC	FIFO overflow (the flag is cleared after this error report)
 *	-EINVAL	not enough user-space buffer to write the whole event
 *	-EFAULT	seg. fault during copy to user space
 */
static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
			    loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	struct snd_seq_fifo *fifo;
	int err;
	long result = 0;
	struct snd_seq_event_cell *cell;

	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
		return -ENXIO;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;

	if (!client->accept_input || (fifo = client->data.user.fifo) == NULL)
		return -ENXIO;

	if (atomic_read(&fifo->overflow) > 0) {
		/* buffer overflow is detected */
		snd_seq_fifo_clear(fifo);
		/* return error code */
		return -ENOSPC;
	}

	cell = NULL;
	err = 0;
	snd_seq_fifo_lock(fifo);

	/* while data available in queue */
	while (count >= sizeof(struct snd_seq_event)) {
		int nonblock;

		/* block only for the first event; once something has been
		 * read, return what we have rather than sleeping again */
		nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
		if ((err = snd_seq_fifo_cell_out(fifo, &cell, nonblock)) < 0) {
			break;
		}
		if (snd_seq_ev_is_variable(&cell->event)) {
			/* copy the header with the length-mode bits cleared,
			 * then expand the external data right after it */
			struct snd_seq_event tmpev;
			tmpev = cell->event;
			tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK;
			if (copy_to_user(buf, &tmpev, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
			err = snd_seq_expand_var_event(&cell->event, count,
						       (char __force *)buf, 0,
						       sizeof(struct snd_seq_event));
			if (err < 0)
				break;
			result += err;
			count -= err;
			buf += err;
		} else {
			if (copy_to_user(buf, &cell->event, sizeof(struct snd_seq_event))) {
				err = -EFAULT;
				break;
			}
			count -= sizeof(struct snd_seq_event);
			buf += sizeof(struct snd_seq_event);
		}
		snd_seq_cell_free(cell);
		cell = NULL; /* to be sure */
		result += sizeof(struct snd_seq_event);
	}

	if (err < 0) {
		/* put back the unconsumed cell; suppress EAGAIN when some
		 * events were already transferred */
		if (cell)
			snd_seq_fifo_cell_putback(fifo, cell);
		if (err == -EAGAIN && result > 0)
			err = 0;
	}
	snd_seq_fifo_unlock(fifo);

	return (err < 0) ? err : result;
}
  411. /*
  412. * check access permission to the port
  413. */
  414. static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags)
  415. {
  416. if ((port->capability & flags) != flags)
  417. return 0;
  418. return flags;
  419. }
/*
 * check if the destination client is available, and return the pointer
 * if filter is non-zero, client filter bitmap is tested.
 *
 * On success the client is returned with its use-lock held; the
 * caller must release it with snd_seq_client_unlock().
 */
static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
						    int filter)
{
	struct snd_seq_client *dest;

	dest = snd_seq_client_use_ptr(event->dest.client);
	if (dest == NULL)
		return NULL;
	if (! dest->accept_input)
		goto __not_avail;
	/* per-event-type filtering, when the client enabled it */
	if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
	    ! test_bit(event->type, dest->event_filter))
		goto __not_avail;
	if (filter && !(dest->filter & filter))
		goto __not_avail;

	return dest; /* ok - accessible */
__not_avail:
	snd_seq_client_unlock(dest);
	return NULL;
}
  443. /*
  444. * Return the error event.
  445. *
  446. * If the receiver client is a user client, the original event is
  447. * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event. If
  448. * the original event is also variable length, the external data is
  449. * copied after the event record.
  450. * If the receiver client is a kernel client, the original event is
  451. * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
  452. * kmalloc.
  453. */
  454. static int bounce_error_event(struct snd_seq_client *client,
  455. struct snd_seq_event *event,
  456. int err, int atomic, int hop)
  457. {
  458. struct snd_seq_event bounce_ev;
  459. int result;
  460. if (client == NULL ||
  461. ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) ||
  462. ! client->accept_input)
  463. return 0; /* ignored */
  464. /* set up quoted error */
  465. memset(&bounce_ev, 0, sizeof(bounce_ev));
  466. bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
  467. bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
  468. bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
  469. bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
  470. bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
  471. bounce_ev.dest.client = client->number;
  472. bounce_ev.dest.port = event->source.port;
  473. bounce_ev.data.quote.origin = event->dest;
  474. bounce_ev.data.quote.event = event;
  475. bounce_ev.data.quote.value = -err; /* use positive value */
  476. result = snd_seq_deliver_single_event(NULL, &bounce_ev, 0, atomic, hop + 1);
  477. if (result < 0) {
  478. client->event_lost++;
  479. return result;
  480. }
  481. return result;
  482. }
/*
 * rewrite the time-stamp of the event record with the current time
 * of the given queue.
 * return non-zero if updated.
 */
static int update_timestamp_of_queue(struct snd_seq_event *event,
				     int queue, int real_time)
{
	struct snd_seq_queue *q;

	q = queueptr(queue);
	if (! q)
		return 0;
	event->queue = queue;
	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
	if (real_time) {
		event->time.time = snd_seq_timer_get_cur_time(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
	} else {
		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
		event->flags |= SNDRV_SEQ_TIME_STAMP_TICK;
	}
	queuefree(q);	/* drop the reference taken by queueptr() */
	return 1;
}
/*
 * deliver an event to the specified destination.
 * if filter is non-zero, client filter bitmap is tested.
 *
 *  RETURN VALUE: 0 : if succeeded
 *		 <0 : error
 */
static int snd_seq_deliver_single_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					int filter, int atomic, int hop)
{
	struct snd_seq_client *dest = NULL;
	struct snd_seq_client_port *dest_port = NULL;
	int result = -ENOENT;
	int direct;

	direct = snd_seq_ev_is_direct(event);

	dest = get_event_dest_client(event, filter);
	if (dest == NULL)
		goto __skip;
	dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
	if (dest_port == NULL)
		goto __skip;

	/* check permission */
	if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
		result = -EPERM;
		goto __skip;
	}

	if (dest_port->timestamping)
		/* stamp the event with the port's queue time */
		update_timestamp_of_queue(event, dest_port->time_queue,
					  dest_port->time_real);

	switch (dest->type) {
	case USER_CLIENT:
		/* user client: push the event into its read FIFO */
		if (dest->data.user.fifo)
			result = snd_seq_fifo_event_in(dest->data.user.fifo, event);
		break;

	case KERNEL_CLIENT:
		/* kernel client: hand the event to the port's callback */
		if (dest_port->event_input == NULL)
			break;
		result = dest_port->event_input(event, direct,
						dest_port->private_data,
						atomic, hop);
		break;

	default:
		break;
	}

  __skip:
	if (dest_port)
		snd_seq_port_unlock(dest_port);
	if (dest)
		snd_seq_client_unlock(dest);

	if (result < 0 && !direct) {
		/* non-direct delivery failed: bounce an error back */
		result = bounce_error_event(client, event, result, atomic, hop);
	}
	return result;
}
/*
 * send the event to all subscribers:
 *
 * Walks the source port's subscription list and delivers a copy of
 * the event to each subscriber, rewriting the destination (and, when
 * requested, the timestamp) per subscription.  Returns the number of
 * successful deliveries or a negative error.
 */
static int deliver_to_subscribers(struct snd_seq_client *client,
				  struct snd_seq_event *event,
				  int atomic, int hop)
{
	struct snd_seq_subscribers *subs;
	int err = 0, num_ev = 0;
	struct snd_seq_event event_saved;
	struct snd_seq_client_port *src_port;
	struct snd_seq_port_subs_info *grp;

	src_port = snd_seq_port_use_ptr(client, event->source.port);
	if (src_port == NULL)
		return -EINVAL; /* invalid source port */
	/* save original event record */
	event_saved = *event;
	grp = &src_port->c_src;

	/* lock list: spinlock in atomic context, rwsem otherwise */
	if (atomic)
		read_lock(&grp->list_lock);
	else
		down_read(&grp->list_mutex);
	list_for_each_entry(subs, &grp->list_head, src_list) {
		event->dest = subs->info.dest;
		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
			/* convert time according to flag with subscription */
			update_timestamp_of_queue(event, subs->info.queue,
						  subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
		err = snd_seq_deliver_single_event(client, event,
						   0, atomic, hop);
		if (err < 0)
			break;
		num_ev++;
		/* restore original event record */
		*event = event_saved;
	}
	if (atomic)
		read_unlock(&grp->list_lock);
	else
		up_read(&grp->list_mutex);
	*event = event_saved; /* restore */
	snd_seq_port_unlock(src_port);
	return (err < 0) ? err : num_ev;
}
  607. #ifdef SUPPORT_BROADCAST
/*
 * broadcast to all ports:
 *
 * Delivers the event to every port of the destination client.
 * Returns the number of deliveries or a negative error.
 */
static int port_broadcast_event(struct snd_seq_client *client,
				struct snd_seq_event *event,
				int atomic, int hop)
{
	int num_ev = 0, err = 0;
	struct snd_seq_client *dest_client;
	struct snd_seq_client_port *port;

	dest_client = get_event_dest_client(event, SNDRV_SEQ_FILTER_BROADCAST);
	if (dest_client == NULL)
		return 0; /* no matching destination */

	read_lock(&dest_client->ports_lock);
	list_for_each_entry(port, &dest_client->ports_list_head, list) {
		event->dest.port = port->addr.port;
		/* pass NULL as source client to avoid error bounce */
		err = snd_seq_deliver_single_event(NULL, event,
						   SNDRV_SEQ_FILTER_BROADCAST,
						   atomic, hop);
		if (err < 0)
			break;
		num_ev++;
	}
	read_unlock(&dest_client->ports_lock);
	snd_seq_client_unlock(dest_client);
	event->dest.port = SNDRV_SEQ_ADDRESS_BROADCAST; /* restore */
	return (err < 0) ? err : num_ev;
}
/*
 * send the event to all clients:
 * if destination port is also ADDRESS_BROADCAST, deliver to all ports.
 */
static int broadcast_event(struct snd_seq_client *client,
			   struct snd_seq_event *event, int atomic, int hop)
{
	int err = 0, num_ev = 0;
	int dest;
	struct snd_seq_addr addr;

	addr = event->dest; /* save */

	for (dest = 0; dest < SNDRV_SEQ_MAX_CLIENTS; dest++) {
		/* don't send to itself */
		if (dest == client->number)
			continue;
		event->dest.client = dest;
		event->dest.port = addr.port;
		if (addr.port == SNDRV_SEQ_ADDRESS_BROADCAST)
			err = port_broadcast_event(client, event, atomic, hop);
		else
			/* pass NULL as source client to avoid error bounce */
			err = snd_seq_deliver_single_event(NULL, event,
							   SNDRV_SEQ_FILTER_BROADCAST,
							   atomic, hop);
		if (err < 0)
			break;
		num_ev += err;
	}
	event->dest = addr; /* restore */
	return (err < 0) ? err : num_ev;
}
/* multicast - not supported yet */
/* placeholder: logs a debug message and silently drops the event */
static int multicast_event(struct snd_seq_client *client, struct snd_seq_event *event,
			   int atomic, int hop)
{
	snd_printd("seq: multicast not supported yet.\n");
	return 0; /* ignored */
}
  675. #endif /* SUPPORT_BROADCAST */
/* deliver an event to the destination port(s).
 * if the event is to subscribers or broadcast, the event is dispatched
 * to multiple targets.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *               n == 0 : the event was not passed to any client.
 *               n < 0  : error - event was not processed.
 */
static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event,
				 int atomic, int hop)
{
	int result;

	hop++;
	/* guard against delivery loops through subscriptions */
	if (hop >= SNDRV_SEQ_MAX_HOPS) {
		snd_printd("too long delivery path (%d:%d->%d:%d)\n",
			   event->source.client, event->source.port,
			   event->dest.client, event->dest.port);
		return -EMLINK;
	}

	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
	    event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
		result = deliver_to_subscribers(client, event, atomic, hop);
#ifdef SUPPORT_BROADCAST
	else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST ||
		 event->dest.client == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = broadcast_event(client, event, atomic, hop);
	else if (event->dest.client >= SNDRV_SEQ_MAX_CLIENTS)
		result = multicast_event(client, event, atomic, hop);
	else if (event->dest.port == SNDRV_SEQ_ADDRESS_BROADCAST)
		result = port_broadcast_event(client, event, atomic, hop);
#endif
	else
		result = snd_seq_deliver_single_event(client, event, 0, atomic, hop);

	return result;
}
/*
 * dispatch an event cell:
 * This function is called only from queue check routines in timer
 * interrupts or after enqueued.
 * The event cell shall be released or re-queued in this function.
 *
 * RETURN VALUE: n > 0  : the number of delivered events.
 *               n == 0 : the event was not passed to any client.
 *               n < 0  : error - event was not processed.
 */
int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
{
	struct snd_seq_client *client;
	int result;

	if (snd_BUG_ON(!cell))
		return -EINVAL;

	client = snd_seq_client_use_ptr(cell->event.source.client);
	if (client == NULL) {
		snd_seq_cell_free(cell); /* release this cell */
		return -EINVAL;
	}

	if (cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
		/* NOTE event:
		 * the event cell is re-used as a NOTE-OFF event and
		 * enqueued again.
		 */
		struct snd_seq_event tmpev, *ev;

		/* reserve this event to enqueue note-off later;
		 * deliver a copy as NOTEON now */
		tmpev = cell->event;
		tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
		result = snd_seq_deliver_event(client, &tmpev, atomic, hop);

		/*
		 * This was originally a note event.  We now re-use the
		 * cell for the note-off event.
		 */
		ev = &cell->event;
		ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
		ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;

		/* add the duration time */
		switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
		case SNDRV_SEQ_TIME_STAMP_TICK:
			ev->time.tick += ev->data.note.duration;
			break;
		case SNDRV_SEQ_TIME_STAMP_REAL:
			/* unit for duration is ms; normalize nsec overflow
			 * into seconds */
			ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000);
			ev->time.time.tv_sec += ev->data.note.duration / 1000 +
						ev->time.time.tv_nsec / 1000000000;
			ev->time.time.tv_nsec %= 1000000000;
			break;
		}
		ev->data.note.velocity = ev->data.note.off_velocity;

		/* Now queue this cell as the note off event */
		if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
			snd_seq_cell_free(cell); /* release this cell */

	} else {
		/* Normal events:
		 * event cell is freed after processing the event
		 */
		result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
		snd_seq_cell_free(cell);
	}

	snd_seq_client_unlock(client);
	return result;
}
/* Allocate a cell from client pool and enqueue it to queue:
 * if pool is empty and blocking is TRUE, sleep until a new cell is
 * available.
 */
static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
					struct snd_seq_event *event,
					struct file *file, int blocking,
					int atomic, int hop)
{
	struct snd_seq_event_cell *cell;
	int err;

	/* special queue values - force direct passing */
	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
	} else
#ifdef SUPPORT_BROADCAST
		if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) {
			event->dest.client = SNDRV_SEQ_ADDRESS_BROADCAST;
			event->queue = SNDRV_SEQ_QUEUE_DIRECT;
		}
#endif
	if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
		/* check presence of source port */
		struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
		if (src_port == NULL)
			return -EINVAL;
		snd_seq_port_unlock(src_port);
	}

	/* direct event processing without enqueued */
	if (snd_seq_ev_is_direct(event)) {
		/* NOTE events need the queue to schedule the note-off */
		if (event->type == SNDRV_SEQ_EVENT_NOTE)
			return -EINVAL; /* this event must be enqueued! */
		return snd_seq_deliver_event(client, event, atomic, hop);
	}

	/* Not direct, normal queuing */
	if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
		return -EINVAL;  /* invalid queue */
	if (! snd_seq_write_pool_allocated(client))
		return -ENXIO; /* queue is not allocated */

	/* allocate an event cell; may sleep unless non-blocking/atomic */
	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
	if (err < 0)
		return err;

	/* we got a cell. enqueue it. */
	if ((err = snd_seq_enqueue_event(cell, atomic, hop)) < 0) {
		snd_seq_cell_free(cell);
		return err;
	}

	return 0;
}
  827. /*
  828. * check validity of event type and data length.
  829. * return non-zero if invalid.
  830. */
  831. static int check_event_type_and_length(struct snd_seq_event *ev)
  832. {
  833. switch (snd_seq_ev_length_type(ev)) {
  834. case SNDRV_SEQ_EVENT_LENGTH_FIXED:
  835. if (snd_seq_ev_is_variable_type(ev))
  836. return -EINVAL;
  837. break;
  838. case SNDRV_SEQ_EVENT_LENGTH_VARIABLE:
  839. if (! snd_seq_ev_is_variable_type(ev) ||
  840. (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
  841. return -EINVAL;
  842. break;
  843. case SNDRV_SEQ_EVENT_LENGTH_VARUSR:
  844. if (! snd_seq_ev_is_direct(ev))
  845. return -EINVAL;
  846. break;
  847. }
  848. return 0;
  849. }
/* handle write() */
/* possible error values:
 *	-ENXIO	invalid client or file open mode
 *	-ENOMEM	malloc failed
 *	-EFAULT	seg. fault during copy from user space
 *	-EINVAL	invalid event
 *	-EAGAIN	no space in output pool
 *	-EINTR	interrupted while sleeping
 *	-EMLINK	too many hops
 *	others	depends on return value from driver callback
 */
static ssize_t snd_seq_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *offset)
{
	struct snd_seq_client *client = file->private_data;
	int written = 0, len;
	int err = -EINVAL;
	struct snd_seq_event event;

	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
		return -ENXIO;

	/* check client structures are in place */
	if (snd_BUG_ON(!client))
		return -ENXIO;

	if (!client->accept_output || client->pool == NULL)
		return -ENXIO;

	/* allocate the pool now if the pool is not allocated yet */
	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
		if (snd_seq_pool_init(client->pool) < 0)
			return -ENOMEM;
	}

	/* only process whole events */
	while (count >= sizeof(struct snd_seq_event)) {
		/* Read in the event header from the user */
		len = sizeof(event);
		if (copy_from_user(&event, buf, len)) {
			err = -EFAULT;
			break;
		}
		/* never trust the user-supplied source client */
		event.source.client = client->number;	/* fill in client number */
		/* Check for extension data length */
		if (check_event_type_and_length(&event)) {
			err = -EINVAL;
			break;
		}

		/* check for special events */
		if (event.type == SNDRV_SEQ_EVENT_NONE)
			goto __skip_event;
		else if (snd_seq_ev_is_reserved(&event)) {
			err = -EINVAL;
			break;
		}

		if (snd_seq_ev_is_variable(&event)) {
			/* the variable payload follows the header in the
			 * user buffer; it must fit into this write */
			int extlen = event.data.ext.len & ~SNDRV_SEQ_EXT_MASK;
			if ((size_t)(extlen + len) > count) {
				/* back out, will get an error this time or next */
				err = -EINVAL;
				break;
			}
			/* set user space pointer */
			event.data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR;
			event.data.ext.ptr = (char __force *)buf
						+ sizeof(struct snd_seq_event);
			len += extlen; /* increment data length */
		} else {
#ifdef CONFIG_COMPAT
			if (client->convert32 && snd_seq_ev_is_varusr(&event)) {
				/* 32-bit client: widen the packed pointer */
				void *ptr = compat_ptr(event.data.raw32.d[1]);
				event.data.ext.ptr = ptr;
			}
#endif
		}

		/* ok, enqueue it */
		err = snd_seq_client_enqueue_event(client, &event, file,
						   !(file->f_flags & O_NONBLOCK),
						   0, 0);
		if (err < 0)
			break;

	__skip_event:
		/* Update pointers and counts */
		count -= len;
		buf += len;
		written += len;
	}

	return written ? written : err;
}
  935. /*
  936. * handle polling
  937. */
  938. static unsigned int snd_seq_poll(struct file *file, poll_table * wait)
  939. {
  940. struct snd_seq_client *client = file->private_data;
  941. unsigned int mask = 0;
  942. /* check client structures are in place */
  943. if (snd_BUG_ON(!client))
  944. return -ENXIO;
  945. if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
  946. client->data.user.fifo) {
  947. /* check if data is available in the outqueue */
  948. if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
  949. mask |= POLLIN | POLLRDNORM;
  950. }
  951. if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
  952. /* check if data is available in the pool */
  953. if (!snd_seq_write_pool_allocated(client) ||
  954. snd_seq_pool_poll_wait(client->pool, file, wait))
  955. mask |= POLLOUT | POLLWRNORM;
  956. }
  957. return mask;
  958. }
  959. /*-----------------------------------------------------*/
  960. /* SYSTEM_INFO ioctl() */
  961. static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void __user *arg)
  962. {
  963. struct snd_seq_system_info info;
  964. memset(&info, 0, sizeof(info));
  965. /* fill the info fields */
  966. info.queues = SNDRV_SEQ_MAX_QUEUES;
  967. info.clients = SNDRV_SEQ_MAX_CLIENTS;
  968. info.ports = 256; /* fixed limit */
  969. info.channels = 256; /* fixed limit */
  970. info.cur_clients = client_usage.cur;
  971. info.cur_queues = snd_seq_queue_get_cur_queues();
  972. if (copy_to_user(arg, &info, sizeof(info)))
  973. return -EFAULT;
  974. return 0;
  975. }
  976. /* RUNNING_MODE ioctl() */
  977. static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void __user *arg)
  978. {
  979. struct snd_seq_running_info info;
  980. struct snd_seq_client *cptr;
  981. int err = 0;
  982. if (copy_from_user(&info, arg, sizeof(info)))
  983. return -EFAULT;
  984. /* requested client number */
  985. cptr = snd_seq_client_use_ptr(info.client);
  986. if (cptr == NULL)
  987. return -ENOENT; /* don't change !!! */
  988. #ifdef SNDRV_BIG_ENDIAN
  989. if (! info.big_endian) {
  990. err = -EINVAL;
  991. goto __err;
  992. }
  993. #else
  994. if (info.big_endian) {
  995. err = -EINVAL;
  996. goto __err;
  997. }
  998. #endif
  999. if (info.cpu_mode > sizeof(long)) {
  1000. err = -EINVAL;
  1001. goto __err;
  1002. }
  1003. cptr->convert32 = (info.cpu_mode < sizeof(long));
  1004. __err:
  1005. snd_seq_client_unlock(cptr);
  1006. return err;
  1007. }
/* CLIENT_INFO ioctl() */

/* Copy the attributes of @cptr into the user-visible @info record;
 * reserved bytes are cleared so no kernel memory leaks to user space.
 */
static void get_client_info(struct snd_seq_client *cptr,
			    struct snd_seq_client_info *info)
{
	info->client = cptr->number;

	/* fill the info fields */
	info->type = cptr->type;
	strcpy(info->name, cptr->name);
	info->filter = cptr->filter;
	info->event_lost = cptr->event_lost;
	memcpy(info->event_filter, cptr->event_filter, 32);
	info->num_ports = cptr->num_ports;
	memset(info->reserved, 0, sizeof(info->reserved));
}
  1022. static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
  1023. void __user *arg)
  1024. {
  1025. struct snd_seq_client *cptr;
  1026. struct snd_seq_client_info client_info;
  1027. if (copy_from_user(&client_info, arg, sizeof(client_info)))
  1028. return -EFAULT;
  1029. /* requested client number */
  1030. cptr = snd_seq_client_use_ptr(client_info.client);
  1031. if (cptr == NULL)
  1032. return -ENOENT; /* don't change !!! */
  1033. get_client_info(cptr, &client_info);
  1034. snd_seq_client_unlock(cptr);
  1035. if (copy_to_user(arg, &client_info, sizeof(client_info)))
  1036. return -EFAULT;
  1037. return 0;
  1038. }
  1039. /* CLIENT_INFO ioctl() */
  1040. static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
  1041. void __user *arg)
  1042. {
  1043. struct snd_seq_client_info client_info;
  1044. if (copy_from_user(&client_info, arg, sizeof(client_info)))
  1045. return -EFAULT;
  1046. /* it is not allowed to set the info fields for an another client */
  1047. if (client->number != client_info.client)
  1048. return -EPERM;
  1049. /* also client type must be set now */
  1050. if (client->type != client_info.type)
  1051. return -EINVAL;
  1052. /* fill the info fields */
  1053. if (client_info.name[0])
  1054. strlcpy(client->name, client_info.name, sizeof(client->name));
  1055. client->filter = client_info.filter;
  1056. client->event_lost = client_info.event_lost;
  1057. memcpy(client->event_filter, client_info.event_filter, 32);
  1058. return 0;
  1059. }
/*
 * CREATE PORT ioctl()
 */
static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
				     void __user *arg)
{
	struct snd_seq_client_port *port;
	struct snd_seq_port_info info;
	struct snd_seq_port_callback *callback;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/* it is not allowed to create the port for an another client */
	if (info.addr.client != client->number)
		return -EPERM;

	/* allocate the port; honor a user-requested port number when the
	 * GIVEN_PORT flag is set, otherwise pick the next free one (-1) */
	port = snd_seq_create_port(client, (info.flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT) ? info.addr.port : -1);
	if (port == NULL)
		return -ENOMEM;

	/* a user client must not smuggle in kernel callback pointers;
	 * tear the port down again if it tried */
	if (client->type == USER_CLIENT && info.kernel) {
		snd_seq_delete_port(client, port->addr.port);
		return -EINVAL;
	}
	if (client->type == KERNEL_CLIENT) {
		/* wire up the kernel client's callbacks onto the port */
		if ((callback = info.kernel) != NULL) {
			if (callback->owner)
				port->owner = callback->owner;
			port->private_data = callback->private_data;
			port->private_free = callback->private_free;
			port->callback_all = callback->callback_all;
			port->event_input = callback->event_input;
			port->c_src.open = callback->subscribe;
			port->c_src.close = callback->unsubscribe;
			port->c_dest.open = callback->use;
			port->c_dest.close = callback->unuse;
		}
	}

	/* NOTE(review): the port becomes visible before its info is fully
	 * set below; confirm whether a lock is needed to close this window
	 * against a concurrent delete. */
	info.addr = port->addr;
	snd_seq_set_port_info(port, &info);
	/* announce the new port to system clients */
	snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
  1102. /*
  1103. * DELETE PORT ioctl()
  1104. */
  1105. static int snd_seq_ioctl_delete_port(struct snd_seq_client *client,
  1106. void __user *arg)
  1107. {
  1108. struct snd_seq_port_info info;
  1109. int err;
  1110. /* set passed parameters */
  1111. if (copy_from_user(&info, arg, sizeof(info)))
  1112. return -EFAULT;
  1113. /* it is not allowed to remove the port for an another client */
  1114. if (info.addr.client != client->number)
  1115. return -EPERM;
  1116. err = snd_seq_delete_port(client, info.addr.port);
  1117. if (err >= 0)
  1118. snd_seq_system_client_ev_port_exit(client->number, info.addr.port);
  1119. return err;
  1120. }
  1121. /*
  1122. * GET_PORT_INFO ioctl() (on any client)
  1123. */
  1124. static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client,
  1125. void __user *arg)
  1126. {
  1127. struct snd_seq_client *cptr;
  1128. struct snd_seq_client_port *port;
  1129. struct snd_seq_port_info info;
  1130. if (copy_from_user(&info, arg, sizeof(info)))
  1131. return -EFAULT;
  1132. cptr = snd_seq_client_use_ptr(info.addr.client);
  1133. if (cptr == NULL)
  1134. return -ENXIO;
  1135. port = snd_seq_port_use_ptr(cptr, info.addr.port);
  1136. if (port == NULL) {
  1137. snd_seq_client_unlock(cptr);
  1138. return -ENOENT; /* don't change */
  1139. }
  1140. /* get port info */
  1141. snd_seq_get_port_info(port, &info);
  1142. snd_seq_port_unlock(port);
  1143. snd_seq_client_unlock(cptr);
  1144. if (copy_to_user(arg, &info, sizeof(info)))
  1145. return -EFAULT;
  1146. return 0;
  1147. }
  1148. /*
  1149. * SET_PORT_INFO ioctl() (only ports on this/own client)
  1150. */
  1151. static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client,
  1152. void __user *arg)
  1153. {
  1154. struct snd_seq_client_port *port;
  1155. struct snd_seq_port_info info;
  1156. if (copy_from_user(&info, arg, sizeof(info)))
  1157. return -EFAULT;
  1158. if (info.addr.client != client->number) /* only set our own ports ! */
  1159. return -EPERM;
  1160. port = snd_seq_port_use_ptr(client, info.addr.port);
  1161. if (port) {
  1162. snd_seq_set_port_info(port, &info);
  1163. snd_seq_port_unlock(port);
  1164. }
  1165. return 0;
  1166. }
  1167. /*
  1168. * port subscription (connection)
  1169. */
  1170. #define PERM_RD (SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ)
  1171. #define PERM_WR (SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE)
  1172. static int check_subscription_permission(struct snd_seq_client *client,
  1173. struct snd_seq_client_port *sport,
  1174. struct snd_seq_client_port *dport,
  1175. struct snd_seq_port_subscribe *subs)
  1176. {
  1177. if (client->number != subs->sender.client &&
  1178. client->number != subs->dest.client) {
  1179. /* connection by third client - check export permission */
  1180. if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
  1181. return -EPERM;
  1182. if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
  1183. return -EPERM;
  1184. }
  1185. /* check read permission */
  1186. /* if sender or receiver is the subscribing client itself,
  1187. * no permission check is necessary
  1188. */
  1189. if (client->number != subs->sender.client) {
  1190. if (! check_port_perm(sport, PERM_RD))
  1191. return -EPERM;
  1192. }
  1193. /* check write permission */
  1194. if (client->number != subs->dest.client) {
  1195. if (! check_port_perm(dport, PERM_WR))
  1196. return -EPERM;
  1197. }
  1198. return 0;
  1199. }
  1200. /*
  1201. * send an subscription notify event to user client:
  1202. * client must be user client.
  1203. */
  1204. int snd_seq_client_notify_subscription(int client, int port,
  1205. struct snd_seq_port_subscribe *info,
  1206. int evtype)
  1207. {
  1208. struct snd_seq_event event;
  1209. memset(&event, 0, sizeof(event));
  1210. event.type = evtype;
  1211. event.data.connect.dest = info->dest;
  1212. event.data.connect.sender = info->sender;
  1213. return snd_seq_system_notify(client, port, &event); /* non-atomic */
  1214. }
  1215. /*
  1216. * add to port's subscription list IOCTL interface
  1217. */
  1218. static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
  1219. void __user *arg)
  1220. {
  1221. int result = -EINVAL;
  1222. struct snd_seq_client *receiver = NULL, *sender = NULL;
  1223. struct snd_seq_client_port *sport = NULL, *dport = NULL;
  1224. struct snd_seq_port_subscribe subs;
  1225. if (copy_from_user(&subs, arg, sizeof(subs)))
  1226. return -EFAULT;
  1227. if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
  1228. goto __end;
  1229. if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
  1230. goto __end;
  1231. if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
  1232. goto __end;
  1233. if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
  1234. goto __end;
  1235. result = check_subscription_permission(client, sport, dport, &subs);
  1236. if (result < 0)
  1237. goto __end;
  1238. /* connect them */
  1239. result = snd_seq_port_connect(client, sender, sport, receiver, dport, &subs);
  1240. if (! result) /* broadcast announce */
  1241. snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
  1242. &subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
  1243. __end:
  1244. if (sport)
  1245. snd_seq_port_unlock(sport);
  1246. if (dport)
  1247. snd_seq_port_unlock(dport);
  1248. if (sender)
  1249. snd_seq_client_unlock(sender);
  1250. if (receiver)
  1251. snd_seq_client_unlock(receiver);
  1252. return result;
  1253. }
/*
 * remove from port's subscription list
 */
static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
					  void __user *arg)
{
	int result = -ENXIO;
	struct snd_seq_client *receiver = NULL, *sender = NULL;
	struct snd_seq_client_port *sport = NULL, *dport = NULL;
	struct snd_seq_port_subscribe subs;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	/* resolve both endpoints; each successful lookup takes a reference
	 * that is dropped in the cleanup path below */
	if ((receiver = snd_seq_client_use_ptr(subs.dest.client)) == NULL)
		goto __end;
	if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
		goto __end;
	if ((dport = snd_seq_port_use_ptr(receiver, subs.dest.port)) == NULL)
		goto __end;

	/* same permission rules as for subscribing */
	result = check_subscription_permission(client, sport, dport, &subs);
	if (result < 0)
		goto __end;

	result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, &subs);
	if (! result) /* broadcast announce */
		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
						   &subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
 __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (dport)
		snd_seq_port_unlock(dport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (receiver)
		snd_seq_client_unlock(receiver);
	return result;
}
/* CREATE_QUEUE ioctl() */
static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
				      void __user *arg)
{
	struct snd_seq_queue_info info;
	int result;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/* allocate a queue owned by this client; result is the queue number */
	result = snd_seq_queue_alloc(client->number, info.locked, info.flags);
	if (result < 0)
		return result;

	q = queueptr(result);
	if (q == NULL)
		return -EINVAL;

	/* report the actual attributes back to the caller */
	info.queue = q->queue;
	info.locked = q->locked;
	info.owner = q->owner;

	/* set queue name */
	/* NOTE(review): info.name comes straight from user space and is not
	 * guaranteed NUL-terminated before being used as a strlcpy source —
	 * confirm the struct layout makes an overread impossible. */
	if (! info.name[0])
		snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue);
	strlcpy(q->name, info.name, sizeof(q->name));
	queuefree(q);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
/* DELETE_QUEUE ioctl() */
/* Delete the given queue; ownership is verified inside
 * snd_seq_queue_delete(), which returns a negative errno on failure.
 */
static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client,
				      void __user *arg)
{
	struct snd_seq_queue_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	return snd_seq_queue_delete(client->number, info.queue);
}
  1328. /* GET_QUEUE_INFO ioctl() */
  1329. static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
  1330. void __user *arg)
  1331. {
  1332. struct snd_seq_queue_info info;
  1333. struct snd_seq_queue *q;
  1334. if (copy_from_user(&info, arg, sizeof(info)))
  1335. return -EFAULT;
  1336. q = queueptr(info.queue);
  1337. if (q == NULL)
  1338. return -EINVAL;
  1339. memset(&info, 0, sizeof(info));
  1340. info.queue = q->queue;
  1341. info.owner = q->owner;
  1342. info.locked = q->locked;
  1343. strlcpy(info.name, q->name, sizeof(info.name));
  1344. queuefree(q);
  1345. if (copy_to_user(arg, &info, sizeof(info)))
  1346. return -EFAULT;
  1347. return 0;
  1348. }
/* SET_QUEUE_INFO ioctl() */
static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
					void __user *arg)
{
	struct snd_seq_queue_info info;
	struct snd_seq_queue *q;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	/* a client may only claim ownership for itself */
	if (info.owner != client->number)
		return -EINVAL;

	/* change owner/locked permission */
	if (snd_seq_queue_check_access(info.queue, client->number)) {
		if (snd_seq_queue_set_owner(info.queue, client->number, info.locked) < 0)
			return -EPERM;
		if (info.locked)
			/* locking a queue implies using it */
			snd_seq_queue_use(info.queue, client->number, 1);
	} else {
		return -EPERM;
	}

	q = queueptr(info.queue);
	if (! q)
		return -EINVAL;
	/* re-check ownership: it could have changed between the access
	 * check above and taking the queue reference here */
	if (q->owner != client->number) {
		queuefree(q);
		return -EPERM;
	}
	strlcpy(q->name, info.name, sizeof(q->name));
	queuefree(q);

	return 0;
}
  1379. /* GET_NAMED_QUEUE ioctl() */
  1380. static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client, void __user *arg)
  1381. {
  1382. struct snd_seq_queue_info info;
  1383. struct snd_seq_queue *q;
  1384. if (copy_from_user(&info, arg, sizeof(info)))
  1385. return -EFAULT;
  1386. q = snd_seq_queue_find_name(info.name);
  1387. if (q == NULL)
  1388. return -EINVAL;
  1389. info.queue = q->queue;
  1390. info.owner = q->owner;
  1391. info.locked = q->locked;
  1392. queuefree(q);
  1393. if (copy_to_user(arg, &info, sizeof(info)))
  1394. return -EFAULT;
  1395. return 0;
  1396. }
  1397. /* GET_QUEUE_STATUS ioctl() */
  1398. static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
  1399. void __user *arg)
  1400. {
  1401. struct snd_seq_queue_status status;
  1402. struct snd_seq_queue *queue;
  1403. struct snd_seq_timer *tmr;
  1404. if (copy_from_user(&status, arg, sizeof(status)))
  1405. return -EFAULT;
  1406. queue = queueptr(status.queue);
  1407. if (queue == NULL)
  1408. return -EINVAL;
  1409. memset(&status, 0, sizeof(status));
  1410. status.queue = queue->queue;
  1411. tmr = queue->timer;
  1412. status.events = queue->tickq->cells + queue->timeq->cells;
  1413. status.time = snd_seq_timer_get_cur_time(tmr);
  1414. status.tick = snd_seq_timer_get_cur_tick(tmr);
  1415. status.running = tmr->running;
  1416. status.flags = queue->flags;
  1417. queuefree(queue);
  1418. if (copy_to_user(arg, &status, sizeof(status)))
  1419. return -EFAULT;
  1420. return 0;
  1421. }
  1422. /* GET_QUEUE_TEMPO ioctl() */
  1423. static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
  1424. void __user *arg)
  1425. {
  1426. struct snd_seq_queue_tempo tempo;
  1427. struct snd_seq_queue *queue;
  1428. struct snd_seq_timer *tmr;
  1429. if (copy_from_user(&tempo, arg, sizeof(tempo)))
  1430. return -EFAULT;
  1431. queue = queueptr(tempo.queue);
  1432. if (queue == NULL)
  1433. return -EINVAL;
  1434. memset(&tempo, 0, sizeof(tempo));
  1435. tempo.queue = queue->queue;
  1436. tmr = queue->timer;
  1437. tempo.tempo = tmr->tempo;
  1438. tempo.ppq = tmr->ppq;
  1439. tempo.skew_value = tmr->skew;
  1440. tempo.skew_base = tmr->skew_base;
  1441. queuefree(queue);
  1442. if (copy_to_user(arg, &tempo, sizeof(tempo)))
  1443. return -EFAULT;
  1444. return 0;
  1445. }
  1446. /* SET_QUEUE_TEMPO ioctl() */
  1447. int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
  1448. {
  1449. if (!snd_seq_queue_check_access(tempo->queue, client))
  1450. return -EPERM;
  1451. return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
  1452. }
  1453. EXPORT_SYMBOL(snd_seq_set_queue_tempo);
  1454. static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
  1455. void __user *arg)
  1456. {
  1457. int result;
  1458. struct snd_seq_queue_tempo tempo;
  1459. if (copy_from_user(&tempo, arg, sizeof(tempo)))
  1460. return -EFAULT;
  1461. result = snd_seq_set_queue_tempo(client->number, &tempo);
  1462. return result < 0 ? result : 0;
  1463. }
/* GET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_queue_timer timer;
	struct snd_seq_queue *queue;
	struct snd_seq_timer *tmr;

	if (copy_from_user(&timer, arg, sizeof(timer)))
		return -EFAULT;

	queue = queueptr(timer.queue);
	if (queue == NULL)
		return -EINVAL;

	/* serialize against SET_QUEUE_TIMER reconfiguring the timer;
	 * drop the queue reference before bailing out on a signal */
	if (mutex_lock_interruptible(&queue->timer_mutex)) {
		queuefree(queue);
		return -ERESTARTSYS;
	}
	tmr = queue->timer;
	/* rebuild the record from scratch from the timer state */
	memset(&timer, 0, sizeof(timer));
	timer.queue = queue->queue;

	timer.type = tmr->type;
	if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
		/* only ALSA timers carry id/resolution details */
		timer.u.alsa.id = tmr->alsa_id;
		timer.u.alsa.resolution = tmr->preferred_resolution;
	}
	mutex_unlock(&queue->timer_mutex);
	queuefree(queue);

	if (copy_to_user(arg, &timer, sizeof(timer)))
		return -EFAULT;

	return 0;
}
/* SET_QUEUE_TIMER ioctl() */
static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
					 void __user *arg)
{
	int result = 0;
	struct snd_seq_queue_timer timer;

	if (copy_from_user(&timer, arg, sizeof(timer)))
		return -EFAULT;

	/* only ALSA timers may be attached to a queue */
	if (timer.type != SNDRV_SEQ_TIMER_ALSA)
		return -EINVAL;

	if (snd_seq_queue_check_access(timer.queue, client->number)) {
		struct snd_seq_queue *q;
		struct snd_seq_timer *tmr;

		q = queueptr(timer.queue);
		if (q == NULL)
			return -ENXIO;
		/* serialize against concurrent timer get/set on this queue */
		if (mutex_lock_interruptible(&q->timer_mutex)) {
			queuefree(q);
			return -ERESTARTSYS;
		}
		tmr = q->timer;
		/* close the currently attached timer before changing its
		 * parameters, then reopen it with the new ones */
		snd_seq_queue_timer_close(timer.queue);
		tmr->type = timer.type;
		if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
			tmr->alsa_id = timer.u.alsa.id;
			tmr->preferred_resolution = timer.u.alsa.resolution;
		}
		result = snd_seq_queue_timer_open(timer.queue);
		mutex_unlock(&q->timer_mutex);
		queuefree(q);
	} else {
		return -EPERM;
	}

	return result;
}
  1529. /* GET_QUEUE_CLIENT ioctl() */
  1530. static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
  1531. void __user *arg)
  1532. {
  1533. struct snd_seq_queue_client info;
  1534. int used;
  1535. if (copy_from_user(&info, arg, sizeof(info)))
  1536. return -EFAULT;
  1537. used = snd_seq_queue_is_used(info.queue, client->number);
  1538. if (used < 0)
  1539. return -EINVAL;
  1540. info.used = used;
  1541. info.client = client->number;
  1542. if (copy_to_user(arg, &info, sizeof(info)))
  1543. return -EFAULT;
  1544. return 0;
  1545. }
  1546. /* SET_QUEUE_CLIENT ioctl() */
  1547. static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
  1548. void __user *arg)
  1549. {
  1550. int err;
  1551. struct snd_seq_queue_client info;
  1552. if (copy_from_user(&info, arg, sizeof(info)))
  1553. return -EFAULT;
  1554. if (info.used >= 0) {
  1555. err = snd_seq_queue_use(info.queue, client->number, info.used);
  1556. if (err < 0)
  1557. return err;
  1558. }
  1559. return snd_seq_ioctl_get_queue_client(client, arg);
  1560. }
  1561. /* GET_CLIENT_POOL ioctl() */
  1562. static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
  1563. void __user *arg)
  1564. {
  1565. struct snd_seq_client_pool info;
  1566. struct snd_seq_client *cptr;
  1567. if (copy_from_user(&info, arg, sizeof(info)))
  1568. return -EFAULT;
  1569. cptr = snd_seq_client_use_ptr(info.client);
  1570. if (cptr == NULL)
  1571. return -ENOENT;
  1572. memset(&info, 0, sizeof(info));
  1573. info.output_pool = cptr->pool->size;
  1574. info.output_room = cptr->pool->room;
  1575. info.output_free = info.output_pool;
  1576. info.output_free = snd_seq_unused_cells(cptr->pool);
  1577. if (cptr->type == USER_CLIENT) {
  1578. info.input_pool = cptr->data.user.fifo_pool_size;
  1579. info.input_free = info.input_pool;
  1580. if (cptr->data.user.fifo)
  1581. info.input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
  1582. } else {
  1583. info.input_pool = 0;
  1584. info.input_free = 0;
  1585. }
  1586. snd_seq_client_unlock(cptr);
  1587. if (copy_to_user(arg, &info, sizeof(info)))
  1588. return -EFAULT;
  1589. return 0;
  1590. }
/* SET_CLIENT_POOL ioctl() */
static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
					 void __user *arg)
{
	struct snd_seq_client_pool info;
	int rc;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	if (client->number != info.client)
		return -EINVAL; /* can't change other clients */

	/* resize the output pool only when a valid size was requested and
	 * it actually differs from the current one (or the pool does not
	 * exist yet) */
	if (info.output_pool >= 1 && info.output_pool <= SNDRV_SEQ_MAX_EVENTS &&
	    (! snd_seq_write_pool_allocated(client) ||
	     info.output_pool != client->pool->size)) {
		if (snd_seq_write_pool_allocated(client)) {
			/* remove all existing cells */
			snd_seq_queue_client_leave_cells(client->number);
			snd_seq_pool_done(client->pool);
		}
		client->pool->size = info.output_pool;
		rc = snd_seq_pool_init(client->pool);
		if (rc < 0)
			return rc;
	}
	/* resize the input fifo pool (user clients only) when requested */
	if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
	    info.input_pool >= 1 &&
	    info.input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
	    info.input_pool != client->data.user.fifo_pool_size) {
		/* change pool size */
		rc = snd_seq_fifo_resize(client->data.user.fifo, info.input_pool);
		if (rc < 0)
			return rc;
		client->data.user.fifo_pool_size = info.input_pool;
	}
	/* the write-wakeup watermark must stay within the pool size */
	if (info.output_room >= 1 &&
	    info.output_room <= client->pool->size) {
		client->pool->room  = info.output_room;
	}

	/* report the resulting state back through the GET handler */
	return snd_seq_ioctl_get_client_pool(client, arg);
}
  1630. /* REMOVE_EVENTS ioctl() */
  1631. static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
  1632. void __user *arg)
  1633. {
  1634. struct snd_seq_remove_events info;
  1635. if (copy_from_user(&info, arg, sizeof(info)))
  1636. return -EFAULT;
  1637. /*
  1638. * Input mostly not implemented XXX.
  1639. */
  1640. if (info.remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
  1641. /*
  1642. * No restrictions so for a user client we can clear
  1643. * the whole fifo
  1644. */
  1645. if (client->type == USER_CLIENT)
  1646. snd_seq_fifo_clear(client->data.user.fifo);
  1647. }
  1648. if (info.remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
  1649. snd_seq_queue_remove_cells(client->number, &info);
  1650. return 0;
  1651. }
/*
 * get subscription info
 */
static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
					  void __user *arg)
{
	int result;
	struct snd_seq_client *sender = NULL;
	struct snd_seq_client_port *sport = NULL;
	struct snd_seq_port_subscribe subs;
	struct snd_seq_subscribers *p;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	result = -EINVAL;
	if ((sender = snd_seq_client_use_ptr(subs.sender.client)) == NULL)
		goto __end;
	if ((sport = snd_seq_port_use_ptr(sender, subs.sender.port)) == NULL)
		goto __end;
	/* look up the subscription record matching the given destination */
	p = snd_seq_port_get_subscription(&sport->c_src, &subs.dest);
	if (p) {
		result = 0;
		/* NOTE(review): p points into the port's subscriber list and
		 * is copied here without holding the list lock — confirm the
		 * record cannot be freed concurrently (upstream later changed
		 * the helper to copy under the lock). */
		subs = p->info;
	} else
		result = -ENOENT;

 __end:
	if (sport)
		snd_seq_port_unlock(sport);
	if (sender)
		snd_seq_client_unlock(sender);
	if (result >= 0) {
		if (copy_to_user(arg, &subs, sizeof(subs)))
			return -EFAULT;
	}
	return result;
}
/*
 * get subscription info - check only its presence
 */
static int snd_seq_ioctl_query_subs(struct snd_seq_client *client,
				    void __user *arg)
{
	int result = -ENXIO;
	struct snd_seq_client *cptr = NULL;
	struct snd_seq_client_port *port = NULL;
	struct snd_seq_query_subs subs;
	struct snd_seq_port_subs_info *group;
	struct list_head *p;
	int i;

	if (copy_from_user(&subs, arg, sizeof(subs)))
		return -EFAULT;

	if ((cptr = snd_seq_client_use_ptr(subs.root.client)) == NULL)
		goto __end;
	if ((port = snd_seq_port_use_ptr(cptr, subs.root.port)) == NULL)
		goto __end;

	/* pick the subscriber list matching the query direction */
	switch (subs.type) {
	case SNDRV_SEQ_QUERY_SUBS_READ:
		group = &port->c_src;
		break;
	case SNDRV_SEQ_QUERY_SUBS_WRITE:
		group = &port->c_dest;
		break;
	default:
		goto __end;
	}

	/* hold the list lock while walking; entries may otherwise be
	 * removed concurrently */
	down_read(&group->list_mutex);
	/* search for the subscriber */
	subs.num_subs = group->count;
	i = 0;
	result = -ENOENT;
	list_for_each(p, &group->list_head) {
		if (i++ == subs.index) {
			/* found! */
			struct snd_seq_subscribers *s;
			/* the same record sits on both src and dest lists;
			 * recover it via the list member we iterated on */
			if (subs.type == SNDRV_SEQ_QUERY_SUBS_READ) {
				s = list_entry(p, struct snd_seq_subscribers, src_list);
				subs.addr = s->info.dest;
			} else {
				s = list_entry(p, struct snd_seq_subscribers, dest_list);
				subs.addr = s->info.sender;
			}
			subs.flags = s->info.flags;
			subs.queue = s->info.queue;
			result = 0;
			break;
		}
	}
	up_read(&group->list_mutex);

 __end:
   	if (port)
		snd_seq_port_unlock(port);
	if (cptr)
		snd_seq_client_unlock(cptr);
	if (result >= 0) {
		if (copy_to_user(arg, &subs, sizeof(subs)))
			return -EFAULT;
	}
	return result;
}
  1750. /*
  1751. * query next client
  1752. */
  1753. static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
  1754. void __user *arg)
  1755. {
  1756. struct snd_seq_client *cptr = NULL;
  1757. struct snd_seq_client_info info;
  1758. if (copy_from_user(&info, arg, sizeof(info)))
  1759. return -EFAULT;
  1760. /* search for next client */
  1761. info.client++;
  1762. if (info.client < 0)
  1763. info.client = 0;
  1764. for (; info.client < SNDRV_SEQ_MAX_CLIENTS; info.client++) {
  1765. cptr = snd_seq_client_use_ptr(info.client);
  1766. if (cptr)
  1767. break; /* found */
  1768. }
  1769. if (cptr == NULL)
  1770. return -ENOENT;
  1771. get_client_info(cptr, &info);
  1772. snd_seq_client_unlock(cptr);
  1773. if (copy_to_user(arg, &info, sizeof(info)))
  1774. return -EFAULT;
  1775. return 0;
  1776. }
  1777. /*
  1778. * query next port
  1779. */
  1780. static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
  1781. void __user *arg)
  1782. {
  1783. struct snd_seq_client *cptr;
  1784. struct snd_seq_client_port *port = NULL;
  1785. struct snd_seq_port_info info;
  1786. if (copy_from_user(&info, arg, sizeof(info)))
  1787. return -EFAULT;
  1788. cptr = snd_seq_client_use_ptr(info.addr.client);
  1789. if (cptr == NULL)
  1790. return -ENXIO;
  1791. /* search for next port */
  1792. info.addr.port++;
  1793. port = snd_seq_port_query_nearest(cptr, &info);
  1794. if (port == NULL) {
  1795. snd_seq_client_unlock(cptr);
  1796. return -ENOENT;
  1797. }
  1798. /* get port info */
  1799. info.addr = port->addr;
  1800. snd_seq_get_port_info(port, &info);
  1801. snd_seq_port_unlock(port);
  1802. snd_seq_client_unlock(cptr);
  1803. if (copy_to_user(arg, &info, sizeof(info)))
  1804. return -EFAULT;
  1805. return 0;
  1806. }
  1807. /* -------------------------------------------------------- */
  1808. static struct seq_ioctl_table {
  1809. unsigned int cmd;
  1810. int (*func)(struct snd_seq_client *client, void __user * arg);
  1811. } ioctl_tables[] = {
  1812. { SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
  1813. { SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode },
  1814. { SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
  1815. { SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info },
  1816. { SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port },
  1817. { SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port },
  1818. { SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info },
  1819. { SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info },
  1820. { SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port },
  1821. { SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port },
  1822. { SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue },
  1823. { SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue },
  1824. { SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info },
  1825. { SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info },
  1826. { SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue },
  1827. { SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status },
  1828. { SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo },
  1829. { SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo },
  1830. { SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer },
  1831. { SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer },
  1832. { SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client },
  1833. { SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client },
  1834. { SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool },
  1835. { SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool },
  1836. { SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription },
  1837. { SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client },
  1838. { SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port },
  1839. { SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events },
  1840. { SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs },
  1841. { 0, NULL },
  1842. };
  1843. static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
  1844. void __user *arg)
  1845. {
  1846. struct seq_ioctl_table *p;
  1847. switch (cmd) {
  1848. case SNDRV_SEQ_IOCTL_PVERSION:
  1849. /* return sequencer version number */
  1850. return put_user(SNDRV_SEQ_VERSION, (int __user *)arg) ? -EFAULT : 0;
  1851. case SNDRV_SEQ_IOCTL_CLIENT_ID:
  1852. /* return the id of this client */
  1853. return put_user(client->number, (int __user *)arg) ? -EFAULT : 0;
  1854. }
  1855. if (! arg)
  1856. return -EFAULT;
  1857. for (p = ioctl_tables; p->cmd; p++) {
  1858. if (p->cmd == cmd)
  1859. return p->func(client, arg);
  1860. }
  1861. snd_printd("seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
  1862. cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
  1863. return -ENOTTY;
  1864. }
  1865. static long snd_seq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  1866. {
  1867. struct snd_seq_client *client = file->private_data;
  1868. if (snd_BUG_ON(!client))
  1869. return -ENXIO;
  1870. return snd_seq_do_ioctl(client, cmd, (void __user *) arg);
  1871. }
#ifdef CONFIG_COMPAT
/* 32-bit compat ioctl handler is compiled into this unit */
#include "seq_compat.c"
#else
/* no compat layer: leave the fops slot empty */
#define snd_seq_ioctl_compat	NULL
#endif
  1877. /* -------------------------------------------------------- */
  1878. /* exported to kernel modules */
  1879. int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
  1880. const char *name_fmt, ...)
  1881. {
  1882. struct snd_seq_client *client;
  1883. va_list args;
  1884. if (snd_BUG_ON(in_interrupt()))
  1885. return -EBUSY;
  1886. if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD)
  1887. return -EINVAL;
  1888. if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
  1889. return -EINVAL;
  1890. if (mutex_lock_interruptible(&register_mutex))
  1891. return -ERESTARTSYS;
  1892. if (card) {
  1893. client_index += SNDRV_SEQ_GLOBAL_CLIENTS
  1894. + card->number * SNDRV_SEQ_CLIENTS_PER_CARD;
  1895. if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN)
  1896. client_index = -1;
  1897. }
  1898. /* empty write queue as default */
  1899. client = seq_create_client1(client_index, 0);
  1900. if (client == NULL) {
  1901. mutex_unlock(&register_mutex);
  1902. return -EBUSY; /* failure code */
  1903. }
  1904. usage_alloc(&client_usage, 1);
  1905. client->accept_input = 1;
  1906. client->accept_output = 1;
  1907. va_start(args, name_fmt);
  1908. vsnprintf(client->name, sizeof(client->name), name_fmt, args);
  1909. va_end(args);
  1910. client->type = KERNEL_CLIENT;
  1911. mutex_unlock(&register_mutex);
  1912. /* make others aware this new client */
  1913. snd_seq_system_client_ev_client_start(client->number);
  1914. /* return client number to caller */
  1915. return client->number;
  1916. }
  1917. EXPORT_SYMBOL(snd_seq_create_kernel_client);
  1918. /* exported to kernel modules */
  1919. int snd_seq_delete_kernel_client(int client)
  1920. {
  1921. struct snd_seq_client *ptr;
  1922. if (snd_BUG_ON(in_interrupt()))
  1923. return -EBUSY;
  1924. ptr = clientptr(client);
  1925. if (ptr == NULL)
  1926. return -EINVAL;
  1927. seq_free_client(ptr);
  1928. kfree(ptr);
  1929. return 0;
  1930. }
  1931. EXPORT_SYMBOL(snd_seq_delete_kernel_client);
  1932. /* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
  1933. * and snd_seq_kernel_client_enqueue_blocking
  1934. */
  1935. static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
  1936. struct file *file, int blocking,
  1937. int atomic, int hop)
  1938. {
  1939. struct snd_seq_client *cptr;
  1940. int result;
  1941. if (snd_BUG_ON(!ev))
  1942. return -EINVAL;
  1943. if (ev->type == SNDRV_SEQ_EVENT_NONE)
  1944. return 0; /* ignore this */
  1945. if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
  1946. return -EINVAL; /* quoted events can't be enqueued */
  1947. /* fill in client number */
  1948. ev->source.client = client;
  1949. if (check_event_type_and_length(ev))
  1950. return -EINVAL;
  1951. cptr = snd_seq_client_use_ptr(client);
  1952. if (cptr == NULL)
  1953. return -EINVAL;
  1954. if (! cptr->accept_output)
  1955. result = -EPERM;
  1956. else /* send it */
  1957. result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
  1958. snd_seq_client_unlock(cptr);
  1959. return result;
  1960. }
  1961. /*
  1962. * exported, called by kernel clients to enqueue events (w/o blocking)
  1963. *
  1964. * RETURN VALUE: zero if succeed, negative if error
  1965. */
  1966. int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event * ev,
  1967. int atomic, int hop)
  1968. {
  1969. return kernel_client_enqueue(client, ev, NULL, 0, atomic, hop);
  1970. }
  1971. EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);
  1972. /*
  1973. * exported, called by kernel clients to enqueue events (with blocking)
  1974. *
  1975. * RETURN VALUE: zero if succeed, negative if error
  1976. */
  1977. int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev,
  1978. struct file *file,
  1979. int atomic, int hop)
  1980. {
  1981. return kernel_client_enqueue(client, ev, file, 1, atomic, hop);
  1982. }
  1983. EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking);
  1984. /*
  1985. * exported, called by kernel clients to dispatch events directly to other
  1986. * clients, bypassing the queues. Event time-stamp will be updated.
  1987. *
  1988. * RETURN VALUE: negative = delivery failed,
  1989. * zero, or positive: the number of delivered events
  1990. */
  1991. int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev,
  1992. int atomic, int hop)
  1993. {
  1994. struct snd_seq_client *cptr;
  1995. int result;
  1996. if (snd_BUG_ON(!ev))
  1997. return -EINVAL;
  1998. /* fill in client number */
  1999. ev->queue = SNDRV_SEQ_QUEUE_DIRECT;
  2000. ev->source.client = client;
  2001. if (check_event_type_and_length(ev))
  2002. return -EINVAL;
  2003. cptr = snd_seq_client_use_ptr(client);
  2004. if (cptr == NULL)
  2005. return -EINVAL;
  2006. if (!cptr->accept_output)
  2007. result = -EPERM;
  2008. else
  2009. result = snd_seq_deliver_event(cptr, ev, atomic, hop);
  2010. snd_seq_client_unlock(cptr);
  2011. return result;
  2012. }
  2013. EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);
  2014. /*
  2015. * exported, called by kernel clients to perform same functions as with
  2016. * userland ioctl()
  2017. */
  2018. int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
  2019. {
  2020. struct snd_seq_client *client;
  2021. mm_segment_t fs;
  2022. int result;
  2023. client = clientptr(clientid);
  2024. if (client == NULL)
  2025. return -ENXIO;
  2026. fs = snd_enter_user();
  2027. result = snd_seq_do_ioctl(client, cmd, (void __user *)arg);
  2028. snd_leave_user(fs);
  2029. return result;
  2030. }
  2031. EXPORT_SYMBOL(snd_seq_kernel_client_ctl);
  2032. /* exported (for OSS emulator) */
  2033. int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait)
  2034. {
  2035. struct snd_seq_client *client;
  2036. client = clientptr(clientid);
  2037. if (client == NULL)
  2038. return -ENXIO;
  2039. if (! snd_seq_write_pool_allocated(client))
  2040. return 1;
  2041. if (snd_seq_pool_poll_wait(client->pool, file, wait))
  2042. return 1;
  2043. return 0;
  2044. }
  2045. EXPORT_SYMBOL(snd_seq_kernel_client_write_poll);
  2046. /*---------------------------------------------------------------------------*/
  2047. #ifdef CONFIG_PROC_FS
  2048. /*
  2049. * /proc interface
  2050. */
  2051. static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer,
  2052. struct snd_seq_port_subs_info *group,
  2053. int is_src, char *msg)
  2054. {
  2055. struct list_head *p;
  2056. struct snd_seq_subscribers *s;
  2057. int count = 0;
  2058. down_read(&group->list_mutex);
  2059. if (list_empty(&group->list_head)) {
  2060. up_read(&group->list_mutex);
  2061. return;
  2062. }
  2063. snd_iprintf(buffer, msg);
  2064. list_for_each(p, &group->list_head) {
  2065. if (is_src)
  2066. s = list_entry(p, struct snd_seq_subscribers, src_list);
  2067. else
  2068. s = list_entry(p, struct snd_seq_subscribers, dest_list);
  2069. if (count++)
  2070. snd_iprintf(buffer, ", ");
  2071. snd_iprintf(buffer, "%d:%d",
  2072. is_src ? s->info.dest.client : s->info.sender.client,
  2073. is_src ? s->info.dest.port : s->info.sender.port);
  2074. if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
  2075. snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue);
  2076. if (group->exclusive)
  2077. snd_iprintf(buffer, "[ex]");
  2078. }
  2079. up_read(&group->list_mutex);
  2080. snd_iprintf(buffer, "\n");
  2081. }
/* one-character summaries of a port's capability bits for /proc output:
 * 'R'/'W' = readable/writable incl. subscription, 'r'/'w' = without
 * subscription, '-' = not permitted; 'e' = exportable; 'X' = duplex
 */
#define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-')
#define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-')
#define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e')
#define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-')
  2086. static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer,
  2087. struct snd_seq_client *client)
  2088. {
  2089. struct snd_seq_client_port *p;
  2090. mutex_lock(&client->ports_mutex);
  2091. list_for_each_entry(p, &client->ports_list_head, list) {
  2092. snd_iprintf(buffer, " Port %3d : \"%s\" (%c%c%c%c)\n",
  2093. p->addr.port, p->name,
  2094. FLAG_PERM_RD(p->capability),
  2095. FLAG_PERM_WR(p->capability),
  2096. FLAG_PERM_EX(p->capability),
  2097. FLAG_PERM_DUPLEX(p->capability));
  2098. snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, " Connecting To: ");
  2099. snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, " Connected From: ");
  2100. }
  2101. mutex_unlock(&client->ports_mutex);
  2102. }
  2103. void snd_seq_info_pool(struct snd_info_buffer *buffer,
  2104. struct snd_seq_pool *pool, char *space);
  2105. /* exported to seq_info.c */
  2106. void snd_seq_info_clients_read(struct snd_info_entry *entry,
  2107. struct snd_info_buffer *buffer)
  2108. {
  2109. int c;
  2110. struct snd_seq_client *client;
  2111. snd_iprintf(buffer, "Client info\n");
  2112. snd_iprintf(buffer, " cur clients : %d\n", client_usage.cur);
  2113. snd_iprintf(buffer, " peak clients : %d\n", client_usage.peak);
  2114. snd_iprintf(buffer, " max clients : %d\n", SNDRV_SEQ_MAX_CLIENTS);
  2115. snd_iprintf(buffer, "\n");
  2116. /* list the client table */
  2117. for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
  2118. client = snd_seq_client_use_ptr(c);
  2119. if (client == NULL)
  2120. continue;
  2121. if (client->type == NO_CLIENT) {
  2122. snd_seq_client_unlock(client);
  2123. continue;
  2124. }
  2125. snd_iprintf(buffer, "Client %3d : \"%s\" [%s]\n",
  2126. c, client->name,
  2127. client->type == USER_CLIENT ? "User" : "Kernel");
  2128. snd_seq_info_dump_ports(buffer, client);
  2129. if (snd_seq_write_pool_allocated(client)) {
  2130. snd_iprintf(buffer, " Output pool :\n");
  2131. snd_seq_info_pool(buffer, client->pool, " ");
  2132. }
  2133. if (client->type == USER_CLIENT && client->data.user.fifo &&
  2134. client->data.user.fifo->pool) {
  2135. snd_iprintf(buffer, " Input pool :\n");
  2136. snd_seq_info_pool(buffer, client->data.user.fifo->pool, " ");
  2137. }
  2138. snd_seq_client_unlock(client);
  2139. }
  2140. }
  2141. #endif /* CONFIG_PROC_FS */
  2142. /*---------------------------------------------------------------------------*/
  2143. /*
  2144. * REGISTRATION PART
  2145. */
/* file operations of the sequencer device (/dev/snd/seq) */
static const struct file_operations snd_seq_f_ops =
{
	.owner =	THIS_MODULE,
	.read =		snd_seq_read,
	.write =	snd_seq_write,
	.open =		snd_seq_open,
	.release =	snd_seq_release,
	.poll =		snd_seq_poll,
	.unlocked_ioctl =	snd_seq_ioctl,
	.compat_ioctl =	snd_seq_ioctl_compat,	/* NULL without CONFIG_COMPAT */
};
  2157. /*
  2158. * register sequencer device
  2159. */
  2160. int __init snd_sequencer_device_init(void)
  2161. {
  2162. int err;
  2163. if (mutex_lock_interruptible(&register_mutex))
  2164. return -ERESTARTSYS;
  2165. if ((err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
  2166. &snd_seq_f_ops, NULL, "seq")) < 0) {
  2167. mutex_unlock(&register_mutex);
  2168. return err;
  2169. }
  2170. mutex_unlock(&register_mutex);
  2171. return 0;
  2172. }
  2173. /*
  2174. * unregister sequencer device
  2175. */
  2176. void __exit snd_sequencer_device_done(void)
  2177. {
  2178. snd_unregister_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0);
  2179. }