/arch/arm/mach-msm/qdsp6/msm_q6venc.c

https://bitbucket.org/sammyz/iscream_thunderc-2.6.35-rebase · C · 1200 lines · 1083 code · 100 blank · 17 comment · 136 complexity · faab94334e82e7c1205f4bf92740eb39 MD5 · raw file

  1. /* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. * You should have received a copy of the GNU General Public License
  13. * along with this program; if not, write to the Free Software
  14. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  15. * 02110-1301, USA.
  16. *
  17. */
  18. #include <linux/slab.h>
  19. #include <linux/cdev.h>
  20. #include <linux/file.h>
  21. #include <linux/device.h>
  22. #include <linux/fs.h>
  23. #include <linux/list.h>
  24. #include <linux/module.h>
  25. #include <linux/sched.h>
  26. #include <linux/spinlock.h>
  27. #include <linux/uaccess.h>
  28. #include <linux/wakelock.h>
  29. #include <linux/android_pmem.h>
  30. #include <linux/msm_q6venc.h>
  31. #include "dal.h"
  32. #define DALDEVICEID_VENC_DEVICE 0x0200002D
  33. #define DALDEVICEID_VENC_PORTNAME "DAL_AQ_VID"
  34. #define VENC_NAME "q6venc"
  35. #define VENC_MSG_MAX 128
  36. #define VENC_INTERFACE_VERSION 0x00020000
  37. #define MAJOR_MASK 0xFFFF0000
  38. #define MINOR_MASK 0x0000FFFF
  39. #define VENC_GET_MAJOR_VERSION(version) ((version & MAJOR_MASK)>>16)
  40. #define VENC_GET_MINOR_VERSION(version) (version & MINOR_MASK)
/* Buffer categories tracked on the per-device pmem registration list. */
enum {
	VENC_BUFFER_TYPE_INPUT,		/* YUV frames queued by userspace */
	VENC_BUFFER_TYPE_OUTPUT,	/* encoded bitstream buffers */
	VENC_BUFFER_TYPE_QDSP6,		/* scratch buffers owned by the Q6 DSP */
	VENC_BUFFER_TYPE_HDR		/* sequence-header buffer */
};
/*
 * Remote method ids of the Q6 video-encoder DAL service.  The first entry
 * is anchored at DAL_OP_FIRST_DEVICE_API; the order must match the
 * firmware-side interface and must not be changed.
 */
enum {
	VENC_DALRPC_GET_SYNTAX_HEADER = DAL_OP_FIRST_DEVICE_API,
	VENC_DALRPC_UPDATE_INTRA_REFRESH,
	VENC_DALRPC_UPDATE_FRAME_RATE,
	VENC_DALRPC_UPDATE_BITRATE,
	VENC_DALRPC_UPDATE_QP_RANGE,
	VENC_DALRPC_UPDATE_INTRA_PERIOD,
	VENC_DALRPC_REQUEST_IFRAME,
	VENC_DALRPC_START,
	VENC_DALRPC_STOP,
	VENC_DALRPC_SUSPEND,
	VENC_DALRPC_RESUME,
	VENC_DALRPC_FLUSH,
	VENC_DALRPC_QUEUE_INPUT,
	VENC_DALRPC_QUEUE_OUTPUT
};
/* Payload of a VENC_EVENT_RELEASE_INPUT message from the DSP. */
struct venc_input_payload {
	u32 data;	/* client cookie of the released input buffer */
};
/* Payload of a VENC_EVENT_DELIVER_OUTPUT message from the DSP. */
struct venc_output_payload {
	u32 size;			/* bytes of bitstream produced */
	long long time_stamp;
	u32 flags;			/* VENC_FLAG_* bits */
	u32 data;			/* client cookie of the output buffer */
	u32 client_data_from_input;
};
union venc_payload {
	struct venc_input_payload input_payload;
	struct venc_output_payload output_payload;
};
/* Wire format of an event message delivered via the DAL callback. */
struct venc_msg_type {
	u32 event;	/* enum venc_event_type_enum */
	u32 status;	/* enum venc_status_code */
	union venc_payload payload;
};
/* Argument block for VENC_DALRPC_QUEUE_INPUT. */
struct venc_input_buf {
	struct venc_buf_type yuv_buf;
	u32 data_size;
	long long time_stamp;
	u32 flags;
	u32 dvs_offsetx;
	u32 dvs_offsety;
	u32 client_data;
	u32 op_client_data;
};
/* Argument block for VENC_DALRPC_QUEUE_OUTPUT. */
struct venc_output_buf {
	struct venc_buf_type bit_stream_buf;
	u32 client_data;
};
/* Node for the pending/free message queues (venc_msg_list_*). */
struct venc_msg_list {
	struct list_head list;
	struct venc_msg msg_data;
};
/* A registered pmem region; paddr already includes the caller's offset. */
struct venc_buf {
	int fd;
	u32 src;
	u32 offset;
	u32 size;
	u32 btype;		/* VENC_BUFFER_TYPE_* */
	unsigned long paddr;	/* physical address (base + offset) */
	struct file *file;	/* pmem file reference (get_pmem_file) */
};
/* Node for the per-device list of registered pmem buffers. */
struct venc_pmem_list {
	struct list_head list;
	struct venc_buf buf;
};
/* Per-open-file encoder instance state. */
struct venc_dev {
	bool is_active;			/* cleared when the fd is released */
	bool pmem_freed;		/* pmem refs already dropped once */
	enum venc_state_type state;
	struct list_head venc_msg_list_head;	/* pending messages */
	struct list_head venc_msg_list_free;	/* free message nodes */
	spinlock_t venc_msg_list_lock;		/* guards both msg lists */
	struct list_head venc_pmem_list_head;
	spinlock_t venc_pmem_list_lock;
	struct dal_client *q6_handle;		/* DAL link to the DSP */
	wait_queue_head_t venc_msg_evt;		/* readers wait here */
	struct device *class_devp;
};
#define DEBUG_VENC 0
#if DEBUG_VENC
/* Verbose tracing; compiled out entirely unless DEBUG_VENC is set. */
#define TRACE(fmt, x...) \
do { pr_debug("%s:%d " fmt, __func__, __LINE__, ##x); } while (0)
#else
#define TRACE(fmt, x...) do { } while (0)
#endif
/* Character-device bookkeeping for the single /dev/q6venc node. */
static struct cdev cdev;
static dev_t venc_dev_num;
static struct class *venc_class;
static struct venc_dev *venc_device_p;
static int venc_ref;	/* count of open handles */
/* Hold CPU/idle wakelocks while any encoder instance is open. */
static DEFINE_MUTEX(idlecount_lock);
static int idlecount;	/* protected by idlecount_lock */
static struct wake_lock wakelock;
static struct wake_lock idlelock;
  142. static void prevent_sleep(void)
  143. {
  144. mutex_lock(&idlecount_lock);
  145. if (++idlecount == 1) {
  146. wake_lock(&idlelock);
  147. wake_lock(&wakelock);
  148. }
  149. mutex_unlock(&idlecount_lock);
  150. }
  151. static void allow_sleep(void)
  152. {
  153. mutex_lock(&idlecount_lock);
  154. if (--idlecount == 0) {
  155. wake_unlock(&idlelock);
  156. wake_unlock(&wakelock);
  157. }
  158. mutex_unlock(&idlecount_lock);
  159. }
  160. static inline int venc_check_version(u32 client, u32 server)
  161. {
  162. int ret = -EINVAL;
  163. if ((VENC_GET_MAJOR_VERSION(client) == VENC_GET_MAJOR_VERSION(server))
  164. && (VENC_GET_MINOR_VERSION(client) <=
  165. VENC_GET_MINOR_VERSION(server)))
  166. ret = 0;
  167. return ret;
  168. }
  169. static int venc_get_msg(struct venc_dev *dvenc, void *msg)
  170. {
  171. struct venc_msg_list *l;
  172. unsigned long flags;
  173. int ret = 0;
  174. struct venc_msg qdsp_msg;
  175. if (!dvenc->is_active)
  176. return -EPERM;
  177. spin_lock_irqsave(&dvenc->venc_msg_list_lock, flags);
  178. list_for_each_entry_reverse(l, &dvenc->venc_msg_list_head, list) {
  179. memcpy(&qdsp_msg, &l->msg_data, sizeof(struct venc_msg));
  180. list_del(&l->list);
  181. list_add(&l->list, &dvenc->venc_msg_list_free);
  182. ret = 1;
  183. break;
  184. }
  185. spin_unlock_irqrestore(&dvenc->venc_msg_list_lock, flags);
  186. if (copy_to_user(msg, &qdsp_msg, sizeof(struct venc_msg)))
  187. pr_err("%s failed to copy_to_user\n", __func__);
  188. return ret;
  189. }
  190. static void venc_put_msg(struct venc_dev *dvenc, struct venc_msg *msg)
  191. {
  192. struct venc_msg_list *l;
  193. unsigned long flags;
  194. int found = 0;
  195. spin_lock_irqsave(&dvenc->venc_msg_list_lock, flags);
  196. list_for_each_entry(l, &dvenc->venc_msg_list_free, list) {
  197. memcpy(&l->msg_data, msg, sizeof(struct venc_msg));
  198. list_del(&l->list);
  199. list_add(&l->list, &dvenc->venc_msg_list_head);
  200. found = 1;
  201. break;
  202. }
  203. spin_unlock_irqrestore(&dvenc->venc_msg_list_lock, flags);
  204. if (found)
  205. wake_up(&dvenc->venc_msg_evt);
  206. else
  207. pr_err("%s: failed to find a free node\n", __func__);
  208. }
/*
 * Pin the pmem region described by @mptr (fd/offset/size), record it on
 * the device's pmem list tagged with @btype, and return the new node.
 * On success the node holds a reference to the pmem file (get_pmem_file);
 * the reference is dropped in venc_q6_stop()/release.  Returns NULL on
 * allocation failure, bad fd, or out-of-range offset.
 */
static struct venc_pmem_list *venc_add_pmem_to_list(struct venc_dev *dvenc,
						    struct venc_pmem *mptr,
						    u32 btype)
{
	int ret = 0;
	unsigned long flags;
	unsigned long len;
	unsigned long vaddr;
	struct venc_pmem_list *plist = NULL;
	plist = kzalloc(sizeof(struct venc_pmem_list), GFP_KERNEL);
	if (!plist) {
		pr_err("%s: kzalloc failed\n", __func__);
		return NULL;
	}
	ret = get_pmem_file(mptr->fd, &(plist->buf.paddr),
			    &vaddr, &len, &(plist->buf.file));
	if (ret) {
		pr_err("%s: get_pmem_file failed for fd=%d offset=%d\n",
		       __func__, mptr->fd, mptr->offset);
		goto err_venc_add_pmem;
	} else if (mptr->offset >= len) {
		/* Check is >= although the message prints ">". */
		pr_err("%s: invalid offset (%d > %ld) for fd=%d\n",
		       __func__, mptr->offset, len, mptr->fd);
		ret = -EINVAL;
		goto err_venc_get_pmem;
	}
	/* Physical address of the usable region = pmem base + user offset.
	 * NOTE(review): offset+size is not validated against len here —
	 * only the offset itself; confirm callers bound mptr->size. */
	plist->buf.fd = mptr->fd;
	plist->buf.paddr += mptr->offset;
	plist->buf.size = mptr->size;
	plist->buf.btype = btype;
	plist->buf.offset = mptr->offset;
	plist->buf.src = mptr->src;
	spin_lock_irqsave(&dvenc->venc_pmem_list_lock, flags);
	list_add(&plist->list, &dvenc->venc_pmem_list_head);
	spin_unlock_irqrestore(&dvenc->venc_pmem_list_lock, flags);
	return plist;
err_venc_get_pmem:
	put_pmem_file(plist->buf.file);
err_venc_add_pmem:
	kfree(plist);
	return NULL;
}
/*
 * Look up a previously registered pmem buffer by (fd, offset, btype).
 * The fd is resolved to a struct file and matched against the file
 * pointer stored at registration time.  Returns the matching node or
 * NULL.  The temporary fget/fput pair only serves the comparison; it
 * does not transfer any reference to the caller.
 */
static struct venc_pmem_list *venc_get_pmem_from_list(
		struct venc_dev *dvenc, u32 pmem_fd,
		u32 offset, u32 btype)
{
	struct venc_pmem_list *plist;
	unsigned long flags;
	struct file *file;
	int found = 0;
	file = fget(pmem_fd);
	if (!file) {
		pr_err("%s: invalid encoder buffer fd(%d)\n", __func__,
		       pmem_fd);
		return NULL;
	}
	spin_lock_irqsave(&dvenc->venc_pmem_list_lock, flags);
	list_for_each_entry(plist, &dvenc->venc_pmem_list_head, list) {
		if (plist->buf.btype == btype && plist->buf.file == file &&
		    plist->buf.offset == offset) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&dvenc->venc_pmem_list_lock, flags);
	fput(file);
	if (found)
		return plist;
	else
		return NULL;
}
  280. static int venc_set_buffer(struct venc_dev *dvenc, void *argp,
  281. u32 btype)
  282. {
  283. struct venc_pmem pmem;
  284. struct venc_pmem_list *plist;
  285. int ret = 0;
  286. ret = copy_from_user(&pmem, argp, sizeof(pmem));
  287. if (ret) {
  288. pr_err("%s: copy_from_user failed\n", __func__);
  289. return ret;
  290. }
  291. plist = venc_add_pmem_to_list(dvenc, &pmem, btype);
  292. if (plist == NULL) {
  293. pr_err("%s: buffer add_to_pmem_list failed\n",
  294. __func__);
  295. return -EPERM;
  296. }
  297. return ret;
  298. }
/*
 * Register the five DSP-side working buffers (two reconstruction buffers,
 * write-back, command and VLC buffers) from @pbufs on the pmem list and
 * fill the corresponding physical-address descriptors in @pcfg, which is
 * later handed to the DSP in VENC_DALRPC_START.  Returns 0 on success,
 * -EPERM if any registration fails (earlier registrations are not rolled
 * back here; cleanup happens via venc_q6_stop()/release).
 */
static int venc_assign_q6_buffers(struct venc_dev *dvenc,
				  struct venc_buffers *pbufs,
				  struct venc_nonio_buf_config *pcfg)
{
	int ret = 0;
	struct venc_pmem_list *plist;
	/* reconstruction buffer 0 */
	plist = venc_add_pmem_to_list(dvenc, &(pbufs->recon_buf[0]),
				      VENC_BUFFER_TYPE_QDSP6);
	if (plist == NULL) {
		pr_err("%s: recon_buf0 failed to add_to_pmem_list\n",
		       __func__);
		return -EPERM;
	}
	pcfg->recon_buf1.region = pbufs->recon_buf[0].src;
	pcfg->recon_buf1.phys = plist->buf.paddr;
	pcfg->recon_buf1.size = plist->buf.size;
	pcfg->recon_buf1.offset = 0;
	/* reconstruction buffer 1 */
	plist = venc_add_pmem_to_list(dvenc, &(pbufs->recon_buf[1]),
				      VENC_BUFFER_TYPE_QDSP6);
	if (plist == NULL) {
		pr_err("%s: recons_buf1 failed to add_to_pmem_list\n",
		       __func__);
		return -EPERM;
	}
	pcfg->recon_buf2.region = pbufs->recon_buf[1].src;
	pcfg->recon_buf2.phys = plist->buf.paddr;
	pcfg->recon_buf2.size = plist->buf.size;
	pcfg->recon_buf2.offset = 0;
	/* write-back buffer */
	plist = venc_add_pmem_to_list(dvenc, &(pbufs->wb_buf),
				      VENC_BUFFER_TYPE_QDSP6);
	if (plist == NULL) {
		pr_err("%s: wb_buf failed to add_to_pmem_list\n",
		       __func__);
		return -EPERM;
	}
	pcfg->wb_buf.region = pbufs->wb_buf.src;
	pcfg->wb_buf.phys = plist->buf.paddr;
	pcfg->wb_buf.size = plist->buf.size;
	pcfg->wb_buf.offset = 0;
	/* command buffer */
	plist = venc_add_pmem_to_list(dvenc, &(pbufs->cmd_buf),
				      VENC_BUFFER_TYPE_QDSP6);
	if (plist == NULL) {
		pr_err("%s: cmd_buf failed to add_to_pmem_list\n",
		       __func__);
		return -EPERM;
	}
	pcfg->cmd_buf.region = pbufs->cmd_buf.src;
	pcfg->cmd_buf.phys = plist->buf.paddr;
	pcfg->cmd_buf.size = plist->buf.size;
	pcfg->cmd_buf.offset = 0;
	/* VLC buffer */
	plist = venc_add_pmem_to_list(dvenc, &(pbufs->vlc_buf),
				      VENC_BUFFER_TYPE_QDSP6);
	if (plist == NULL) {
		pr_err("%s: vlc_buf failed to add_to_pmem_list"
		       " failed\n", __func__);
		return -EPERM;
	}
	pcfg->vlc_buf.region = pbufs->vlc_buf.src;
	pcfg->vlc_buf.phys = plist->buf.paddr;
	pcfg->vlc_buf.size = plist->buf.size;
	pcfg->vlc_buf.offset = 0;
	return ret;
}
  362. static int venc_start(struct venc_dev *dvenc, void *argp)
  363. {
  364. int ret = 0;
  365. struct venc_q6_config q6_config;
  366. struct venc_init_config vconfig;
  367. dvenc->state = VENC_STATE_START;
  368. ret = copy_from_user(&vconfig, argp, sizeof(struct venc_init_config));
  369. if (ret) {
  370. pr_err("%s: copy_from_user failed\n", __func__);
  371. return ret;
  372. }
  373. memcpy(&q6_config, &(vconfig.q6_config), sizeof(q6_config));
  374. ret = venc_assign_q6_buffers(dvenc, &(vconfig.q6_bufs),
  375. &(q6_config.buf_params));
  376. if (ret != 0) {
  377. pr_err("%s: assign_q6_buffers failed\n", __func__);
  378. return -EPERM;
  379. }
  380. q6_config.callback_event = dvenc->q6_handle;
  381. TRACE("%s: parameters: handle:%p, config:%p, callback:%p \n", __func__,
  382. dvenc->q6_handle, &q6_config, q6_config.callback_event);
  383. TRACE("%s: parameters:recon1:0x%x, recon2:0x%x,"
  384. " wb_buf:0x%x, cmd:0x%x, vlc:0x%x\n", __func__,
  385. q6_config.buf_params.recon_buf1.phys,
  386. q6_config.buf_params.recon_buf2.phys,
  387. q6_config.buf_params.wb_buf.phys,
  388. q6_config.buf_params.cmd_buf.phys,
  389. q6_config.buf_params.vlc_buf.phys);
  390. TRACE("%s: size of param:%d \n", __func__, sizeof(q6_config));
  391. ret = dal_call_f5(dvenc->q6_handle, VENC_DALRPC_START, &q6_config,
  392. sizeof(q6_config));
  393. if (ret != 0) {
  394. pr_err("%s: remote function failed (%d)\n", __func__, ret);
  395. return ret;
  396. }
  397. return ret;
  398. }
  399. static int venc_encode_frame(struct venc_dev *dvenc, void *argp)
  400. {
  401. int ret = 0;
  402. struct venc_pmem buf;
  403. struct venc_input_buf q6_input;
  404. struct venc_pmem_list *plist;
  405. struct venc_buffer input;
  406. ret = copy_from_user(&input, argp, sizeof(struct venc_buffer));
  407. if (ret) {
  408. pr_err("%s: copy_from_user failed\n", __func__);
  409. return ret;
  410. }
  411. ret = copy_from_user(&buf,
  412. ((struct venc_buffer *)argp)->ptr_buffer,
  413. sizeof(struct venc_pmem));
  414. if (ret) {
  415. pr_err("%s: copy_from_user failed\n", __func__);
  416. return ret;
  417. }
  418. plist = venc_get_pmem_from_list(dvenc, buf.fd, buf.offset,
  419. VENC_BUFFER_TYPE_INPUT);
  420. if (NULL == plist) {
  421. plist = venc_add_pmem_to_list(dvenc, &buf,
  422. VENC_BUFFER_TYPE_INPUT);
  423. if (plist == NULL) {
  424. pr_err("%s: buffer add_to_pmem_list failed\n",
  425. __func__);
  426. return -EPERM;
  427. }
  428. }
  429. q6_input.flags = 0;
  430. if (input.flags & VENC_FLAG_EOS)
  431. q6_input.flags |= 0x00000001;
  432. q6_input.yuv_buf.region = plist->buf.src;
  433. q6_input.yuv_buf.phys = plist->buf.paddr;
  434. q6_input.yuv_buf.size = plist->buf.size;
  435. q6_input.yuv_buf.offset = 0;
  436. q6_input.data_size = plist->buf.size;
  437. q6_input.client_data = (u32)input.client_data;
  438. q6_input.time_stamp = input.time_stamp;
  439. q6_input.dvs_offsetx = 0;
  440. q6_input.dvs_offsety = 0;
  441. TRACE("Pushing down input phys=0x%x fd= %d, client_data: 0x%x,"
  442. " time_stamp:%lld \n", q6_input.yuv_buf.phys, plist->buf.fd,
  443. input.client_data, input.time_stamp);
  444. ret = dal_call_f5(dvenc->q6_handle, VENC_DALRPC_QUEUE_INPUT,
  445. &q6_input, sizeof(q6_input));
  446. if (ret != 0)
  447. pr_err("%s: Q6 queue_input failed (%d)\n", __func__,
  448. (int)ret);
  449. return ret;
  450. }
  451. static int venc_fill_output(struct venc_dev *dvenc, void *argp)
  452. {
  453. int ret = 0;
  454. struct venc_pmem buf;
  455. struct venc_output_buf q6_output;
  456. struct venc_pmem_list *plist;
  457. struct venc_buffer output;
  458. ret = copy_from_user(&output, argp, sizeof(struct venc_buffer));
  459. if (ret) {
  460. pr_err("%s: copy_from_user failed\n", __func__);
  461. return ret;
  462. }
  463. ret = copy_from_user(&buf,
  464. ((struct venc_buffer *)argp)->ptr_buffer,
  465. sizeof(struct venc_pmem));
  466. if (ret) {
  467. pr_err("%s: copy_from_user failed\n", __func__);
  468. return ret;
  469. }
  470. plist = venc_get_pmem_from_list(dvenc, buf.fd, buf.offset,
  471. VENC_BUFFER_TYPE_OUTPUT);
  472. if (NULL == plist) {
  473. plist = venc_add_pmem_to_list(dvenc, &buf,
  474. VENC_BUFFER_TYPE_OUTPUT);
  475. if (NULL == plist) {
  476. pr_err("%s: output buffer failed to add_to_pmem_list"
  477. "\n", __func__);
  478. return -EPERM;
  479. }
  480. }
  481. q6_output.bit_stream_buf.region = plist->buf.src;
  482. q6_output.bit_stream_buf.phys = (u32)plist->buf.paddr;
  483. q6_output.bit_stream_buf.size = plist->buf.size;
  484. q6_output.bit_stream_buf.offset = 0;
  485. q6_output.client_data = (u32)output.client_data;
  486. ret =
  487. dal_call_f5(dvenc->q6_handle, VENC_DALRPC_QUEUE_OUTPUT, &q6_output,
  488. sizeof(q6_output));
  489. if (ret != 0)
  490. pr_err("%s: remote function failed (%d)\n", __func__, ret);
  491. return ret;
  492. }
  493. static int venc_stop(struct venc_dev *dvenc)
  494. {
  495. int ret = 0;
  496. struct venc_msg msg;
  497. ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_STOP, 1);
  498. if (ret) {
  499. pr_err("%s: remote runction failed (%d)\n", __func__, ret);
  500. msg.msg_code = VENC_MSG_STOP;
  501. msg.msg_data_size = 0;
  502. msg.status_code = VENC_S_EFAIL;
  503. venc_put_msg(dvenc, &msg);
  504. }
  505. return ret;
  506. }
  507. static int venc_pause(struct venc_dev *dvenc)
  508. {
  509. int ret = 0;
  510. struct venc_msg msg;
  511. ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_SUSPEND, 1);
  512. if (ret) {
  513. pr_err("%s: remote function failed (%d)\n", __func__, ret);
  514. msg.msg_code = VENC_MSG_PAUSE;
  515. msg.status_code = VENC_S_EFAIL;
  516. msg.msg_data_size = 0;
  517. venc_put_msg(dvenc, &msg);
  518. }
  519. return ret;
  520. }
  521. static int venc_resume(struct venc_dev *dvenc)
  522. {
  523. int ret = 0;
  524. struct venc_msg msg;
  525. ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_RESUME, 1);
  526. if (ret) {
  527. pr_err("%s: remote function failed (%d)\n", __func__, ret);
  528. msg.msg_code = VENC_MSG_RESUME;
  529. msg.msg_data_size = 0;
  530. msg.status_code = VENC_S_EFAIL;
  531. venc_put_msg(dvenc, &msg);
  532. }
  533. return ret;
  534. }
/*
 * VENC_IOCTL_CMD_FLUSH: only VENC_FLUSH_ALL is supported and is forwarded
 * to the DSP.  On any failure (unsupported mode or RPC error) synthetic
 * FLUSH-failed messages are queued for the affected direction(s) and
 * -EIO is returned; success replies arrive asynchronously through
 * venc_q6_callback() instead.
 */
static int venc_flush(struct venc_dev *dvenc, void *argp)
{
	int ret = 0;
	struct venc_msg msg;
	union venc_msg_data smsg;
	int status = VENC_S_SUCCESS;
	struct venc_buffer_flush flush;
	if (copy_from_user(&flush, argp, sizeof(struct venc_buffer_flush)))
		return -EFAULT;
	if (flush.flush_mode == VENC_FLUSH_ALL) {
		ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_FLUSH, 1);
		if (ret)
			status = VENC_S_EFAIL;
	} else
		status = VENC_S_ENOTSUPP;
	if (status != VENC_S_SUCCESS) {
		/* Report the failure once per flushed direction. */
		if ((flush.flush_mode == VENC_FLUSH_INPUT) ||
		    (flush.flush_mode == VENC_FLUSH_ALL)) {
			smsg.flush_ret.flush_mode = VENC_FLUSH_INPUT;
			msg.msg_data = smsg;
			msg.status_code = status;
			msg.msg_code = VENC_MSG_FLUSH;
			msg.msg_data_size = sizeof(union venc_msg_data);
			venc_put_msg(dvenc, &msg);
		}
		if (flush.flush_mode == VENC_FLUSH_OUTPUT ||
		    (flush.flush_mode == VENC_FLUSH_ALL)) {
			smsg.flush_ret.flush_mode = VENC_FLUSH_OUTPUT;
			msg.msg_data = smsg;
			msg.status_code = status;
			msg.msg_code = VENC_MSG_FLUSH;
			msg.msg_data_size = sizeof(union venc_msg_data);
			venc_put_msg(dvenc, &msg);
		}
		return -EIO;
	}
	return ret;
}
/*
 * VENC_IOCTL_GET_SEQUENCE_HDR: not implemented on this target.  The DAL
 * op VENC_DALRPC_GET_SYNTAX_HEADER exists but is never issued here.
 */
static int venc_get_sequence_hdr(struct venc_dev *dvenc, void *argp)
{
	pr_err("%s not supported\n", __func__);
	return -EIO;
}
  578. static int venc_set_qp_range(struct venc_dev *dvenc, void *argp)
  579. {
  580. int ret = 0;
  581. struct venc_qp_range qp;
  582. ret = copy_from_user(&qp, argp, sizeof(struct venc_qp_range));
  583. if (ret) {
  584. pr_err("%s: copy_from_user failed\n", __func__);
  585. return ret;
  586. }
  587. if (dvenc->state == VENC_STATE_START ||
  588. dvenc->state == VENC_STATE_PAUSE) {
  589. ret =
  590. dal_call_f5(dvenc->q6_handle, VENC_DALRPC_UPDATE_QP_RANGE,
  591. &qp, sizeof(struct venc_qp_range));
  592. if (ret) {
  593. pr_err("%s: remote function failed (%d) \n", __func__,
  594. ret);
  595. return ret;
  596. }
  597. }
  598. return ret;
  599. }
  600. static int venc_set_intra_period(struct venc_dev *dvenc, void *argp)
  601. {
  602. int ret = 0;
  603. u32 pnum = 0;
  604. ret = copy_from_user(&pnum, argp, sizeof(int));
  605. if (ret) {
  606. pr_err("%s: copy_from_user failed\n", __func__);
  607. return ret;
  608. }
  609. if (dvenc->state == VENC_STATE_START ||
  610. dvenc->state == VENC_STATE_PAUSE) {
  611. ret = dal_call_f0(dvenc->q6_handle,
  612. VENC_DALRPC_UPDATE_INTRA_PERIOD, pnum);
  613. if (ret)
  614. pr_err("%s: remote function failed (%d)\n", __func__,
  615. ret);
  616. }
  617. return ret;
  618. }
  619. static int venc_set_intra_refresh(struct venc_dev *dvenc, void *argp)
  620. {
  621. int ret = 0;
  622. u32 mb_num = 0;
  623. ret = copy_from_user(&mb_num, argp, sizeof(int));
  624. if (ret) {
  625. pr_err("%s: copy_from_user failed\n", __func__);
  626. return ret;
  627. }
  628. if (dvenc->state == VENC_STATE_START ||
  629. dvenc->state == VENC_STATE_PAUSE) {
  630. ret = dal_call_f0(dvenc->q6_handle,
  631. VENC_DALRPC_UPDATE_INTRA_REFRESH, mb_num);
  632. if (ret)
  633. pr_err("%s: remote function failed (%d)\n", __func__,
  634. ret);
  635. }
  636. return ret;
  637. }
  638. static int venc_set_frame_rate(struct venc_dev *dvenc, void *argp)
  639. {
  640. int ret = 0;
  641. struct venc_frame_rate pdata;
  642. ret = copy_from_user(&pdata, argp, sizeof(struct venc_frame_rate));
  643. if (ret) {
  644. pr_err("%s: copy_from_user failed\n", __func__);
  645. return ret;
  646. }
  647. if (dvenc->state == VENC_STATE_START ||
  648. dvenc->state == VENC_STATE_PAUSE) {
  649. ret = dal_call_f5(dvenc->q6_handle,
  650. VENC_DALRPC_UPDATE_FRAME_RATE,
  651. (void *)&(pdata),
  652. sizeof(struct venc_frame_rate));
  653. if (ret)
  654. pr_err("%s: remote function failed (%d)\n", __func__,
  655. ret);
  656. }
  657. return ret;
  658. }
  659. static int venc_set_target_bitrate(struct venc_dev *dvenc, void *argp)
  660. {
  661. int ret = 0;
  662. u32 pdata = 0;
  663. ret = copy_from_user(&pdata, argp, sizeof(int));
  664. if (ret) {
  665. pr_err("%s: copy_from_user failed\n", __func__);
  666. return ret;
  667. }
  668. if (dvenc->state == VENC_STATE_START ||
  669. dvenc->state == VENC_STATE_PAUSE) {
  670. ret = dal_call_f0(dvenc->q6_handle,
  671. VENC_DALRPC_UPDATE_BITRATE, pdata);
  672. if (ret)
  673. pr_err("%s: remote function failed (%d)\n", __func__,
  674. ret);
  675. }
  676. return ret;
  677. }
  678. static int venc_request_iframe(struct venc_dev *dvenc)
  679. {
  680. int ret = 0;
  681. if (dvenc->state != VENC_STATE_START)
  682. return -EINVAL;
  683. ret = dal_call_f0(dvenc->q6_handle, VENC_DALRPC_REQUEST_IFRAME, 1);
  684. if (ret)
  685. pr_err("%s: remote function failed (%d)\n", __func__, ret);
  686. return ret;
  687. }
  688. static int venc_stop_read_msg(struct venc_dev *dvenc)
  689. {
  690. struct venc_msg msg;
  691. int ret = 0;
  692. msg.status_code = 0;
  693. msg.msg_code = VENC_MSG_STOP_READING_MSG;
  694. msg.msg_data_size = 0;
  695. venc_put_msg(dvenc, &msg);
  696. return ret;
  697. }
/*
 * Handle the DSP-side STOP acknowledgment: wake any blocked reader, drop
 * the pmem file references taken at registration time (guarded by
 * pmem_freed so release() does not drop them twice), and mark the device
 * stopped.
 *
 * NOTE(review): put_pmem_file() is called under spin_lock_irqsave; if it
 * can sleep (it wraps fput) this is unsafe in atomic context — confirm
 * against the pmem implementation for this tree.
 */
static int venc_q6_stop(struct venc_dev *dvenc)
{
	int ret = 0;
	struct venc_pmem_list *plist;
	unsigned long flags;
	wake_up(&dvenc->venc_msg_evt);
	spin_lock_irqsave(&dvenc->venc_pmem_list_lock, flags);
	if (!dvenc->pmem_freed) {
		list_for_each_entry(plist, &dvenc->venc_pmem_list_head, list)
			put_pmem_file(plist->buf.file);
		dvenc->pmem_freed = 1;
	}
	spin_unlock_irqrestore(&dvenc->venc_pmem_list_lock, flags);
	dvenc->state = VENC_STATE_STOP;
	return ret;
}
  714. static int venc_translate_error(enum venc_status_code q6_status)
  715. {
  716. int ret = 0;
  717. switch (q6_status) {
  718. case VENC_STATUS_SUCCESS:
  719. ret = VENC_S_SUCCESS;
  720. break;
  721. case VENC_STATUS_ERROR:
  722. ret = VENC_S_EFAIL;
  723. break;
  724. case VENC_STATUS_INVALID_STATE:
  725. ret = VENC_S_EINVALSTATE;
  726. break;
  727. case VENC_STATUS_FLUSHING:
  728. ret = VENC_S_EFLUSHED;
  729. break;
  730. case VENC_STATUS_INVALID_PARAM:
  731. ret = VENC_S_EBADPARAM;
  732. break;
  733. case VENC_STATUS_CMD_QUEUE_FULL:
  734. ret = VENC_S_ECMDQFULL;
  735. break;
  736. case VENC_STATUS_CRITICAL:
  737. ret = VENC_S_EFATAL;
  738. break;
  739. case VENC_STATUS_INSUFFICIENT_RESOURCES:
  740. ret = VENC_S_ENOHWRES;
  741. break;
  742. case VENC_STATUS_TIMEOUT:
  743. ret = VENC_S_ETIMEOUT;
  744. break;
  745. }
  746. if (q6_status != VENC_STATUS_SUCCESS)
  747. pr_err("%s: Q6 failed (%d)", __func__, (int)q6_status);
  748. return ret;
  749. }
/*
 * DAL event callback, invoked by the dal layer when the DSP sends an
 * encoder event.  Decodes the raw message, updates the driver state
 * machine, and queues one (for FLUSH, two) venc_msg entries for the
 * userspace reader.
 *
 * Message framing: tmp[2] appears to carry the payload length and the
 * payload starts at tmp[3] — assumed from the length check below; confirm
 * against the dal.h message layout.
 */
static void venc_q6_callback(void *data, int len, void *cookie)
{
	int status = 0;
	struct venc_dev *dvenc = (struct venc_dev *)cookie;
	struct venc_msg_type *q6_msg = NULL;
	struct venc_msg msg, msg1;
	union venc_msg_data smsg1, smsg2;
	unsigned long msg_code = 0;
	struct venc_input_payload *pload1;
	struct venc_output_payload *pload2;
	uint32_t *tmp = (uint32_t *) data;
	if (dvenc == NULL) {
		pr_err("%s: empty driver parameter\n", __func__);
		return;
	}
	/* Accept only messages whose payload is exactly one venc_msg_type. */
	if (tmp[2] == sizeof(struct venc_msg_type)) {
		q6_msg = (struct venc_msg_type *)&tmp[3];
	} else {
		pr_err("%s: callback with empty message (%d, %d)\n",
		       __func__, tmp[2], sizeof(struct venc_msg_type));
		return;
	}
	msg.msg_data_size = 0;
	status = venc_translate_error(q6_msg->status);
	switch ((enum venc_event_type_enum)q6_msg->event) {
	case VENC_EVENT_START_STATUS:
		dvenc->state = VENC_STATE_START;
		msg_code = VENC_MSG_START;
		break;
	case VENC_EVENT_STOP_STATUS:
		/* Drops pmem references and moves state to STOP. */
		venc_q6_stop(dvenc);
		msg_code = VENC_MSG_STOP;
		break;
	case VENC_EVENT_SUSPEND_STATUS:
		dvenc->state = VENC_STATE_PAUSE;
		msg_code = VENC_MSG_PAUSE;
		break;
	case VENC_EVENT_RESUME_STATUS:
		dvenc->state = VENC_STATE_START;
		msg_code = VENC_MSG_RESUME;
		break;
	case VENC_EVENT_FLUSH_STATUS:
		/* One FLUSH completion per direction: queue the INPUT one
		 * immediately (msg1); the OUTPUT one goes out via the common
		 * venc_put_msg() at the end of this function. */
		smsg1.flush_ret.flush_mode = VENC_FLUSH_INPUT;
		msg1.status_code = status;
		msg1.msg_code = VENC_MSG_FLUSH;
		msg1.msg_data = smsg1;
		msg1.msg_data_size = sizeof(union venc_msg_data);
		venc_put_msg(dvenc, &msg1);
		smsg2.flush_ret.flush_mode = VENC_FLUSH_OUTPUT;
		msg_code = VENC_MSG_FLUSH;
		msg.msg_data = smsg2;
		msg.msg_data_size = sizeof(union venc_msg_data);
		break;
	case VENC_EVENT_RELEASE_INPUT:
		/* NOTE(review): pload1 is the address of an embedded member
		 * and can never be NULL; the guard below is dead code and
		 * the TRACE already dereferences it. */
		pload1 = &((q6_msg->payload).input_payload);
		TRACE("Release_input: data: 0x%x \n", pload1->data);
		if (pload1 != NULL) {
			msg.msg_data.buf.client_data = pload1->data;
			msg_code = VENC_MSG_INPUT_BUFFER_DONE;
			msg.msg_data_size = sizeof(union venc_msg_data);
		}
		break;
	case VENC_EVENT_DELIVER_OUTPUT:
		/* Translate the DSP's flag bits into the driver's flags. */
		pload2 = &((q6_msg->payload).output_payload);
		smsg1.buf.flags = 0;
		if (pload2->flags & VENC_FLAG_SYNC_FRAME)
			smsg1.buf.flags |= VENC_FLAG_SYNC_FRAME;
		if (pload2->flags & VENC_FLAG_CODEC_CONFIG)
			smsg1.buf.flags |= VENC_FLAG_CODEC_CONFIG;
		if (pload2->flags & VENC_FLAG_END_OF_FRAME)
			smsg1.buf.flags |= VENC_FLAG_END_OF_FRAME;
		if (pload2->flags & VENC_FLAG_EOS)
			smsg1.buf.flags |= VENC_FLAG_EOS;
		smsg1.buf.len = pload2->size;
		smsg1.buf.offset = 0;
		smsg1.buf.time_stamp = pload2->time_stamp;
		smsg1.buf.client_data = pload2->data;
		msg_code = VENC_MSG_OUTPUT_BUFFER_DONE;
		msg.msg_data = smsg1;
		msg.msg_data_size = sizeof(union venc_msg_data);
		break;
	default:
		pr_err("%s: invalid response from Q6 (%d)\n", __func__,
		       (int)q6_msg->event);
		return;
	}
	msg.status_code = status;
	msg.msg_code = msg_code;
	venc_put_msg(dvenc, &msg);
	return;
}
  841. static int venc_get_version(struct venc_dev *dvenc, void *argp)
  842. {
  843. struct venc_version ver_info;
  844. int ret = 0;
  845. ver_info.major = VENC_GET_MAJOR_VERSION(VENC_INTERFACE_VERSION);
  846. ver_info.minor = VENC_GET_MINOR_VERSION(VENC_INTERFACE_VERSION);
  847. ret = copy_to_user(((struct venc_version *)argp),
  848. &ver_info, sizeof(ver_info));
  849. if (ret)
  850. pr_err("%s failed to copy_to_user\n", __func__);
  851. return ret;
  852. }
/*
 * Main ioctl dispatcher for /dev/q6venc.  Rejects all requests once the
 * device is no longer active (is_active cleared on release).
 */
static long q6venc_ioctl(struct file *file, u32 cmd,
			 unsigned long arg)
{
	long ret = 0;
	void __user *argp = (void __user *)arg;
	struct venc_dev *dvenc = file->private_data;
	if (!dvenc || !dvenc->is_active)
		return -EPERM;
	switch (cmd) {
	case VENC_IOCTL_SET_INPUT_BUFFER:
		ret = venc_set_buffer(dvenc, argp, VENC_BUFFER_TYPE_INPUT);
		break;
	case VENC_IOCTL_SET_OUTPUT_BUFFER:
		ret = venc_set_buffer(dvenc, argp, VENC_BUFFER_TYPE_OUTPUT);
		break;
	case VENC_IOCTL_GET_SEQUENCE_HDR:
		ret = venc_get_sequence_hdr(dvenc, argp);
		break;
	case VENC_IOCTL_SET_QP_RANGE:
		ret = venc_set_qp_range(dvenc, argp);
		break;
	case VENC_IOCTL_SET_INTRA_PERIOD:
		ret = venc_set_intra_period(dvenc, argp);
		break;
	case VENC_IOCTL_SET_INTRA_REFRESH:
		ret = venc_set_intra_refresh(dvenc, argp);
		break;
	case VENC_IOCTL_SET_FRAME_RATE:
		ret = venc_set_frame_rate(dvenc, argp);
		break;
	case VENC_IOCTL_SET_TARGET_BITRATE:
		ret = venc_set_target_bitrate(dvenc, argp);
		break;
	case VENC_IOCTL_CMD_REQUEST_IFRAME:
		/* Silently ignored (ret stays 0) unless encoding. */
		if (dvenc->state == VENC_STATE_START)
			ret = venc_request_iframe(dvenc);
		break;
	case VENC_IOCTL_CMD_START:
		ret = venc_start(dvenc, argp);
		break;
	case VENC_IOCTL_CMD_STOP:
		ret = venc_stop(dvenc);
		break;
	case VENC_IOCTL_CMD_PAUSE:
		ret = venc_pause(dvenc);
		break;
	case VENC_IOCTL_CMD_RESUME:
		ret = venc_resume(dvenc);
		break;
	case VENC_IOCTL_CMD_ENCODE_FRAME:
		ret = venc_encode_frame(dvenc, argp);
		break;
	case VENC_IOCTL_CMD_FILL_OUTPUT_BUFFER:
		ret = venc_fill_output(dvenc, argp);
		break;
	case VENC_IOCTL_CMD_FLUSH:
		ret = venc_flush(dvenc, argp);
		break;
	case VENC_IOCTL_CMD_READ_NEXT_MSG:
		/* Blocks until venc_get_msg() delivers a message (1) or
		 * the device goes inactive (-EPERM, also truthy here). */
		wait_event_interruptible(dvenc->venc_msg_evt,
					 venc_get_msg(dvenc, argp));
		break;
	case VENC_IOCTL_CMD_STOP_READ_MSG:
		ret = venc_stop_read_msg(dvenc);
		break;
	case VENC_IOCTL_GET_VERSION:
		ret = venc_get_version(dvenc, argp);
		break;
	default:
		pr_err("%s: invalid ioctl code (%d)\n", __func__, cmd);
		ret = -ENOTTY;
		break;
	}
	return ret;
}
  928. static int q6venc_open(struct inode *inode, struct file *file)
  929. {
  930. int i;
  931. int ret = 0;
  932. struct venc_dev *dvenc;
  933. struct venc_msg_list *plist, *tmp;
  934. struct dal_info version_info;
  935. dvenc = kzalloc(sizeof(struct venc_dev), GFP_KERNEL);
  936. if (!dvenc) {
  937. pr_err("%s: unable to allocate memory for struct venc_dev\n",
  938. __func__);
  939. return -ENOMEM;
  940. }
  941. file->private_data = dvenc;
  942. INIT_LIST_HEAD(&dvenc->venc_msg_list_head);
  943. INIT_LIST_HEAD(&dvenc->venc_msg_list_free);
  944. INIT_LIST_HEAD(&dvenc->venc_pmem_list_head);
  945. init_waitqueue_head(&dvenc->venc_msg_evt);
  946. spin_lock_init(&dvenc->venc_msg_list_lock);
  947. spin_lock_init(&dvenc->venc_pmem_list_lock);
  948. venc_ref++;
  949. for (i = 0; i < VENC_MSG_MAX; i++) {
  950. plist = kzalloc(sizeof(struct venc_msg_list), GFP_KERNEL);
  951. if (!plist) {
  952. pr_err("%s: kzalloc failed\n", __func__);
  953. ret = -ENOMEM;
  954. goto err_venc_create_msg_list;
  955. }
  956. list_add(&plist->list, &dvenc->venc_msg_list_free);
  957. }
  958. dvenc->q6_handle =
  959. dal_attach(DALDEVICEID_VENC_DEVICE, DALDEVICEID_VENC_PORTNAME, 1,
  960. venc_q6_callback, (void *)dvenc);
  961. if (!(dvenc->q6_handle)) {
  962. pr_err("%s: daldevice_attach failed (%d)\n", __func__, ret);
  963. goto err_venc_dal_attach;
  964. }
  965. ret = dal_call_f9(dvenc->q6_handle, DAL_OP_INFO, &version_info,
  966. sizeof(struct dal_info));
  967. if (ret) {
  968. pr_err("%s: failed to get version\n", __func__);
  969. goto err_venc_dal_open;
  970. }
  971. if (venc_check_version(VENC_INTERFACE_VERSION, version_info.version)) {
  972. pr_err("%s: driver version mismatch\n", __func__);
  973. goto err_venc_dal_open;
  974. }
  975. ret = dal_call_f0(dvenc->q6_handle, DAL_OP_OPEN, 1);
  976. if (ret) {
  977. pr_err("%s: dal_call_open failed (%d)\n", __func__, ret);
  978. goto err_venc_dal_open;
  979. }
  980. dvenc->state = VENC_STATE_STOP;
  981. dvenc->is_active = 1;
  982. prevent_sleep();
  983. return ret;
  984. err_venc_dal_open:
  985. dal_detach(dvenc->q6_handle);
  986. err_venc_dal_attach:
  987. list_for_each_entry_safe(plist, tmp, &dvenc->venc_msg_list_free, list) {
  988. list_del(&plist->list);
  989. kfree(plist);
  990. }
  991. err_venc_create_msg_list:
  992. kfree(dvenc);
  993. venc_ref--;
  994. return ret;
  995. }
  996. static int q6venc_release(struct inode *inode, struct file *file)
  997. {
  998. int ret = 0;
  999. struct venc_msg_list *l, *n;
  1000. struct venc_pmem_list *plist, *m;
  1001. struct venc_dev *dvenc;
  1002. unsigned long flags;
  1003. venc_ref--;
  1004. dvenc = file->private_data;
  1005. dvenc->is_active = 0;
  1006. wake_up_all(&dvenc->venc_msg_evt);
  1007. dal_call_f0(dvenc->q6_handle, VENC_DALRPC_STOP, 1);
  1008. dal_call_f0(dvenc->q6_handle, DAL_OP_CLOSE, 1);
  1009. dal_detach(dvenc->q6_handle);
  1010. list_for_each_entry_safe(l, n, &dvenc->venc_msg_list_free, list) {
  1011. list_del(&l->list);
  1012. kfree(l);
  1013. }
  1014. list_for_each_entry_safe(l, n, &dvenc->venc_msg_list_head, list) {
  1015. list_del(&l->list);
  1016. kfree(l);
  1017. }
  1018. spin_lock_irqsave(&dvenc->venc_pmem_list_lock, flags);
  1019. if (!dvenc->pmem_freed) {
  1020. list_for_each_entry(plist, &dvenc->venc_pmem_list_head, list)
  1021. put_pmem_file(plist->buf.file);
  1022. dvenc->pmem_freed = 1;
  1023. }
  1024. spin_unlock_irqrestore(&dvenc->venc_pmem_list_lock, flags);
  1025. list_for_each_entry_safe(plist, m, &dvenc->venc_pmem_list_head, list) {
  1026. list_del(&plist->list);
  1027. kfree(plist);
  1028. }
  1029. kfree(dvenc);
  1030. allow_sleep();
  1031. return ret;
  1032. }
  1033. const struct file_operations q6venc_fops = {
  1034. .owner = THIS_MODULE,
  1035. .open = q6venc_open,
  1036. .release = q6venc_release,
  1037. .unlocked_ioctl = q6venc_ioctl,
  1038. };
  1039. static int __init q6venc_init(void)
  1040. {
  1041. int ret = 0;
  1042. wake_lock_init(&idlelock, WAKE_LOCK_IDLE, "venc_idle");
  1043. wake_lock_init(&wakelock, WAKE_LOCK_SUSPEND, "venc_suspend");
  1044. venc_device_p = kzalloc(sizeof(struct venc_dev), GFP_KERNEL);
  1045. if (!venc_device_p) {
  1046. pr_err("%s: unable to allocate memory for venc_device_p\n",
  1047. __func__);
  1048. return -ENOMEM;
  1049. }
  1050. ret = alloc_chrdev_region(&venc_dev_num, 0, 1, VENC_NAME);
  1051. if (ret < 0) {
  1052. pr_err("%s: alloc_chrdev_region failed (%d)\n", __func__,
  1053. ret);
  1054. return ret;
  1055. }
  1056. venc_class = class_create(THIS_MODULE, VENC_NAME);
  1057. if (IS_ERR(venc_class)) {
  1058. ret = PTR_ERR(venc_class);
  1059. pr_err("%s: failed to create venc_class (%d)\n",
  1060. __func__, ret);
  1061. goto err_venc_class_create;
  1062. }
  1063. venc_device_p->class_devp =
  1064. device_create(venc_class, NULL, venc_dev_num, NULL,
  1065. VENC_NAME);
  1066. if (IS_ERR(venc_device_p->class_devp)) {
  1067. ret = PTR_ERR(venc_device_p->class_devp);
  1068. pr_err("%s: failed to create class_device (%d)\n", __func__,
  1069. ret);
  1070. goto err_venc_class_device_create;
  1071. }
  1072. cdev_init(&cdev, &q6venc_fops);
  1073. cdev.owner = THIS_MODULE;
  1074. ret = cdev_add(&cdev, venc_dev_num, 1);
  1075. if (ret < 0) {
  1076. pr_err("%s: cdev_add failed (%d)\n", __func__, ret);
  1077. goto err_venc_cdev_add;
  1078. }
  1079. init_waitqueue_head(&venc_device_p->venc_msg_evt);
  1080. return ret;
  1081. err_venc_cdev_add:
  1082. device_destroy(venc_class, venc_dev_num);
  1083. err_venc_class_device_create:
  1084. class_destroy(venc_class);
  1085. err_venc_class_create:
  1086. unregister_chrdev_region(venc_dev_num, 1);
  1087. return ret;
  1088. }
  1089. static void __exit q6venc_exit(void)
  1090. {
  1091. cdev_del(&(cdev));
  1092. device_destroy(venc_class, venc_dev_num);
  1093. class_destroy(venc_class);
  1094. unregister_chrdev_region(venc_dev_num, 1);
  1095. }
/* Module metadata and init/exit registration. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Video encoder driver for QDSP6");
MODULE_VERSION("2.0");
module_init(q6venc_init);
module_exit(q6venc_exit);