
/src/system/kernel/fs/fd.cpp

http://github.com/Barrett17/Haiku-services-branch
C++ | 1309 lines | 886 code | 324 blank | 99 comment | 230 complexity | 7df7d1e90b63f6efd79c3bd708eb7691 MD5
  1. /*
  2. * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
  3. * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
  4. * Distributed under the terms of the MIT License.
  5. */
  6. //! Operations on file descriptors
  7. #include <fd.h>
  8. #include <stdlib.h>
  9. #include <string.h>
  10. #include <OS.h>
  11. #include <AutoDeleter.h>
  12. #include <syscalls.h>
  13. #include <syscall_restart.h>
  14. #include <util/AutoLock.h>
  15. #include <vfs.h>
  16. #include <wait_for_objects.h>
  17. #include "vfs_tracing.h"
  18. //#define TRACE_FD
  19. #ifdef TRACE_FD
  20. # define TRACE(x) dprintf x
  21. #else
  22. # define TRACE(x)
  23. #endif
  24. static const size_t kMaxReadDirBufferSize = 64 * 1024;
  25. static struct file_descriptor* get_fd_locked(struct io_context* context,
  26. int fd);
  27. static struct file_descriptor* remove_fd(struct io_context* context, int fd);
  28. static void deselect_select_infos(file_descriptor* descriptor,
  29. select_info* infos);
  30. struct FDGetterLocking {
  31. inline bool Lock(file_descriptor* /*lockable*/)
  32. {
  33. return false;
  34. }
  35. inline void Unlock(file_descriptor* lockable)
  36. {
  37. put_fd(lockable);
  38. }
  39. };
  40. class FDGetter : public AutoLocker<file_descriptor, FDGetterLocking> {
  41. public:
  42. inline FDGetter()
  43. : AutoLocker<file_descriptor, FDGetterLocking>()
  44. {
  45. }
  46. inline FDGetter(io_context* context, int fd, bool contextLocked = false)
  47. : AutoLocker<file_descriptor, FDGetterLocking>(
  48. contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd))
  49. {
  50. }
  51. inline file_descriptor* SetTo(io_context* context, int fd,
  52. bool contextLocked = false)
  53. {
  54. file_descriptor* descriptor
  55. = contextLocked ? get_fd_locked(context, fd) : get_fd(context, fd);
  56. AutoLocker<file_descriptor, FDGetterLocking>::SetTo(descriptor, true);
  57. return descriptor;
  58. }
  59. inline file_descriptor* SetTo(int fd, bool kernel,
  60. bool contextLocked = false)
  61. {
  62. return SetTo(get_current_io_context(kernel), fd, contextLocked);
  63. }
  64. inline file_descriptor* FD() const
  65. {
  66. return fLockable;
  67. }
  68. };
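
FDGetter builds on AutoLocker so that the reference obtained via get_fd()/get_fd_locked() is handed back through put_fd() automatically when the getter goes out of scope, on every return path. A minimal standalone sketch of the same RAII idea (Descriptor and put_descriptor() are hypothetical stand-ins, not the kernel's AutoLocker):

// Standalone illustration of the RAII idea behind FDGetter; Descriptor and
// put_descriptor() are hypothetical stand-ins, not kernel types.
#include <cstdio>

struct Descriptor {
	int refCount;
};

static void
put_descriptor(Descriptor* descriptor)
{
	// stand-in for put_fd()
	if (--descriptor->refCount == 0)
		printf("descriptor freed\n");
}

class DescriptorGetter {
public:
	explicit DescriptorGetter(Descriptor* descriptor)
		: fDescriptor(descriptor) {}
	~DescriptorGetter()
	{
		// the reference is released on every return path automatically
		if (fDescriptor != NULL)
			put_descriptor(fDescriptor);
	}
	Descriptor* Get() const { return fDescriptor; }
private:
	Descriptor* fDescriptor;
};

int main()
{
	Descriptor descriptor = { 2 };
	{
		DescriptorGetter getter(&descriptor);
		// use getter.Get() here; the reference is dropped at the brace
	}
	put_descriptor(&descriptor);	// drop the remaining reference
	return 0;
}
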
  69. // #pragma mark - General fd routines
  70. #ifdef DEBUG
  71. void dump_fd(int fd, struct file_descriptor* descriptor);
  72. void
  73. dump_fd(int fd, struct file_descriptor* descriptor)
  74. {
  75. dprintf("fd[%d] = %p: type = %ld, ref_count = %ld, ops = %p, u.vnode = %p, "
  76. "u.mount = %p, cookie = %p, open_mode = %lx, pos = %Ld\n",
  77. fd, descriptor, descriptor->type, descriptor->ref_count,
  78. descriptor->ops, descriptor->u.vnode, descriptor->u.mount,
  79. descriptor->cookie, descriptor->open_mode, descriptor->pos);
  80. }
  81. #endif
  82. /*! Allocates and initializes a new file_descriptor.
  83. */
  84. struct file_descriptor*
  85. alloc_fd(void)
  86. {
  87. file_descriptor* descriptor
  88. = (file_descriptor*)malloc(sizeof(struct file_descriptor));
  89. if (descriptor == NULL)
  90. return NULL;
  91. descriptor->u.vnode = NULL;
  92. descriptor->cookie = NULL;
  93. descriptor->ref_count = 1;
  94. descriptor->open_count = 0;
  95. descriptor->open_mode = 0;
  96. descriptor->pos = 0;
  97. return descriptor;
  98. }
  99. bool
  100. fd_close_on_exec(struct io_context* context, int fd)
  101. {
  102. return CHECK_BIT(context->fds_close_on_exec[fd / 8], fd & 7) ? true : false;
  103. }
  104. void
  105. fd_set_close_on_exec(struct io_context* context, int fd, bool closeFD)
  106. {
  107. if (closeFD)
  108. context->fds_close_on_exec[fd / 8] |= (1 << (fd & 7));
  109. else
  110. context->fds_close_on_exec[fd / 8] &= ~(1 << (fd & 7));
  111. }
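
fds_close_on_exec is a plain byte array used as a bitmap: fd / 8 selects the byte and fd & 7 the bit within it. A small self-contained sketch of the same bookkeeping (the table size here is arbitrary, chosen only for the demo):

// Self-contained sketch of a close-on-exec bitmap like the one above.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint8_t sCloseOnExec[256 / 8];
	// one bit per descriptor: byte fd / 8, bit fd & 7

static void
set_close_on_exec(int fd, bool closeFD)
{
	if (closeFD)
		sCloseOnExec[fd / 8] |= 1 << (fd & 7);
	else
		sCloseOnExec[fd / 8] &= ~(1 << (fd & 7));
}

static bool
close_on_exec(int fd)
{
	return (sCloseOnExec[fd / 8] & (1 << (fd & 7))) != 0;
}

int main()
{
	memset(sCloseOnExec, 0, sizeof(sCloseOnExec));
	set_close_on_exec(10, true);
	printf("fd 10: %d, fd 11: %d\n", close_on_exec(10), close_on_exec(11));
		// prints "fd 10: 1, fd 11: 0"
	return 0;
}
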
  112. /*! Searches for a free slot in the FD table of the provided I/O context, and
  113. inserts the specified descriptor into it.
  114. */
  115. int
  116. new_fd_etc(struct io_context* context, struct file_descriptor* descriptor,
  117. int firstIndex)
  118. {
  119. int fd = -1;
  120. uint32 i;
  121. mutex_lock(&context->io_mutex);
  122. for (i = firstIndex; i < context->table_size; i++) {
  123. if (!context->fds[i]) {
  124. fd = i;
  125. break;
  126. }
  127. }
  128. if (fd < 0) {
  129. fd = B_NO_MORE_FDS;
  130. goto err;
  131. }
  132. TFD(NewFD(context, fd, descriptor));
  133. context->fds[fd] = descriptor;
  134. context->num_used_fds++;
  135. atomic_add(&descriptor->open_count, 1);
  136. err:
  137. mutex_unlock(&context->io_mutex);
  138. return fd;
  139. }
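
new_fd_etc() linearly scans the table from firstIndex for the first free slot while io_mutex is held, stores the descriptor there and bumps its open count; a firstIndex above zero gives the "lowest free descriptor at or above N" behaviour that fcntl(F_DUPFD)-style callers want. A rough standalone sketch of just the slot search (hypothetical table, no locking):

// Rough sketch of the slot search only; the table and its size are
// hypothetical, and real code must hold the context's io_mutex.
#include <cstddef>

struct descriptor;	// opaque for this sketch

static int
find_free_slot(descriptor* table[], size_t tableSize, size_t firstIndex)
{
	for (size_t i = firstIndex; i < tableSize; i++) {
		if (table[i] == NULL)
			return (int)i;
	}
	return -1;	// the caller maps this to B_NO_MORE_FDS
}

int main()
{
	descriptor* table[8] = {};	// all slots free
	return find_free_slot(table, 8, 3) == 3 ? 0 : 1;
}
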
  140. int
  141. new_fd(struct io_context* context, struct file_descriptor* descriptor)
  142. {
  143. return new_fd_etc(context, descriptor, 0);
  144. }
  145. /*! Reduces the descriptor's reference counter, and frees all resources
  146. when it's no longer used.
  147. */
  148. void
  149. put_fd(struct file_descriptor* descriptor)
  150. {
  151. int32 previous = atomic_add(&descriptor->ref_count, -1);
  152. TFD(PutFD(descriptor));
  153. TRACE(("put_fd(descriptor = %p [ref = %ld, cookie = %p])\n",
  154. descriptor, descriptor->ref_count, descriptor->cookie));
  155. // free the descriptor if we don't need it anymore
  156. if (previous == 1) {
  157. // free the underlying object
  158. if (descriptor->ops != NULL && descriptor->ops->fd_free != NULL)
  159. descriptor->ops->fd_free(descriptor);
  160. free(descriptor);
  161. } else if ((descriptor->open_mode & O_DISCONNECTED) != 0
  162. && previous - 1 == descriptor->open_count
  163. && descriptor->ops != NULL) {
  164. // the descriptor has been disconnected - it cannot
  165. // be accessed anymore, let's close it (no one is
  166. // currently accessing this descriptor)
  167. if (descriptor->ops->fd_close)
  168. descriptor->ops->fd_close(descriptor);
  169. if (descriptor->ops->fd_free)
  170. descriptor->ops->fd_free(descriptor);
  171. // prevent this descriptor from being closed/freed again
  172. descriptor->open_count = -1;
  173. descriptor->ref_count = -1;
  174. descriptor->ops = NULL;
  175. descriptor->u.vnode = NULL;
  176. // the file descriptor is kept intact, so that it's not
  177. // reused until someone explicitly closes it
  178. }
  179. }
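
put_fd() depends on atomic_add() returning the counter's value before the decrement, so previous == 1 means this caller just dropped the last reference. The same pattern in standard C++, as a standalone sketch:

// Standard C++ sketch of the reference counting pattern used by put_fd():
// the fetch returns the old value, so previous == 1 marks the last reference.
#include <atomic>
#include <cstdint>
#include <cstdio>

struct RefCounted {
	std::atomic<int32_t> refCount{1};
};

static void
put_ref(RefCounted* object)
{
	int32_t previous = object->refCount.fetch_add(-1);
		// like the kernel's atomic_add(), this returns the previous value
	if (previous == 1) {
		// we held the last reference; free the object's resources here
		printf("last reference dropped\n");
	}
}

int main()
{
	RefCounted object;
	object.refCount.fetch_add(1);	// take a second reference
	put_ref(&object);				// not the last one, nothing happens
	put_ref(&object);				// prints "last reference dropped"
	return 0;
}
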
  180. /*! Decrements the open counter of the file descriptor and invokes
  181. its close hook when appropriate.
  182. */
  183. void
  184. close_fd(struct file_descriptor* descriptor)
  185. {
  186. if (atomic_add(&descriptor->open_count, -1) == 1) {
  187. vfs_unlock_vnode_if_locked(descriptor);
  188. if (descriptor->ops != NULL && descriptor->ops->fd_close != NULL)
  189. descriptor->ops->fd_close(descriptor);
  190. }
  191. }
  192. status_t
  193. close_fd_index(struct io_context* context, int fd)
  194. {
  195. struct file_descriptor* descriptor = remove_fd(context, fd);
  196. if (descriptor == NULL)
  197. return B_FILE_ERROR;
  198. close_fd(descriptor);
  199. put_fd(descriptor);
  200. // the reference associated with the slot
  201. return B_OK;
  202. }
  203. /*! This descriptor's underlying object will be closed and freed as soon as
  204. possible (in one of the next calls to put_fd() - get_fd() will no longer
  205. succeed on this descriptor).
  206. This is useful if the underlying object is gone, for instance when a
  207. (mounted) volume got removed unexpectedly.
  208. */
  209. void
  210. disconnect_fd(struct file_descriptor* descriptor)
  211. {
  212. descriptor->open_mode |= O_DISCONNECTED;
  213. }
  214. void
  215. inc_fd_ref_count(struct file_descriptor* descriptor)
  216. {
  217. atomic_add(&descriptor->ref_count, 1);
  218. }
  219. static struct file_descriptor*
  220. get_fd_locked(struct io_context* context, int fd)
  221. {
  222. if (fd < 0 || (uint32)fd >= context->table_size)
  223. return NULL;
  224. struct file_descriptor* descriptor = context->fds[fd];
  225. if (descriptor != NULL) {
  226. // Disconnected descriptors cannot be accessed anymore
  227. if (descriptor->open_mode & O_DISCONNECTED)
  228. descriptor = NULL;
  229. else {
  230. TFD(GetFD(context, fd, descriptor));
  231. inc_fd_ref_count(descriptor);
  232. }
  233. }
  234. return descriptor;
  235. }
  236. struct file_descriptor*
  237. get_fd(struct io_context* context, int fd)
  238. {
  239. MutexLocker _(context->io_mutex);
  240. return get_fd_locked(context, fd);
  241. }
  242. struct file_descriptor*
  243. get_open_fd(struct io_context* context, int fd)
  244. {
  245. MutexLocker _(context->io_mutex);
  246. file_descriptor* descriptor = get_fd_locked(context, fd);
  247. if (descriptor == NULL)
  248. return NULL;
  249. atomic_add(&descriptor->open_count, 1);
  250. return descriptor;
  251. }
  252. /*! Removes the file descriptor from the specified slot.
  253. */
  254. static struct file_descriptor*
  255. remove_fd(struct io_context* context, int fd)
  256. {
  257. struct file_descriptor* descriptor = NULL;
  258. if (fd < 0)
  259. return NULL;
  260. mutex_lock(&context->io_mutex);
  261. if ((uint32)fd < context->table_size)
  262. descriptor = context->fds[fd];
  263. select_info* selectInfos = NULL;
  264. bool disconnected = false;
  265. if (descriptor != NULL) {
  266. // fd is valid
  267. TFD(RemoveFD(context, fd, descriptor));
  268. context->fds[fd] = NULL;
  269. fd_set_close_on_exec(context, fd, false);
  270. context->num_used_fds--;
  271. selectInfos = context->select_infos[fd];
  272. context->select_infos[fd] = NULL;
  273. disconnected = (descriptor->open_mode & O_DISCONNECTED);
  274. }
  275. mutex_unlock(&context->io_mutex);
  276. if (selectInfos != NULL)
  277. deselect_select_infos(descriptor, selectInfos);
  278. return disconnected ? NULL : descriptor;
  279. }
  280. static int
  281. dup_fd(int fd, bool kernel)
  282. {
  283. struct io_context* context = get_current_io_context(kernel);
  284. struct file_descriptor* descriptor;
  285. int status;
  286. TRACE(("dup_fd: fd = %d\n", fd));
  287. // Try to get the fd structure
  288. descriptor = get_fd(context, fd);
  289. if (descriptor == NULL)
  290. return B_FILE_ERROR;
  291. // now put the fd in place
  292. status = new_fd(context, descriptor);
  293. if (status < 0)
  294. put_fd(descriptor);
  295. else {
  296. mutex_lock(&context->io_mutex);
  297. fd_set_close_on_exec(context, status, false);
  298. mutex_unlock(&context->io_mutex);
  299. }
  300. return status;
  301. }
  302. /*! POSIX says this should be the same as:
  303. close(newfd);
  304. fcntl(oldfd, F_DUPFD, newfd);
  305. We do dup2() directly to be thread-safe.
  306. */
  307. static int
  308. dup2_fd(int oldfd, int newfd, bool kernel)
  309. {
  310. struct file_descriptor* evicted = NULL;
  311. struct io_context* context;
  312. TRACE(("dup2_fd: ofd = %d, nfd = %d\n", oldfd, newfd));
  313. // quick check
  314. if (oldfd < 0 || newfd < 0)
  315. return B_FILE_ERROR;
  316. // Get current I/O context and lock it
  317. context = get_current_io_context(kernel);
  318. mutex_lock(&context->io_mutex);
  319. // Check if the fds are valid (mutex must be locked because
  320. // the table size could be changed)
  321. if ((uint32)oldfd >= context->table_size
  322. || (uint32)newfd >= context->table_size
  323. || context->fds[oldfd] == NULL) {
  324. mutex_unlock(&context->io_mutex);
  325. return B_FILE_ERROR;
  326. }
  327. // Check for identity; note that this check cannot be done earlier,
  328. // because we always want to return an error on invalid
  329. // handles
  330. select_info* selectInfos = NULL;
  331. if (oldfd != newfd) {
  332. // Now do the work
  333. TFD(Dup2FD(context, oldfd, newfd));
  334. evicted = context->fds[newfd];
  335. selectInfos = context->select_infos[newfd];
  336. context->select_infos[newfd] = NULL;
  337. atomic_add(&context->fds[oldfd]->ref_count, 1);
  338. atomic_add(&context->fds[oldfd]->open_count, 1);
  339. context->fds[newfd] = context->fds[oldfd];
  340. if (evicted == NULL)
  341. context->num_used_fds++;
  342. }
  343. fd_set_close_on_exec(context, newfd, false);
  344. mutex_unlock(&context->io_mutex);
  345. // Say bye bye to the evicted fd
  346. if (evicted) {
  347. deselect_select_infos(evicted, selectInfos);
  348. close_fd(evicted);
  349. put_fd(evicted);
  350. }
  351. return newfd;
  352. }
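
Seen from userland this is ordinary POSIX dup2(): afterwards newfd refers to the same open file as oldfd, and whatever used to occupy newfd is closed. A typical use is redirecting a standard stream (plain POSIX, not Haiku-specific):

// Plain POSIX usage of dup2(): send stdout into a file.
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main()
{
	int fd = open("log.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return 1;

	// STDOUT_FILENO now refers to log.txt; whatever used to be there
	// is closed, exactly the "evicted" case handled above.
	if (dup2(fd, STDOUT_FILENO) < 0)
		return 1;
	close(fd);	// the duplicate keeps the file open

	printf("this line ends up in log.txt\n");
	return 0;
}
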
  353. /*! Duplicates an FD from another team to this/the kernel team.
  354. \param fromTeam The team which owns the FD.
  355. \param fd The FD to duplicate.
  356. \param kernel If \c true, the new FD will be created in the kernel team,
  357. the current userland team otherwise.
  358. \return The newly created FD or an error code, if something went wrong.
  359. */
  360. int
  361. dup_foreign_fd(team_id fromTeam, int fd, bool kernel)
  362. {
  363. // get the I/O context for the team in question
  364. Team* team = Team::Get(fromTeam);
  365. if (team == NULL)
  366. return B_BAD_TEAM_ID;
  367. BReference<Team> teamReference(team, true);
  368. io_context* fromContext = team->io_context;
  369. // get the file descriptor
  370. file_descriptor* descriptor = get_fd(fromContext, fd);
  371. if (descriptor == NULL)
  372. return B_FILE_ERROR;
  373. CObjectDeleter<file_descriptor> descriptorPutter(descriptor, put_fd);
  374. // create a new FD in the target I/O context
  375. int result = new_fd(get_current_io_context(kernel), descriptor);
  376. if (result >= 0) {
  377. // the descriptor reference belongs to the slot, now
  378. descriptorPutter.Detach();
  379. }
  380. return result;
  381. }
  382. static status_t
  383. fd_ioctl(bool kernelFD, int fd, uint32 op, void* buffer, size_t length)
  384. {
  385. struct file_descriptor* descriptor;
  386. int status;
  387. descriptor = get_fd(get_current_io_context(kernelFD), fd);
  388. if (descriptor == NULL)
  389. return B_FILE_ERROR;
  390. if (descriptor->ops->fd_ioctl)
  391. status = descriptor->ops->fd_ioctl(descriptor, op, buffer, length);
  392. else
  393. status = B_DEV_INVALID_IOCTL;
  394. if (status == B_DEV_INVALID_IOCTL)
  395. status = ENOTTY;
  396. put_fd(descriptor);
  397. return status;
  398. }
  399. static void
  400. deselect_select_infos(file_descriptor* descriptor, select_info* infos)
  401. {
  402. TRACE(("deselect_select_infos(%p, %p)\n", descriptor, infos));
  403. select_info* info = infos;
  404. while (info != NULL) {
  405. select_sync* sync = info->sync;
  406. // deselect the selected events
  407. uint16 eventsToDeselect = info->selected_events & ~B_EVENT_INVALID;
  408. if (descriptor->ops->fd_deselect != NULL && eventsToDeselect != 0) {
  409. for (uint16 event = 1; event < 16; event++) {
  410. if ((eventsToDeselect & SELECT_FLAG(event)) != 0) {
  411. descriptor->ops->fd_deselect(descriptor, event,
  412. (selectsync*)info);
  413. }
  414. }
  415. }
  416. notify_select_events(info, B_EVENT_INVALID);
  417. info = info->next;
  418. put_select_sync(sync);
  419. }
  420. }
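
deselect_select_infos() above and deselect_fd() below walk event numbers 1 through 15 and test each against SELECT_FLAG(event). Assuming SELECT_FLAG() simply maps an event number to one bit of the 16-bit mask (its real definition lives in wait_for_objects.h), the loop is a plain bitmask walk; a hypothetical standalone sketch:

// Hypothetical standalone sketch of the event bitmask walk; select_flag()
// stands in for SELECT_FLAG() and simply assigns one bit per event number.
#include <cstdint>
#include <cstdio>

static inline uint16_t
select_flag(uint16_t event)
{
	return (uint16_t)(1 << (event - 1));
}

static void
for_each_selected_event(uint16_t mask, void (*callback)(uint16_t))
{
	for (uint16_t event = 1; event < 16; event++) {
		if ((mask & select_flag(event)) != 0)
			callback(event);
	}
}

int main()
{
	uint16_t mask = select_flag(1) | select_flag(3);
		// two arbitrary events for the demo
	for_each_selected_event(mask, [](uint16_t event) {
		printf("deselect event %u\n", (unsigned)event);
	});
	return 0;
}
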
  421. status_t
  422. select_fd(int32 fd, struct select_info* info, bool kernel)
  423. {
  424. TRACE(("select_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
  425. info->sync, info->selected_events));
  426. FDGetter fdGetter;
  427. // define before the context locker, so it will be destroyed after it
  428. io_context* context = get_current_io_context(kernel);
  429. MutexLocker locker(context->io_mutex);
  430. struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
  431. if (descriptor == NULL)
  432. return B_FILE_ERROR;
  433. uint16 eventsToSelect = info->selected_events & ~B_EVENT_INVALID;
  434. if (descriptor->ops->fd_select == NULL && eventsToSelect != 0) {
  435. // if the I/O subsystem doesn't support select(), we will
  436. // immediately notify the select call
  437. return notify_select_events(info, eventsToSelect);
  438. }
  439. // We need the FD to stay open while we're doing this, so no select()/
  440. // deselect() will be called on it after it is closed.
  441. atomic_add(&descriptor->open_count, 1);
  442. locker.Unlock();
  443. // select any events asked for
  444. uint32 selectedEvents = 0;
  445. for (uint16 event = 1; event < 16; event++) {
  446. if ((eventsToSelect & SELECT_FLAG(event)) != 0
  447. && descriptor->ops->fd_select(descriptor, event,
  448. (selectsync*)info) == B_OK) {
  449. selectedEvents |= SELECT_FLAG(event);
  450. }
  451. }
  452. info->selected_events = selectedEvents
  453. | (info->selected_events & B_EVENT_INVALID);
  454. // Add the info to the IO context. Even if nothing has been selected -- we
  455. // always support B_EVENT_INVALID.
  456. locker.Lock();
  457. if (context->fds[fd] != descriptor) {
  458. // Someone close()d the index in the meantime. deselect() all
  459. // events.
  460. info->next = NULL;
  461. deselect_select_infos(descriptor, info);
  462. // Release our open reference of the descriptor.
  463. close_fd(descriptor);
  464. return B_FILE_ERROR;
  465. }
  466. // The FD index hasn't changed, so we add the select info to the table.
  467. info->next = context->select_infos[fd];
  468. context->select_infos[fd] = info;
  469. // As long as the info is in the list, we keep a reference to the sync
  470. // object.
  471. atomic_add(&info->sync->ref_count, 1);
  472. // Finally release our open reference. It is safe just to decrement,
  473. // since as long as the descriptor is associated with the slot,
  474. // someone else still has it open.
  475. atomic_add(&descriptor->open_count, -1);
  476. return B_OK;
  477. }
  478. status_t
  479. deselect_fd(int32 fd, struct select_info* info, bool kernel)
  480. {
  481. TRACE(("deselect_fd(fd = %ld, info = %p (%p), 0x%x)\n", fd, info,
  482. info->sync, info->selected_events));
  483. FDGetter fdGetter;
  484. // define before the context locker, so it will be destroyed after it
  485. io_context* context = get_current_io_context(kernel);
  486. MutexLocker locker(context->io_mutex);
  487. struct file_descriptor* descriptor = fdGetter.SetTo(context, fd, true);
  488. if (descriptor == NULL)
  489. return B_FILE_ERROR;
  490. // remove the info from the IO context
  491. select_info** infoLocation = &context->select_infos[fd];
  492. while (*infoLocation != NULL && *infoLocation != info)
  493. infoLocation = &(*infoLocation)->next;
  494. // If not found, someone else beat us to it.
  495. if (*infoLocation != info)
  496. return B_OK;
  497. *infoLocation = info->next;
  498. locker.Unlock();
  499. // deselect the selected events
  500. uint16 eventsToDeselect = info->selected_events & ~B_EVENT_INVALID;
  501. if (descriptor->ops->fd_deselect != NULL && eventsToDeselect != 0) {
  502. for (uint16 event = 1; event < 16; event++) {
  503. if ((eventsToDeselect & SELECT_FLAG(event)) != 0) {
  504. descriptor->ops->fd_deselect(descriptor, event,
  505. (selectsync*)info);
  506. }
  507. }
  508. }
  509. put_select_sync(info->sync);
  510. return B_OK;
  511. }
  512. /*! This function checks if the specified fd is valid in the current
  513. context. It can be used for a quick check; the fd is not locked
  514. so it could become invalid immediately after this check.
  515. */
  516. bool
  517. fd_is_valid(int fd, bool kernel)
  518. {
  519. struct file_descriptor* descriptor
  520. = get_fd(get_current_io_context(kernel), fd);
  521. if (descriptor == NULL)
  522. return false;
  523. put_fd(descriptor);
  524. return true;
  525. }
  526. struct vnode*
  527. fd_vnode(struct file_descriptor* descriptor)
  528. {
  529. switch (descriptor->type) {
  530. case FDTYPE_FILE:
  531. case FDTYPE_DIR:
  532. case FDTYPE_ATTR_DIR:
  533. case FDTYPE_ATTR:
  534. return descriptor->u.vnode;
  535. }
  536. return NULL;
  537. }
  538. static status_t
  539. common_close(int fd, bool kernel)
  540. {
  541. return close_fd_index(get_current_io_context(kernel), fd);
  542. }
  543. static ssize_t
  544. common_user_io(int fd, off_t pos, void* buffer, size_t length, bool write)
  545. {
  546. if (!IS_USER_ADDRESS(buffer))
  547. return B_BAD_ADDRESS;
  548. if (pos < -1)
  549. return B_BAD_VALUE;
  550. FDGetter fdGetter;
  551. struct file_descriptor* descriptor = fdGetter.SetTo(fd, false);
  552. if (!descriptor)
  553. return B_FILE_ERROR;
  554. if (write ? (descriptor->open_mode & O_RWMASK) == O_RDONLY
  555. : (descriptor->open_mode & O_RWMASK) == O_WRONLY) {
  556. return B_FILE_ERROR;
  557. }
  558. bool movePosition = false;
  559. if (pos == -1) {
  560. pos = descriptor->pos;
  561. movePosition = true;
  562. }
  563. if (write ? descriptor->ops->fd_write == NULL
  564. : descriptor->ops->fd_read == NULL) {
  565. return B_BAD_VALUE;
  566. }
  567. SyscallRestartWrapper<status_t> status;
  568. if (write)
  569. status = descriptor->ops->fd_write(descriptor, pos, buffer, &length);
  570. else
  571. status = descriptor->ops->fd_read(descriptor, pos, buffer, &length);
  572. if (status != B_OK)
  573. return status;
  574. if (movePosition)
  575. descriptor->pos = pos + length;
  576. return length <= SSIZE_MAX ? (ssize_t)length : SSIZE_MAX;
  577. }
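
A position of -1 means "use and advance the descriptor's current position", which is how plain read()/write() behave, while an explicit position gives pread()/pwrite() semantics and leaves descriptor->pos alone. The userland difference, for reference (plain POSIX):

// Plain POSIX illustration of the two positioning modes.
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main()
{
	int fd = open("data.bin", O_RDONLY);
	if (fd < 0)
		return 1;

	char a[4], b[4];

	// explicit position: the file offset is left untouched
	(void)pread(fd, a, sizeof(a), 100);

	// no position given: uses and advances the current offset,
	// the pos == -1 case in common_user_io() above
	(void)read(fd, b, sizeof(b));
	printf("offset after read(): %lld\n", (long long)lseek(fd, 0, SEEK_CUR));

	close(fd);
	return 0;
}
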
  578. static ssize_t
  579. common_user_vector_io(int fd, off_t pos, const iovec* userVecs, size_t count,
  580. bool write)
  581. {
  582. if (!IS_USER_ADDRESS(userVecs))
  583. return B_BAD_ADDRESS;
  584. if (pos < -1)
  585. return B_BAD_VALUE;
  586. // prevent integer overflow exploit in malloc()
  587. if (count > IOV_MAX)
  588. return B_BAD_VALUE;
  589. FDGetter fdGetter;
  590. struct file_descriptor* descriptor = fdGetter.SetTo(fd, false);
  591. if (!descriptor)
  592. return B_FILE_ERROR;
  593. if (write ? (descriptor->open_mode & O_RWMASK) == O_RDONLY
  594. : (descriptor->open_mode & O_RWMASK) == O_WRONLY) {
  595. return B_FILE_ERROR;
  596. }
  597. iovec* vecs = (iovec*)malloc(sizeof(iovec) * count);
  598. if (vecs == NULL)
  599. return B_NO_MEMORY;
  600. MemoryDeleter _(vecs);
  601. if (user_memcpy(vecs, userVecs, sizeof(iovec) * count) != B_OK)
  602. return B_BAD_ADDRESS;
  603. bool movePosition = false;
  604. if (pos == -1) {
  605. pos = descriptor->pos;
  606. movePosition = true;
  607. }
  608. if (write ? descriptor->ops->fd_write == NULL
  609. : descriptor->ops->fd_read == NULL) {
  610. return B_BAD_VALUE;
  611. }
  612. SyscallRestartWrapper<status_t> status;
  613. ssize_t bytesTransferred = 0;
  614. for (uint32 i = 0; i < count; i++) {
  615. size_t length = vecs[i].iov_len;
  616. if (write) {
  617. status = descriptor->ops->fd_write(descriptor, pos,
  618. vecs[i].iov_base, &length);
  619. } else {
  620. status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base,
  621. &length);
  622. }
  623. if (status != B_OK) {
  624. if (bytesTransferred == 0)
  625. return status;
  626. status = B_OK;
  627. break;
  628. }
  629. if ((uint64)bytesTransferred + length > SSIZE_MAX)
  630. bytesTransferred = SSIZE_MAX;
  631. else
  632. bytesTransferred += (ssize_t)length;
  633. pos += length;
  634. if (length < vecs[i].iov_len)
  635. break;
  636. }
  637. if (movePosition)
  638. descriptor->pos = pos;
  639. return bytesTransferred;
  640. }
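
The loop implements readv()/writev() semantics: vectors are processed in order, the return value is the total number of bytes transferred, and a short transfer or an error after some data ends the operation early with the partial count. Userland usage looks like this (plain POSIX):

// Plain POSIX writev(): two buffers, one call, one combined byte count.
#include <sys/uio.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main()
{
	int fd = open("out.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return 1;

	char header[] = "header: ";
	char body[] = "payload\n";

	iovec vecs[2];
	vecs[0].iov_base = header;
	vecs[0].iov_len = strlen(header);
	vecs[1].iov_base = body;
	vecs[1].iov_len = strlen(body);

	// the return value covers both vectors together
	ssize_t written = writev(fd, vecs, 2);
	printf("wrote %zd bytes\n", written);

	close(fd);
	return 0;
}
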
  641. status_t
  642. user_fd_kernel_ioctl(int fd, uint32 op, void* buffer, size_t length)
  643. {
  644. TRACE(("user_fd_kernel_ioctl: fd %d\n", fd));
  645. return fd_ioctl(false, fd, op, buffer, length);
  646. }
  647. // #pragma mark - User syscalls
  648. ssize_t
  649. _user_read(int fd, off_t pos, void* buffer, size_t length)
  650. {
  651. return common_user_io(fd, pos, buffer, length, false);
  652. }
  653. ssize_t
  654. _user_readv(int fd, off_t pos, const iovec* userVecs, size_t count)
  655. {
  656. return common_user_vector_io(fd, pos, userVecs, count, false);
  657. }
  658. ssize_t
  659. _user_write(int fd, off_t pos, const void* buffer, size_t length)
  660. {
  661. return common_user_io(fd, pos, (void*)buffer, length, true);
  662. }
  663. ssize_t
  664. _user_writev(int fd, off_t pos, const iovec* userVecs, size_t count)
  665. {
  666. return common_user_vector_io(fd, pos, userVecs, count, true);
  667. }
  668. off_t
  669. _user_seek(int fd, off_t pos, int seekType)
  670. {
  671. syscall_64_bit_return_value();
  672. struct file_descriptor* descriptor;
  673. descriptor = get_fd(get_current_io_context(false), fd);
  674. if (!descriptor)
  675. return B_FILE_ERROR;
  676. TRACE(("user_seek(descriptor = %p)\n", descriptor));
  677. if (descriptor->ops->fd_seek)
  678. pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
  679. else
  680. pos = ESPIPE;
  681. put_fd(descriptor);
  682. return pos;
  683. }
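
The seek hook receives the requested offset together with the seek type (SEEK_SET, SEEK_CUR or SEEK_END); descriptors without one answer ESPIPE, which is also what lseek() on a pipe reports in userland. For reference, a plain POSIX lseek() round trip:

// Plain POSIX lseek(): the three seek types, and ESPIPE on a pipe.
#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>

int main()
{
	int fd = open("data.bin", O_RDONLY);
	if (fd < 0)
		return 1;

	off_t size = lseek(fd, 0, SEEK_END);		// offset relative to the end
	off_t current = lseek(fd, -1, SEEK_CUR);	// step one byte back
	printf("size: %lld, now at: %lld\n", (long long)size, (long long)current);

	int pipeFds[2];
	if (pipe(pipeFds) == 0 && lseek(pipeFds[0], 0, SEEK_SET) < 0)
		printf("lseek() on a pipe fails: errno %d (ESPIPE is %d)\n",
			errno, ESPIPE);

	close(fd);
	return 0;
}
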
  684. status_t
  685. _user_ioctl(int fd, uint32 op, void* buffer, size_t length)
  686. {
  687. if (!IS_USER_ADDRESS(buffer))
  688. return B_BAD_ADDRESS;
  689. TRACE(("user_ioctl: fd %d\n", fd));
  690. SyscallRestartWrapper<status_t> status;
  691. return status = fd_ioctl(false, fd, op, buffer, length);
  692. }
  693. ssize_t
  694. _user_read_dir(int fd, struct dirent* userBuffer, size_t bufferSize,
  695. uint32 maxCount)
  696. {
  697. TRACE(("user_read_dir(fd = %d, userBuffer = %p, bufferSize = %ld, count = "
  698. "%lu)\n", fd, userBuffer, bufferSize, maxCount));
  699. if (maxCount == 0)
  700. return 0;
  701. if (userBuffer == NULL || !IS_USER_ADDRESS(userBuffer))
  702. return B_BAD_ADDRESS;
  703. // get I/O context and FD
  704. io_context* ioContext = get_current_io_context(false);
  705. FDGetter fdGetter;
  706. struct file_descriptor* descriptor = fdGetter.SetTo(ioContext, fd, false);
  707. if (descriptor == NULL)
  708. return B_FILE_ERROR;
  709. if (descriptor->ops->fd_read_dir == NULL)
  710. return B_UNSUPPORTED;
  711. // restrict buffer size and allocate a heap buffer
  712. if (bufferSize > kMaxReadDirBufferSize)
  713. bufferSize = kMaxReadDirBufferSize;
  714. struct dirent* buffer = (struct dirent*)malloc(bufferSize);
  715. if (buffer == NULL)
  716. return B_NO_MEMORY;
  717. MemoryDeleter bufferDeleter(buffer);
  718. // read the directory
  719. uint32 count = maxCount;
  720. status_t status = descriptor->ops->fd_read_dir(ioContext, descriptor,
  721. buffer, bufferSize, &count);
  722. if (status != B_OK)
  723. return status;
  724. // copy the buffer back -- determine the total buffer size first
  725. size_t sizeToCopy = 0;
  726. struct dirent* entry = buffer;
  727. for (uint32 i = 0; i < count; i++) {
  728. size_t length = entry->d_reclen;
  729. sizeToCopy += length;
  730. entry = (struct dirent*)((uint8*)entry + length);
  731. }
  732. if (user_memcpy(userBuffer, buffer, sizeToCopy) != B_OK)
  733. return B_BAD_ADDRESS;
  734. return count;
  735. }
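
The copy-back loop works because dirent records are variable length and packed back to back: d_reclen is the full size of one record, so stepping by it walks the buffer. A standalone sketch of the same walk (the fake record built in main() exists only to exercise the loop):

// Standalone sketch of walking packed dirent records by d_reclen.
#include <dirent.h>
#include <cstdint>
#include <cstdio>

static size_t
total_dirent_size(const struct dirent* buffer, uint32_t count)
{
	size_t size = 0;
	const struct dirent* entry = buffer;
	for (uint32_t i = 0; i < count; i++) {
		printf("entry \"%s\", record %u bytes\n", entry->d_name,
			(unsigned)entry->d_reclen);
		size += entry->d_reclen;
		entry = (const struct dirent*)((const uint8_t*)entry
			+ entry->d_reclen);
	}
	return size;
}

int main()
{
	alignas(alignof(struct dirent)) uint8_t raw[sizeof(struct dirent) + 16]
		= {};
	struct dirent* entry = (struct dirent*)raw;
	entry->d_reclen = sizeof(struct dirent);
	snprintf(entry->d_name, 8, "%s", "demo");
	total_dirent_size(entry, 1);
	return 0;
}
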
  736. status_t
  737. _user_rewind_dir(int fd)
  738. {
  739. struct file_descriptor* descriptor;
  740. status_t status;
  741. TRACE(("user_rewind_dir(fd = %d)\n", fd));
  742. descriptor = get_fd(get_current_io_context(false), fd);
  743. if (descriptor == NULL)
  744. return B_FILE_ERROR;
  745. if (descriptor->ops->fd_rewind_dir)
  746. status = descriptor->ops->fd_rewind_dir(descriptor);
  747. else
  748. status = B_UNSUPPORTED;
  749. put_fd(descriptor);
  750. return status;
  751. }
  752. status_t
  753. _user_close(int fd)
  754. {
  755. return common_close(fd, false);
  756. }
  757. int
  758. _user_dup(int fd)
  759. {
  760. return dup_fd(fd, false);
  761. }
  762. int
  763. _user_dup2(int ofd, int nfd)
  764. {
  765. return dup2_fd(ofd, nfd, false);
  766. }
  767. // #pragma mark - Kernel calls
  768. ssize_t
  769. _kern_read(int fd, off_t pos, void* buffer, size_t length)
  770. {
  771. if (pos < -1)
  772. return B_BAD_VALUE;
  773. FDGetter fdGetter;
  774. struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);
  775. if (!descriptor)
  776. return B_FILE_ERROR;
  777. if ((descriptor->open_mode & O_RWMASK) == O_WRONLY)
  778. return B_FILE_ERROR;
  779. bool movePosition = false;
  780. if (pos == -1) {
  781. pos = descriptor->pos;
  782. movePosition = true;
  783. }
  784. SyscallFlagUnsetter _;
  785. if (descriptor->ops->fd_read == NULL)
  786. return B_BAD_VALUE;
  787. ssize_t bytesRead = descriptor->ops->fd_read(descriptor, pos, buffer,
  788. &length);
  789. if (bytesRead >= B_OK) {
  790. if (length > SSIZE_MAX)
  791. bytesRead = SSIZE_MAX;
  792. else
  793. bytesRead = (ssize_t)length;
  794. if (movePosition)
  795. descriptor->pos = pos + length;
  796. }
  797. return bytesRead;
  798. }
  799. ssize_t
  800. _kern_readv(int fd, off_t pos, const iovec* vecs, size_t count)
  801. {
  802. bool movePosition = false;
  803. status_t status;
  804. uint32 i;
  805. if (pos < -1)
  806. return B_BAD_VALUE;
  807. FDGetter fdGetter;
  808. struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);
  809. if (!descriptor)
  810. return B_FILE_ERROR;
  811. if ((descriptor->open_mode & O_RWMASK) == O_WRONLY)
  812. return B_FILE_ERROR;
  813. if (pos == -1) {
  814. pos = descriptor->pos;
  815. movePosition = true;
  816. }
  817. if (descriptor->ops->fd_read == NULL)
  818. return B_BAD_VALUE;
  819. SyscallFlagUnsetter _;
  820. ssize_t bytesRead = 0;
  821. for (i = 0; i < count; i++) {
  822. size_t length = vecs[i].iov_len;
  823. status = descriptor->ops->fd_read(descriptor, pos, vecs[i].iov_base,
  824. &length);
  825. if (status != B_OK) {
  826. bytesRead = status;
  827. break;
  828. }
  829. if ((uint64)bytesRead + length > SSIZE_MAX)
  830. bytesRead = SSIZE_MAX;
  831. else
  832. bytesRead += (ssize_t)length;
  833. pos += vecs[i].iov_len;
  834. }
  835. if (movePosition)
  836. descriptor->pos = pos;
  837. return bytesRead;
  838. }
  839. ssize_t
  840. _kern_write(int fd, off_t pos, const void* buffer, size_t length)
  841. {
  842. if (pos < -1)
  843. return B_BAD_VALUE;
  844. FDGetter fdGetter;
  845. struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);
  846. if (descriptor == NULL)
  847. return B_FILE_ERROR;
  848. if ((descriptor->open_mode & O_RWMASK) == O_RDONLY)
  849. return B_FILE_ERROR;
  850. bool movePosition = false;
  851. if (pos == -1) {
  852. pos = descriptor->pos;
  853. movePosition = true;
  854. }
  855. if (descriptor->ops->fd_write == NULL)
  856. return B_BAD_VALUE;
  857. SyscallFlagUnsetter _;
  858. ssize_t bytesWritten = descriptor->ops->fd_write(descriptor, pos, buffer,
  859. &length);
  860. if (bytesWritten >= B_OK) {
  861. if (length > SSIZE_MAX)
  862. bytesWritten = SSIZE_MAX;
  863. else
  864. bytesWritten = (ssize_t)length;
  865. if (movePosition)
  866. descriptor->pos = pos + length;
  867. }
  868. return bytesWritten;
  869. }
  870. ssize_t
  871. _kern_writev(int fd, off_t pos, const iovec* vecs, size_t count)
  872. {
  873. bool movePosition = false;
  874. status_t status;
  875. uint32 i;
  876. if (pos < -1)
  877. return B_BAD_VALUE;
  878. FDGetter fdGetter;
  879. struct file_descriptor* descriptor = fdGetter.SetTo(fd, true);
  880. if (!descriptor)
  881. return B_FILE_ERROR;
  882. if ((descriptor->open_mode & O_RWMASK) == O_RDONLY)
  883. return B_FILE_ERROR;
  884. if (pos == -1) {
  885. pos = descriptor->pos;
  886. movePosition = true;
  887. }
  888. if (descriptor->ops->fd_write == NULL)
  889. return B_BAD_VALUE;
  890. SyscallFlagUnsetter _;
  891. ssize_t bytesWritten = 0;
  892. for (i = 0; i < count; i++) {
  893. size_t length = vecs[i].iov_len;
  894. status = descriptor->ops->fd_write(descriptor, pos,
  895. vecs[i].iov_base, &length);
  896. if (status != B_OK) {
  897. bytesWritten = status;
  898. break;
  899. }
  900. if ((uint64)bytesWritten + length > SSIZE_MAX)
  901. bytesWritten = SSIZE_MAX;
  902. else
  903. bytesWritten += (ssize_t)length;
  904. pos += vecs[i].iov_len;
  905. }
  906. if (movePosition)
  907. descriptor->pos = pos;
  908. return bytesWritten;
  909. }
  910. off_t
  911. _kern_seek(int fd, off_t pos, int seekType)
  912. {
  913. struct file_descriptor* descriptor;
  914. descriptor = get_fd(get_current_io_context(true), fd);
  915. if (!descriptor)
  916. return B_FILE_ERROR;
  917. if (descriptor->ops->fd_seek)
  918. pos = descriptor->ops->fd_seek(descriptor, pos, seekType);
  919. else
  920. pos = ESPIPE;
  921. put_fd(descriptor);
  922. return pos;
  923. }
  924. status_t
  925. _kern_ioctl(int fd, uint32 op, void* buffer, size_t length)
  926. {
  927. TRACE(("kern_ioctl: fd %d\n", fd));
  928. SyscallFlagUnsetter _;
  929. return fd_ioctl(true, fd, op, buffer, length);
  930. }
  931. ssize_t
  932. _kern_read_dir(int fd, struct dirent* buffer, size_t bufferSize,
  933. uint32 maxCount)
  934. {
  935. struct file_descriptor* descriptor;
  936. ssize_t retval;
  937. TRACE(("sys_read_dir(fd = %d, buffer = %p, bufferSize = %ld, count = "
  938. "%lu)\n",fd, buffer, bufferSize, maxCount));
  939. struct io_context* ioContext = get_current_io_context(true);
  940. descriptor = get_fd(ioContext, fd);
  941. if (descriptor == NULL)
  942. return B_FILE_ERROR;
  943. if (descriptor->ops->fd_read_dir) {
  944. uint32 count = maxCount;
  945. retval = descriptor->ops->fd_read_dir(ioContext, descriptor, buffer,
  946. bufferSize, &count);
  947. if (retval >= 0)
  948. retval = count;
  949. } else
  950. retval = B_UNSUPPORTED;
  951. put_fd(descriptor);
  952. return retval;
  953. }
  954. status_t
  955. _kern_rewind_dir(int fd)
  956. {
  957. struct file_descriptor* descriptor;
  958. status_t status;
  959. TRACE(("sys_rewind_dir(fd = %d)\n",fd));
  960. descriptor = get_fd(get_current_io_context(true), fd);
  961. if (descriptor == NULL)
  962. return B_FILE_ERROR;
  963. if (descriptor->ops->fd_rewind_dir)
  964. status = descriptor->ops->fd_rewind_dir(descriptor);
  965. else
  966. status = B_UNSUPPORTED;
  967. put_fd(descriptor);
  968. return status;
  969. }
  970. status_t
  971. _kern_close(int fd)
  972. {
  973. return common_close(fd, true);
  974. }
  975. int
  976. _kern_dup(int fd)
  977. {
  978. return dup_fd(fd, true);
  979. }
  980. int
  981. _kern_dup2(int ofd, int nfd)
  982. {
  983. return dup2_fd(ofd, nfd, true);
  984. }