/drivers/mtd/mtdchar.c
/*
 * Copyright 1999-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <asm/uaccess.h>

#define MTD_INODE_FS_MAGIC 0x11307854
static DEFINE_MUTEX(mtd_mutex);
static struct vfsmount *mtd_inode_mnt __read_mostly;

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	struct inode *ino;
	enum mtd_file_modes mode;
};

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	switch (orig) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		break;
	case SEEK_END:
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset <= mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}

static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;
	struct inode *mtd_ino;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mutex_lock(&mtd_mutex);
	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd)) {
		ret = PTR_ERR(mtd);
		goto out;
	}

	if (mtd->type == MTD_ABSENT) {
		put_mtd_device(mtd);
		ret = -ENODEV;
		goto out;
	}

	mtd_ino = iget_locked(mtd_inode_mnt->mnt_sb, devnum);
	if (!mtd_ino) {
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	if (mtd_ino->i_state & I_NEW) {
		mtd_ino->i_private = mtd;
		mtd_ino->i_mode = S_IFCHR;
		mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
		unlock_new_inode(mtd_ino);
	}
	file->f_mapping = mtd_ino->i_mapping;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -EACCES;
		goto out;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		iput(mtd_ino);
		put_mtd_device(mtd);
		ret = -ENOMEM;
		goto out;
	}
	mfi->ino = mtd_ino;
	mfi->mtd = mtd;
	file->private_data = mfi;

out:
	mutex_unlock(&mtd_mutex);
	return ret;
} /* mtd_open */
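
/*
 * Illustrative userspace sketch (not part of this file): each MTD device
 * gets a pair of character nodes, /dev/mtdN with even minor 2*N (RW) and
 * /dev/mtdNro with odd minor 2*N+1 (RO), which is why mtd_open() above
 * derives devnum as minor >> 1 and rejects FMODE_WRITE on odd minors.
 * The device paths are assumptions for the example:
 *
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/mtd0", O_RDWR);	// even minor: RW allowed
 *	int ro = open("/dev/mtd0ro", O_WRONLY);	// odd minor: open() fails
 *						// with errno == EACCES
 */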

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE) && mtd->sync)
		mtd->sync(mtd);

	iput(mfi->ino);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtd_close */

/* Back in June 2001, dwmw2 wrote:
 *
 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 *   userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory situations
 * or if memory is highly fragmented at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd->read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important to dump
		 * areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way must be
		 * aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */
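
/*
 * Illustrative userspace sketch (assumption, not part of this file): a
 * plain read(2)/pread(2) goes through mtd_read() above, so the data is
 * still handed back even when the NAND driver reports -EBADMSG or
 * -EUCLEAN internally; fd is assumed to come from an earlier open():
 *
 *	#include <unistd.h>
 *
 *	unsigned char page[2048];
 *	ssize_t n = pread(fd, page, sizeof(page), 0);
 *	// n == 2048 on success; areas with ECC errors are dumped anyway
 */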

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;

		case MTD_MODE_RAW:
		{
			struct mtd_oob_ops ops;

			ops.mode = MTD_OOB_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd->write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */
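
/*
 * Illustrative userspace sketch (assumption, not part of this file):
 * mtd_write() programs flash but never erases it, so an eraseblock is
 * normally erased via the MEMERASE ioctl (handled further down) before
 * being rewritten. fd and data are placeholders for the example:
 *
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	struct mtd_info_user info;
 *	ioctl(fd, MEMGETINFO, &info);			// query geometry
 *	struct erase_info_user ei = { 0, info.erasesize };
 *	if (ioctl(fd, MEMERASE, &ei) == 0)
 *		pwrite(fd, data, info.writesize, 0);	// then program
 */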

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

#ifdef CONFIG_HAVE_MTD_OTP
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	int ret = 0;

	switch (mode) {
	case MTD_OTP_FACTORY:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (!mtd->read_fact_prot_reg)
			ret = -EOPNOTSUPP;
		else
			mfi->mode = MTD_MODE_OTP_USER;
		break;
	default:
		ret = -EINVAL;
	case MTD_OTP_OFF:
		break;
	}
	return ret;
}
#else
# define otp_select_filemode(f,m)	-EOPNOTSUPP
#endif

static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
	uint64_t start, uint32_t length, void __user *ptr,
	uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	uint32_t retlen;
	int ret = 0;

	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->write_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;

	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->oobsize - 1);
	ret = mtd->write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
	uint32_t length, void __user *ptr, uint32_t __user *retp)
{
	struct mtd_oob_ops ops;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!mtd->read_oob)
		ret = -EOPNOTSUPP;
	else
		ret = access_ok(VERIFY_WRITE, ptr, length) ? 0 : -EFAULT;
	if (ret)
		return ret;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->oobsize - 1);
	ops.datbuf = NULL;
	ops.mode = MTD_OOB_PLACE;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->oobsize - 1);
	ret = mtd->read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

/*
 * Copies (and truncates, if necessary) data from the larger struct,
 * nand_ecclayout, to the smaller, deprecated layout struct,
 * nand_ecclayout_user. This is necessary only to support the deprecated
 * API ioctl ECCGETLAYOUT while allowing all new functionality to use
 * nand_ecclayout flexibly (i.e. the struct may change size in new
 * releases without requiring major rewrites).
 */
static int shrink_ecclayout(const struct nand_ecclayout *from,
		struct nand_ecclayout_user *to)
{
	int i;

	if (!from || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
	for (i = 0; i < to->eccbytes; i++)
		to->eccpos[i] = from->eccpos[i];

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		if (from->oobfree[i].length == 0 &&
		    from->oobfree[i].offset == 0)
			break;
		to->oobavail += from->oobfree[i].length;
		to->oobfree[i] = from->oobfree[i];
	}

	return 0;
}
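
/*
 * Illustrative userspace sketch (assumption, not part of this file): the
 * deprecated ECCGETLAYOUT ioctl hands back the truncated
 * nand_ecclayout_user filled in by shrink_ecclayout() above:
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	struct nand_ecclayout_user lay;
 *	if (ioctl(fd, ECCGETLAYOUT, &lay) == 0)
 *		printf("%u ECC bytes, %u free OOB bytes\n",
 *		       lay.eccbytes, lay.oobavail);
 */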

static int mtd_blkpg_ioctl(struct mtd_info *mtd,
			   struct blkpg_ioctl_arg __user *arg)
{
	struct blkpg_ioctl_arg a;
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
		return -EFAULT;

	if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
		return -EFAULT;

	switch (a.op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master mtd device may be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
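
/*
 * Illustrative userspace sketch (assumption, not part of this file):
 * BLKPG on a master MTD device adds or deletes a partition at runtime;
 * CAP_SYS_ADMIN is required. Name, offset and size are example values:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/blkpg.h>
 *
 *	struct blkpg_partition part = {
 *		.start = 0, .length = 1 << 20, .pno = 0,
 *		.devname = "example",
 *	};
 *	struct blkpg_ioctl_arg arg = {
 *		.op = BLKPG_ADD_PARTITION,
 *		.datalen = sizeof(part),
 *		.data = &part,
 *	};
 *	int err = ioctl(fd, BLKPG, &arg);	// 0 on success
 */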

static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;
	struct mtd_info_user info;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below fields are obsolete */
		info.ecctype = -1;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}
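
	/*
	 * Illustrative userspace sketch (assumption, not part of this
	 * file): MEMERASE takes 32-bit offsets, so devices or offsets
	 * beyond 4GiB need MEMERASE64 with struct erase_info_user64. The
	 * ioctl blocks (see the uninterruptible wait above) until the
	 * erase completes or fails. info is assumed to come from a prior
	 * MEMGETINFO call:
	 *
	 *	struct erase_info_user64 ei64 = {
	 *		.start  = 6ULL << 30,		// example offset
	 *		.length = info.erasesize,
	 *	};
	 *	int err = ioctl(fd, MEMERASE64, &ei64);	// -1/EIO on failure
	 */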

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		if (!mtd->is_locked)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_HAVE_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (mfi->mode) {
		case MTD_MODE_OTP_FACTORY:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		default:
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
#endif

	/* This ioctl is being deprecated - it truncates the ecc layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd->ecclayout, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_MODE_OTP_FACTORY:
		case MTD_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_MODE_RAW:
			if (!mtd->read_oob || !mtd->write_oob)
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		ret = mtd_blkpg_ioctl(mtd,
		      (struct blkpg_ioctl_arg __user *)arg);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */
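
/*
 * Illustrative userspace sketch (assumption, not part of this file):
 * walking a NAND device eraseblock by eraseblock with MEMGETBADBLOCK,
 * which returns 1 for a bad block, 0 for a good one, negative on error:
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	struct mtd_info_user info;
 *	ioctl(fd, MEMGETINFO, &info);
 *	for (loff_t offs = 0; offs < info.size; offs += info.erasesize) {
 *		if (ioctl(fd, MEMGETBADBLOCK, &offs) > 0)
 *			printf("bad block at 0x%llx\n", (long long)offs);
 *	}
 */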

static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtd_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32	_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32	_IOWR('M', 4, struct mtd_oob_buf32)

static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtd_do_readoob(mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}
	default:
		ret = mtd_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (an MMU kernel can't map the
 *   device directly and doesn't copy private mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtd_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	if (mtd->get_unmapped_area) {
		unsigned long offset;

		if (addr != 0)
			return (unsigned long) -EINVAL;

		if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
			return (unsigned long) -EINVAL;

		offset = pgoff << PAGE_SHIFT;
		if (offset > mtd->size - len)
			return (unsigned long) -EINVAL;

		return mtd->get_unmapped_area(mtd, len, offset, flags);
	}

	/* can't map directly */
	return (unsigned long) -ENOSYS;
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;
	unsigned long start;
	unsigned long off;
	u32 len;

	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
		off = vma->vm_pgoff << PAGE_SHIFT;
		start = map->phys;
		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
		start &= PAGE_MASK;
		if ((vma->vm_end - vma->vm_start + off) > len)
			return -EINVAL;

		off += start;
		vma->vm_pgoff = off >> PAGE_SHIFT;
		vma->vm_flags |= VM_IO | VM_RESERVED;

#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;

		return 0;
	}
	return -ENOSYS;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
#endif
}
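
/*
 * Illustrative userspace sketch (assumption, not part of this file): on
 * an MMU kernel, mtd_mmap() above only allows mappings of MTD_RAM and
 * MTD_ROM devices, backed by the map's physical address; anything else,
 * including NOR and NAND flash, fails with -ENOSYS:
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *	// p == MAP_FAILED with errno == ENOSYS for non-RAM/ROM devices
 */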

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.unlocked_ioctl	= mtd_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtd_compat_ioctl,
#endif
	.open		= mtd_open,
	.release	= mtd_close,
	.mmap		= mtd_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtd_get_unmapped_area,
#endif
};

static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL,
			    MTD_INODE_FS_MAGIC);
}

static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.mount = mtd_inodefs_mount,
	.kill_sb = kill_anon_super,
};

static void mtdchar_notify_add(struct mtd_info *mtd)
{
}

static void mtdchar_notify_remove(struct mtd_info *mtd)
{
	struct inode *mtd_ino = ilookup(mtd_inode_mnt->mnt_sb, mtd->index);

	if (mtd_ino) {
		/* Destroy the inode if it exists */
		mtd_ino->i_nlink = 0;
		iput(mtd_ino);
	}
}

static struct mtd_notifier mtdchar_notifier = {
	.add = mtdchar_notify_add,
	.remove = mtdchar_notify_remove,
};

static int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_notice("Can't allocate major number %d for "
			  "Memory Technology Devices.\n", MTD_CHAR_MAJOR);
		return ret;
	}

	ret = register_filesystem(&mtd_inodefs_type);
	if (ret) {
		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_chdev;
	}

	mtd_inode_mnt = kern_mount(&mtd_inodefs_type);
	if (IS_ERR(mtd_inode_mnt)) {
		ret = PTR_ERR(mtd_inode_mnt);
		pr_notice("Error mounting mtd_inodefs filesystem: %d\n", ret);
		goto err_unregister_filesystem;
	}
	register_mtd_user(&mtdchar_notifier);

	return ret;

err_unregister_filesystem:
	unregister_filesystem(&mtd_inodefs_type);
err_unregister_chdev:
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
	return ret;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&mtdchar_notifier);
	mntput(mtd_inode_mnt);
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);