PageRenderTime 180ms CodeModel.GetById 19ms app.highlight 141ms RepoModel.GetById 2ms app.codeStats 0ms

/fs/cifs/file.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
C | 2462 lines | 1820 code | 346 blank | 296 comment | 447 complexity | 2972a9f8ae8a89471fb16a749c5df6ed MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0

Large files are truncated, but you can click here to view the full file

   1/*
   2 *   fs/cifs/file.c
   3 *
   4 *   vfs operations that deal with files
   5 *
   6 *   Copyright (C) International Business Machines  Corp., 2002,2010
   7 *   Author(s): Steve French (sfrench@us.ibm.com)
   8 *              Jeremy Allison (jra@samba.org)
   9 *
  10 *   This library is free software; you can redistribute it and/or modify
  11 *   it under the terms of the GNU Lesser General Public License as published
  12 *   by the Free Software Foundation; either version 2.1 of the License, or
  13 *   (at your option) any later version.
  14 *
  15 *   This library is distributed in the hope that it will be useful,
  16 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  18 *   the GNU Lesser General Public License for more details.
  19 *
  20 *   You should have received a copy of the GNU Lesser General Public License
  21 *   along with this library; if not, write to the Free Software
  22 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  23 */
  24#include <linux/fs.h>
  25#include <linux/backing-dev.h>
  26#include <linux/stat.h>
  27#include <linux/fcntl.h>
  28#include <linux/pagemap.h>
  29#include <linux/pagevec.h>
  30#include <linux/writeback.h>
  31#include <linux/task_io_accounting_ops.h>
  32#include <linux/delay.h>
  33#include <linux/mount.h>
  34#include <linux/slab.h>
  35#include <asm/div64.h>
  36#include "cifsfs.h"
  37#include "cifspdu.h"
  38#include "cifsglob.h"
  39#include "cifsproto.h"
  40#include "cifs_unicode.h"
  41#include "cifs_debug.h"
  42#include "cifs_fs_sb.h"
  43#include "fscache.h"
  44
  45static inline int cifs_convert_flags(unsigned int flags)
  46{
  47	if ((flags & O_ACCMODE) == O_RDONLY)
  48		return GENERIC_READ;
  49	else if ((flags & O_ACCMODE) == O_WRONLY)
  50		return GENERIC_WRITE;
  51	else if ((flags & O_ACCMODE) == O_RDWR) {
  52		/* GENERIC_ALL is too much permission to request
  53		   can cause unnecessary access denied on create */
  54		/* return GENERIC_ALL; */
  55		return (GENERIC_READ | GENERIC_WRITE);
  56	}
  57
  58	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
  59		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
  60		FILE_READ_DATA);
  61}
  62
  63static u32 cifs_posix_convert_flags(unsigned int flags)
  64{
  65	u32 posix_flags = 0;
  66
  67	if ((flags & O_ACCMODE) == O_RDONLY)
  68		posix_flags = SMB_O_RDONLY;
  69	else if ((flags & O_ACCMODE) == O_WRONLY)
  70		posix_flags = SMB_O_WRONLY;
  71	else if ((flags & O_ACCMODE) == O_RDWR)
  72		posix_flags = SMB_O_RDWR;
  73
  74	if (flags & O_CREAT)
  75		posix_flags |= SMB_O_CREAT;
  76	if (flags & O_EXCL)
  77		posix_flags |= SMB_O_EXCL;
  78	if (flags & O_TRUNC)
  79		posix_flags |= SMB_O_TRUNC;
  80	/* be safe and imply O_SYNC for O_DSYNC */
  81	if (flags & O_DSYNC)
  82		posix_flags |= SMB_O_SYNC;
  83	if (flags & O_DIRECTORY)
  84		posix_flags |= SMB_O_DIRECTORY;
  85	if (flags & O_NOFOLLOW)
  86		posix_flags |= SMB_O_NOFOLLOW;
  87	if (flags & O_DIRECT)
  88		posix_flags |= SMB_O_DIRECT;
  89
  90	return posix_flags;
  91}
  92
  93static inline int cifs_get_disposition(unsigned int flags)
  94{
  95	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
  96		return FILE_CREATE;
  97	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
  98		return FILE_OVERWRITE_IF;
  99	else if ((flags & O_CREAT) == O_CREAT)
 100		return FILE_OPEN_IF;
 101	else if ((flags & O_TRUNC) == O_TRUNC)
 102		return FILE_OVERWRITE;
 103	else
 104		return FILE_OPEN;
 105}
 106
/*
 * Open @full_path on the server using the SMB POSIX extensions
 * (CIFSPOSIXCreate). On success the oplock level and file handle are
 * returned through @poplock and @pnetfid. If @pinode is non-NULL, the
 * inode it points to is created (when *@pinode == NULL) or refreshed
 * from the FILE_UNIX_BASIC_INFO the server sent back with the open
 * response. Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
			struct super_block *sb, int mode, unsigned int f_flags,
			__u32 *poplock, __u16 *pnetfid, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* honor the process umask before sending the mode to the server */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server returned no metadata with the open */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: just refresh its attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
 168
/*
 * Open @full_path with a traditional (non-POSIX) SMB open: either
 * CIFSSMBOpen on NT-capable servers or SMBLegacyOpen otherwise. The
 * oplock level and file handle come back through @poplock/@pnetfid,
 * and the inode is refreshed from the open response (or via the Unix
 * path query when the tcon has unix extensions). Returns 0 or a
 * negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
	     __u16 *pnetfid, int xid)
{
	int rc;
	int desiredAccess;
	int disposition;
	FILE_ALL_INFO *buf;

	desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* scratch buffer to receive file metadata from the open response */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc)
		goto out;

	/* refresh the inode so cached metadata matches the server */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, pnetfid);

out:
	kfree(buf);
	return rc;
}
 238
/*
 * Allocate and initialize the per-open-file private data for @file,
 * taking a reference on the dentry and tcon link, and link it onto the
 * tcon's and inode's open-file lists. Also records the oplock level the
 * server granted. Returns the new cifsFileInfo (with count == 1), or
 * NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
	struct cifsFileInfo *pCifsFile;

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	pCifsFile->count = 1;		/* initial reference, dropped in cifs_close */
	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	pCifsFile->uid = current_fsuid();
	pCifsFile->dentry = dget(dentry);
	pCifsFile->f_flags = file->f_flags;
	pCifsFile->invalidHandle = false;
	pCifsFile->tlink = cifs_get_tlink(tlink);
	mutex_init(&pCifsFile->fh_mutex);
	mutex_init(&pCifsFile->lock_mutex);
	INIT_LIST_HEAD(&pCifsFile->llist);
	INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

	spin_lock(&cifs_file_list_lock);
	list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&pCifsFile->flist, &pCifsInode->openFileList);
	else
		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	cifs_set_oplock_level(pCifsInode, oplock);

	file->private_data = pCifsFile;
	return pCifsFile;
}
 279
 280/*
 281 * Release a reference on the file private data. This may involve closing
 282 * the filehandle out on the server. Must be called without holding
 283 * cifs_file_list_lock.
 284 */
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference is dropped this unlinks the entry from the
 * open-file lists, closes the handle on the server (if still valid),
 * frees any cached byte-range locks, and drops the dentry and tcon
 * references taken in cifs_new_fileinfo().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		/* other users remain; nothing to tear down yet */
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
			cifs_file->dentry->d_inode);

		/* in strict cache mode we need invalidate mapping on the last
		   close  because it may cause a error when we open this file
		   again and get at least level II oplock */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;

		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* only bother closing on the wire if the handle is still live */
	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		int xid, rc;

		xid = GetXid();
		rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
		FreeXid(xid);
	}

	/* Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	mutex_lock(&cifs_file->lock_mutex);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
		list_del(&li->llist);
		kfree(li);
	}
	mutex_unlock(&cifs_file->lock_mutex);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}
 339
/*
 * ->open() for regular files. Tries a POSIX-extensions open first when
 * the server advertises the capability, falling back to the traditional
 * NT/legacy open path on servers that reject it. On success attaches a
 * cifsFileInfo to file->private_data and sets up fscache. Returns 0 or
 * a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *pCifsFile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	__u16 netfid;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		FreeXid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* prefer the POSIX open when the server supports it and it has not
	   previously failed in a way that marked it broken */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/* else fallthrough to retry open the old way on network i/o
		   or DFS errors */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &netfid, xid);
		if (rc)
			goto out;
	}

	pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
	if (pCifsFile == NULL) {
		/* can't track the handle locally; close it on the server */
		CIFSSMBClose(xid, tcon, netfid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
					pCifsFile->pid);
	}

out:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
 442
 443/* Try to reacquire byte range locks that were released when session */
 444/* to server was lost */
 445static int cifs_relock_file(struct cifsFileInfo *cifsFile)
 446{
 447	int rc = 0;
 448
 449/* BB list all locks open on this file and relock */
 450
 451	return rc;
 452}
 453
/*
 * Re-open a file whose server handle was invalidated (e.g. after a
 * session reconnect). Tries the POSIX open first where available, then
 * the standard SMB open. On success stores the new netfid, optionally
 * flushes dirty pages and refreshes inode info (@can_flush), restores
 * the oplock level and reacquires byte-range locks. Serialized against
 * concurrent reopens by pCifsFile->fh_mutex.
 */
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
	int rc = -EACCES;
	int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	xid = GetXid();
	mutex_lock(&pCifsFile->fh_mutex);
	if (!pCifsFile->invalidHandle) {
		/* someone else already reopened it while we waited */
		mutex_unlock(&pCifsFile->fh_mutex);
		rc = 0;
		FreeXid(xid);
		return rc;
	}

	inode = pCifsFile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(pCifsFile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&pCifsFile->fh_mutex);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s",
		 inode, pCifsFile->f_flags, full_path);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {

		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = pCifsFile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				oflags, &oplock, &netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/* fallthrough to retry open the old way on errors, especially
		   in the reconnect path it is important to retry hard */
	}

	desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		mutex_unlock(&pCifsFile->fh_mutex);
		cFYI(1, "cifs_open returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	pCifsFile->netfid = netfid;
	pCifsFile->invalidHandle = false;
	mutex_unlock(&pCifsFile->fh_mutex);
	pCifsInode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode,
				full_path, inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode,
				full_path, NULL, inode->i_sb,
				xid, NULL);
	} /* else we are writing out data to server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server
	     we can not go to the server to get the new inod
	     info */

	cifs_set_oplock_level(pCifsInode, oplock);

	cifs_relock_file(pCifsFile);

reopen_error_exit:
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
 575
 576int cifs_close(struct inode *inode, struct file *file)
 577{
 578	if (file->private_data != NULL) {
 579		cifsFileInfo_put(file->private_data);
 580		file->private_data = NULL;
 581	}
 582
 583	/* return code from the ->release op is always ignored */
 584	return 0;
 585}
 586
/*
 * ->release() for directories: close any still-open server search
 * handle from an uncompleted readdir, free the cached network buffer
 * holding search results, and release the private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct = file->private_data;
	char *ptmp;

	cFYI(1, "Closedir inode = 0x%p", inode);

	xid = GetXid();

	if (pCFileStruct) {
		struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

		cFYI(1, "Freeing private data in close dir");
		spin_lock(&cifs_file_list_lock);
		if (!pCFileStruct->srch_inf.endOfSearch &&
		    !pCFileStruct->invalidHandle) {
			/* mark invalid under the lock, then close on the wire */
			pCFileStruct->invalidHandle = true;
			spin_unlock(&cifs_file_list_lock);
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, "Closing uncompleted readdir with rc %d",
				 rc);
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		} else
			spin_unlock(&cifs_file_list_lock);
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, "closedir free smb buf in srch struct");
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			/* release into whichever pool the buffer came from */
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		cifs_put_tlink(pCFileStruct->tlink);
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
 631
 632static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
 633				__u64 offset, __u8 lockType)
 634{
 635	struct cifsLockInfo *li =
 636		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
 637	if (li == NULL)
 638		return -ENOMEM;
 639	li->offset = offset;
 640	li->length = len;
 641	li->type = lockType;
 642	mutex_lock(&fid->lock_mutex);
 643	list_add(&li->llist, &fid->llist);
 644	mutex_unlock(&fid->lock_mutex);
 645	return 0;
 646}
 647
/*
 * ->lock() implementation: set, clear or test a byte-range lock.
 * Uses the POSIX-extensions lock call when the server supports
 * CIFS_UNIX_FCNTL_CAP and the mount did not set NOPOSIXBRL; otherwise
 * falls back to Windows LOCKING_ANDX semantics, in which case granted
 * locks are also cached locally (store_file_lock) so they can be
 * matched up and released on unlock/close. GETLK is emulated on the
 * Windows path by attempting a lock and immediately undoing it.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	bool wait_flag = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	bool posix_locking = 0;

	/* fl_end is inclusive, hence the +1 */
	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, "Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		pfLock->fl_end);

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, "Posix");
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, "Flock");
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, "Blocking lock");
		wait_flag = true;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, "Process suspended by mandatory locking - "
			 "not implemented yet");
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, "Lease on file - not implemented yet");
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags);

	/* translate the VFS lock type into lock/unlock counts and the
	   shared-vs-exclusive bit of the SMB lock type */
	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, "F_WRLCK ");
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, "F_UNLCK");
		numUnlock = 1;
		/* Check if unlock includes more than
		one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, "F_RDLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, "F_EXLCK");
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, "F_SHLCK");
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, "Unknown type of lock");

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	if ((tcon->ses->capabilities & CAP_UNIX) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_locking = 1;
	/* BB add code here to normalize offset and length to
	account for negative length which we can not accept over the
	wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
					length, pfLock, posix_lock_type,
					wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* Windows path: probe by taking the lock; if it succeeds the
		   range was free, so undo it and report F_UNLCK */
		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */, 0);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */, 0);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, "Error unlocking previously locked "
					   "range %d during test of lock", rc);
			rc = 0;

		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;

			if (lockType & LOCKING_ANDX_SHARED_LOCK) {
				pfLock->fl_type = F_WRLCK;
			} else {
				/* exclusive probe failed; retry shared to
				   distinguish a read lock from a write lock */
				rc = CIFSSMBLock(xid, tcon, netfid, length,
					pfLock->fl_start, 0, 1,
					lockType | LOCKING_ANDX_SHARED_LOCK,
					0 /* wait flag */, 0);
				if (rc == 0) {
					rc = CIFSSMBLock(xid, tcon, netfid,
						length, pfLock->fl_start, 1, 0,
						lockType |
						LOCKING_ANDX_SHARED_LOCK,
						0 /* wait flag */, 0);
					pfLock->fl_type = F_RDLCK;
					if (rc != 0)
						cERROR(1, "Error unlocking "
						"previously locked range %d "
						"during test of lock", rc);
					rc = 0;
				} else {
					pfLock->fl_type = F_WRLCK;
					rc = 0;
				}
			}
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
				      length, pfLock, posix_lock_type,
				      wait_flag);
	} else {
		struct cifsFileInfo *fid = file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, tcon, netfid, length,
					 pfLock->fl_start, 0, numLock, lockType,
					 wait_flag, 0);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
						(pfLock->fl_start + length) >=
						(li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, tcon,
							netfid, li->length,
							li->offset, 1, 0,
							li->type, false, 0);
					if (stored_rc)
						rc = stored_rc;
					else {
						/* server unlock succeeded;
						   drop the cached record */
						list_del(&li->llist);
						kfree(li);
					}
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	/* keep the local VFS lock state in sync for POSIX locks */
	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}
 848
 849/* update the file size (if needed) after a write */
 850void
 851cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 852		      unsigned int bytes_written)
 853{
 854	loff_t end_of_write = offset + bytes_written;
 855
 856	if (end_of_write > cifsi->server_eof)
 857		cifsi->server_eof = end_of_write;
 858}
 859
/*
 * Write @write_size bytes from @write_data to the server starting at
 * *@poffset, chunked by the mount's wsize. Retries on -EAGAIN, and
 * reopens an invalidated handle without flushing (to avoid deadlock).
 * Advances *@poffset past what was written and updates the cached EOF
 * and i_size. Returns the number of bytes written, or a negative errno
 * if nothing was written.
 */
static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
			  const char *write_data, size_t write_size,
			  loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
	   *poffset, dentry->d_name.name);

	pTcon = tlink_tcon(open_file->tlink);

	xid = GetXid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each request at the negotiated write size */
			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = len;
			rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
					   1, 0);
		}
		if (rc || (bytes_written == 0)) {
			/* partial success: report what we managed so far;
			   total failure: report the error */
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_update_eof(cifsi, *poffset, bytes_written);
			*poffset += bytes_written;
		}
	}

	cifs_stats_bytes_written(pTcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*poffset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *poffset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	FreeXid(xid);
	return total_written;
}
 938
/*
 * Find an open file instance on @cifs_inode that was opened readable
 * and still has a valid server handle. Takes a reference on the
 * returned cifsFileInfo (caller must cifsFileInfo_put it); returns NULL
 * if none found. @fsuid_only restricts the search to handles owned by
 * the current fsuid (only honored on multiuser mounts).
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
 972
/*
 * Find an open file instance on @cifs_inode that was opened writable,
 * preferring handles owned by the current task; if a candidate's server
 * handle is invalid, attempt to reopen it (dropping the list lock while
 * doing so). Takes a reference on the returned cifsFileInfo (caller
 * must cifsFileInfo_put it); returns NULL if no usable handle exists.
 * @fsuid_only restricts to handles owned by the current fsuid (only
 * honored on multiuser mounts).
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, "Null inode passed to cifs_writeable_file");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass considers only this task's handles */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && open_file->uid != current_fsuid())
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			cifsFileInfo_get(open_file);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			}

			spin_unlock(&cifs_file_list_lock);

			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file, false);
			if (!rc)
				return open_file;

			/* if it fails, try another handle if possible */
			cFYI(1, "wp failed on reopen file");
			cifsFileInfo_put(open_file);

			spin_lock(&cifs_file_list_lock);

			/* else we simply continue to the next entry. Thus
			   we do not loop on reopen errors.  If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
1043
1044static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1045{
1046	struct address_space *mapping = page->mapping;
1047	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1048	char *write_data;
1049	int rc = -EFAULT;
1050	int bytes_written = 0;
1051	struct inode *inode;
1052	struct cifsFileInfo *open_file;
1053
1054	if (!mapping || !mapping->host)
1055		return -EFAULT;
1056
1057	inode = page->mapping->host;
1058
1059	offset += (loff_t)from;
1060	write_data = kmap(page);
1061	write_data += from;
1062
1063	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1064		kunmap(page);
1065		return -EIO;
1066	}
1067
1068	/* racing with truncate? */
1069	if (offset > mapping->host->i_size) {
1070		kunmap(page);
1071		return 0; /* don't care */
1072	}
1073
1074	/* check to make sure that we are not extending the file */
1075	if (mapping->host->i_size - offset < (loff_t)to)
1076		to = (unsigned)(mapping->host->i_size - offset);
1077
1078	open_file = find_writable_file(CIFS_I(mapping->host), false);
1079	if (open_file) {
1080		bytes_written = cifs_write(open_file, open_file->pid,
1081					   write_data, to - from, &offset);
1082		cifsFileInfo_put(open_file);
1083		/* Does mm or vfs already set times? */
1084		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1085		if ((bytes_written > 0) && (offset))
1086			rc = 0;
1087		else if (bytes_written < 0)
1088			rc = bytes_written;
1089	} else {
1090		cFYI(1, "No writeable filehandles for inode");
1091		rc = -EIO;
1092	}
1093
1094	kunmap(page);
1095	return rc;
1096}
1097
/*
 * Write back dirty pages of @mapping, batching runs of contiguous dirty
 * pages into single asynchronous SMB writes of up to wsize bytes.
 * Returns 0 on success or a negative error code.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* most pages that fit in one wsize-sized write */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			/* no more dirty pages in the requested range */
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* trim the batch down to a locked, contiguous, dirty run */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);

		/* issue the async write, retrying with a fresh writable
		   handle as long as a data-integrity sync gets -EAGAIN */
		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cERROR(1, "No writable handles for inode");
				rc = -EBADF;
				break;
			}
			rc = cifs_async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
1296
1297static int
1298cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1299{
1300	int rc;
1301	int xid;
1302
1303	xid = GetXid();
1304/* BB add check for wbc flags */
1305	page_cache_get(page);
1306	if (!PageUptodate(page))
1307		cFYI(1, "ppw - page not up to date");
1308
1309	/*
1310	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1311	 *
1312	 * A writepage() implementation always needs to do either this,
1313	 * or re-dirty the page with "redirty_page_for_writepage()" in
1314	 * the case of a failure.
1315	 *
1316	 * Just unlocking the page will cause the radix tree tag-bits
1317	 * to fail to update with the state of the page correctly.
1318	 */
1319	set_page_writeback(page);
1320retry_write:
1321	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1322	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1323		goto retry_write;
1324	else if (rc == -EAGAIN)
1325		redirty_page_for_writepage(wbc, page);
1326	else if (rc != 0)
1327		SetPageError(page);
1328	else
1329		SetPageUptodate(page);
1330	end_page_writeback(page);
1331	page_cache_release(page);
1332	FreeXid(xid);
1333	return rc;
1334}
1335
/* ->writepage entry point: write the page, then drop the page lock the
   VM handed us (cifs_writepage_locked leaves the page locked) */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
1342
/*
 * ->write_end address_space op: commit @copied bytes that were placed in
 * @page by write_begin. Up-to-date pages are just dirtied; partial copies
 * into non-uptodate pages are written to the server synchronously.
 * Returns the number of bytes accepted or a negative error code; always
 * unlocks and releases the page.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the opener's pid to the server if requested at mount */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cFYI(1, "write_end for page %p from pos %lld with %d bytes",
		 page, pos, copied);

	if (PageChecked(page)) {
		/* write_begin marked this page; a full-length copy means
		   the whole page now holds valid data */
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		/* partial copy into a non-uptodate page: send it to the
		   server now rather than caching a half-valid page */
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		int xid;

		xid = GetXid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well	leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		FreeXid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* extend the cached file size if we wrote past EOF */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
1403
1404int cifs_strict_fsync(struct file *file, int datasync)
1405{
1406	int xid;
1407	int rc = 0;
1408	struct cifs_tcon *tcon;
1409	struct cifsFileInfo *smbfile = file->private_data;
1410	struct inode *inode = file->f_path.dentry->d_inode;
1411	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1412
1413	xid = GetXid();
1414
1415	cFYI(1, "Sync file - name: %s datasync: 0x%x",
1416		file->f_path.dentry->d_name.name, datasync);
1417
1418	if (!CIFS_I(inode)->clientCanCacheRead) {
1419		rc = cifs_invalidate_mapping(inode);
1420		if (rc) {
1421			cFYI(1, "rc: %d during invalidate phase", rc);
1422			rc = 0; /* don't care about it in fsync */
1423		}
1424	}
1425
1426	tcon = tlink_tcon(smbfile->tlink);
1427	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1428		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1429
1430	FreeXid(xid);
1431	return rc;
1432}
1433
1434int cifs_fsync(struct file *file, int datasync)
1435{
1436	int xid;
1437	int rc = 0;
1438	struct cifs_tcon *tcon;
1439	struct cifsFileInfo *smbfile = file->private_data;
1440	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1441
1442	xid = GetXid();
1443
1444	cFYI(1, "Sync file - name: %s datasync: 0x%x",
1445		file->f_path.dentry->d_name.name, datasync);
1446
1447	tcon = tlink_tcon(smbfile->tlink);
1448	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
1449		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
1450
1451	FreeXid(xid);
1452	return rc;
1453}
1454
1455/*
1456 * As file closes, flush all cached write data for this inode checking
1457 * for write behind errors.
1458 */
1459int cifs_flush(struct file *file, fl_owner_t id)
1460{
1461	struct inode *inode = file->f_path.dentry->d_inode;
1462	int rc = 0;
1463
1464	if (file->f_mode & FMODE_WRITE)
1465		rc = filemap_write_and_wait(inode->i_mapping);
1466
1467	cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
1468
1469	return rc;
1470}
1471
1472static int
1473cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
1474{
1475	int rc = 0;
1476	unsigned long i;
1477
1478	for (i = 0; i < num_pages; i++) {
1479		pages[i] = alloc_page(__GFP_HIGHMEM);
1480		if (!pages[i]) {
1481			/*
1482			 * save number of pages we have already allocated and
1483			 * return with ENOMEM error
1484			 */
1485			num_pages = i;
1486			rc = -ENOMEM;
1487			goto error;
1488		}
1489	}
1490
1491	return rc;
1492
1493error:
1494	for (i = 0; i < num_pages; i++)
1495		put_page(pages[i]);
1496	return rc;
1497}
1498
1499static inline
1500size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
1501{
1502	size_t num_pages;
1503	size_t clen;
1504
1505	clen = min_t(const size_t, len, wsize);
1506	num_pages = clen / PAGE_CACHE_SIZE;
1507	if (clen % PAGE_CACHE_SIZE)
1508		num_pages++;
1509
1510	if (cur_len)
1511		*cur_len = clen;
1512
1513	return num_pages;
1514}
1515
1516static ssize_t
1517cifs_iovec_write(struct file *file, const struct iovec *iov,
1518		 unsigned long nr_segs, loff_t *poffset)
1519{
1520	unsigned int written;
1521	unsigned long num_pages, npages, i;
1522	size_t copied, len, cur_len;
1523	ssize_t total_written = 0;
1524	struct kvec *to_send;
1525	struct page **pages;
1526	struct iov_iter it;
1527	struct inode *inode;
1528	struct cifsFileInfo *open_file;
1529	struct cifs_tcon *pTcon;
1530	struct cifs_sb_info *cifs_sb;
1531	struct cifs_io_parms io_parms;
1532	int xid, rc;
1533	__u32 pid;
1534
1535	len = iov_length(iov, nr_segs);
1536	if (!len)
1537		return 0;
1538
1539	rc = generic_write_checks(file, poffset, &len, 0);
1540	if (rc)
1541		return rc;
1542
1543	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1544	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
1545
1546	pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
1547	if (!pages)
1548		return -ENOMEM;
1549
1550	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
1551	if (!to_send) {
1552		kfree(pages);
1553		return -ENOMEM;
1554	}
1555
1556	rc = cifs_write_allocate_pages(pages, num_pages);
1557	if (rc) {
1558		kfree(pages);
1559		kfree(to_send);
1560		return rc;
1561	}
1562
1563	xid = GetXid();
1564	open_file = file->private_data;
1565
1566	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1567		pid = open_file->pid;
1568	else
1569		pid = current->tgid;
1570
1571	pTcon = tlink_tcon(open_file->tlink);
1572	inode = file->f_path.dentry->d_inode;
1573
1574	iov_iter_init(&it, iov, nr_segs, len, 0);
1575	npages = num_pages;
1576
1577	do {
1578		size_t save_len = cur_len;
1579		for (i = 0; i < npages; i++) {
1580			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
1581			copied = iov_iter_copy_from_user(pages[i], &it, 0,
1582							 copied);
1583			cur_len -= copied;
1584			iov_iter_advance(&it, copied);
1585			to_send[i+1].iov_base = kmap(pages[i]);
1586			to_send[i+1].iov_len = copied;
1587		}
1588
1589		cur_len = save_len - cur_len;
1590
1591		do {
1592			if (open_file->invalidHandle) {
1593				rc = cifs_reopen_file(open_file, false);
1594				if (rc != 0)
1595					break;
1596			}
1597			io_parms.netfid = open_file->netfid;
1598			io_parms.pid = pid;
1599			io_parms.tcon = pTcon;
1600			io_parms.offset = *poffset;
1601			io_parms.length = cur_len;
1602			rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
1603					   npages, 0);
1604		} while (rc == -EAGAIN);
1605
1606		for (i = 0; i < npages; i++)
1607			kunmap(pages[i]);
1608
1609		if (written) {
1610			len -= written;
1611			total_written += written;
1612			cifs_update_eof(CIFS_I(inode), *poffset, written);
1613			*poffset += written;
1614		} else if (rc < 0) {
1615			if (!total_written)
1616				total_written = rc;
1617			break;
1618		}
1619
1620		/* get length and number of kvecs of the next write */
1621		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
1622	} while (len > 0);
1623
1624	if (total_written > 0) {
1625		spin_lock(&inode->i_lock);
1626		if (*poffset > inode->i_size)
1627			i_size_write(inode, *poffset);
1628		spin_unlock(&inode->i_lock);
1629	}
1630
1631	cifs_stats_bytes_written(pTcon, total_written);
1632	mark_inode_dirty_sync(inode);
1633
1634	for (i = 0; i < num_pages; i++)
1635		put_page(pages[i]);
1636	kfree(to_send);
1637	kfree(pages);
1638	FreeXid(xid);
1639	return total_written;
1640}
1641
1642ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
1643				unsigned long nr_segs, loff_t pos)
1644{
1645	ssize_t written;
1646	struct inode *inode;
1647
1648	inode = iocb->ki_filp->f_path.dentry->d_inode;
1649
1650	/*
1651	 * BB - optimize the way when signing is disabled. We can drop this
1652	 * extra memory-to-memory copying and use iovec buffers for constructing
1653	 * write request.
1654	 */
1655
1656	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
1657	if (written > 0) {
1658		CIFS_I(inode)->invalid_mapping = true;
1659		iocb->ki_pos = pos;
1660	}
1661
1662	return written;
1663}
1664
1665ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
1666			   unsigned long nr_segs, loff_t pos)
1667{
1668	struct inode *inode;
1669
1670	inode = iocb->ki_filp->f_path.dentry->d_inode;
1671
1672	if (CIFS_I(inode)->clientCanCacheAll)
1673		return generic_file_aio_write(iocb, iov, nr_segs, pos);
1674
1675	/*
1676	 * In strict cache mode we need to write the data to the server exactly
1677	 * from the pos to pos+len-1 rather than flush all affected pages
1678	 * because it may cause a error with mandatory locks on these pages but
1679	 * not on the region from pos to ppos+len-1.
1680	 */
1681
1682	return cifs_user_writev(iocb, iov, nr_segs, pos);
1683}
1684
/*
 * Uncached read into a userspace iovec: issue rsize-sized SMB reads and
 * copy each response's data area out with memcpy_toiovecend. Advances
 * *poffset by the bytes read. Returns total bytes read, or a negative
 * error if nothing was read.
 */
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	int rc;
	int xid;
	ssize_t total_read;
	unsigned int bytes_read = 0;
	size_t len, cur_len;
	int iov_offset = 0;	/* bytes already copied into the iovec */
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *pTcon;
	struct cifsFileInfo *open_file;
	struct smb_com_read_rsp *pSMBr;
	struct cifs_io_parms io_parms;
	char *read_data;
	__u32 pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	open_file = file->private_data;
	pTcon = tlink_tcon(open_file->tlink);

	/* forward the opener's pid to the server if requested at mount */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0; total_read < len; total_read += bytes_read) {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		rc = -EAGAIN;
		read_data = NULL;

		/* -EAGAIN from CIFSSMBRead means the session reconnected;
		   reopen the handle if needed and retry */
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.netfid = open_file->netfid;
			io_parms.pid = pid;
			io_parms.tcon = pTcon;
			io_parms.offset = *poffset;
			io_parms.length = cur_len;
			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
					 &read_data, &buf_type);
			pSMBr = (struct smb_com_read_rsp *)read_data;
			if (read_data) {
				/* the data area starts DataOffset bytes past
				   the 4-byte RFC1002 length header */
				char *data_offset = read_data + 4 +
						le16_to_cpu(pSMBr->DataOffset);
				if (memcpy_toiovecend(iov, data_offset,
						      iov_offset, bytes_read))
					rc = -EFAULT;
				/* release whichever buffer type the
				   transport handed back */
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(read_data);
				read_data = NULL;
				iov_offset += bytes_read;
			}
		}

		if (rc || (bytes_read == 0)) {
			/* error or EOF: report progress if any, else rc */
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}

	FreeXid(xid);
	return total_read;
}
1775
1776ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
1777			       unsigned long nr_segs, loff_t pos)
1778{
1779	ssize_t read;
1780
1781	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
1782	if (read > 0)
1783		iocb->ki_pos = pos;
1784
1785	return read;
1786}
1787
1788ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
1789			  unsigned long nr_segs, loff_t pos)
1790{
1791	struct inode *inode;
1792
1793	inode = iocb->ki_filp->f_path.dentry->d_inode;
1794
1795	if (CIFS_I(inode)->clientCanCacheRead)
1796		return generic_file_aio_read(iocb, iov, nr_segs, pos);
1797
1798	/*
1799	 * In strict cache mode we need to read from the server all the time
1800	 * if we don't have level II oplock because the server can delay mtime
1801	 * change - so we can't make a decision about inode invalidating.
1802	 * And we can also fail with pagereading if there are mandatory locks
1803	 * on pages affected by this read but not on the region from pos to
1804	 * pos+len-1.
1805	 */
1806
1807	return cifs_user_readv(iocb, iov, nr_segs, pos);
1808}
1809
1810static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1811			 loff_t *poffset)
1812{
1813	int rc = -EACCES;
1814	unsigned int bytes_read = 0;
1815	unsigned int total_read;
1816	unsigned int current_read_size;
1817	struct cifs_sb_info *cifs_sb;
1818	struct cifs_tcon *pTcon;
1819	int xid;
1820	char *current_offset;
1821	struct cifsFileInfo *open_file;
1822	struct cifs_io_parms io_parms;
1823	int buf_type = CIFS_NO_BUFFER;
1824	__u32 pid;
1825
1826	xid = GetXid();
1827	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1828
1829	if (file->private_data == NULL) {
1830		rc = -EBADF;
1831		FreeXid(xid);
1832		return rc;
1833	}
1834	open_file = file->private_data;
1835	pTcon = tlink_tcon(open_file->tlink);
1836
1837	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1838		pid = open_file->pid;
1839	else
1840		pid = current->tgid;
1841
1842	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1843		cFYI(1, "attempting read on write only file instance");
1844
1845	for (total_read = 0, current_offset = read_data;
1846	     read_size > total_read;
1847	     total_read += bytes_read, current_offset += bytes_read) {
1848		current_read_size = min_t(const int, read_size - total_read,
1849					  cifs_sb->rsize);
1850		/* For windows me and 9x we do not want to request more
1851		than it negotiated since it will refuse the read then */
1852		if ((pTcon->ses) &&
1853			!(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1854			current_read_size = min_t(const int, current_read_size,
1855					pTcon->ses->server->maxBuf - 128);
1856		}
1857		rc = -EAGAIN;
1858		while (rc == -EAGAIN) {
1859			if (open_file->invalidHandle) {
1860				rc = cifs_reopen_file(open_file, true);
1861				if (rc != 0)
1862					break;
1863			}
1864			io_parms.netfid = open_file->netfid;
1865			io_parms.pid = pid;
1866			io_parms.tcon = pTcon;
1867			io_parms.offset = *poffset;
1868			io_parms.length = current_read_size;
1869			rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
1870					 &current_offset, &buf_type);
1871		}
1872		if (rc || (bytes_read == 0)) {
1873			if (total_read) {
1874				break;
1875			} else {
1876				FreeXid(xid);
1877				return rc;
1878			}
1879		} else {
1880			cifs_stats_bytes_read(pTcon, total_read);
1881			*poffset += bytes_read;
1882		}
1883	}
1884	FreeXid(xid);
1885	return total_read;
1886}
1887
1888/*
1889 * If the page is mmap'ed into a process' page tables, then we need to make
1890 * sure that it doesn't change while being written back.
1891 */
1892static int
1893cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1894{
1895	struct page *page = vmf->page;
1896
1897	lock_page(page);
1898	return VM_FAULT_LOCKED;
1899}
1900
/* vm ops for mmap'ed cifs files: plain page-cache faults, with pages
   pinned via cifs_page_mkwrite while they are made writable */
static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
};
1905
1906int cifs_file_strict_mmap(struct file *file, struct

Large files files are truncated, but you can click here to view the full file