
/fs/gfs2/xattr.c

https://bitbucket.org/digetx/picasso-kernel
C | 1505 lines | 1110 code | 277 blank | 118 comment
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/slab.h>
  11#include <linux/spinlock.h>
  12#include <linux/completion.h>
  13#include <linux/buffer_head.h>
  14#include <linux/xattr.h>
  15#include <linux/gfs2_ondisk.h>
  16#include <asm/uaccess.h>
  17
  18#include "gfs2.h"
  19#include "incore.h"
  20#include "acl.h"
  21#include "xattr.h"
  22#include "glock.h"
  23#include "inode.h"
  24#include "meta_io.h"
  25#include "quota.h"
  26#include "rgrp.h"
  27#include "trans.h"
  28#include "util.h"
  29
   30/**
   31 * ea_calc_size - returns the actual number of bytes the request will take up
   32 *                (not counting any unstuffed data blocks)
   33 * @sdp: the filesystem superblock
   34 * @nsize: the length of the xattr name
   35 * @dsize: the length of the xattr data
   36 * @size: the computed size of the on-disk xattr record
   37 * Returns: 1 if the EA should be stuffed
   38 */
  39
  40static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
  41			unsigned int *size)
  42{
  43	unsigned int jbsize = sdp->sd_jbsize;
  44
  45	/* Stuffed */
  46	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);
  47
  48	if (*size <= jbsize)
  49		return 1;
  50
  51	/* Unstuffed */
  52	*size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
  53		      (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);
  54
  55	return 0;
  56}
  57
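/*
 * ea_check_size - reject a request whose data exceeds GFS2_EA_MAX_DATA_LEN,
 * or whose header, name and data pointers cannot fit within a single
 * journaled block (which, per the comment below, can only happen with
 * 512 byte blocks).
 */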
  58static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
  59{
  60	unsigned int size;
  61
  62	if (dsize > GFS2_EA_MAX_DATA_LEN)
  63		return -ERANGE;
  64
  65	ea_calc_size(sdp, nsize, dsize, &size);
  66
  67	/* This can only happen with 512 byte blocks */
  68	if (size > sdp->sd_jbsize)
  69		return -ERANGE;
  70
  71	return 0;
  72}
  73
  74typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
  75			  struct gfs2_ea_header *ea,
  76			  struct gfs2_ea_header *prev, void *private);
  77
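/*
 * ea_foreach_i - walk every xattr record in a single EA block, calling
 * @ea_call on each one.  Each record is validated first (non-zero record
 * length, next record within the buffer, known type); a corrupt record
 * marks the inode inconsistent and returns -EIO.
 */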
  78static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
  79			ea_call_t ea_call, void *data)
  80{
  81	struct gfs2_ea_header *ea, *prev = NULL;
  82	int error = 0;
  83
  84	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
  85		return -EIO;
  86
  87	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
  88		if (!GFS2_EA_REC_LEN(ea))
  89			goto fail;
  90		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
  91						  bh->b_data + bh->b_size))
  92			goto fail;
  93		if (!GFS2_EATYPE_VALID(ea->ea_type))
  94			goto fail;
  95
  96		error = ea_call(ip, bh, ea, prev, data);
  97		if (error)
  98			return error;
  99
 100		if (GFS2_EA_IS_LAST(ea)) {
 101			if ((char *)GFS2_EA2NEXT(ea) !=
 102			    bh->b_data + bh->b_size)
 103				goto fail;
 104			break;
 105		}
 106	}
 107
 108	return error;
 109
 110fail:
 111	gfs2_consist_inode(ip);
 112	return -EIO;
 113}
 114
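/*
 * ea_foreach - apply @ea_call to every xattr record of an inode.  With the
 * simple layout the block at i_eattr is scanned directly; with
 * GFS2_DIF_EA_INDIRECT set it is an indirect block of pointers, and each
 * referenced EA block is scanned in turn.
 */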
 115static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
 116{
 117	struct buffer_head *bh, *eabh;
 118	__be64 *eablk, *end;
 119	int error;
 120
 121	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh);
 122	if (error)
 123		return error;
 124
 125	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
 126		error = ea_foreach_i(ip, bh, ea_call, data);
 127		goto out;
 128	}
 129
 130	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
 131		error = -EIO;
 132		goto out;
 133	}
 134
 135	eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
 136	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;
 137
 138	for (; eablk < end; eablk++) {
 139		u64 bn;
 140
 141		if (!*eablk)
 142			break;
 143		bn = be64_to_cpu(*eablk);
 144
 145		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
 146		if (error)
 147			break;
 148		error = ea_foreach_i(ip, eabh, ea_call, data);
 149		brelse(eabh);
 150		if (error)
 151			break;
 152	}
 153out:
 154	brelse(bh);
 155	return error;
 156}
 157
 158struct ea_find {
 159	int type;
 160	const char *name;
 161	size_t namel;
 162	struct gfs2_ea_location *ef_el;
 163};
 164
 165static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
 166		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
 167		     void *private)
 168{
 169	struct ea_find *ef = private;
 170
 171	if (ea->ea_type == GFS2_EATYPE_UNUSED)
 172		return 0;
 173
 174	if (ea->ea_type == ef->type) {
 175		if (ea->ea_name_len == ef->namel &&
 176		    !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) {
 177			struct gfs2_ea_location *el = ef->ef_el;
 178			get_bh(bh);
 179			el->el_bh = bh;
 180			el->el_ea = ea;
 181			el->el_prev = prev;
 182			return 1;
 183		}
 184	}
 185
 186	return 0;
 187}
 188
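/*
 * gfs2_ea_find - locate the xattr with the given type and name.  On a match
 * ea_find_i fills in @el (taking a reference on the buffer head) and the
 * ea_foreach walk stops early; the caller must brelse el->el_bh when done.
 */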
 189static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
 190			struct gfs2_ea_location *el)
 191{
 192	struct ea_find ef;
 193	int error;
 194
 195	ef.type = type;
 196	ef.name = name;
 197	ef.namel = strlen(name);
 198	ef.ef_el = el;
 199
 200	memset(el, 0, sizeof(struct gfs2_ea_location));
 201
 202	error = ea_foreach(ip, ea_find_i, &ef);
 203	if (error > 0)
 204		return 0;
 205
 206	return error;
 207}
 208
  209/**
  210 * ea_dealloc_unstuffed - free the data blocks of an unstuffed xattr
  211 * @ip: the inode
  212 * @bh: the buffer head holding the EA block
  213 * @ea: the xattr header whose data is being deallocated
  214 * @prev: the previous xattr header in the block, if any
  215 * @private: if non-NULL, keep the record as an UNUSED placeholder
  216 *
  217 * Takes advantage of the fact that all unstuffed blocks are
  218 * allocated from the same RG.  But beware, this may not always
  219 * be true.
  220 *
  221 * Returns: errno
  222 */
 223
 224static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
 225				struct gfs2_ea_header *ea,
 226				struct gfs2_ea_header *prev, void *private)
 227{
 228	int *leave = private;
 229	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 230	struct gfs2_rgrpd *rgd;
 231	struct gfs2_holder rg_gh;
 232	struct buffer_head *dibh;
 233	__be64 *dataptrs;
 234	u64 bn = 0;
 235	u64 bstart = 0;
 236	unsigned int blen = 0;
 237	unsigned int blks = 0;
 238	unsigned int x;
 239	int error;
 240
 241	error = gfs2_rindex_update(sdp);
 242	if (error)
 243		return error;
 244
 245	if (GFS2_EA_IS_STUFFED(ea))
 246		return 0;
 247
 248	dataptrs = GFS2_EA2DATAPTRS(ea);
 249	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
 250		if (*dataptrs) {
 251			blks++;
 252			bn = be64_to_cpu(*dataptrs);
 253		}
 254	}
 255	if (!blks)
 256		return 0;
 257
 258	rgd = gfs2_blk2rgrpd(sdp, bn, 1);
 259	if (!rgd) {
 260		gfs2_consist_inode(ip);
 261		return -EIO;
 262	}
 263
 264	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
 265	if (error)
 266		return error;
 267
 268	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
 269				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
 270	if (error)
 271		goto out_gunlock;
 272
 273	gfs2_trans_add_bh(ip->i_gl, bh, 1);
 274
 275	dataptrs = GFS2_EA2DATAPTRS(ea);
 276	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
 277		if (!*dataptrs)
 278			break;
 279		bn = be64_to_cpu(*dataptrs);
 280
 281		if (bstart + blen == bn)
 282			blen++;
 283		else {
 284			if (bstart)
 285				gfs2_free_meta(ip, bstart, blen);
 286			bstart = bn;
 287			blen = 1;
 288		}
 289
 290		*dataptrs = 0;
 291		gfs2_add_inode_blocks(&ip->i_inode, -1);
 292	}
 293	if (bstart)
 294		gfs2_free_meta(ip, bstart, blen);
 295
 296	if (prev && !leave) {
 297		u32 len;
 298
 299		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
 300		prev->ea_rec_len = cpu_to_be32(len);
 301
 302		if (GFS2_EA_IS_LAST(ea))
 303			prev->ea_flags |= GFS2_EAFLAG_LAST;
 304	} else {
 305		ea->ea_type = GFS2_EATYPE_UNUSED;
 306		ea->ea_num_ptrs = 0;
 307	}
 308
 309	error = gfs2_meta_inode_buffer(ip, &dibh);
 310	if (!error) {
 311		ip->i_inode.i_ctime = CURRENT_TIME;
 312		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 313		gfs2_dinode_out(ip, dibh->b_data);
 314		brelse(dibh);
 315	}
 316
 317	gfs2_trans_end(sdp);
 318
 319out_gunlock:
 320	gfs2_glock_dq_uninit(&rg_gh);
 321	return error;
 322}
 323
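/*
 * ea_remove_unstuffed - take the quota hold and hand off to
 * ea_dealloc_unstuffed.  With @leave set, a non-NULL private pointer is
 * passed down so the record is turned into an UNUSED placeholder rather
 * than being merged into the previous record.
 */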
 324static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
 325			       struct gfs2_ea_header *ea,
 326			       struct gfs2_ea_header *prev, int leave)
 327{
 328	int error;
 329
 330	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
 331	if (error)
 332		return error;
 333
 334	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
 335	if (error)
 336		goto out_alloc;
 337
 338	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);
 339
 340	gfs2_quota_unhold(ip);
 341out_alloc:
 342	return error;
 343}
 344
 345struct ea_list {
 346	struct gfs2_ea_request *ei_er;
 347	unsigned int ei_size;
 348};
 349
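/*
 * gfs2_ea_strlen - length of the name as reported to listxattr: the prefix
 * ("user." = 5, "system." = 7, "security." = 9), the raw name, and a
 * terminating NUL.  Unrecognised types report 0.
 */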
 350static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
 351{
 352	switch (ea->ea_type) {
 353	case GFS2_EATYPE_USR:
 354		return 5 + ea->ea_name_len + 1;
 355	case GFS2_EATYPE_SYS:
 356		return 7 + ea->ea_name_len + 1;
 357	case GFS2_EATYPE_SECURITY:
 358		return 9 + ea->ea_name_len + 1;
 359	default:
 360		return 0;
 361	}
 362}
 363
 364static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
 365		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
 366		     void *private)
 367{
 368	struct ea_list *ei = private;
 369	struct gfs2_ea_request *er = ei->ei_er;
 370	unsigned int ea_size = gfs2_ea_strlen(ea);
 371
 372	if (ea->ea_type == GFS2_EATYPE_UNUSED)
 373		return 0;
 374
 375	if (er->er_data_len) {
 376		char *prefix = NULL;
 377		unsigned int l = 0;
 378		char c = 0;
 379
 380		if (ei->ei_size + ea_size > er->er_data_len)
 381			return -ERANGE;
 382
 383		switch (ea->ea_type) {
 384		case GFS2_EATYPE_USR:
 385			prefix = "user.";
 386			l = 5;
 387			break;
 388		case GFS2_EATYPE_SYS:
 389			prefix = "system.";
 390			l = 7;
 391			break;
 392		case GFS2_EATYPE_SECURITY:
 393			prefix = "security.";
 394			l = 9;
 395			break;
 396		}
 397
 398		BUG_ON(l == 0);
 399
 400		memcpy(er->er_data + ei->ei_size, prefix, l);
 401		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
 402		       ea->ea_name_len);
 403		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
 404	}
 405
 406	ei->ei_size += ea_size;
 407
 408	return 0;
 409}
 410
 411/**
 412 * gfs2_listxattr - List gfs2 extended attributes
 413 * @dentry: The dentry whose inode we are interested in
 414 * @buffer: The buffer to write the results
 415 * @size: The size of the buffer
 416 *
 417 * Returns: actual size of data on success, -errno on error
 418 */
 419
 420ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
 421{
 422	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
 423	struct gfs2_ea_request er;
 424	struct gfs2_holder i_gh;
 425	int error;
 426
 427	memset(&er, 0, sizeof(struct gfs2_ea_request));
 428	if (size) {
 429		er.er_data = buffer;
 430		er.er_data_len = size;
 431	}
 432
 433	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 434	if (error)
 435		return error;
 436
 437	if (ip->i_eattr) {
 438		struct ea_list ei = { .ei_er = &er, .ei_size = 0 };
 439
 440		error = ea_foreach(ip, ea_list_i, &ei);
 441		if (!error)
 442			error = ei.ei_size;
 443	}
 444
 445	gfs2_glock_dq_uninit(&i_gh);
 446
 447	return error;
 448}
 449
 450/**
  451 * gfs2_iter_unstuffed - copies the unstuffed xattr data to/from the
 452 *                     request buffer
 453 * @ip: The GFS2 inode
 454 * @ea: The extended attribute header structure
 455 * @din: The data to be copied in
 456 * @dout: The data to be copied out (one of din,dout will be NULL)
 457 *
 458 * Returns: errno
 459 */
 460
 461static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
 462			       const char *din, char *dout)
 463{
 464	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 465	struct buffer_head **bh;
 466	unsigned int amount = GFS2_EA_DATA_LEN(ea);
 467	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
 468	__be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
 469	unsigned int x;
 470	int error = 0;
 471	unsigned char *pos;
 472	unsigned cp_size;
 473
 474	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
 475	if (!bh)
 476		return -ENOMEM;
 477
 478	for (x = 0; x < nptrs; x++) {
 479		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
 480				       bh + x);
 481		if (error) {
 482			while (x--)
 483				brelse(bh[x]);
 484			goto out;
 485		}
 486		dataptrs++;
 487	}
 488
 489	for (x = 0; x < nptrs; x++) {
 490		error = gfs2_meta_wait(sdp, bh[x]);
 491		if (error) {
 492			for (; x < nptrs; x++)
 493				brelse(bh[x]);
 494			goto out;
 495		}
 496		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
 497			for (; x < nptrs; x++)
 498				brelse(bh[x]);
 499			error = -EIO;
 500			goto out;
 501		}
 502
 503		pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
 504		cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;
 505
 506		if (dout) {
 507			memcpy(dout, pos, cp_size);
 508			dout += sdp->sd_jbsize;
 509		}
 510
 511		if (din) {
 512			gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
 513			memcpy(pos, din, cp_size);
 514			din += sdp->sd_jbsize;
 515		}
 516
 517		amount -= sdp->sd_jbsize;
 518		brelse(bh[x]);
 519	}
 520
 521out:
 522	kfree(bh);
 523	return error;
 524}
 525
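/*
 * gfs2_ea_get_copy - copy an xattr's value into @data.  Stuffed values are
 * copied straight out of the EA block; unstuffed values are gathered from
 * the data blocks via gfs2_iter_unstuffed.  Returns the value length, or
 * -ERANGE if @size is too small.
 */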
 526static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
 527			    char *data, size_t size)
 528{
 529	int ret;
 530	size_t len = GFS2_EA_DATA_LEN(el->el_ea);
 531	if (len > size)
 532		return -ERANGE;
 533
 534	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
 535		memcpy(data, GFS2_EA2DATA(el->el_ea), len);
 536		return len;
 537	}
 538	ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
 539	if (ret < 0)
 540		return ret;
 541	return len;
 542}
 543
 544int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
 545{
 546	struct gfs2_ea_location el;
 547	int error;
 548	int len;
 549	char *data;
 550
 551	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
 552	if (error)
 553		return error;
 554	if (!el.el_ea)
 555		goto out;
 556	if (!GFS2_EA_DATA_LEN(el.el_ea))
 557		goto out;
 558
 559	len = GFS2_EA_DATA_LEN(el.el_ea);
 560	data = kmalloc(len, GFP_NOFS);
 561	error = -ENOMEM;
 562	if (data == NULL)
 563		goto out;
 564
 565	error = gfs2_ea_get_copy(ip, &el, data, len);
 566	if (error < 0)
 567		kfree(data);
 568	else
 569		*ppdata = data;
 570out:
 571	brelse(el.el_bh);
 572	return error;
 573}
 574
 575/**
 576 * gfs2_xattr_get - Get a GFS2 extended attribute
  577 * @dentry: The dentry whose inode we are interested in
 578 * @name: The name of the extended attribute
 579 * @buffer: The buffer to write the result into
 580 * @size: The size of the buffer
 581 * @type: The type of extended attribute
 582 *
 583 * Returns: actual size of data on success, -errno on error
 584 */
 585static int gfs2_xattr_get(struct dentry *dentry, const char *name,
 586		void *buffer, size_t size, int type)
 587{
 588	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
 589	struct gfs2_ea_location el;
 590	int error;
 591
 592	if (!ip->i_eattr)
 593		return -ENODATA;
 594	if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
 595		return -EINVAL;
 596
 597	error = gfs2_ea_find(ip, type, name, &el);
 598	if (error)
 599		return error;
 600	if (!el.el_ea)
 601		return -ENODATA;
 602	if (size)
 603		error = gfs2_ea_get_copy(ip, &el, buffer, size);
 604	else
 605		error = GFS2_EA_DATA_LEN(el.el_ea);
 606	brelse(el.el_bh);
 607
 608	return error;
 609}
 610
 611/**
 612 * ea_alloc_blk - allocates a new block for extended attributes.
 613 * @ip: A pointer to the inode that's getting extended attributes
 614 * @bhp: Pointer to pointer to a struct buffer_head
 615 *
 616 * Returns: errno
 617 */
 618
 619static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
 620{
 621	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 622	struct gfs2_ea_header *ea;
 623	unsigned int n = 1;
 624	u64 block;
 625	int error;
 626
 627	error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
 628	if (error)
 629		return error;
 630	gfs2_trans_add_unrevoke(sdp, block, 1);
 631	*bhp = gfs2_meta_new(ip->i_gl, block);
 632	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
 633	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
 634	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));
 635
 636	ea = GFS2_EA_BH2FIRST(*bhp);
 637	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
 638	ea->ea_type = GFS2_EATYPE_UNUSED;
 639	ea->ea_flags = GFS2_EAFLAG_LAST;
 640	ea->ea_num_ptrs = 0;
 641
 642	gfs2_add_inode_blocks(&ip->i_inode, 1);
 643
 644	return 0;
 645}
 646
 647/**
 648 * ea_write - writes the request info to an ea, creating new blocks if
 649 *            necessary
 650 * @ip: inode that is being modified
 651 * @ea: the location of the new ea in a block
 652 * @er: the write request
 653 *
  654 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 655 *
  656 * Returns: errno
 657 */
 658
 659static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
 660		    struct gfs2_ea_request *er)
 661{
 662	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 663	int error;
 664
 665	ea->ea_data_len = cpu_to_be32(er->er_data_len);
 666	ea->ea_name_len = er->er_name_len;
 667	ea->ea_type = er->er_type;
 668	ea->__pad = 0;
 669
 670	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);
 671
 672	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
 673		ea->ea_num_ptrs = 0;
 674		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
 675	} else {
 676		__be64 *dataptr = GFS2_EA2DATAPTRS(ea);
 677		const char *data = er->er_data;
 678		unsigned int data_len = er->er_data_len;
 679		unsigned int copy;
 680		unsigned int x;
 681
 682		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
 683		for (x = 0; x < ea->ea_num_ptrs; x++) {
 684			struct buffer_head *bh;
 685			u64 block;
 686			int mh_size = sizeof(struct gfs2_meta_header);
 687			unsigned int n = 1;
 688
 689			error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
 690			if (error)
 691				return error;
 692			gfs2_trans_add_unrevoke(sdp, block, 1);
 693			bh = gfs2_meta_new(ip->i_gl, block);
 694			gfs2_trans_add_bh(ip->i_gl, bh, 1);
 695			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
 696
 697			gfs2_add_inode_blocks(&ip->i_inode, 1);
 698
 699			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
 700							   data_len;
 701			memcpy(bh->b_data + mh_size, data, copy);
 702			if (copy < sdp->sd_jbsize)
 703				memset(bh->b_data + mh_size + copy, 0,
 704				       sdp->sd_jbsize - copy);
 705
 706			*dataptr++ = cpu_to_be64(bh->b_blocknr);
 707			data += copy;
 708			data_len -= copy;
 709
 710			brelse(bh);
 711		}
 712
 713		gfs2_assert_withdraw(sdp, !data_len);
 714	}
 715
 716	return 0;
 717}
 718
 719typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
 720				   struct gfs2_ea_request *er, void *private);
 721
 722static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 723			     unsigned int blks,
 724			     ea_skeleton_call_t skeleton_call, void *private)
 725{
 726	struct buffer_head *dibh;
 727	int error;
 728
 729	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
 730	if (error)
 731		return error;
 732
 733	error = gfs2_quota_lock_check(ip);
 734	if (error)
 735		return error;
 736
 737	error = gfs2_inplace_reserve(ip, blks, 0);
 738	if (error)
 739		goto out_gunlock_q;
 740
 741	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
 742				 blks + gfs2_rg_blocks(ip, blks) +
 743				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
 744	if (error)
 745		goto out_ipres;
 746
 747	error = skeleton_call(ip, er, private);
 748	if (error)
 749		goto out_end_trans;
 750
 751	error = gfs2_meta_inode_buffer(ip, &dibh);
 752	if (!error) {
 753		ip->i_inode.i_ctime = CURRENT_TIME;
 754		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 755		gfs2_dinode_out(ip, dibh->b_data);
 756		brelse(dibh);
 757	}
 758
 759out_end_trans:
 760	gfs2_trans_end(GFS2_SB(&ip->i_inode));
 761out_ipres:
 762	gfs2_inplace_release(ip);
 763out_gunlock_q:
 764	gfs2_quota_unlock(ip);
 765	return error;
 766}
 767
 768static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 769		     void *private)
 770{
 771	struct buffer_head *bh;
 772	int error;
 773
 774	error = ea_alloc_blk(ip, &bh);
 775	if (error)
 776		return error;
 777
 778	ip->i_eattr = bh->b_blocknr;
 779	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
 780
 781	brelse(bh);
 782
 783	return error;
 784}
 785
  786/**
  787 * ea_init - initializes a new eattr block
  788 * @ip: the inode being given its first extended attribute
  789 * @type, @name, @data, @size: the attribute to write into the new block
  790 *
  791 * Returns: errno
  792 */
 793
 794static int ea_init(struct gfs2_inode *ip, int type, const char *name,
 795		   const void *data, size_t size)
 796{
 797	struct gfs2_ea_request er;
 798	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
 799	unsigned int blks = 1;
 800
 801	er.er_type = type;
 802	er.er_name = name;
 803	er.er_name_len = strlen(name);
 804	er.er_data = (void *)data;
 805	er.er_data_len = size;
 806
 807	if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
 808		blks += DIV_ROUND_UP(er.er_data_len, jbsize);
 809
 810	return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
 811}
 812
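/*
 * ea_split_ea - split the free tail of an xattr record into a new unused
 * record.  The original record is shrunk to its used size, and the new
 * record inherits the remaining space (and the LAST flag, if it was set).
 */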
 813static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
 814{
 815	u32 ea_size = GFS2_EA_SIZE(ea);
 816	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
 817				     ea_size);
 818	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
 819	int last = ea->ea_flags & GFS2_EAFLAG_LAST;
 820
 821	ea->ea_rec_len = cpu_to_be32(ea_size);
 822	ea->ea_flags ^= last;
 823
 824	new->ea_rec_len = cpu_to_be32(new_size);
 825	new->ea_flags = last;
 826
 827	return new;
 828}
 829
 830static void ea_set_remove_stuffed(struct gfs2_inode *ip,
 831				  struct gfs2_ea_location *el)
 832{
 833	struct gfs2_ea_header *ea = el->el_ea;
 834	struct gfs2_ea_header *prev = el->el_prev;
 835	u32 len;
 836
 837	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
 838
 839	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
 840		ea->ea_type = GFS2_EATYPE_UNUSED;
 841		return;
 842	} else if (GFS2_EA2NEXT(prev) != ea) {
 843		prev = GFS2_EA2NEXT(prev);
 844		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
 845	}
 846
 847	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
 848	prev->ea_rec_len = cpu_to_be32(len);
 849
 850	if (GFS2_EA_IS_LAST(ea))
 851		prev->ea_flags |= GFS2_EAFLAG_LAST;
 852}
 853
 854struct ea_set {
 855	int ea_split;
 856
 857	struct gfs2_ea_request *es_er;
 858	struct gfs2_ea_location *es_el;
 859
 860	struct buffer_head *es_bh;
 861	struct gfs2_ea_header *es_ea;
 862};
 863
 864static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
 865				 struct gfs2_ea_header *ea, struct ea_set *es)
 866{
 867	struct gfs2_ea_request *er = es->es_er;
 868	struct buffer_head *dibh;
 869	int error;
 870
 871	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
 872	if (error)
 873		return error;
 874
 875	gfs2_trans_add_bh(ip->i_gl, bh, 1);
 876
 877	if (es->ea_split)
 878		ea = ea_split_ea(ea);
 879
 880	ea_write(ip, ea, er);
 881
 882	if (es->es_el)
 883		ea_set_remove_stuffed(ip, es->es_el);
 884
 885	error = gfs2_meta_inode_buffer(ip, &dibh);
 886	if (error)
 887		goto out;
 888	ip->i_inode.i_ctime = CURRENT_TIME;
 889	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 890	gfs2_dinode_out(ip, dibh->b_data);
 891	brelse(dibh);
 892out:
 893	gfs2_trans_end(GFS2_SB(&ip->i_inode));
 894	return error;
 895}
 896
 897static int ea_set_simple_alloc(struct gfs2_inode *ip,
 898			       struct gfs2_ea_request *er, void *private)
 899{
 900	struct ea_set *es = private;
 901	struct gfs2_ea_header *ea = es->es_ea;
 902	int error;
 903
 904	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);
 905
 906	if (es->ea_split)
 907		ea = ea_split_ea(ea);
 908
 909	error = ea_write(ip, ea, er);
 910	if (error)
 911		return error;
 912
 913	if (es->es_el)
 914		ea_set_remove_stuffed(ip, es->es_el);
 915
 916	return 0;
 917}
 918
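/*
 * ea_set_simple - ea_foreach callback that tries to place the new xattr in
 * an existing EA block, either by reusing an UNUSED record that is large
 * enough or by splitting the free tail of a live record.  Returns 1 once
 * the attribute has been written, 0 to keep scanning, or -errno on error.
 */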
 919static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
 920			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
 921			 void *private)
 922{
 923	struct ea_set *es = private;
 924	unsigned int size;
 925	int stuffed;
 926	int error;
 927
 928	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
 929			       es->es_er->er_data_len, &size);
 930
 931	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
 932		if (GFS2_EA_REC_LEN(ea) < size)
 933			return 0;
 934		if (!GFS2_EA_IS_STUFFED(ea)) {
 935			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
 936			if (error)
 937				return error;
 938		}
 939		es->ea_split = 0;
 940	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
 941		es->ea_split = 1;
 942	else
 943		return 0;
 944
 945	if (stuffed) {
 946		error = ea_set_simple_noalloc(ip, bh, ea, es);
 947		if (error)
 948			return error;
 949	} else {
 950		unsigned int blks;
 951
 952		es->es_bh = bh;
 953		es->es_ea = ea;
 954		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
 955					GFS2_SB(&ip->i_inode)->sd_jbsize);
 956
 957		error = ea_alloc_skeleton(ip, es->es_er, blks,
 958					  ea_set_simple_alloc, es);
 959		if (error)
 960			return error;
 961	}
 962
 963	return 1;
 964}
 965
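/*
 * ea_set_block - fall back to allocating a brand new EA block for the
 * request.  If the inode is not yet using the indirect layout, its single
 * EA block is first converted into an indirect block of pointers; the new
 * block is then linked into the first free pointer slot.
 */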
 966static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
 967			void *private)
 968{
 969	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 970	struct buffer_head *indbh, *newbh;
 971	__be64 *eablk;
 972	int error;
 973	int mh_size = sizeof(struct gfs2_meta_header);
 974
 975	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
 976		__be64 *end;
 977
 978		error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT,
 979				       &indbh);
 980		if (error)
 981			return error;
 982
 983		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
 984			error = -EIO;
 985			goto out;
 986		}
 987
 988		eablk = (__be64 *)(indbh->b_data + mh_size);
 989		end = eablk + sdp->sd_inptrs;
 990
 991		for (; eablk < end; eablk++)
 992			if (!*eablk)
 993				break;
 994
 995		if (eablk == end) {
 996			error = -ENOSPC;
 997			goto out;
 998		}
 999
1000		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1001	} else {
1002		u64 blk;
1003		unsigned int n = 1;
1004		error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
1005		if (error)
1006			return error;
1007		gfs2_trans_add_unrevoke(sdp, blk, 1);
1008		indbh = gfs2_meta_new(ip->i_gl, blk);
1009		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1010		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
1011		gfs2_buffer_clear_tail(indbh, mh_size);
1012
1013		eablk = (__be64 *)(indbh->b_data + mh_size);
1014		*eablk = cpu_to_be64(ip->i_eattr);
1015		ip->i_eattr = blk;
1016		ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
1017		gfs2_add_inode_blocks(&ip->i_inode, 1);
1018
1019		eablk++;
1020	}
1021
1022	error = ea_alloc_blk(ip, &newbh);
1023	if (error)
1024		goto out;
1025
1026	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
1027	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
1028	brelse(newbh);
1029	if (error)
1030		goto out;
1031
1032	if (private)
1033		ea_set_remove_stuffed(ip, private);
1034
1035out:
1036	brelse(indbh);
1037	return error;
1038}
1039
1040static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
1041		    const void *value, size_t size, struct gfs2_ea_location *el)
1042{
1043	struct gfs2_ea_request er;
1044	struct ea_set es;
1045	unsigned int blks = 2;
1046	int error;
1047
1048	er.er_type = type;
1049	er.er_name = name;
1050	er.er_data = (void *)value;
1051	er.er_name_len = strlen(name);
1052	er.er_data_len = size;
1053
1054	memset(&es, 0, sizeof(struct ea_set));
1055	es.es_er = &er;
1056	es.es_el = el;
1057
1058	error = ea_foreach(ip, ea_set_simple, &es);
1059	if (error > 0)
1060		return 0;
1061	if (error)
1062		return error;
1063
1064	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
1065		blks++;
1066	if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
1067		blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
1068
1069	return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
1070}
1071
1072static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
1073				   struct gfs2_ea_location *el)
1074{
1075	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
1076		el->el_prev = GFS2_EA2NEXT(el->el_prev);
1077		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
1078				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
1079	}
1080
1081	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
1082}
1083
1084static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1085{
1086	struct gfs2_ea_header *ea = el->el_ea;
1087	struct gfs2_ea_header *prev = el->el_prev;
1088	struct buffer_head *dibh;
1089	int error;
1090
1091	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1092	if (error)
1093		return error;
1094
1095	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1096
1097	if (prev) {
1098		u32 len;
1099
1100		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
1101		prev->ea_rec_len = cpu_to_be32(len);
1102
1103		if (GFS2_EA_IS_LAST(ea))
1104			prev->ea_flags |= GFS2_EAFLAG_LAST;
1105	} else {
1106		ea->ea_type = GFS2_EATYPE_UNUSED;
1107	}
1108
1109	error = gfs2_meta_inode_buffer(ip, &dibh);
1110	if (!error) {
1111		ip->i_inode.i_ctime = CURRENT_TIME;
1112		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1113		gfs2_dinode_out(ip, dibh->b_data);
1114		brelse(dibh);
1115	}
1116
1117	gfs2_trans_end(GFS2_SB(&ip->i_inode));
1118
1119	return error;
1120}
1121
1122/**
1123 * gfs2_xattr_remove - Remove a GFS2 extended attribute
1124 * @ip: The inode
1125 * @type: The type of the extended attribute
1126 * @name: The name of the extended attribute
1127 *
1128 * This is not called directly by the VFS since we use the (common)
1129 * scheme of making a "set with NULL data" mean a remove request. Note
1130 * that this is different from a set with zero length data.
1131 *
1132 * Returns: 0, or errno on failure
1133 */
1134
1135static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
1136{
1137	struct gfs2_ea_location el;
1138	int error;
1139
1140	if (!ip->i_eattr)
1141		return -ENODATA;
1142
1143	error = gfs2_ea_find(ip, type, name, &el);
1144	if (error)
1145		return error;
1146	if (!el.el_ea)
1147		return -ENODATA;
1148
1149	if (GFS2_EA_IS_STUFFED(el.el_ea))
1150		error = ea_remove_stuffed(ip, &el);
1151	else
1152		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);
1153
1154	brelse(el.el_bh);
1155
1156	return error;
1157}
1158
1159/**
1160 * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
1161 * @ip: The inode
1162 * @name: The name of the extended attribute
1163 * @value: The value of the extended attribute (NULL for remove)
1164 * @size: The size of the @value argument
1165 * @flags: Create or Replace
1166 * @type: The type of the extended attribute
1167 *
1168 * See gfs2_xattr_remove() for details of the removal of xattrs.
1169 *
1170 * Returns: 0 or errno on failure
1171 */
1172
1173int __gfs2_xattr_set(struct inode *inode, const char *name,
1174		   const void *value, size_t size, int flags, int type)
1175{
1176	struct gfs2_inode *ip = GFS2_I(inode);
1177	struct gfs2_sbd *sdp = GFS2_SB(inode);
1178	struct gfs2_ea_location el;
1179	unsigned int namel = strlen(name);
1180	int error;
1181
1182	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
1183		return -EPERM;
1184	if (namel > GFS2_EA_MAX_NAME_LEN)
1185		return -ERANGE;
1186
1187	if (value == NULL)
1188		return gfs2_xattr_remove(ip, type, name);
1189
1190	if (ea_check_size(sdp, namel, size))
1191		return -ERANGE;
1192
1193	if (!ip->i_eattr) {
1194		if (flags & XATTR_REPLACE)
1195			return -ENODATA;
1196		return ea_init(ip, type, name, value, size);
1197	}
1198
1199	error = gfs2_ea_find(ip, type, name, &el);
1200	if (error)
1201		return error;
1202
1203	if (el.el_ea) {
1204		if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
1205			brelse(el.el_bh);
1206			return -EPERM;
1207		}
1208
1209		error = -EEXIST;
1210		if (!(flags & XATTR_CREATE)) {
1211			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
1212			error = ea_set_i(ip, type, name, value, size, &el);
1213			if (!error && unstuffed)
1214				ea_set_remove_unstuffed(ip, &el);
1215		}
1216
1217		brelse(el.el_bh);
1218		return error;
1219	}
1220
1221	error = -ENODATA;
1222	if (!(flags & XATTR_REPLACE))
1223		error = ea_set_i(ip, type, name, value, size, NULL);
1224
1225	return error;
1226}
1227
1228static int gfs2_xattr_set(struct dentry *dentry, const char *name,
1229		const void *value, size_t size, int flags, int type)
1230{
1231	return __gfs2_xattr_set(dentry->d_inode, name, value,
1232				size, flags, type);
1233}
1234
1235
1236static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
1237				  struct gfs2_ea_header *ea, char *data)
1238{
1239	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1240	unsigned int amount = GFS2_EA_DATA_LEN(ea);
1241	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
1242	int ret;
1243
1244	ret = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
1245	if (ret)
1246		return ret;
1247
1248	ret = gfs2_iter_unstuffed(ip, ea, data, NULL);
1249	gfs2_trans_end(sdp);
1250
1251	return ret;
1252}
1253
1254int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
1255{
1256	struct inode *inode = &ip->i_inode;
1257	struct gfs2_sbd *sdp = GFS2_SB(inode);
1258	struct gfs2_ea_location el;
1259	int error;
1260
1261	error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
1262	if (error)
1263		return error;
1264
1265	if (GFS2_EA_IS_STUFFED(el.el_ea)) {
1266		error = gfs2_trans_begin(sdp, RES_DINODE + RES_EATTR, 0);
1267		if (error == 0) {
1268			gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
1269			memcpy(GFS2_EA2DATA(el.el_ea), data,
1270			       GFS2_EA_DATA_LEN(el.el_ea));
1271		}
1272	} else {
1273		error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
1274	}
1275
1276	brelse(el.el_bh);
1277	if (error)
1278		return error;
1279
1280	error = gfs2_setattr_simple(inode, attr);
1281	gfs2_trans_end(sdp);
1282	return error;
1283}
1284
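/*
 * ea_dealloc_indirect - free every EA block referenced by the indirect
 * block.  The pointers are walked once to build the list of resource
 * groups that need locking; once the rgrps are locked and a transaction
 * started, the blocks are freed in contiguous runs, the pointers cleared,
 * and the inode drops its GFS2_DIF_EA_INDIRECT flag.
 */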
1285static int ea_dealloc_indirect(struct gfs2_inode *ip)
1286{
1287	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1288	struct gfs2_rgrp_list rlist;
1289	struct buffer_head *indbh, *dibh;
1290	__be64 *eablk, *end;
1291	unsigned int rg_blocks = 0;
1292	u64 bstart = 0;
1293	unsigned int blen = 0;
1294	unsigned int blks = 0;
1295	unsigned int x;
1296	int error;
1297
1298	error = gfs2_rindex_update(sdp);
1299	if (error)
1300		return error;
1301
1302	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1303
1304	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
1305	if (error)
1306		return error;
1307
1308	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
1309		error = -EIO;
1310		goto out;
1311	}
1312
1313	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1314	end = eablk + sdp->sd_inptrs;
1315
1316	for (; eablk < end; eablk++) {
1317		u64 bn;
1318
1319		if (!*eablk)
1320			break;
1321		bn = be64_to_cpu(*eablk);
1322
1323		if (bstart + blen == bn)
1324			blen++;
1325		else {
1326			if (bstart)
1327				gfs2_rlist_add(ip, &rlist, bstart);
1328			bstart = bn;
1329			blen = 1;
1330		}
1331		blks++;
1332	}
1333	if (bstart)
1334		gfs2_rlist_add(ip, &rlist, bstart);
1335	else
1336		goto out;
1337
1338	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
1339
1340	for (x = 0; x < rlist.rl_rgrps; x++) {
1341		struct gfs2_rgrpd *rgd;
1342		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
1343		rg_blocks += rgd->rd_length;
1344	}
1345
1346	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
1347	if (error)
1348		goto out_rlist_free;
1349
1350	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
1351				 RES_STATFS + RES_QUOTA, blks);
1352	if (error)
1353		goto out_gunlock;
1354
1355	gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1356
1357	eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1358	bstart = 0;
1359	blen = 0;
1360
1361	for (; eablk < end; eablk++) {
1362		u64 bn;
1363
1364		if (!*eablk)
1365			break;
1366		bn = be64_to_cpu(*eablk);
1367
1368		if (bstart + blen == bn)
1369			blen++;
1370		else {
1371			if (bstart)
1372				gfs2_free_meta(ip, bstart, blen);
1373			bstart = bn;
1374			blen = 1;
1375		}
1376
1377		*eablk = 0;
1378		gfs2_add_inode_blocks(&ip->i_inode, -1);
1379	}
1380	if (bstart)
1381		gfs2_free_meta(ip, bstart, blen);
1382
1383	ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;
1384
1385	error = gfs2_meta_inode_buffer(ip, &dibh);
1386	if (!error) {
1387		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1388		gfs2_dinode_out(ip, dibh->b_data);
1389		brelse(dibh);
1390	}
1391
1392	gfs2_trans_end(sdp);
1393
1394out_gunlock:
1395	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
1396out_rlist_free:
1397	gfs2_rlist_free(&rlist);
1398out:
1399	brelse(indbh);
1400	return error;
1401}
1402
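/*
 * ea_dealloc_block - free the block that i_eattr points at (either the
 * inode's single EA block or the emptied indirect block) and clear
 * i_eattr, leaving the inode with no xattr fork at all.
 */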
1403static int ea_dealloc_block(struct gfs2_inode *ip)
1404{
1405	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1406	struct gfs2_rgrpd *rgd;
1407	struct buffer_head *dibh;
1408	struct gfs2_holder gh;
1409	int error;
1410
1411	error = gfs2_rindex_update(sdp);
1412	if (error)
1413		return error;
1414
1415	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
1416	if (!rgd) {
1417		gfs2_consist_inode(ip);
1418		return -EIO;
1419	}
1420
1421	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1422	if (error)
1423		return error;
1424
1425	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
1426				 RES_QUOTA, 1);
1427	if (error)
1428		goto out_gunlock;
1429
1430	gfs2_free_meta(ip, ip->i_eattr, 1);
1431
1432	ip->i_eattr = 0;
1433	gfs2_add_inode_blocks(&ip->i_inode, -1);
1434
1435	error = gfs2_meta_inode_buffer(ip, &dibh);
1436	if (!error) {
1437		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1438		gfs2_dinode_out(ip, dibh->b_data);
1439		brelse(dibh);
1440	}
1441
1442	gfs2_trans_end(sdp);
1443
1444out_gunlock:
1445	gfs2_glock_dq_uninit(&gh);
1446	return error;
1447}
1448
1449/**
1450 * gfs2_ea_dealloc - deallocate the extended attribute fork
1451 * @ip: the inode
1452 *
1453 * Returns: errno
1454 */
1455
1456int gfs2_ea_dealloc(struct gfs2_inode *ip)
1457{
1458	int error;
1459
1460	error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
1461	if (error)
1462		return error;
1463
1464	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1465	if (error)
1466		return error;
1467
1468	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1469	if (error)
1470		goto out_quota;
1471
1472	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
1473		error = ea_dealloc_indirect(ip);
1474		if (error)
1475			goto out_quota;
1476	}
1477
1478	error = ea_dealloc_block(ip);
1479
1480out_quota:
1481	gfs2_quota_unhold(ip);
1482	return error;
1483}
1484
1485static const struct xattr_handler gfs2_xattr_user_handler = {
1486	.prefix = XATTR_USER_PREFIX,
1487	.flags  = GFS2_EATYPE_USR,
1488	.get    = gfs2_xattr_get,
1489	.set    = gfs2_xattr_set,
1490};
1491
1492static const struct xattr_handler gfs2_xattr_security_handler = {
1493	.prefix = XATTR_SECURITY_PREFIX,
1494	.flags  = GFS2_EATYPE_SECURITY,
1495	.get    = gfs2_xattr_get,
1496	.set    = gfs2_xattr_set,
1497};
1498
1499const struct xattr_handler *gfs2_xattr_handlers[] = {
1500	&gfs2_xattr_user_handler,
1501	&gfs2_xattr_security_handler,
1502	&gfs2_xattr_system_handler,
1503	NULL,
1504};
1505