PageRenderTime 27ms CodeModel.GetById 29ms RepoModel.GetById 0ms app.codeStats 0ms

/lustre/obdclass/idmap.c

https://github.com/nedbass/lustre
C | 477 lines | 351 code | 67 blank | 59 comment | 116 complexity | 6124d56fab5e4183c40a285944ebdee2 MD5 | raw file
  1. /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  2. * vim:expandtab:shiftwidth=8:tabstop=8:
  3. *
  4. * GPL HEADER START
  5. *
  6. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 only,
  10. * as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License version 2 for more details (a copy is included
  16. * in the LICENSE file that accompanied this code).
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * version 2 along with this program; If not, see
  20. * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  21. *
  22. * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23. * CA 95054 USA or visit www.sun.com if you need additional information or
  24. * have any questions.
  25. *
  26. * GPL HEADER END
  27. */
  28. /*
  29. * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  30. * Use is subject to license terms.
  31. */
  32. /*
  33. * This file is part of Lustre, http://www.lustre.org/
  34. * Lustre is a trademark of Sun Microsystems, Inc.
  35. *
  36. * lustre/obdclass/idmap.c
  37. *
  38. * Lustre user identity mapping.
  39. *
  40. * Author: Fan Yong <fanyong@clusterfs.com>
  41. */
  42. #ifndef EXPORT_SYMTAB
  43. # define EXPORT_SYMTAB
  44. #endif
  45. #define DEBUG_SUBSYSTEM S_SEC
  46. #include <lustre_idmap.h>
  47. #include <obd_support.h>
/* Take a reference on @group_info; paired with lustre_put_group_info(). */
#define lustre_get_group_info(group_info) do { \
        cfs_atomic_inc(&(group_info)->usage); \
} while (0)
/* Drop a reference on @group_info; frees it when the last one is gone. */
#define lustre_put_group_info(group_info) do { \
        if (cfs_atomic_dec_and_test(&(group_info)->usage)) \
                cfs_groups_free(group_info); \
} while (0)
  55. /*
  56. * groups_search() is copied from linux kernel!
  57. * A simple bsearch.
  58. */
  59. static int lustre_groups_search(cfs_group_info_t *group_info,
  60. gid_t grp)
  61. {
  62. int left, right;
  63. if (!group_info)
  64. return 0;
  65. left = 0;
  66. right = group_info->ngroups;
  67. while (left < right) {
  68. int mid = (left + right) / 2;
  69. int cmp = grp - CFS_GROUP_AT(group_info, mid);
  70. if (cmp > 0)
  71. left = mid + 1;
  72. else if (cmp < 0)
  73. right = mid;
  74. else
  75. return 1;
  76. }
  77. return 0;
  78. }
  79. void lustre_groups_from_list(cfs_group_info_t *ginfo, gid_t *glist)
  80. {
  81. int i;
  82. int count = ginfo->ngroups;
  83. /* fill group_info from gid array */
  84. for (i = 0; i < ginfo->nblocks && count > 0; i++) {
  85. int cp_count = min(CFS_NGROUPS_PER_BLOCK, count);
  86. int off = i * CFS_NGROUPS_PER_BLOCK;
  87. int len = cp_count * sizeof(*glist);
  88. memcpy(ginfo->blocks[i], glist + off, len);
  89. count -= cp_count;
  90. }
  91. }
  92. EXPORT_SYMBOL(lustre_groups_from_list);
/* groups_sort() is copied from linux kernel! */
/* a simple shell-metzner sort */
void lustre_groups_sort(cfs_group_info_t *group_info)
{
        int base, max, stride;
        int gidsetsize = group_info->ngroups;

        /* Knuth gap sequence 1, 4, 13, 40, ...: find largest gap < size */
        for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
                ; /* nothing */
        stride /= 3;

        while (stride) {
                max = gidsetsize - stride;
                for (base = 0; base < max; base++) {
                        int left = base;
                        int right = left + stride;
                        gid_t tmp = CFS_GROUP_AT(group_info, right);

                        /* gapped insertion: shift larger gids up by stride */
                        while (left >= 0 &&
                               CFS_GROUP_AT(group_info, left) > tmp) {
                                CFS_GROUP_AT(group_info, right) =
                                        CFS_GROUP_AT(group_info, left);
                                right = left;
                                left -= stride;
                        }
                        CFS_GROUP_AT(group_info, right) = tmp;
                }
                stride /= 3;
        }
}
EXPORT_SYMBOL(lustre_groups_sort);
  121. int lustre_in_group_p(struct md_ucred *mu, gid_t grp)
  122. {
  123. int rc = 1;
  124. if (grp != mu->mu_fsgid) {
  125. cfs_group_info_t *group_info = NULL;
  126. if (mu->mu_ginfo || !mu->mu_identity ||
  127. mu->mu_valid == UCRED_OLD)
  128. if (grp == mu->mu_suppgids[0] ||
  129. grp == mu->mu_suppgids[1])
  130. return 1;
  131. if (mu->mu_ginfo)
  132. group_info = mu->mu_ginfo;
  133. else if (mu->mu_identity)
  134. group_info = mu->mu_identity->mi_ginfo;
  135. if (!group_info)
  136. return 0;
  137. lustre_get_group_info(group_info);
  138. rc = lustre_groups_search(group_info, grp);
  139. lustre_put_group_info(group_info);
  140. }
  141. return rc;
  142. }
  143. EXPORT_SYMBOL(lustre_in_group_p);
/*
 * One remote<->local identity mapping.  Each entry is linked onto four
 * hash chains of a struct lustre_idmap_table so it can be looked up by
 * any of its four ids.
 */
struct lustre_idmap_entry {
        cfs_list_t lie_rmt_uid_hash; /* hashed as lie_rmt_uid; */
        cfs_list_t lie_lcl_uid_hash; /* hashed as lie_lcl_uid; */
        cfs_list_t lie_rmt_gid_hash; /* hashed as lie_rmt_gid; */
        cfs_list_t lie_lcl_gid_hash; /* hashed as lie_lcl_gid; */
        uid_t      lie_rmt_uid;      /* remote uid */
        uid_t      lie_lcl_uid;      /* local uid */
        gid_t      lie_rmt_gid;      /* remote gid */
        gid_t      lie_lcl_gid;      /* local gid */
};
  154. static inline __u32 lustre_idmap_hashfunc(__u32 id)
  155. {
  156. return id & (CFS_IDMAP_HASHSIZE - 1);
  157. }
  158. static
  159. struct lustre_idmap_entry *idmap_entry_alloc(uid_t rmt_uid, uid_t lcl_uid,
  160. gid_t rmt_gid, gid_t lcl_gid)
  161. {
  162. struct lustre_idmap_entry *e;
  163. OBD_ALLOC_PTR(e);
  164. if (e == NULL)
  165. return NULL;
  166. CFS_INIT_LIST_HEAD(&e->lie_rmt_uid_hash);
  167. CFS_INIT_LIST_HEAD(&e->lie_lcl_uid_hash);
  168. CFS_INIT_LIST_HEAD(&e->lie_rmt_gid_hash);
  169. CFS_INIT_LIST_HEAD(&e->lie_lcl_gid_hash);
  170. e->lie_rmt_uid = rmt_uid;
  171. e->lie_lcl_uid = lcl_uid;
  172. e->lie_rmt_gid = rmt_gid;
  173. e->lie_lcl_gid = lcl_gid;
  174. return e;
  175. }
/*
 * Unlink @e from whichever hash chains it is on and release it.
 * Safe on a never-inserted entry: idmap_entry_alloc() self-initializes
 * the list heads, so the empty checks skip the deletes.
 * NOTE(review): callers that delete from a shared table hold
 * t->lit_lock around this — confirm for any new caller.
 */
static void idmap_entry_free(struct lustre_idmap_entry *e)
{
        if (!cfs_list_empty(&e->lie_rmt_uid_hash))
                cfs_list_del(&e->lie_rmt_uid_hash);
        if (!cfs_list_empty(&e->lie_lcl_uid_hash))
                cfs_list_del(&e->lie_lcl_uid_hash);
        if (!cfs_list_empty(&e->lie_rmt_gid_hash))
                cfs_list_del(&e->lie_rmt_gid_hash);
        if (!cfs_list_empty(&e->lie_lcl_gid_hash))
                cfs_list_del(&e->lie_lcl_gid_hash);
        OBD_FREE_PTR(e);
}
/*
 * return value
 * NULL: not found entry
 * ERR_PTR(-EACCES): found 1(remote):N(local) mapped entry
 * others: found normal entry
 *
 * Searches for an existing mapping matching the (rmt_uid, lcl_uid,
 * rmt_gid, lcl_gid) quaternion.  A remote uid/gid already mapped to a
 * *different* local id is an attempted 1:N mapping and is rejected.
 * Caller must hold t->lit_lock.
 */
static
struct lustre_idmap_entry *idmap_search_entry(struct lustre_idmap_table *t,
                                              uid_t rmt_uid, uid_t lcl_uid,
                                              gid_t rmt_gid, gid_t lcl_gid)
{
        cfs_list_t *head;
        struct lustre_idmap_entry *e;

        /* first pass: scan the remote-uid hash chain */
        head = &t->lit_idmaps[RMT_UIDMAP_IDX][lustre_idmap_hashfunc(rmt_uid)];
        cfs_list_for_each_entry(e, head, lie_rmt_uid_hash)
                if (e->lie_rmt_uid == rmt_uid) {
                        if (e->lie_lcl_uid == lcl_uid) {
                                if (e->lie_rmt_gid == rmt_gid &&
                                    e->lie_lcl_gid == lcl_gid)
                                        /* must be quaternion match */
                                        return e;
                        } else {
                                /* 1:N uid mapping */
                                CERROR("rmt uid %u already be mapped to %u"
                                       " (new %u)\n", e->lie_rmt_uid,
                                       e->lie_lcl_uid, lcl_uid);
                                return ERR_PTR(-EACCES);
                        }
                }

        /* second pass: scan the remote-gid hash chain */
        head = &t->lit_idmaps[RMT_GIDMAP_IDX][lustre_idmap_hashfunc(rmt_gid)];
        cfs_list_for_each_entry(e, head, lie_rmt_gid_hash)
                if (e->lie_rmt_gid == rmt_gid) {
                        if (e->lie_lcl_gid == lcl_gid) {
                                if (unlikely(e->lie_rmt_uid == rmt_uid &&
                                             e->lie_lcl_uid == lcl_uid))
                                        /* after uid mapping search above,
                                         * we should never come here */
                                        LBUG();
                        } else {
                                /* 1:N gid mapping */
                                CERROR("rmt gid %u already be mapped to %u"
                                       " (new %u)\n", e->lie_rmt_gid,
                                       e->lie_lcl_gid, lcl_gid);
                                return ERR_PTR(-EACCES);
                        }
                }

        return NULL;
}
  236. static __u32 idmap_lookup_uid(cfs_list_t *hash, int reverse,
  237. __u32 uid)
  238. {
  239. cfs_list_t *head = &hash[lustre_idmap_hashfunc(uid)];
  240. struct lustre_idmap_entry *e;
  241. if (!reverse) {
  242. cfs_list_for_each_entry(e, head, lie_rmt_uid_hash)
  243. if (e->lie_rmt_uid == uid)
  244. return e->lie_lcl_uid;
  245. } else {
  246. cfs_list_for_each_entry(e, head, lie_lcl_uid_hash)
  247. if (e->lie_lcl_uid == uid)
  248. return e->lie_rmt_uid;
  249. }
  250. return CFS_IDMAP_NOTFOUND;
  251. }
  252. static __u32 idmap_lookup_gid(cfs_list_t *hash, int reverse, __u32 gid)
  253. {
  254. cfs_list_t *head = &hash[lustre_idmap_hashfunc(gid)];
  255. struct lustre_idmap_entry *e;
  256. if (!reverse) {
  257. cfs_list_for_each_entry(e, head, lie_rmt_gid_hash)
  258. if (e->lie_rmt_gid == gid)
  259. return e->lie_lcl_gid;
  260. } else {
  261. cfs_list_for_each_entry(e, head, lie_lcl_gid_hash)
  262. if (e->lie_lcl_gid == gid)
  263. return e->lie_rmt_gid;
  264. }
  265. return CFS_IDMAP_NOTFOUND;
  266. }
/*
 * Insert the mapping (ruid -> luid, rgid -> lgid) into table @t.
 *
 * Returns 0 on success (including when an identical mapping already
 * exists), -ENOMEM on allocation failure, or -EACCES when the remote
 * uid/gid is already mapped to a different local id (1:N mapping).
 */
int lustre_idmap_add(struct lustre_idmap_table *t,
                     uid_t ruid, uid_t luid,
                     gid_t rgid, gid_t lgid)
{
        struct lustre_idmap_entry *e0, *e1;

        LASSERT(t);

        cfs_spin_lock(&t->lit_lock);
        e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
        cfs_spin_unlock(&t->lit_lock);
        if (!e0) {
                /* not found: allocate outside the spinlock, then
                 * re-search under the lock in case another thread
                 * inserted the same mapping meanwhile */
                e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
                if (!e0)
                        return -ENOMEM;
                cfs_spin_lock(&t->lit_lock);
                e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
                if (e1 == NULL) {
                        /* still absent: link into all four hash chains */
                        cfs_list_add_tail(&e0->lie_rmt_uid_hash,
                                          &t->lit_idmaps[RMT_UIDMAP_IDX]
                                          [lustre_idmap_hashfunc(ruid)]);
                        cfs_list_add_tail(&e0->lie_lcl_uid_hash,
                                          &t->lit_idmaps[LCL_UIDMAP_IDX]
                                          [lustre_idmap_hashfunc(luid)]);
                        cfs_list_add_tail(&e0->lie_rmt_gid_hash,
                                          &t->lit_idmaps[RMT_GIDMAP_IDX]
                                          [lustre_idmap_hashfunc(rgid)]);
                        cfs_list_add_tail(&e0->lie_lcl_gid_hash,
                                          &t->lit_idmaps[LCL_GIDMAP_IDX]
                                          [lustre_idmap_hashfunc(lgid)]);
                }
                cfs_spin_unlock(&t->lit_lock);
                if (e1 != NULL) {
                        /* lost the race (duplicate) or hit a conflict:
                         * drop our unused entry */
                        idmap_entry_free(e0);
                        if (IS_ERR(e1))
                                return PTR_ERR(e1);
                }
        } else if (IS_ERR(e0)) {
                /* conflicting 1:N mapping detected by the search */
                return PTR_ERR(e0);
        }
        return 0;
}
EXPORT_SYMBOL(lustre_idmap_add);
  308. int lustre_idmap_del(struct lustre_idmap_table *t,
  309. uid_t ruid, uid_t luid,
  310. gid_t rgid, gid_t lgid)
  311. {
  312. struct lustre_idmap_entry *e;
  313. int rc = 0;
  314. LASSERT(t);
  315. cfs_spin_lock(&t->lit_lock);
  316. e = idmap_search_entry(t, ruid, luid, rgid, lgid);
  317. if (IS_ERR(e))
  318. rc = PTR_ERR(e);
  319. else if (e)
  320. idmap_entry_free(e);
  321. cfs_spin_unlock(&t->lit_lock);
  322. return rc;
  323. }
  324. EXPORT_SYMBOL(lustre_idmap_del);
  325. int lustre_idmap_lookup_uid(struct md_ucred *mu,
  326. struct lustre_idmap_table *t,
  327. int reverse, uid_t uid)
  328. {
  329. cfs_list_t *hash;
  330. if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
  331. if (!reverse) {
  332. if (uid == mu->mu_o_uid)
  333. return mu->mu_uid;
  334. else if (uid == mu->mu_o_fsuid)
  335. return mu->mu_fsuid;
  336. } else {
  337. if (uid == mu->mu_uid)
  338. return mu->mu_o_uid;
  339. else if (uid == mu->mu_fsuid)
  340. return mu->mu_o_fsuid;
  341. }
  342. }
  343. if (t == NULL)
  344. return CFS_IDMAP_NOTFOUND;
  345. hash = t->lit_idmaps[reverse ? LCL_UIDMAP_IDX : RMT_UIDMAP_IDX];
  346. cfs_spin_lock(&t->lit_lock);
  347. uid = idmap_lookup_uid(hash, reverse, uid);
  348. cfs_spin_unlock(&t->lit_lock);
  349. return uid;
  350. }
  351. EXPORT_SYMBOL(lustre_idmap_lookup_uid);
  352. int lustre_idmap_lookup_gid(struct md_ucred *mu, struct lustre_idmap_table *t,
  353. int reverse, gid_t gid)
  354. {
  355. cfs_list_t *hash;
  356. if (mu && (mu->mu_valid == UCRED_OLD || mu->mu_valid == UCRED_NEW)) {
  357. if (!reverse) {
  358. if (gid == mu->mu_o_gid)
  359. return mu->mu_gid;
  360. else if (gid == mu->mu_o_fsgid)
  361. return mu->mu_fsgid;
  362. } else {
  363. if (gid == mu->mu_gid)
  364. return mu->mu_o_gid;
  365. else if (gid == mu->mu_fsgid)
  366. return mu->mu_o_fsgid;
  367. }
  368. }
  369. if (t == NULL)
  370. return CFS_IDMAP_NOTFOUND;
  371. hash = t->lit_idmaps[reverse ? LCL_GIDMAP_IDX : RMT_GIDMAP_IDX];
  372. cfs_spin_lock(&t->lit_lock);
  373. gid = idmap_lookup_gid(hash, reverse, gid);
  374. cfs_spin_unlock(&t->lit_lock);
  375. return gid;
  376. }
  377. EXPORT_SYMBOL(lustre_idmap_lookup_gid);
  378. struct lustre_idmap_table *lustre_idmap_init(void)
  379. {
  380. struct lustre_idmap_table *t;
  381. int i, j;
  382. OBD_ALLOC_PTR(t);
  383. if(unlikely(t == NULL))
  384. return (ERR_PTR(-ENOMEM));
  385. cfs_spin_lock_init(&t->lit_lock);
  386. for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
  387. for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
  388. CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
  389. return t;
  390. }
  391. EXPORT_SYMBOL(lustre_idmap_init);
  392. void lustre_idmap_fini(struct lustre_idmap_table *t)
  393. {
  394. cfs_list_t *list;
  395. struct lustre_idmap_entry *e;
  396. int i;
  397. LASSERT(t);
  398. list = t->lit_idmaps[RMT_UIDMAP_IDX];
  399. cfs_spin_lock(&t->lit_lock);
  400. for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
  401. while (!cfs_list_empty(&list[i])) {
  402. e = cfs_list_entry(list[i].next,
  403. struct lustre_idmap_entry,
  404. lie_rmt_uid_hash);
  405. idmap_entry_free(e);
  406. }
  407. cfs_spin_unlock(&t->lit_lock);
  408. OBD_FREE_PTR(t);
  409. }
  410. EXPORT_SYMBOL(lustre_idmap_fini);