
/net/sunrpc/svcauth_unix.c

https://github.com/mstsirkin/kvm
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>

#define RPCDBG_FACILITY RPCDBG_AUTH

#include <linux/sunrpc/clnt.h>

#include "netns.h"

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2), i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */

struct unix_domain {
        struct auth_domain h;
        /* other stuff later */
};

extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
        struct unix_domain *ud = container_of(dom, struct unix_domain, h);

        kfree(dom->name);
        kfree(ud);
}
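
/*
 * Note (added summary, not upstream documentation): unix_domain_find()
 * looks up the auth_domain for @name and, if none exists, allocates a new
 * unix_domain and retries the lookup.  The loop handles a race with a
 * concurrent creator: if auth_domain_lookup() returns somebody else's
 * entry, the speculative allocation is released and the existing entry is
 * returned, or rejected with NULL if it is not an AUTH_UNIX domain.
 */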
struct auth_domain *unix_domain_find(char *name)
{
        struct auth_domain *rv;
        struct unix_domain *new = NULL;

        rv = auth_domain_lookup(name, NULL);
        while (1) {
                if (rv) {
                        if (new && rv != &new->h)
                                svcauth_unix_domain_release(&new->h);

                        if (rv->flavour != &svcauth_unix) {
                                auth_domain_put(rv);
                                return NULL;
                        }
                        return rv;
                }

                new = kmalloc(sizeof(*new), GFP_KERNEL);
                if (new == NULL)
                        return NULL;
                kref_init(&new->h.ref);
                new->h.name = kstrdup(name, GFP_KERNEL);
                if (new->h.name == NULL) {
                        kfree(new);
                        return NULL;
                }
                new->h.flavour = &svcauth_unix;
                rv = auth_domain_lookup(name, &new->h);
        }
}
EXPORT_SYMBOL_GPL(unix_domain_find);

/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define IP_HASHBITS     8
#define IP_HASHMAX      (1<<IP_HASHBITS)

struct ip_map {
        struct cache_head h;
        char m_class[8];                /* e.g. "nfsd" */
        struct in6_addr m_addr;
        struct unix_domain *m_client;
};

static void ip_map_put(struct kref *kref)
{
        struct cache_head *item = container_of(kref, struct cache_head, ref);
        struct ip_map *im = container_of(item, struct ip_map, h);

        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                auth_domain_put(&im->m_client->h);
        kfree(im);
}

#if IP_HASHBITS == 8
/* hash_long on a 64 bit machine is currently REALLY BAD for
 * IP addresses in reverse-endian (i.e. on a little-endian machine).
 * So use a trivial but reliable hash instead.
 */
static inline int hash_ip(__be32 ip)
{
        int hash = (__force u32)ip ^ ((__force u32)ip >> 16);
        return (hash ^ (hash >> 8)) & 0xff;
}
#endif
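
/*
 * Example (added illustration): for the raw 32-bit value 0xC0A80001,
 * ip ^ (ip >> 16) is 0xC0A8C0A9; XORing its low byte 0xA9 with the next
 * byte 0xC0 gives 0x69, so the entry lands in bucket 0x69 of the
 * 2^IP_HASHBITS (256) bucket table.
 */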

static inline int hash_ip6(struct in6_addr ip)
{
        return (hash_ip(ip.s6_addr32[0]) ^
                hash_ip(ip.s6_addr32[1]) ^
                hash_ip(ip.s6_addr32[2]) ^
                hash_ip(ip.s6_addr32[3]));
}

static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
        struct ip_map *orig = container_of(corig, struct ip_map, h);
        struct ip_map *new = container_of(cnew, struct ip_map, h);

        return strcmp(orig->m_class, new->m_class) == 0 &&
               ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}

static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        struct ip_map *item = container_of(citem, struct ip_map, h);

        strcpy(new->m_class, item->m_class);
        ipv6_addr_copy(&new->m_addr, &item->m_addr);
}

static void update(struct cache_head *cnew, struct cache_head *citem)
{
        struct ip_map *new = container_of(cnew, struct ip_map, h);
        struct ip_map *item = container_of(citem, struct ip_map, h);

        kref_get(&item->m_client->h.ref);
        new->m_client = item->m_client;
}

static struct cache_head *ip_map_alloc(void)
{
        struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);

        if (i)
                return &i->h;
        else
                return NULL;
}

static void ip_map_request(struct cache_detail *cd,
                           struct cache_head *h,
                           char **bpp, int *blen)
{
        char text_addr[40];
        struct ip_map *im = container_of(h, struct ip_map, h);

        if (ipv6_addr_v4mapped(&(im->m_addr))) {
                snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
        } else {
                snprintf(text_addr, 40, "%pI6", &im->m_addr);
        }
        qword_add(bpp, blen, im->m_class);
        qword_add(bpp, blen, text_addr);
        (*bpp)[-1] = '\n';
}
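
/*
 * Example (added illustration): for class "nfsd" and the v4-mapped address
 * ::ffff:192.0.2.1, ip_map_request() emits the upcall line
 *
 *      nfsd 192.0.2.1
 *
 * qword_add() appends a space after each word; the trailing space is then
 * overwritten with '\n'.
 */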

static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
}

static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
                                      struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
                           struct unix_domain *udom, time_t expiry);

static int ip_map_parse(struct cache_detail *cd,
                        char *mesg, int mlen)
{
        /* class ipaddress [domainname] */
        /* should be safe just to use the start of the input buffer
         * for scratch: */
        char *buf = mesg;
        int len;
        char class[8];
        union {
                struct sockaddr sa;
                struct sockaddr_in s4;
                struct sockaddr_in6 s6;
        } address;
        struct sockaddr_in6 sin6;
        int err;
        struct ip_map *ipmp;
        struct auth_domain *dom;
        time_t expiry;

        if (mesg[mlen-1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;

        /* class */
        len = qword_get(&mesg, class, sizeof(class));
        if (len <= 0)
                return -EINVAL;

        /* ip address */
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0)
                return -EINVAL;

        if (rpc_pton(buf, len, &address.sa, sizeof(address)) == 0)
                return -EINVAL;
        switch (address.sa.sa_family) {
        case AF_INET:
                /* Form a mapped IPv4 address in sin6 */
                sin6.sin6_family = AF_INET6;
                ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
                                       &sin6.sin6_addr);
                break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
                memcpy(&sin6, &address.s6, sizeof(sin6));
                break;
#endif
        default:
                return -EINVAL;
        }

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        /* domainname, or empty for NEGATIVE */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0)
                return -EINVAL;
        if (len) {
                dom = unix_domain_find(buf);
                if (dom == NULL)
                        return -ENOENT;
        } else
                dom = NULL;

        /* IPv6 scope IDs are ignored for now */
        ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
        if (ipmp) {
                err = __ip_map_update(cd, ipmp,
                                      container_of(dom, struct unix_domain, h),
                                      expiry);
        } else
                err = -ENOMEM;

        if (dom)
                auth_domain_put(dom);

        cache_flush();
        return err;
}
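
/*
 * Example (added illustration, values chosen for the example only): a
 * downcall written to the auth.unix.ip channel file might look like
 *
 *      nfsd 192.0.2.1 1600000000 example-client
 *
 * i.e. class, IP address, expiry (seconds since the epoch) and an optional
 * domain name; omitting the domain makes the cached entry NEGATIVE.
 */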

static int ip_map_show(struct seq_file *m,
                       struct cache_detail *cd,
                       struct cache_head *h)
{
        struct ip_map *im;
        struct in6_addr addr;
        char *dom = "-no-domain-";

        if (h == NULL) {
                seq_puts(m, "#class IP domain\n");
                return 0;
        }
        im = container_of(h, struct ip_map, h);

        /* class addr domain */
        ipv6_addr_copy(&addr, &im->m_addr);

        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                dom = im->m_client->h.name;

        if (ipv6_addr_v4mapped(&addr)) {
                seq_printf(m, "%s %pI4 %s\n",
                           im->m_class, &addr.s6_addr32[3], dom);
        } else {
                seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
        }
        return 0;
}
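
/*
 * Example (added illustration): with the downcall above cached, the
 * corresponding content file would show a line such as
 *
 *      nfsd 192.0.2.1 example-client
 *
 * with "-no-domain-" in the last column for a negative entry.
 */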

static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
                                      struct in6_addr *addr)
{
        struct ip_map ip;
        struct cache_head *ch;

        strcpy(ip.m_class, class);
        ipv6_addr_copy(&ip.m_addr, addr);
        ch = sunrpc_cache_lookup(cd, &ip.h,
                                 hash_str(class, IP_HASHBITS) ^
                                 hash_ip6(*addr));

        if (ch)
                return container_of(ch, struct ip_map, h);
        else
                return NULL;
}

static inline struct ip_map *ip_map_lookup(struct net *net, char *class,
                                           struct in6_addr *addr)
{
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        return __ip_map_lookup(sn->ip_map_cache, class, addr);
}

static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
                           struct unix_domain *udom, time_t expiry)
{
        struct ip_map ip;
        struct cache_head *ch;

        ip.m_client = udom;
        ip.h.flags = 0;
        if (!udom)
                set_bit(CACHE_NEGATIVE, &ip.h.flags);
        ip.h.expiry_time = expiry;
        ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
                                 hash_str(ipm->m_class, IP_HASHBITS) ^
                                 hash_ip6(ipm->m_addr));
        if (!ch)
                return -ENOMEM;
        cache_put(ch, cd);
        return 0;
}

static inline int ip_map_update(struct net *net, struct ip_map *ipm,
                                struct unix_domain *udom, time_t expiry)
{
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}

void svcauth_unix_purge(void)
{
        struct net *net;

        for_each_net(net) {
                struct sunrpc_net *sn;

                sn = net_generic(net, sunrpc_net_id);
                cache_purge(sn->ip_map_cache);
        }
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);

static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
        struct ip_map *ipm = NULL;
        struct sunrpc_net *sn;

        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                ipm = xprt->xpt_auth_cache;
                if (ipm != NULL) {
                        if (!cache_valid(&ipm->h)) {
                                /*
                                 * The entry has been invalidated since it was
                                 * remembered, e.g. by a second mount from the
                                 * same IP address.
                                 */
                                sn = net_generic(xprt->xpt_net, sunrpc_net_id);
                                xprt->xpt_auth_cache = NULL;
                                spin_unlock(&xprt->xpt_lock);
                                cache_put(&ipm->h, sn->ip_map_cache);
                                return NULL;
                        }
                        cache_get(&ipm->h);
                }
                spin_unlock(&xprt->xpt_lock);
        }
        return ipm;
}

static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                if (xprt->xpt_auth_cache == NULL) {
                        /* newly cached, keep the reference */
                        xprt->xpt_auth_cache = ipm;
                        ipm = NULL;
                }
                spin_unlock(&xprt->xpt_lock);
        }
        if (ipm) {
                struct sunrpc_net *sn;

                sn = net_generic(xprt->xpt_net, sunrpc_net_id);
                cache_put(&ipm->h, sn->ip_map_cache);
        }
}

void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
        struct ip_map *ipm;

        ipm = xpt->xpt_auth_cache;
        if (ipm != NULL) {
                struct sunrpc_net *sn;

                sn = net_generic(xpt->xpt_net, sunrpc_net_id);
                cache_put(&ipm->h, sn->ip_map_cache);
        }
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of 16
 */
#define GID_HASHBITS    8
#define GID_HASHMAX     (1<<GID_HASHBITS)

struct unix_gid {
        struct cache_head h;
        uid_t uid;
        struct group_info *gi;
};

static struct cache_head *gid_table[GID_HASHMAX];

static void unix_gid_put(struct kref *kref)
{
        struct cache_head *item = container_of(kref, struct cache_head, ref);
        struct unix_gid *ug = container_of(item, struct unix_gid, h);

        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                put_group_info(ug->gi);
        kfree(ug);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
        struct unix_gid *orig = container_of(corig, struct unix_gid, h);
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);

        return orig->uid == new->uid;
}

static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);

        new->uid = item->uid;
}

static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);

        get_group_info(item->gi);
        new->gi = item->gi;
}

static struct cache_head *unix_gid_alloc(void)
{
        struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);

        if (g)
                return &g->h;
        else
                return NULL;
}

static void unix_gid_request(struct cache_detail *cd,
                             struct cache_head *h,
                             char **bpp, int *blen)
{
        char tuid[20];
        struct unix_gid *ug = container_of(h, struct unix_gid, h);

        snprintf(tuid, 20, "%u", ug->uid);
        qword_add(bpp, blen, tuid);
        (*bpp)[-1] = '\n';
}
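
/*
 * Example (added illustration): the upcall for uid 1000 is simply the line
 *
 *      1000
 *
 * which the userspace helper is expected to answer with a gid-list downcall
 * in the format parsed by unix_gid_parse() below.
 */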

static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
{
        return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
}

static struct unix_gid *unix_gid_lookup(uid_t uid);
extern struct cache_detail unix_gid_cache;

static int unix_gid_parse(struct cache_detail *cd,
                          char *mesg, int mlen)
{
        /* uid expiry Ngid gid0 gid1 ... gidN-1 */
        int uid;
        int gids;
        int rv;
        int i;
        int err;
        time_t expiry;
        struct unix_gid ug, *ugp;

        if (mlen <= 0 || mesg[mlen-1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;

        rv = get_int(&mesg, &uid);
        if (rv)
                return -EINVAL;
        ug.uid = uid;

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        rv = get_int(&mesg, &gids);
        if (rv || gids < 0 || gids > 8192)
                return -EINVAL;

        ug.gi = groups_alloc(gids);
        if (!ug.gi)
                return -ENOMEM;

        for (i = 0; i < gids; i++) {
                int gid;

                rv = get_int(&mesg, &gid);
                err = -EINVAL;
                if (rv)
                        goto out;
                GROUP_AT(ug.gi, i) = gid;
        }

        ugp = unix_gid_lookup(uid);
        if (ugp) {
                struct cache_head *ch;

                ug.h.flags = 0;
                ug.h.expiry_time = expiry;
                ch = sunrpc_cache_update(&unix_gid_cache,
                                         &ug.h, &ugp->h,
                                         hash_long(uid, GID_HASHBITS));
                if (!ch)
                        err = -ENOMEM;
                else {
                        err = 0;
                        cache_put(ch, &unix_gid_cache);
                }
        } else
                err = -ENOMEM;
 out:
        if (ug.gi)
                put_group_info(ug.gi);
        return err;
}
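
/*
 * Example (added illustration, values chosen for the example only):
 * following the "uid expiry Ngid gid0 ... gidN-1" format above, a downcall
 * granting uid 1000 the two supplementary groups 100 and 1001 could be
 *
 *      1000 1600000000 2 100 1001
 */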

static int unix_gid_show(struct seq_file *m,
                         struct cache_detail *cd,
                         struct cache_head *h)
{
        struct unix_gid *ug;
        int i;
        int glen;

        if (h == NULL) {
                seq_puts(m, "#uid cnt: gids...\n");
                return 0;
        }
        ug = container_of(h, struct unix_gid, h);
        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                glen = ug->gi->ngroups;
        else
                glen = 0;

        seq_printf(m, "%u %d:", ug->uid, glen);
        for (i = 0; i < glen; i++)
                seq_printf(m, " %d", GROUP_AT(ug->gi, i));
        seq_printf(m, "\n");
        return 0;
}
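
/*
 * Example (added illustration): the entry from the downcall above would be
 * listed in the cache content file as
 *
 *      1000 2: 100 1001
 */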

struct cache_detail unix_gid_cache = {
        .owner          = THIS_MODULE,
        .hash_size      = GID_HASHMAX,
        .hash_table     = gid_table,
        .name           = "auth.unix.gid",
        .cache_put      = unix_gid_put,
        .cache_upcall   = unix_gid_upcall,
        .cache_parse    = unix_gid_parse,
        .cache_show     = unix_gid_show,
        .match          = unix_gid_match,
        .init           = unix_gid_init,
        .update         = unix_gid_update,
        .alloc          = unix_gid_alloc,
};

static struct unix_gid *unix_gid_lookup(uid_t uid)
{
        struct unix_gid ug;
        struct cache_head *ch;

        ug.uid = uid;
        ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h,
                                 hash_long(uid, GID_HASHBITS));
        if (ch)
                return container_of(ch, struct unix_gid, h);
        else
                return NULL;
}

static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
{
        struct unix_gid *ug;
        struct group_info *gi;
        int ret;

        ug = unix_gid_lookup(uid);
        if (!ug)
                return ERR_PTR(-EAGAIN);
        ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
        switch (ret) {
        case -ENOENT:
                return ERR_PTR(-ENOENT);
        case -ETIMEDOUT:
                return ERR_PTR(-ESHUTDOWN);
        case 0:
                gi = get_group_info(ug->gi);
                cache_put(&ug->h, &unix_gid_cache);
                return gi;
        default:
                return ERR_PTR(-EAGAIN);
        }
}

int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
        struct sockaddr_in *sin;
        struct sockaddr_in6 *sin6, sin6_storage;
        struct ip_map *ipm;
        struct group_info *gi;
        struct svc_cred *cred = &rqstp->rq_cred;
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct net *net = xprt->xpt_net;
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        switch (rqstp->rq_addr.ss_family) {
        case AF_INET:
                sin = svc_addr_in(rqstp);
                sin6 = &sin6_storage;
                ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
                break;
        case AF_INET6:
                sin6 = svc_addr_in6(rqstp);
                break;
        default:
                BUG();
        }

        rqstp->rq_client = NULL;
        if (rqstp->rq_proc == 0)
                return SVC_OK;

        ipm = ip_map_cached_get(xprt);
        if (ipm == NULL)
                ipm = __ip_map_lookup(sn->ip_map_cache,
                                      rqstp->rq_server->sv_program->pg_class,
                                      &sin6->sin6_addr);

        if (ipm == NULL)
                return SVC_DENIED;

        switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
        default:
                BUG();
        case -ETIMEDOUT:
                return SVC_CLOSE;
        case -EAGAIN:
                return SVC_DROP;
        case -ENOENT:
                return SVC_DENIED;
        case 0:
                rqstp->rq_client = &ipm->m_client->h;
                kref_get(&rqstp->rq_client->ref);
                ip_map_cached_put(xprt, ipm);
                break;
        }

        gi = unix_gid_find(cred->cr_uid, rqstp);
        switch (PTR_ERR(gi)) {
        case -EAGAIN:
                return SVC_DROP;
        case -ESHUTDOWN:
                return SVC_CLOSE;
        case -ENOENT:
                break;
        default:
                put_group_info(cred->cr_group_info);
                cred->cr_group_info = gi;
        }
        return SVC_OK;
}
EXPORT_SYMBOL_GPL(svcauth_unix_set_client);

static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;

        cred->cr_group_info = NULL;
        rqstp->rq_client = NULL;

        if (argv->iov_len < 3*4)
                return SVC_GARBAGE;

        if (svc_getu32(argv) != 0) {
                dprintk("svc: bad null cred\n");
                *authp = rpc_autherr_badcred;
                return SVC_DENIED;
        }
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                dprintk("svc: bad null verf\n");
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Signal that mapping to nobody uid/gid is required */
        cred->cr_uid = (uid_t) -1;
        cred->cr_gid = (gid_t) -1;
        cred->cr_group_info = groups_alloc(0);
        if (cred->cr_group_info == NULL)
                return SVC_CLOSE; /* kmalloc failure - client must retry */

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_flavor = RPC_AUTH_NULL;
        return SVC_OK;
}

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0; /* don't drop */
}

struct auth_ops svcauth_null = {
        .name           = "null",
        .owner          = THIS_MODULE,
        .flavour        = RPC_AUTH_NULL,
        .accept         = svcauth_null_accept,
        .release        = svcauth_null_release,
        .set_client     = svcauth_unix_set_client,
};

static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;
        u32 slen, i;
        int len = argv->iov_len;

        cred->cr_group_info = NULL;
        rqstp->rq_client = NULL;

        if ((len -= 3*4) < 0)
                return SVC_GARBAGE;

        svc_getu32(argv);                       /* length */
        svc_getu32(argv);                       /* time stamp */
        slen = XDR_QUADLEN(svc_getnl(argv));    /* machname length */
        if (slen > 64 || (len -= (slen + 3)*4) < 0)
                goto badcred;
        argv->iov_base = (void *)((__be32 *)argv->iov_base + slen); /* skip machname */
        argv->iov_len -= slen*4;

        cred->cr_uid = svc_getnl(argv);         /* uid */
        cred->cr_gid = svc_getnl(argv);         /* gid */
        slen = svc_getnl(argv);                 /* gids length */
        if (slen > 16 || (len -= (slen + 2)*4) < 0)
                goto badcred;
        cred->cr_group_info = groups_alloc(slen);
        if (cred->cr_group_info == NULL)
                return SVC_CLOSE;
        for (i = 0; i < slen; i++)
                GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_flavor = RPC_AUTH_UNIX;
        return SVC_OK;

badcred:
        *authp = rpc_autherr_badcred;
        return SVC_DENIED;
}
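
/*
 * Note (added summary): the credential body consumed above follows the
 * AUTH_UNIX (AUTH_SYS) layout: a length word and a stamp, the machine name
 * as an XDR opaque (limited here to 64 quad words), then uid, gid and up to
 * 16 supplementary gids, followed by an AUTH_NULL verifier (flavor 0,
 * length 0).
 */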

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
        /* Verifier (such as it is) is already in place.
         */
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0;
}

struct auth_ops svcauth_unix = {
        .name           = "unix",
        .owner          = THIS_MODULE,
        .flavour        = RPC_AUTH_UNIX,
        .accept         = svcauth_unix_accept,
        .release        = svcauth_unix_release,
        .domain_release = svcauth_unix_domain_release,
        .set_client     = svcauth_unix_set_client,
};

int ip_map_cache_create(struct net *net)
{
        int err = -ENOMEM;
        struct cache_detail *cd;
        struct cache_head **tbl;
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL);
        if (cd == NULL)
                goto err_cd;

        tbl = kzalloc(IP_HASHMAX * sizeof(struct cache_head *), GFP_KERNEL);
        if (tbl == NULL)
                goto err_tbl;

        cd->owner = THIS_MODULE;
        cd->hash_size = IP_HASHMAX;
        cd->hash_table = tbl;
        cd->name = "auth.unix.ip";
        cd->cache_put = ip_map_put;
        cd->cache_upcall = ip_map_upcall;
        cd->cache_parse = ip_map_parse;
        cd->cache_show = ip_map_show;
        cd->match = ip_map_match;
        cd->init = ip_map_init;
        cd->update = update;
        cd->alloc = ip_map_alloc;

        err = cache_register_net(cd, net);
        if (err)
                goto err_reg;

        sn->ip_map_cache = cd;
        return 0;

err_reg:
        kfree(tbl);
err_tbl:
        kfree(cd);
err_cd:
        return err;
}

void ip_map_cache_destroy(struct net *net)
{
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        cache_purge(sn->ip_map_cache);
        cache_unregister_net(sn->ip_map_cache, net);
        kfree(sn->ip_map_cache->hash_table);
        kfree(sn->ip_map_cache);
}