
/security/selinux/avc.c

https://gitlab.com/LiquidSmooth-Devices/android_kernel_htc_msm8974
C | 684 lines | 561 code | 108 blank | 15 comment
Possible License(s): GPL-2.0
/*
 * Implementation of the kernel access vector cache (AVC).
 *
 * Authors:  Stephen Smalley, <sds@epoch.ncsc.mil>
 *           James Morris <jmorris@redhat.com>
 *
 * Update:   KaiGai, Kohei <kaigai@ak.jp.nec.com>
 *           Replaced the avc_lock spinlock by RCU.
 *
 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 *      This program is free software; you can redistribute it and/or modify
 *      it under the terms of the GNU General Public License version 2,
 *      as published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <net/sock.h>
#include <linux/un.h>
#include <net/af_unix.h>
#include <linux/ip.h>
#include <linux/audit.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include "avc.h"
#include "avc_ss.h"
#include "classmap.h"

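/*
 * AVC_CACHE_SLOTS must stay a power of two: avc_hash() reduces a
 * (ssid, tsid, tclass) triple to a slot index by masking with
 * (AVC_CACHE_SLOTS - 1).
 */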
#define AVC_CACHE_SLOTS 512
#define AVC_DEF_CACHE_THRESHOLD 512
#define AVC_CACHE_RECLAIM 16

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
#define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field)
#else
#define avc_cache_stats_incr(field) do {} while (0)
#endif

struct avc_entry {
        u32 ssid;
        u32 tsid;
        u16 tclass;
        struct av_decision avd;
};

struct avc_node {
        struct avc_entry ae;
        struct hlist_node list;
        struct rcu_head rhead;
};

struct avc_cache {
        struct hlist_head slots[AVC_CACHE_SLOTS];
        spinlock_t slots_lock[AVC_CACHE_SLOTS];
        atomic_t lru_hint;
        atomic_t active_nodes;
        u32 latest_notif;
};

struct avc_callback_node {
        int (*callback) (u32 event, u32 ssid, u32 tsid,
                         u16 tclass, u32 perms,
                         u32 *out_retained);
        u32 events;
        u32 ssid;
        u32 tsid;
        u16 tclass;
        u32 perms;
        struct avc_callback_node *next;
};

unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
#endif

static struct avc_cache avc_cache;
static struct avc_callback_node *avc_callbacks;
static struct kmem_cache *avc_node_cachep;

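/*
 * avc_hash - fold an (ssid, tsid, tclass) triple into a cache slot index.
 */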
static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
{
        return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
}

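/**
 * avc_dump_av - Display an access vector in human readable form.
 * @tclass: target security class
 * @av: access vector
 */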
static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
{
        const char **perms;
        int i, perm;

        if (av == 0) {
                audit_log_format(ab, " null");
                return;
        }

        perms = secclass_map[tclass-1].perms;

        audit_log_format(ab, " {");
        i = 0;
        perm = 1;
        while (i < (sizeof(av) * 8)) {
                if ((perm & av) && perms[i]) {
                        audit_log_format(ab, " %s", perms[i]);
                        av &= ~perm;
                }
                i++;
                perm <<= 1;
        }

        if (av)
                audit_log_format(ab, " 0x%x", av);

        audit_log_format(ab, " }");
}

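/**
 * avc_dump_query - Display a SID pair and a class in human readable form.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 */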
static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass)
{
        int rc;
        char *scontext;
        u32 scontext_len;

        rc = security_sid_to_context(ssid, &scontext, &scontext_len);
        if (rc)
                audit_log_format(ab, "ssid=%d", ssid);
        else {
                audit_log_format(ab, "scontext=%s", scontext);
                kfree(scontext);
        }

        rc = security_sid_to_context(tsid, &scontext, &scontext_len);
        if (rc)
                audit_log_format(ab, " tsid=%d", tsid);
        else {
                audit_log_format(ab, " tcontext=%s", scontext);
                kfree(scontext);
        }

        BUG_ON(tclass >= ARRAY_SIZE(secclass_map));
        audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name);
}

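/**
 * avc_init - Initialize the access vector cache.
 */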
void __init avc_init(void)
{
        int i;

        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
                INIT_HLIST_HEAD(&avc_cache.slots[i]);
                spin_lock_init(&avc_cache.slots_lock[i]);
        }
        atomic_set(&avc_cache.active_nodes, 0);
        atomic_set(&avc_cache.lru_hint, 0);

        avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
                                            0, SLAB_PANIC, NULL);

        audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
}

int avc_get_hash_stats(char *page)
{
        int i, chain_len, max_chain_len, slots_used;
        struct avc_node *node;
        struct hlist_head *head;

        rcu_read_lock();

        slots_used = 0;
        max_chain_len = 0;
        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
                head = &avc_cache.slots[i];
                if (!hlist_empty(head)) {
                        struct hlist_node *next;

                        slots_used++;
                        chain_len = 0;
                        hlist_for_each_entry_rcu(node, next, head, list)
                                chain_len++;
                        if (chain_len > max_chain_len)
                                max_chain_len = chain_len;
                }
        }

        rcu_read_unlock();

        return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
                         "longest chain: %d\n",
                         atomic_read(&avc_cache.active_nodes),
                         slots_used, AVC_CACHE_SLOTS, max_chain_len);
}

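/*
 * Node teardown comes in three flavors: avc_node_free() is the RCU
 * callback that finally releases the memory; avc_node_delete() unlinks a
 * published node and defers the free to RCU; avc_node_kill() frees
 * immediately and is only used for nodes that readers never saw.
 * avc_node_replace() swaps a new node in for an old one and RCU-frees
 * the old node.
 */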
static void avc_node_free(struct rcu_head *rhead)
{
        struct avc_node *node = container_of(rhead, struct avc_node, rhead);
        kmem_cache_free(avc_node_cachep, node);
        avc_cache_stats_incr(frees);
}

static void avc_node_delete(struct avc_node *node)
{
        hlist_del_rcu(&node->list);
        call_rcu(&node->rhead, avc_node_free);
        atomic_dec(&avc_cache.active_nodes);
}

static void avc_node_kill(struct avc_node *node)
{
        kmem_cache_free(avc_node_cachep, node);
        avc_cache_stats_incr(frees);
        atomic_dec(&avc_cache.active_nodes);
}

static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
        hlist_replace_rcu(&old->list, &new->list);
        call_rcu(&old->rhead, avc_node_free);
        atomic_dec(&avc_cache.active_nodes);
}

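/*
 * avc_reclaim_node - evict up to AVC_CACHE_RECLAIM nodes. Slots are
 * scanned round-robin starting from lru_hint; slots whose lock is
 * contended are simply skipped.
 */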
static inline int avc_reclaim_node(void)
{
        struct avc_node *node;
        int hvalue, try, ecx;
        unsigned long flags;
        struct hlist_head *head;
        struct hlist_node *next;
        spinlock_t *lock;

        for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
                hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
                head = &avc_cache.slots[hvalue];
                lock = &avc_cache.slots_lock[hvalue];

                if (!spin_trylock_irqsave(lock, flags))
                        continue;
                rcu_read_lock();
                hlist_for_each_entry(node, next, head, list) {
                        avc_node_delete(node);
                        avc_cache_stats_incr(reclaims);
                        ecx++;
                        if (ecx >= AVC_CACHE_RECLAIM) {
                                rcu_read_unlock();
                                spin_unlock_irqrestore(lock, flags);
                                goto out;
                        }
                }
                rcu_read_unlock();
                spin_unlock_irqrestore(lock, flags);
        }
out:
        return ecx;
}

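/*
 * avc_alloc_node - allocate a new node. Uses GFP_ATOMIC because the
 * allocation can happen inside an RCU read-side critical section; when
 * the cache grows past avc_cache_threshold, reclaim is triggered.
 */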
static struct avc_node *avc_alloc_node(void)
{
        struct avc_node *node;

        node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
        if (!node)
                goto out;

        INIT_HLIST_NODE(&node->list);
        avc_cache_stats_incr(allocations);

        if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
                avc_reclaim_node();

out:
        return node;
}

static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
{
        node->ae.ssid = ssid;
        node->ae.tsid = tsid;
        node->ae.tclass = tclass;
        memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
}

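/* Caller must hold rcu_read_lock() across the search. */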
static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
{
        struct avc_node *node, *ret = NULL;
        int hvalue;
        struct hlist_head *head;
        struct hlist_node *next;

        hvalue = avc_hash(ssid, tsid, tclass);
        head = &avc_cache.slots[hvalue];
        hlist_for_each_entry_rcu(node, next, head, list) {
                if (ssid == node->ae.ssid &&
                    tclass == node->ae.tclass &&
                    tsid == node->ae.tsid) {
                        ret = node;
                        break;
                }
        }

        return ret;
}

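/**
 * avc_lookup - Look up an AVC entry.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 *
 * Look up an AVC entry that is valid for the (@ssid, @tsid) pair and
 * the @tclass. If a valid AVC entry exists, return a pointer to the
 * entry; otherwise return NULL.
 */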
static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
{
        struct avc_node *node;

        avc_cache_stats_incr(lookups);
        node = avc_search_node(ssid, tsid, tclass);
        if (node)
                return node;

        avc_cache_stats_incr(misses);
        return NULL;
}

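/*
 * avc_latest_notif_update - track the newest policy sequence number seen.
 * On insert, refuse (-EAGAIN) decisions computed against a policy older
 * than the latest revocation notification; otherwise advance latest_notif.
 */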
static int avc_latest_notif_update(int seqno, int is_insert)
{
        int ret = 0;
        static DEFINE_SPINLOCK(notif_lock);
        unsigned long flag;

        spin_lock_irqsave(&notif_lock, flag);
        if (is_insert) {
                if (seqno < avc_cache.latest_notif) {
                        printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n",
                               seqno, avc_cache.latest_notif);
                        ret = -EAGAIN;
                }
        } else {
                if (seqno > avc_cache.latest_notif)
                        avc_cache.latest_notif = seqno;
        }
        spin_unlock_irqrestore(&notif_lock, flag);

        return ret;
}

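/**
 * avc_insert - Insert an AVC entry.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @avd: resulting av decision
 *
 * Insert an AVC entry for the SID pair (@ssid, @tsid) and class @tclass.
 * The access vectors and the sequence number are normally provided by
 * the security server in response to a security_compute_av() call. If
 * @avd->seqno is older than the latest revocation notification, nothing
 * is inserted and NULL is returned.
 */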
static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
{
        struct avc_node *pos, *node = NULL;
        int hvalue;
        unsigned long flag;

        if (avc_latest_notif_update(avd->seqno, 1))
                goto out;

        node = avc_alloc_node();
        if (node) {
                struct hlist_head *head;
                struct hlist_node *next;
                spinlock_t *lock;

                hvalue = avc_hash(ssid, tsid, tclass);
                avc_node_populate(node, ssid, tsid, tclass, avd);

                head = &avc_cache.slots[hvalue];
                lock = &avc_cache.slots_lock[hvalue];

                spin_lock_irqsave(lock, flag);
                hlist_for_each_entry(pos, next, head, list) {
                        if (pos->ae.ssid == ssid &&
                            pos->ae.tsid == tsid &&
                            pos->ae.tclass == tclass) {
                                avc_node_replace(node, pos);
                                goto found;
                        }
                }
                hlist_add_head_rcu(&node->list, head);
found:
                spin_unlock_irqrestore(lock, flag);
        }
out:
        return node;
}

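/* Callbacks for common_lsm_audit(): prefix and suffix of the audit record. */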
static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
{
        struct common_audit_data *ad = a;
        audit_log_format(ab, "avc: %s ",
                         ad->selinux_audit_data->slad->denied ? "denied" : "granted");
        avc_dump_av(ab, ad->selinux_audit_data->slad->tclass,
                    ad->selinux_audit_data->slad->audited);
        audit_log_format(ab, " for ");
}

static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
        struct common_audit_data *ad = a;
        audit_log_format(ab, " ");
        avc_dump_query(ab, ad->selinux_audit_data->slad->ssid,
                       ad->selinux_audit_data->slad->tsid,
                       ad->selinux_audit_data->slad->tclass);
}

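/*
 * slow_avc_audit - produce the audit record. If the caller cannot block
 * (MAY_NOT_BLOCK) and the audit data references an inode, return -ECHILD
 * so the permission check can be retried from a context that may sleep.
 */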
static noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
                                   u32 requested, u32 audited, u32 denied,
                                   struct common_audit_data *a,
                                   unsigned flags)
{
        struct common_audit_data stack_data;
        struct selinux_audit_data sad = {0,};
        struct selinux_late_audit_data slad;

        if (!a) {
                a = &stack_data;
                COMMON_AUDIT_DATA_INIT(a, NONE);
                a->selinux_audit_data = &sad;
        }

        if ((a->type == LSM_AUDIT_DATA_INODE) &&
            (flags & MAY_NOT_BLOCK))
                return -ECHILD;

        slad.tclass = tclass;
        slad.requested = requested;
        slad.ssid = ssid;
        slad.tsid = tsid;
        slad.audited = audited;
        slad.denied = denied;

        a->selinux_audit_data->slad = &slad;
        common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
        return 0;
}

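/**
 * avc_audit - Audit the granting or denial of permissions.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions
 * @avd: access vector decisions
 * @result: result from avc_has_perm_noaudit()
 * @a: auxiliary audit data
 * @flags: VFS walk flags
 *
 * Audit the granting or denial of permissions in accordance with the
 * policy. This is typically called by avc_has_perm() after a permission
 * check, but can also be called directly by callers that use
 * avc_has_perm_noaudit() to separate the permission check from auditing,
 * e.g. to defer auditing until a lock is released.
 */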
inline int avc_audit(u32 ssid, u32 tsid,
                     u16 tclass, u32 requested,
                     struct av_decision *avd, int result, struct common_audit_data *a,
                     unsigned flags)
{
        u32 denied, audited;
        denied = requested & ~avd->allowed;
        if (unlikely(denied)) {
                audited = denied & avd->auditdeny;
                if (a &&
                    a->selinux_audit_data->auditdeny &&
                    !(a->selinux_audit_data->auditdeny & avd->auditdeny))
                        audited = 0;
        } else if (result)
                audited = denied = requested;
        else
                audited = requested & avd->auditallow;
        if (likely(!audited))
                return 0;

        return slow_avc_audit(ssid, tsid, tclass,
                              requested, audited, denied,
                              a, flags);
}

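/**
 * avc_add_callback - Register a callback for security events.
 * @callback: callback function
 * @events: security events
 * @ssid: source security identifier or %SECSID_WILD
 * @tsid: target security identifier or %SECSID_WILD
 * @tclass: target security class
 * @perms: permissions
 *
 * Register a callback function for events in the set @events related to
 * the SID pair (@ssid, @tsid) and the permissions @perms, interpreting
 * @perms based on @tclass.
 */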
int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
                                     u16 tclass, u32 perms,
                                     u32 *out_retained),
                     u32 events, u32 ssid, u32 tsid,
                     u16 tclass, u32 perms)
{
        struct avc_callback_node *c;
        int rc = 0;

        c = kmalloc(sizeof(*c), GFP_ATOMIC);
        if (!c) {
                rc = -ENOMEM;
                goto out;
        }

        c->callback = callback;
        c->events = events;
        c->ssid = ssid;
        c->tsid = tsid;
        c->perms = perms;
        c->next = avc_callbacks;
        avc_callbacks = c;
out:
        return rc;
}

static inline int avc_sidcmp(u32 x, u32 y)
{
        return (x == y || x == SECSID_WILD || y == SECSID_WILD);
}

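/**
 * avc_update_node - Update an AVC entry.
 * @event: updating event
 * @perms: permission mask bits
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @seqno: sequence number when the decision was made
 *
 * If a valid AVC entry does not exist, return -ENOENT; if allocation
 * fails, return -ENOMEM. Otherwise copy the entry into a new node, apply
 * the update there, and swap it in; the original node is released later
 * by RCU.
 */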
static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
                           u32 seqno)
{
        int hvalue, rc = 0;
        unsigned long flag;
        struct avc_node *pos, *node, *orig = NULL;
        struct hlist_head *head;
        struct hlist_node *next;
        spinlock_t *lock;

        node = avc_alloc_node();
        if (!node) {
                rc = -ENOMEM;
                goto out;
        }

        hvalue = avc_hash(ssid, tsid, tclass);

        head = &avc_cache.slots[hvalue];
        lock = &avc_cache.slots_lock[hvalue];

        spin_lock_irqsave(lock, flag);

        hlist_for_each_entry(pos, next, head, list) {
                if (ssid == pos->ae.ssid &&
                    tsid == pos->ae.tsid &&
                    tclass == pos->ae.tclass &&
                    seqno == pos->ae.avd.seqno) {
                        orig = pos;
                        break;
                }
        }

        if (!orig) {
                rc = -ENOENT;
                avc_node_kill(node);
                goto out_unlock;
        }

        avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);

        switch (event) {
        case AVC_CALLBACK_GRANT:
                node->ae.avd.allowed |= perms;
                break;
        case AVC_CALLBACK_TRY_REVOKE:
        case AVC_CALLBACK_REVOKE:
                node->ae.avd.allowed &= ~perms;
                break;
        case AVC_CALLBACK_AUDITALLOW_ENABLE:
                node->ae.avd.auditallow |= perms;
                break;
        case AVC_CALLBACK_AUDITALLOW_DISABLE:
                node->ae.avd.auditallow &= ~perms;
                break;
        case AVC_CALLBACK_AUDITDENY_ENABLE:
                node->ae.avd.auditdeny |= perms;
                break;
        case AVC_CALLBACK_AUDITDENY_DISABLE:
                node->ae.avd.auditdeny &= ~perms;
                break;
        }
        avc_node_replace(node, orig);
out_unlock:
        spin_unlock_irqrestore(lock, flag);
out:
        return rc;
}

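/*
 * avc_flush - Flush the cache. Each slot is emptied under its lock;
 * concurrent readers are protected by RCU, which defers the actual frees.
 */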
static void avc_flush(void)
{
        struct hlist_head *head;
        struct hlist_node *next;
        struct avc_node *node;
        spinlock_t *lock;
        unsigned long flag;
        int i;

        for (i = 0; i < AVC_CACHE_SLOTS; i++) {
                head = &avc_cache.slots[i];
                lock = &avc_cache.slots_lock[i];

                spin_lock_irqsave(lock, flag);
                rcu_read_lock();
                hlist_for_each_entry(node, next, head, list)
                        avc_node_delete(node);
                rcu_read_unlock();
                spin_unlock_irqrestore(lock, flag);
        }
}

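/**
 * avc_ss_reset - Flush the cache and revalidate migrated permissions.
 * @seqno: policy sequence number
 */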
int avc_ss_reset(u32 seqno)
{
        struct avc_callback_node *c;
        int rc = 0, tmprc;

        avc_flush();

        for (c = avc_callbacks; c; c = c->next) {
                if (c->events & AVC_CALLBACK_RESET) {
                        tmprc = c->callback(AVC_CALLBACK_RESET,
                                            0, 0, 0, 0, NULL);
                        if (!rc)
                                rc = tmprc;
                }
        }

        avc_latest_notif_update(seqno, 0);
        return rc;
}

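/*
 * avc_compute_av - slow path for a cache miss: leave the RCU read-side
 * section, ask the security server for a decision, re-enter it, and
 * insert the result into the cache.
 */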
static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
                                                u16 tclass, struct av_decision *avd)
{
        rcu_read_unlock();
        security_compute_av(ssid, tsid, tclass, avd);
        rcu_read_lock();
        return avc_insert(ssid, tsid, tclass, avd);
}

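/*
 * avc_denied - handle a denied request. Fail with -EACCES under
 * AVC_STRICT, or when enforcing and the domain is not permissive;
 * otherwise grant the requested permissions in the cached entry.
 */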
static noinline int avc_denied(u32 ssid, u32 tsid,
                               u16 tclass, u32 requested,
                               unsigned flags,
                               struct av_decision *avd)
{
        if (flags & AVC_STRICT)
                return -EACCES;

        if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
                return -EACCES;

        avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
                        tsid, tclass, avd->seqno);
        return 0;
}

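/**
 * avc_has_perm_noaudit - Check permissions but perform no auditing.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 * @flags: AVC_STRICT or 0
 * @avd: access vector decisions
 *
 * Check the AVC to determine whether the @requested permissions are
 * granted for the SID pair (@ssid, @tsid), interpreting the permissions
 * based on @tclass, and call the security server on a cache miss to
 * obtain a new decision and add it to the cache. Return a copy of the
 * decisions in @avd. Return %0 if all @requested permissions are
 * granted, -%EACCES if any permissions are denied, or another -errno
 * upon other errors. This function is typically called by
 * avc_has_perm(), but may also be called directly to separate permission
 * checking from auditing, e.g. in cases where a lock must be held for
 * the check but should be released for the auditing.
 */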
inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
                                u16 tclass, u32 requested,
                                unsigned flags,
                                struct av_decision *avd)
{
        struct avc_node *node;
        int rc = 0;
        u32 denied;

        BUG_ON(!requested);

        rcu_read_lock();

        node = avc_lookup(ssid, tsid, tclass);
        if (unlikely(!node)) {
                node = avc_compute_av(ssid, tsid, tclass, avd);
        } else {
                memcpy(avd, &node->ae.avd, sizeof(*avd));
                avd = &node->ae.avd;
        }

        denied = requested & ~(avd->allowed);
        if (unlikely(denied))
                rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);

        rcu_read_unlock();
        return rc;
}

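/**
 * avc_has_perm_flags - Check permissions and perform any appropriate auditing.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 * @auditdata: auxiliary audit data
 * @flags: VFS walk flags
 *
 * Check the AVC for the @requested permissions and audit the granting or
 * denial according to the policy. Return %0 if all permissions are
 * granted, -%EACCES if any are denied, or the audit return code if
 * auditing itself fails.
 */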
int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
                       u32 requested, struct common_audit_data *auditdata,
                       unsigned flags)
{
        struct av_decision avd;
        int rc, rc2;

        rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);

        rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata,
                        flags);
        if (rc2)
                return rc2;
        return rc;
}

u32 avc_policy_seqno(void)
{
        return avc_cache.latest_notif;
}

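/*
 * avc_disable - flush the cache when SELinux is disabled at runtime.
 * Note that avc_node_cachep is deliberately left in place: another task
 * may still be using a node handed out before the disable, so destroying
 * the cache here could free memory out from under it.
 */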
void avc_disable(void)
{
        if (avc_node_cachep)
                avc_flush();
}