
/tools/testing/selftests/bpf/test_verifier.c

https://bitbucket.org/mirror/linux
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Testsuite for eBPF verifier
  4. *
  5. * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
  6. * Copyright (c) 2017 Facebook
  7. * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
  8. */
  9. #include <endian.h>
  10. #include <asm/types.h>
  11. #include <linux/types.h>
  12. #include <stdint.h>
  13. #include <stdio.h>
  14. #include <stdlib.h>
  15. #include <unistd.h>
  16. #include <errno.h>
  17. #include <string.h>
  18. #include <stddef.h>
  19. #include <stdbool.h>
  20. #include <sched.h>
  21. #include <limits.h>
  22. #include <assert.h>
  23. #include <sys/capability.h>
  24. #include <linux/unistd.h>
  25. #include <linux/filter.h>
  26. #include <linux/bpf_perf_event.h>
  27. #include <linux/bpf.h>
  28. #include <linux/if_ether.h>
  29. #include <linux/btf.h>
  30. #include <bpf/bpf.h>
  31. #include <bpf/libbpf.h>
  32. #ifdef HAVE_GENHDR
  33. # include "autoconf.h"
  34. #else
  35. # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
  36. # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
  37. # endif
  38. #endif
  39. #include "bpf_rlimit.h"
  40. #include "bpf_rand.h"
  41. #include "bpf_util.h"
  42. #include "test_btf.h"
  43. #include "../../../include/linux/filter.h"
  44. #define MAX_INSNS BPF_MAXINSNS
  45. #define MAX_TEST_INSNS 1000000
  46. #define MAX_FIXUPS 8
  47. #define MAX_NR_MAPS 20
  48. #define MAX_TEST_RUNS 8
  49. #define POINTER_VALUE 0xcafe4all
  50. #define TEST_DATA_LEN 64
  51. #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
  52. #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
  53. #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
  54. static bool unpriv_disabled = false;
  55. static int skips;
  56. static bool verbose = false;
  57. struct bpf_test {
  58. const char *descr;
  59. struct bpf_insn insns[MAX_INSNS];
  60. struct bpf_insn *fill_insns;
  61. int fixup_map_hash_8b[MAX_FIXUPS];
  62. int fixup_map_hash_48b[MAX_FIXUPS];
  63. int fixup_map_hash_16b[MAX_FIXUPS];
  64. int fixup_map_array_48b[MAX_FIXUPS];
  65. int fixup_map_sockmap[MAX_FIXUPS];
  66. int fixup_map_sockhash[MAX_FIXUPS];
  67. int fixup_map_xskmap[MAX_FIXUPS];
  68. int fixup_map_stacktrace[MAX_FIXUPS];
  69. int fixup_prog1[MAX_FIXUPS];
  70. int fixup_prog2[MAX_FIXUPS];
  71. int fixup_map_in_map[MAX_FIXUPS];
  72. int fixup_cgroup_storage[MAX_FIXUPS];
  73. int fixup_percpu_cgroup_storage[MAX_FIXUPS];
  74. int fixup_map_spin_lock[MAX_FIXUPS];
  75. int fixup_map_array_ro[MAX_FIXUPS];
  76. int fixup_map_array_wo[MAX_FIXUPS];
  77. int fixup_map_array_small[MAX_FIXUPS];
  78. int fixup_sk_storage_map[MAX_FIXUPS];
  79. int fixup_map_event_output[MAX_FIXUPS];
  80. int fixup_map_reuseport_array[MAX_FIXUPS];
  81. const char *errstr;
  82. const char *errstr_unpriv;
  83. uint32_t insn_processed;
  84. int prog_len;
  85. enum {
  86. UNDEF,
  87. ACCEPT,
  88. REJECT,
  89. VERBOSE_ACCEPT,
  90. } result, result_unpriv;
  91. enum bpf_prog_type prog_type;
  92. uint8_t flags;
  93. void (*fill_helper)(struct bpf_test *self);
  94. uint8_t runs;
  95. #define bpf_testdata_struct_t \
  96. struct { \
  97. uint32_t retval, retval_unpriv; \
  98. union { \
  99. __u8 data[TEST_DATA_LEN]; \
  100. __u64 data64[TEST_DATA_LEN / 8]; \
  101. }; \
  102. }
  103. union {
  104. bpf_testdata_struct_t;
  105. bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
  106. };
  107. enum bpf_attach_type expected_attach_type;
  108. };
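/* The actual test entries live in verifier/*.c and are pulled into the tests[]
 * array below via FILL_ARRAY. A minimal sketch of what one entry looks like
 * (hypothetical program and values, for illustration only):
 *
 *	{
 *		"example: r0 = 1; exit",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_0, 1),
 *			BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *		.retval = 1,
 *	},
 */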
  109. /* Note we want this to be 64 bit aligned so that the end of our array is
  110. * actually the end of the structure.
  111. */
  112. #define MAX_ENTRIES 11
  113. struct test_val {
  114. unsigned int index;
  115. int foo[MAX_ENTRIES];
  116. };
  117. struct other_val {
  118. long long foo;
  119. long long bar;
  120. };
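/* With MAX_ENTRIES = 11, sizeof(struct test_val) is 4 + 11 * 4 = 48 bytes and
 * sizeof(struct other_val) is 16 bytes; these are the value sizes behind the
 * fixup_map_*_48b and fixup_map_hash_16b maps created in do_test_fixup().
 */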
  121. static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
  122. {
  123. /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
  124. #define PUSH_CNT 51
  125. /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
  126. unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
  127. struct bpf_insn *insn = self->fill_insns;
  128. int i = 0, j, k = 0;
  129. insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
  130. loop:
  131. for (j = 0; j < PUSH_CNT; j++) {
  132. insn[i++] = BPF_LD_ABS(BPF_B, 0);
  133. /* jump to error label */
  134. insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
  135. i++;
  136. insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
  137. insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
  138. insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
  139. insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  140. BPF_FUNC_skb_vlan_push),
  141. insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
  142. i++;
  143. }
  144. for (j = 0; j < PUSH_CNT; j++) {
  145. insn[i++] = BPF_LD_ABS(BPF_B, 0);
  146. insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
  147. i++;
  148. insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
  149. insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  150. BPF_FUNC_skb_vlan_pop),
  151. insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
  152. i++;
  153. }
  154. if (++k < 5)
  155. goto loop;
  156. for (; i < len - 3; i++)
  157. insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
  158. insn[len - 3] = BPF_JMP_A(1);
  159. /* error label */
  160. insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
  161. insn[len - 1] = BPF_EXIT_INSN();
  162. self->prog_len = len;
  163. }
  164. static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
  165. {
  166. struct bpf_insn *insn = self->fill_insns;
  167. /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
  168. * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
  169. * to extend the error value of the inlined ld_abs sequence which then
  170. * contains 7 insns. So, set the divisor to 7 so the testcase can
  171. * work on all arches.
  172. */
  173. unsigned int len = (1 << 15) / 7;
  174. int i = 0;
  175. insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
  176. insn[i++] = BPF_LD_ABS(BPF_B, 0);
  177. insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
  178. i++;
  179. while (i < len - 1)
  180. insn[i++] = BPF_LD_ABS(BPF_B, 1);
  181. insn[i] = BPF_EXIT_INSN();
  182. self->prog_len = i + 1;
  183. }
  184. static void bpf_fill_rand_ld_dw(struct bpf_test *self)
  185. {
  186. struct bpf_insn *insn = self->fill_insns;
  187. uint64_t res = 0;
  188. int i = 0;
  189. insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
  190. while (i < self->retval) {
  191. uint64_t val = bpf_semi_rand_get();
  192. struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };
  193. res ^= val;
  194. insn[i++] = tmp[0];
  195. insn[i++] = tmp[1];
  196. insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
  197. }
  198. insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
  199. insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
  200. insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
  201. insn[i] = BPF_EXIT_INSN();
  202. self->prog_len = i + 1;
  203. res ^= (res >> 32);
  204. self->retval = (uint32_t)res;
  205. }
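/* Note: for tests using bpf_fill_rand_ld_dw, .retval is an input as well as an
 * output: the test entry sets it to the desired program length, and the fill
 * helper above overwrites it with the 32-bit XOR fold of all emitted
 * immediates, which is the value the test run is then expected to return.
 */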
  206. #define MAX_JMP_SEQ 8192
  207. /* test the sequence of 8k jumps */
  208. static void bpf_fill_scale1(struct bpf_test *self)
  209. {
  210. struct bpf_insn *insn = self->fill_insns;
  211. int i = 0, k = 0;
  212. insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
  213. /* test to check that the long sequence of jumps is acceptable */
  214. while (k++ < MAX_JMP_SEQ) {
  215. insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  216. BPF_FUNC_get_prandom_u32);
  217. insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
  218. insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
  219. insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
  220. -8 * (k % 64 + 1));
  221. }
  222. /* is_state_visited() doesn't allocate state for pruning for every jump.
  223. * Hence multiply jmps by 4 to accommodate that heuristic
  224. */
  225. while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
  226. insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
  227. insn[i] = BPF_EXIT_INSN();
  228. self->prog_len = i + 1;
  229. self->retval = 42;
  230. }
  231. /* test the sequence of 8k jumps in the innermost function (function depth 8) */
  232. static void bpf_fill_scale2(struct bpf_test *self)
  233. {
  234. struct bpf_insn *insn = self->fill_insns;
  235. int i = 0, k = 0;
  236. #define FUNC_NEST 7
  237. for (k = 0; k < FUNC_NEST; k++) {
  238. insn[i++] = BPF_CALL_REL(1);
  239. insn[i++] = BPF_EXIT_INSN();
  240. }
  241. insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
  242. /* test to check that the long sequence of jumps is acceptable */
  243. k = 0;
  244. while (k++ < MAX_JMP_SEQ) {
  245. insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  246. BPF_FUNC_get_prandom_u32);
  247. insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
  248. insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
  249. insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
  250. -8 * (k % (64 - 4 * FUNC_NEST) + 1));
  251. }
  252. while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
  253. insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
  254. insn[i] = BPF_EXIT_INSN();
  255. self->prog_len = i + 1;
  256. self->retval = 42;
  257. }
  258. static void bpf_fill_scale(struct bpf_test *self)
  259. {
  260. switch (self->retval) {
  261. case 1:
  262. return bpf_fill_scale1(self);
  263. case 2:
  264. return bpf_fill_scale2(self);
  265. default:
  266. self->prog_len = 0;
  267. break;
  268. }
  269. }
  270. /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
  271. #define BPF_SK_LOOKUP(func) \
  272. /* struct bpf_sock_tuple tuple = {} */ \
  273. BPF_MOV64_IMM(BPF_REG_2, 0), \
  274. BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
  275. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
  276. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
  277. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
  278. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
  279. BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
  280. /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
  281. BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
  282. BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
  283. BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
  284. BPF_MOV64_IMM(BPF_REG_4, 0), \
  285. BPF_MOV64_IMM(BPF_REG_5, 0), \
  286. BPF_EMIT_CALL(BPF_FUNC_ ## func)
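/* A typical use in a test program (illustrative sketch, not taken from this
 * file) checks the returned socket pointer and releases it:
 *
 *	BPF_SK_LOOKUP(sk_lookup_tcp),
 *	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 *	BPF_EMIT_CALL(BPF_FUNC_sk_release),
 *	BPF_EXIT_INSN(),
 *
 * Map fixup indices in such tests must account for the 13 instructions the
 * macro expands to.
 */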
  287. /* BPF_DIRECT_PKT_R2 contains 7 instructions. It initializes the default return
  288. * value to 0 and does the necessary preparation for direct packet access
  289. * through r2. The allowed access range is 8 bytes.
  290. */
  291. #define BPF_DIRECT_PKT_R2 \
  292. BPF_MOV64_IMM(BPF_REG_0, 0), \
  293. BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
  294. offsetof(struct __sk_buff, data)), \
  295. BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
  296. offsetof(struct __sk_buff, data_end)), \
  297. BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
  298. BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
  299. BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
  300. BPF_EXIT_INSN()
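/* After BPF_DIRECT_PKT_R2, r2 points at skb->data and at least 8 bytes of
 * packet data are known to be in bounds, so a test can follow it with, for
 * example (illustrative only):
 *
 *	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
 *	BPF_EXIT_INSN(),
 */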
  301. /* BPF_RAND_UEXT_R7 contains 4 instructions. It initializes R7 to a random
  302. * positive u32 and zero-extends it to 64 bits.
  303. */
  304. #define BPF_RAND_UEXT_R7 \
  305. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
  306. BPF_FUNC_get_prandom_u32), \
  307. BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
  308. BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
  309. BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)
  310. /* BPF_RAND_SEXT_R7 contains 5 instructions. It initializes R7 to a random
  311. * negative u32 and sign-extends it to 64 bits.
  312. */
  313. #define BPF_RAND_SEXT_R7 \
  314. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
  315. BPF_FUNC_get_prandom_u32), \
  316. BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
  317. BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
  318. BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
  319. BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
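/* In both macros above the shift pair performs the extension: LSH/RSH by 33
 * clears the upper 33 bits, leaving a non-negative 31-bit value, while OR
 * 0x80000000 followed by LSH/ARSH by 32 forces bit 31 on and arithmetically
 * sign-extends it, yielding a negative 64-bit value.
 */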
  320. static struct bpf_test tests[] = {
  321. #define FILL_ARRAY
  322. #include <verifier/tests.h>
  323. #undef FILL_ARRAY
  324. };
  325. static int probe_filter_length(const struct bpf_insn *fp)
  326. {
  327. int len;
  328. for (len = MAX_INSNS - 1; len > 0; --len)
  329. if (fp[len].code != 0 || fp[len].imm != 0)
  330. break;
  331. return len + 1;
  332. }
  333. static bool skip_unsupported_map(enum bpf_map_type map_type)
  334. {
  335. if (!bpf_probe_map_type(map_type, 0)) {
  336. printf("SKIP (unsupported map type %d)\n", map_type);
  337. skips++;
  338. return true;
  339. }
  340. return false;
  341. }
  342. static int __create_map(uint32_t type, uint32_t size_key,
  343. uint32_t size_value, uint32_t max_elem,
  344. uint32_t extra_flags)
  345. {
  346. int fd;
  347. fd = bpf_create_map(type, size_key, size_value, max_elem,
  348. (type == BPF_MAP_TYPE_HASH ?
  349. BPF_F_NO_PREALLOC : 0) | extra_flags);
  350. if (fd < 0) {
  351. if (skip_unsupported_map(type))
  352. return -1;
  353. printf("Failed to create hash map '%s'!\n", strerror(errno));
  354. }
  355. return fd;
  356. }
  357. static int create_map(uint32_t type, uint32_t size_key,
  358. uint32_t size_value, uint32_t max_elem)
  359. {
  360. return __create_map(type, size_key, size_value, max_elem, 0);
  361. }
  362. static void update_map(int fd, int index)
  363. {
  364. struct test_val value = {
  365. .index = (6 + 1) * sizeof(int),
  366. .foo[6] = 0xabcdef12,
  367. };
  368. assert(!bpf_map_update_elem(fd, &index, &value, 0));
  369. }
  370. static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
  371. {
  372. struct bpf_insn prog[] = {
  373. BPF_MOV64_IMM(BPF_REG_0, ret),
  374. BPF_EXIT_INSN(),
  375. };
  376. return bpf_load_program(prog_type, prog,
  377. ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
  378. }
  379. static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
  380. int idx, int ret)
  381. {
  382. struct bpf_insn prog[] = {
  383. BPF_MOV64_IMM(BPF_REG_3, idx),
  384. BPF_LD_MAP_FD(BPF_REG_2, mfd),
  385. BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  386. BPF_FUNC_tail_call),
  387. BPF_MOV64_IMM(BPF_REG_0, ret),
  388. BPF_EXIT_INSN(),
  389. };
  390. return bpf_load_program(prog_type, prog,
  391. ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
  392. }
  393. static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
  394. int p1key, int p2key, int p3key)
  395. {
  396. int mfd, p1fd, p2fd, p3fd;
  397. mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
  398. sizeof(int), max_elem, 0);
  399. if (mfd < 0) {
  400. if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
  401. return -1;
  402. printf("Failed to create prog array '%s'!\n", strerror(errno));
  403. return -1;
  404. }
  405. p1fd = create_prog_dummy_simple(prog_type, 42);
  406. p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
  407. p3fd = create_prog_dummy_simple(prog_type, 24);
  408. if (p1fd < 0 || p2fd < 0 || p3fd < 0)
  409. goto err;
  410. if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
  411. goto err;
  412. if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
  413. goto err;
  414. if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
  415. err:
  416. close(mfd);
  417. mfd = -1;
  418. }
  419. close(p3fd);
  420. close(p2fd);
  421. close(p1fd);
  422. return mfd;
  423. }
  424. static int create_map_in_map(void)
  425. {
  426. int inner_map_fd, outer_map_fd;
  427. inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
  428. sizeof(int), 1, 0);
  429. if (inner_map_fd < 0) {
  430. if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
  431. return -1;
  432. printf("Failed to create array '%s'!\n", strerror(errno));
  433. return inner_map_fd;
  434. }
  435. outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
  436. sizeof(int), inner_map_fd, 1, 0);
  437. if (outer_map_fd < 0) {
  438. if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
  439. return -1;
  440. printf("Failed to create array of maps '%s'!\n",
  441. strerror(errno));
  442. }
  443. close(inner_map_fd);
  444. return outer_map_fd;
  445. }
  446. static int create_cgroup_storage(bool percpu)
  447. {
  448. enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
  449. BPF_MAP_TYPE_CGROUP_STORAGE;
  450. int fd;
  451. fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
  452. TEST_DATA_LEN, 0, 0);
  453. if (fd < 0) {
  454. if (skip_unsupported_map(type))
  455. return -1;
  456. printf("Failed to create cgroup storage '%s'!\n",
  457. strerror(errno));
  458. }
  459. return fd;
  460. }
  461. /* struct bpf_spin_lock {
  462. * int val;
  463. * };
  464. * struct val {
  465. * int cnt;
  466. * struct bpf_spin_lock l;
  467. * };
  468. */
  469. static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
  470. static __u32 btf_raw_types[] = {
  471. /* int */
  472. BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
  473. /* struct bpf_spin_lock */ /* [2] */
  474. BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
  475. BTF_MEMBER_ENC(15, 1, 0), /* int val; */
  476. /* struct val */ /* [3] */
  477. BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
  478. BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
  479. BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
  480. };
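/* The numeric name offsets above index into btf_str_sec: 1 is "bpf_spin_lock",
 * 15 is "val", 19 is "cnt" and 23 is "l". The member bit offsets are 0 for
 * "cnt" and 32 for "l", matching the layout of struct val sketched above.
 */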
  481. static int load_btf(void)
  482. {
  483. struct btf_header hdr = {
  484. .magic = BTF_MAGIC,
  485. .version = BTF_VERSION,
  486. .hdr_len = sizeof(struct btf_header),
  487. .type_len = sizeof(btf_raw_types),
  488. .str_off = sizeof(btf_raw_types),
  489. .str_len = sizeof(btf_str_sec),
  490. };
  491. void *ptr, *raw_btf;
  492. int btf_fd;
  493. ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
  494. sizeof(btf_str_sec));
  495. memcpy(ptr, &hdr, sizeof(hdr));
  496. ptr += sizeof(hdr);
  497. memcpy(ptr, btf_raw_types, hdr.type_len);
  498. ptr += hdr.type_len;
  499. memcpy(ptr, btf_str_sec, hdr.str_len);
  500. ptr += hdr.str_len;
  501. btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
  502. free(raw_btf);
  503. if (btf_fd < 0)
  504. return -1;
  505. return btf_fd;
  506. }
  507. static int create_map_spin_lock(void)
  508. {
  509. struct bpf_create_map_attr attr = {
  510. .name = "test_map",
  511. .map_type = BPF_MAP_TYPE_ARRAY,
  512. .key_size = 4,
  513. .value_size = 8,
  514. .max_entries = 1,
  515. .btf_key_type_id = 1,
  516. .btf_value_type_id = 3,
  517. };
  518. int fd, btf_fd;
  519. btf_fd = load_btf();
  520. if (btf_fd < 0)
  521. return -1;
  522. attr.btf_fd = btf_fd;
  523. fd = bpf_create_map_xattr(&attr);
  524. if (fd < 0)
  525. printf("Failed to create map with spin_lock\n");
  526. return fd;
  527. }
  528. static int create_sk_storage_map(void)
  529. {
  530. struct bpf_create_map_attr attr = {
  531. .name = "test_map",
  532. .map_type = BPF_MAP_TYPE_SK_STORAGE,
  533. .key_size = 4,
  534. .value_size = 8,
  535. .max_entries = 0,
  536. .map_flags = BPF_F_NO_PREALLOC,
  537. .btf_key_type_id = 1,
  538. .btf_value_type_id = 3,
  539. };
  540. int fd, btf_fd;
  541. btf_fd = load_btf();
  542. if (btf_fd < 0)
  543. return -1;
  544. attr.btf_fd = btf_fd;
  545. fd = bpf_create_map_xattr(&attr);
  546. close(attr.btf_fd);
  547. if (fd < 0)
  548. printf("Failed to create sk_storage_map\n");
  549. return fd;
  550. }
  551. static char bpf_vlog[UINT_MAX >> 8];
  552. static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
  553. struct bpf_insn *prog, int *map_fds)
  554. {
  555. int *fixup_map_hash_8b = test->fixup_map_hash_8b;
  556. int *fixup_map_hash_48b = test->fixup_map_hash_48b;
  557. int *fixup_map_hash_16b = test->fixup_map_hash_16b;
  558. int *fixup_map_array_48b = test->fixup_map_array_48b;
  559. int *fixup_map_sockmap = test->fixup_map_sockmap;
  560. int *fixup_map_sockhash = test->fixup_map_sockhash;
  561. int *fixup_map_xskmap = test->fixup_map_xskmap;
  562. int *fixup_map_stacktrace = test->fixup_map_stacktrace;
  563. int *fixup_prog1 = test->fixup_prog1;
  564. int *fixup_prog2 = test->fixup_prog2;
  565. int *fixup_map_in_map = test->fixup_map_in_map;
  566. int *fixup_cgroup_storage = test->fixup_cgroup_storage;
  567. int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
  568. int *fixup_map_spin_lock = test->fixup_map_spin_lock;
  569. int *fixup_map_array_ro = test->fixup_map_array_ro;
  570. int *fixup_map_array_wo = test->fixup_map_array_wo;
  571. int *fixup_map_array_small = test->fixup_map_array_small;
  572. int *fixup_sk_storage_map = test->fixup_sk_storage_map;
  573. int *fixup_map_event_output = test->fixup_map_event_output;
  574. int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
  575. if (test->fill_helper) {
  576. test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
  577. test->fill_helper(test);
  578. }
  579. /* Allocating HTs with 1 elem is fine here, since we only exercise
  580. * the verifier and never do a runtime lookup, so the only thing
  581. * that really matters is the value size in this case.
  582. */
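/* Each fixup_* array below lists the instruction indices whose .imm field must
 * be patched with the fd of the corresponding map. The lists are
 * zero-terminated, which is also why instruction 0 can never be a fixup
 * target.
 */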
  583. if (*fixup_map_hash_8b) {
  584. map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
  585. sizeof(long long), 1);
  586. do {
  587. prog[*fixup_map_hash_8b].imm = map_fds[0];
  588. fixup_map_hash_8b++;
  589. } while (*fixup_map_hash_8b);
  590. }
  591. if (*fixup_map_hash_48b) {
  592. map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
  593. sizeof(struct test_val), 1);
  594. do {
  595. prog[*fixup_map_hash_48b].imm = map_fds[1];
  596. fixup_map_hash_48b++;
  597. } while (*fixup_map_hash_48b);
  598. }
  599. if (*fixup_map_hash_16b) {
  600. map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
  601. sizeof(struct other_val), 1);
  602. do {
  603. prog[*fixup_map_hash_16b].imm = map_fds[2];
  604. fixup_map_hash_16b++;
  605. } while (*fixup_map_hash_16b);
  606. }
  607. if (*fixup_map_array_48b) {
  608. map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
  609. sizeof(struct test_val), 1);
  610. update_map(map_fds[3], 0);
  611. do {
  612. prog[*fixup_map_array_48b].imm = map_fds[3];
  613. fixup_map_array_48b++;
  614. } while (*fixup_map_array_48b);
  615. }
  616. if (*fixup_prog1) {
  617. map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
  618. do {
  619. prog[*fixup_prog1].imm = map_fds[4];
  620. fixup_prog1++;
  621. } while (*fixup_prog1);
  622. }
  623. if (*fixup_prog2) {
  624. map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
  625. do {
  626. prog[*fixup_prog2].imm = map_fds[5];
  627. fixup_prog2++;
  628. } while (*fixup_prog2);
  629. }
  630. if (*fixup_map_in_map) {
  631. map_fds[6] = create_map_in_map();
  632. do {
  633. prog[*fixup_map_in_map].imm = map_fds[6];
  634. fixup_map_in_map++;
  635. } while (*fixup_map_in_map);
  636. }
  637. if (*fixup_cgroup_storage) {
  638. map_fds[7] = create_cgroup_storage(false);
  639. do {
  640. prog[*fixup_cgroup_storage].imm = map_fds[7];
  641. fixup_cgroup_storage++;
  642. } while (*fixup_cgroup_storage);
  643. }
  644. if (*fixup_percpu_cgroup_storage) {
  645. map_fds[8] = create_cgroup_storage(true);
  646. do {
  647. prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
  648. fixup_percpu_cgroup_storage++;
  649. } while (*fixup_percpu_cgroup_storage);
  650. }
  651. if (*fixup_map_sockmap) {
  652. map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
  653. sizeof(int), 1);
  654. do {
  655. prog[*fixup_map_sockmap].imm = map_fds[9];
  656. fixup_map_sockmap++;
  657. } while (*fixup_map_sockmap);
  658. }
  659. if (*fixup_map_sockhash) {
  660. map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
  661. sizeof(int), 1);
  662. do {
  663. prog[*fixup_map_sockhash].imm = map_fds[10];
  664. fixup_map_sockhash++;
  665. } while (*fixup_map_sockhash);
  666. }
  667. if (*fixup_map_xskmap) {
  668. map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
  669. sizeof(int), 1);
  670. do {
  671. prog[*fixup_map_xskmap].imm = map_fds[11];
  672. fixup_map_xskmap++;
  673. } while (*fixup_map_xskmap);
  674. }
  675. if (*fixup_map_stacktrace) {
  676. map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
  677. sizeof(u64), 1);
  678. do {
  679. prog[*fixup_map_stacktrace].imm = map_fds[12];
  680. fixup_map_stacktrace++;
  681. } while (*fixup_map_stacktrace);
  682. }
  683. if (*fixup_map_spin_lock) {
  684. map_fds[13] = create_map_spin_lock();
  685. do {
  686. prog[*fixup_map_spin_lock].imm = map_fds[13];
  687. fixup_map_spin_lock++;
  688. } while (*fixup_map_spin_lock);
  689. }
  690. if (*fixup_map_array_ro) {
  691. map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
  692. sizeof(struct test_val), 1,
  693. BPF_F_RDONLY_PROG);
  694. update_map(map_fds[14], 0);
  695. do {
  696. prog[*fixup_map_array_ro].imm = map_fds[14];
  697. fixup_map_array_ro++;
  698. } while (*fixup_map_array_ro);
  699. }
  700. if (*fixup_map_array_wo) {
  701. map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
  702. sizeof(struct test_val), 1,
  703. BPF_F_WRONLY_PROG);
  704. update_map(map_fds[15], 0);
  705. do {
  706. prog[*fixup_map_array_wo].imm = map_fds[15];
  707. fixup_map_array_wo++;
  708. } while (*fixup_map_array_wo);
  709. }
  710. if (*fixup_map_array_small) {
  711. map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
  712. 1, 1, 0);
  713. update_map(map_fds[16], 0);
  714. do {
  715. prog[*fixup_map_array_small].imm = map_fds[16];
  716. fixup_map_array_small++;
  717. } while (*fixup_map_array_small);
  718. }
  719. if (*fixup_sk_storage_map) {
  720. map_fds[17] = create_sk_storage_map();
  721. do {
  722. prog[*fixup_sk_storage_map].imm = map_fds[17];
  723. fixup_sk_storage_map++;
  724. } while (*fixup_sk_storage_map);
  725. }
  726. if (*fixup_map_event_output) {
  727. map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
  728. sizeof(int), sizeof(int), 1, 0);
  729. do {
  730. prog[*fixup_map_event_output].imm = map_fds[18];
  731. fixup_map_event_output++;
  732. } while (*fixup_map_event_output);
  733. }
  734. if (*fixup_map_reuseport_array) {
  735. map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
  736. sizeof(u32), sizeof(u64), 1, 0);
  737. do {
  738. prog[*fixup_map_reuseport_array].imm = map_fds[19];
  739. fixup_map_reuseport_array++;
  740. } while (*fixup_map_reuseport_array);
  741. }
  742. }
  743. struct libcap {
  744. struct __user_cap_header_struct hdr;
  745. struct __user_cap_data_struct data[2];
  746. };
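/* The kernel's v3 capability ABI uses two 32-bit words per set: data[0] covers
 * capabilities 0-31 and data[1] covers 32-63, which is why CAP_PERFMON (38)
 * and CAP_BPF (39) are manipulated as bits (38 - 32) and (39 - 32) of data[1]
 * below.
 */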
  747. static int set_admin(bool admin)
  748. {
  749. cap_t caps;
  750. /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
  751. const cap_value_t cap_net_admin = CAP_NET_ADMIN;
  752. const cap_value_t cap_sys_admin = CAP_SYS_ADMIN;
  753. struct libcap *cap;
  754. int ret = -1;
  755. caps = cap_get_proc();
  756. if (!caps) {
  757. perror("cap_get_proc");
  758. return -1;
  759. }
  760. cap = (struct libcap *)caps;
  761. if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) {
  762. perror("cap_set_flag clear admin");
  763. goto out;
  764. }
  765. if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin,
  766. admin ? CAP_SET : CAP_CLEAR)) {
  767. perror("cap_set_flag set_or_clear net");
  768. goto out;
  769. }
  770. /* libcap is likely old and simply ignores CAP_BPF and CAP_PERFMON,
  771. * so update effective bits manually
  772. */
  773. if (admin) {
  774. cap->data[1].effective |= 1 << (38 /* CAP_PERFMON */ - 32);
  775. cap->data[1].effective |= 1 << (39 /* CAP_BPF */ - 32);
  776. } else {
  777. cap->data[1].effective &= ~(1 << (38 - 32));
  778. cap->data[1].effective &= ~(1 << (39 - 32));
  779. }
  780. if (cap_set_proc(caps)) {
  781. perror("cap_set_proc");
  782. goto out;
  783. }
  784. ret = 0;
  785. out:
  786. if (cap_free(caps))
  787. perror("cap_free");
  788. return ret;
  789. }
  790. static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
  791. void *data, size_t size_data)
  792. {
  793. __u8 tmp[TEST_DATA_LEN << 2];
  794. __u32 size_tmp = sizeof(tmp);
  795. uint32_t retval;
  796. int err;
  797. if (unpriv)
  798. set_admin(true);
  799. err = bpf_prog_test_run(fd_prog, 1, data, size_data,
  800. tmp, &size_tmp, &retval, NULL);
  801. if (unpriv)
  802. set_admin(false);
  803. if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
  804. printf("Unexpected bpf_prog_test_run error ");
  805. return err;
  806. }
  807. if (!err && retval != expected_val &&
  808. expected_val != POINTER_VALUE) {
  809. printf("FAIL retval %d != %d ", retval, expected_val);
  810. return 1;
  811. }
  812. return 0;
  813. }
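/* Check that every '\t'-separated substring of @exp appears, in order, in the
 * verifier log @log; used to validate the log of VERBOSE_ACCEPT tests.
 */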
  814. static bool cmp_str_seq(const char *log, const char *exp)
  815. {
  816. char needle[80];
  817. const char *p, *q;
  818. int len;
  819. do {
  820. p = strchr(exp, '\t');
  821. if (!p)
  822. p = exp + strlen(exp);
  823. len = p - exp;
  824. if (len >= sizeof(needle) || !len) {
  825. printf("FAIL\nTestcase bug\n");
  826. return false;
  827. }
  828. strncpy(needle, exp, len);
  829. needle[len] = 0;
  830. q = strstr(log, needle);
  831. if (!q) {
  832. printf("FAIL\nUnexpected verifier log in successful load!\n"
  833. "EXP: %s\nRES:\n", needle);
  834. return false;
  835. }
  836. log = q + len;
  837. exp = p + 1;
  838. } while (*p);
  839. return true;
  840. }
  841. static void do_test_single(struct bpf_test *test, bool unpriv,
  842. int *passes, int *errors)
  843. {
  844. int fd_prog, expected_ret, alignment_prevented_execution;
  845. int prog_len, prog_type = test->prog_type;
  846. struct bpf_insn *prog = test->insns;
  847. struct bpf_load_program_attr attr;
  848. int run_errs, run_successes;
  849. int map_fds[MAX_NR_MAPS];
  850. const char *expected_err;
  851. int fixup_skips;
  852. __u32 pflags;
  853. int i, err;
  854. for (i = 0; i < MAX_NR_MAPS; i++)
  855. map_fds[i] = -1;
  856. if (!prog_type)
  857. prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
  858. fixup_skips = skips;
  859. do_test_fixup(test, prog_type, prog, map_fds);
  860. if (test->fill_insns) {
  861. prog = test->fill_insns;
  862. prog_len = test->prog_len;
  863. } else {
  864. prog_len = probe_filter_length(prog);
  865. }
  866. /* If there were some map skips during fixup due to missing bpf
  867. * features, skip this test.
  868. */
  869. if (fixup_skips != skips)
  870. return;
  871. pflags = BPF_F_TEST_RND_HI32;
  872. if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
  873. pflags |= BPF_F_STRICT_ALIGNMENT;
  874. if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
  875. pflags |= BPF_F_ANY_ALIGNMENT;
  876. if (test->flags & ~3)
  877. pflags |= test->flags;
  878. expected_ret = unpriv && test->result_unpriv != UNDEF ?
  879. test->result_unpriv : test->result;
  880. expected_err = unpriv && test->errstr_unpriv ?
  881. test->errstr_unpriv : test->errstr;
  882. memset(&attr, 0, sizeof(attr));
  883. attr.prog_type = prog_type;
  884. attr.expected_attach_type = test->expected_attach_type;
  885. attr.insns = prog;
  886. attr.insns_cnt = prog_len;
  887. attr.license = "GPL";
  888. if (verbose)
  889. attr.log_level = 1;
  890. else if (expected_ret == VERBOSE_ACCEPT)
  891. attr.log_level = 2;
  892. else
  893. attr.log_level = 4;
  894. attr.prog_flags = pflags;
  895. fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
  896. if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
  897. printf("SKIP (unsupported program type %d)\n", prog_type);
  898. skips++;
  899. goto close_fds;
  900. }
  901. alignment_prevented_execution = 0;
  902. if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
  903. if (fd_prog < 0) {
  904. printf("FAIL\nFailed to load prog '%s'!\n",
  905. strerror(errno));
  906. goto fail_log;
  907. }
  908. #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
  909. if (fd_prog >= 0 &&
  910. (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
  911. alignment_prevented_execution = 1;
  912. #endif
  913. if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
  914. goto fail_log;
  915. }
  916. } else {
  917. if (fd_prog >= 0) {
  918. printf("FAIL\nUnexpected success to load!\n");
  919. goto fail_log;
  920. }
  921. if (!expected_err || !strstr(bpf_vlog, expected_err)) {
  922. printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
  923. expected_err, bpf_vlog);
  924. goto fail_log;
  925. }
  926. }
  927. if (test->insn_processed) {
  928. uint32_t insn_processed;
  929. char *proc;
  930. proc = strstr(bpf_vlog, "processed ");
  931. insn_processed = atoi(proc + 10);
  932. if (test->insn_processed != insn_processed) {
  933. printf("FAIL\nUnexpected insn_processed %u vs %u\n",
  934. insn_processed, test->insn_processed);
  935. goto fail_log;
  936. }
  937. }
  938. if (verbose)
  939. printf(", verifier log:\n%s", bpf_vlog);
  940. run_errs = 0;
  941. run_successes = 0;
  942. if (!alignment_prevented_execution && fd_prog >= 0) {
  943. uint32_t expected_val;
  944. int i;
  945. if (!test->runs)
  946. test->runs = 1;
  947. for (i = 0; i < test->runs; i++) {
  948. if (unpriv && test->retvals[i].retval_unpriv)
  949. expected_val = test->retvals[i].retval_unpriv;
  950. else
  951. expected_val = test->retvals[i].retval;
  952. err = do_prog_test_run(fd_prog, unpriv, expected_val,
  953. test->retvals[i].data,
  954. sizeof(test->retvals[i].data));
  955. if (err) {
  956. printf("(run %d/%d) ", i + 1, test->runs);
  957. run_errs++;
  958. } else {
  959. run_successes++;
  960. }
  961. }
  962. }
  963. if (!run_errs) {
  964. (*passes)++;
  965. if (run_successes > 1)
  966. printf("%d cases ", run_successes);
  967. printf("OK");
  968. if (alignment_prevented_execution)
  969. printf(" (NOTE: not executed due to unknown alignment)");
  970. printf("\n");
  971. } else {
  972. printf("\n");
  973. goto fail_log;
  974. }
  975. close_fds:
  976. if (test->fill_insns)
  977. free(test->fill_insns);
  978. close(fd_prog);
  979. for (i = 0; i < MAX_NR_MAPS; i++)
  980. close(map_fds[i]);
  981. sched_yield();
  982. return;
  983. fail_log:
  984. (*errors)++;
  985. printf("%s", bpf_vlog);
  986. goto close_fds;
  987. }
  988. static bool is_admin(void)
  989. {
  990. cap_flag_value_t net_priv = CAP_CLEAR;
  991. bool perfmon_priv = false;
  992. bool bpf_priv = false;
  993. struct libcap *cap;
  994. cap_t caps;
  995. #ifdef CAP_IS_SUPPORTED
  996. if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
  997. perror("cap_get_flag");
  998. return false;
  999. }
  1000. #endif
  1001. caps = cap_get_proc();
  1002. if (!caps) {
  1003. perror("cap_get_proc");
  1004. return false;
  1005. }
  1006. cap = (struct libcap *)caps;
  1007. bpf_priv = cap->data[1].effective & (1 << (39/* CAP_BPF */ - 32));
  1008. perfmon_priv = cap->data[1].effective & (1 << (38/* CAP_PERFMON */ - 32));
  1009. if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv))
  1010. perror("cap_get_flag NET");
  1011. if (cap_free(caps))
  1012. perror("cap_free");
  1013. return bpf_priv && perfmon_priv && net_priv == CAP_SET;
  1014. }
  1015. static void get_unpriv_disabled()
  1016. {
  1017. char buf[2];
  1018. FILE *fd;
  1019. fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
  1020. if (!fd) {
  1021. perror("fopen /proc/sys/"UNPRIV_SYSCTL);
  1022. unpriv_disabled = true;
  1023. return;
  1024. }
  1025. if (fgets(buf, 2, fd) == buf && atoi(buf))
  1026. unpriv_disabled = true;
  1027. fclose(fd);
  1028. }
  1029. static bool test_as_unpriv(struct bpf_test *test)
  1030. {
  1031. return !test->prog_type ||
  1032. test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
  1033. test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
  1034. }
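/* Only socket filter and cgroup/skb programs can be loaded without privileges,
 * so only tests of those types (or with no type set) get an unprivileged pass.
 */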
  1035. static int do_test(bool unpriv, unsigned int from, unsigned int to)
  1036. {
  1037. int i, passes = 0, errors = 0;
  1038. for (i = from; i < to; i++) {
  1039. struct bpf_test *test = &tests[i];
  1040. /* Program types that are not supported for non-root are
  1041. * skipped right away.
  1042. */
  1043. if (test_as_unpriv(test) && unpriv_disabled) {
  1044. printf("#%d/u %s SKIP\n", i, test->descr);
  1045. skips++;
  1046. } else if (test_as_unpriv(test)) {
  1047. if (!unpriv)
  1048. set_admin(false);
  1049. printf("#%d/u %s ", i, test->descr);
  1050. do_test_single(test, true, &passes, &errors);
  1051. if (!unpriv)
  1052. set_admin(true);
  1053. }
  1054. if (unpriv) {
  1055. printf("#%d/p %s SKIP\n", i, test->descr);
  1056. skips++;
  1057. } else {
  1058. printf("#%d/p %s ", i, test->descr);
  1059. do_test_single(test, false, &passes, &errors);
  1060. }
  1061. }
  1062. printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
  1063. skips, errors);
  1064. return errors ? EXIT_FAILURE : EXIT_SUCCESS;
  1065. }
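/* Usage (derived from the argument parsing below):
 *	test_verifier [-v] [<test#> | <from#> <to#>]
 * With no range given, all tests are run; -v also prints the verifier log.
 */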
  1066. int main(int argc, char **argv)
  1067. {
  1068. unsigned int from = 0, to = ARRAY_SIZE(tests);
  1069. bool unpriv = !is_admin();
  1070. int arg = 1;
  1071. if (argc > 1 && strcmp(argv[1], "-v") == 0) {
  1072. arg++;
  1073. verbose = true;
  1074. argc--;
  1075. }
  1076. if (argc == 3) {
  1077. unsigned int l = atoi(argv[arg]);
  1078. unsigned int u = atoi(argv[arg + 1]);
  1079. if (l < to && u < to) {
  1080. from = l;
  1081. to = u + 1;
  1082. }
  1083. } else if (argc == 2) {
  1084. unsigned int t = atoi(argv[arg]);
  1085. if (t < to) {
  1086. from = t;
  1087. to = t + 1;
  1088. }
  1089. }
  1090. get_unpriv_disabled();
  1091. if (unpriv && unpriv_disabled) {
  1092. printf("Cannot run as unprivileged user with sysctl %s.\n",
  1093. UNPRIV_SYSCTL);
  1094. return EXIT_FAILURE;
  1095. }
  1096. bpf_semi_rand_init();
  1097. return do_test(unpriv, from, to);
  1098. }