/drivers/thunderbolt/test.c

Source: https://github.com/zen-kernel/zen-kernel — C, 1696 lines (1195 code, 212 blank, 289 comment), complexity 37, MD5 5620fcec3959dc7625f08304f082bfe8 (raw file)

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * KUnit tests
  4. *
  5. * Copyright (C) 2020, Intel Corporation
  6. * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
  7. */
  8. #include <kunit/test.h>
  9. #include <linux/idr.h>
  10. #include "tb.h"
  11. #include "tunnel.h"
  12. static int __ida_init(struct kunit_resource *res, void *context)
  13. {
  14. struct ida *ida = context;
  15. ida_init(ida);
  16. res->data = ida;
  17. return 0;
  18. }
  19. static void __ida_destroy(struct kunit_resource *res)
  20. {
  21. struct ida *ida = res->data;
  22. ida_destroy(ida);
  23. }
  24. static void kunit_ida_init(struct kunit *test, struct ida *ida)
  25. {
  26. kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
  27. }
  28. static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
  29. u8 upstream_port, u8 max_port_number)
  30. {
  31. struct tb_switch *sw;
  32. size_t size;
  33. int i;
  34. sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
  35. if (!sw)
  36. return NULL;
  37. sw->config.upstream_port_number = upstream_port;
  38. sw->config.depth = tb_route_length(route);
  39. sw->config.route_hi = upper_32_bits(route);
  40. sw->config.route_lo = lower_32_bits(route);
  41. sw->config.enabled = 0;
  42. sw->config.max_port_number = max_port_number;
  43. size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
  44. sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
  45. if (!sw->ports)
  46. return NULL;
  47. for (i = 0; i <= sw->config.max_port_number; i++) {
  48. sw->ports[i].sw = sw;
  49. sw->ports[i].port = i;
  50. sw->ports[i].config.port_number = i;
  51. if (i) {
  52. kunit_ida_init(test, &sw->ports[i].in_hopids);
  53. kunit_ida_init(test, &sw->ports[i].out_hopids);
  54. }
  55. }
  56. return sw;
  57. }
  58. static struct tb_switch *alloc_host(struct kunit *test)
  59. {
  60. struct tb_switch *sw;
  61. sw = alloc_switch(test, 0, 7, 13);
  62. if (!sw)
  63. return NULL;
  64. sw->config.vendor_id = 0x8086;
  65. sw->config.device_id = 0x9a1b;
  66. sw->ports[0].config.type = TB_TYPE_PORT;
  67. sw->ports[0].config.max_in_hop_id = 7;
  68. sw->ports[0].config.max_out_hop_id = 7;
  69. sw->ports[1].config.type = TB_TYPE_PORT;
  70. sw->ports[1].config.max_in_hop_id = 19;
  71. sw->ports[1].config.max_out_hop_id = 19;
  72. sw->ports[1].total_credits = 60;
  73. sw->ports[1].ctl_credits = 2;
  74. sw->ports[1].dual_link_port = &sw->ports[2];
  75. sw->ports[2].config.type = TB_TYPE_PORT;
  76. sw->ports[2].config.max_in_hop_id = 19;
  77. sw->ports[2].config.max_out_hop_id = 19;
  78. sw->ports[2].total_credits = 60;
  79. sw->ports[2].ctl_credits = 2;
  80. sw->ports[2].dual_link_port = &sw->ports[1];
  81. sw->ports[2].link_nr = 1;
  82. sw->ports[3].config.type = TB_TYPE_PORT;
  83. sw->ports[3].config.max_in_hop_id = 19;
  84. sw->ports[3].config.max_out_hop_id = 19;
  85. sw->ports[3].total_credits = 60;
  86. sw->ports[3].ctl_credits = 2;
  87. sw->ports[3].dual_link_port = &sw->ports[4];
  88. sw->ports[4].config.type = TB_TYPE_PORT;
  89. sw->ports[4].config.max_in_hop_id = 19;
  90. sw->ports[4].config.max_out_hop_id = 19;
  91. sw->ports[4].total_credits = 60;
  92. sw->ports[4].ctl_credits = 2;
  93. sw->ports[4].dual_link_port = &sw->ports[3];
  94. sw->ports[4].link_nr = 1;
  95. sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
  96. sw->ports[5].config.max_in_hop_id = 9;
  97. sw->ports[5].config.max_out_hop_id = 9;
  98. sw->ports[5].cap_adap = -1;
  99. sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
  100. sw->ports[6].config.max_in_hop_id = 9;
  101. sw->ports[6].config.max_out_hop_id = 9;
  102. sw->ports[6].cap_adap = -1;
  103. sw->ports[7].config.type = TB_TYPE_NHI;
  104. sw->ports[7].config.max_in_hop_id = 11;
  105. sw->ports[7].config.max_out_hop_id = 11;
  106. sw->ports[7].config.nfc_credits = 0x41800000;
  107. sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
  108. sw->ports[8].config.max_in_hop_id = 8;
  109. sw->ports[8].config.max_out_hop_id = 8;
  110. sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
  111. sw->ports[9].config.max_in_hop_id = 8;
  112. sw->ports[9].config.max_out_hop_id = 8;
  113. sw->ports[10].disabled = true;
  114. sw->ports[11].disabled = true;
  115. sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
  116. sw->ports[12].config.max_in_hop_id = 8;
  117. sw->ports[12].config.max_out_hop_id = 8;
  118. sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
  119. sw->ports[13].config.max_in_hop_id = 8;
  120. sw->ports[13].config.max_out_hop_id = 8;
  121. return sw;
  122. }
  123. static struct tb_switch *alloc_host_usb4(struct kunit *test)
  124. {
  125. struct tb_switch *sw;
  126. sw = alloc_host(test);
  127. if (!sw)
  128. return NULL;
  129. sw->generation = 4;
  130. sw->credit_allocation = true;
  131. sw->max_usb3_credits = 32;
  132. sw->min_dp_aux_credits = 1;
  133. sw->min_dp_main_credits = 0;
  134. sw->max_pcie_credits = 64;
  135. sw->max_dma_credits = 14;
  136. return sw;
  137. }
  138. static struct tb_switch *alloc_dev_default(struct kunit *test,
  139. struct tb_switch *parent,
  140. u64 route, bool bonded)
  141. {
  142. struct tb_port *port, *upstream_port;
  143. struct tb_switch *sw;
  144. sw = alloc_switch(test, route, 1, 19);
  145. if (!sw)
  146. return NULL;
  147. sw->config.vendor_id = 0x8086;
  148. sw->config.device_id = 0x15ef;
  149. sw->ports[0].config.type = TB_TYPE_PORT;
  150. sw->ports[0].config.max_in_hop_id = 8;
  151. sw->ports[0].config.max_out_hop_id = 8;
  152. sw->ports[1].config.type = TB_TYPE_PORT;
  153. sw->ports[1].config.max_in_hop_id = 19;
  154. sw->ports[1].config.max_out_hop_id = 19;
  155. sw->ports[1].total_credits = 60;
  156. sw->ports[1].ctl_credits = 2;
  157. sw->ports[1].dual_link_port = &sw->ports[2];
  158. sw->ports[2].config.type = TB_TYPE_PORT;
  159. sw->ports[2].config.max_in_hop_id = 19;
  160. sw->ports[2].config.max_out_hop_id = 19;
  161. sw->ports[2].total_credits = 60;
  162. sw->ports[2].ctl_credits = 2;
  163. sw->ports[2].dual_link_port = &sw->ports[1];
  164. sw->ports[2].link_nr = 1;
  165. sw->ports[3].config.type = TB_TYPE_PORT;
  166. sw->ports[3].config.max_in_hop_id = 19;
  167. sw->ports[3].config.max_out_hop_id = 19;
  168. sw->ports[3].total_credits = 60;
  169. sw->ports[3].ctl_credits = 2;
  170. sw->ports[3].dual_link_port = &sw->ports[4];
  171. sw->ports[4].config.type = TB_TYPE_PORT;
  172. sw->ports[4].config.max_in_hop_id = 19;
  173. sw->ports[4].config.max_out_hop_id = 19;
  174. sw->ports[4].total_credits = 60;
  175. sw->ports[4].ctl_credits = 2;
  176. sw->ports[4].dual_link_port = &sw->ports[3];
  177. sw->ports[4].link_nr = 1;
  178. sw->ports[5].config.type = TB_TYPE_PORT;
  179. sw->ports[5].config.max_in_hop_id = 19;
  180. sw->ports[5].config.max_out_hop_id = 19;
  181. sw->ports[5].total_credits = 60;
  182. sw->ports[5].ctl_credits = 2;
  183. sw->ports[5].dual_link_port = &sw->ports[6];
  184. sw->ports[6].config.type = TB_TYPE_PORT;
  185. sw->ports[6].config.max_in_hop_id = 19;
  186. sw->ports[6].config.max_out_hop_id = 19;
  187. sw->ports[6].total_credits = 60;
  188. sw->ports[6].ctl_credits = 2;
  189. sw->ports[6].dual_link_port = &sw->ports[5];
  190. sw->ports[6].link_nr = 1;
  191. sw->ports[7].config.type = TB_TYPE_PORT;
  192. sw->ports[7].config.max_in_hop_id = 19;
  193. sw->ports[7].config.max_out_hop_id = 19;
  194. sw->ports[7].total_credits = 60;
  195. sw->ports[7].ctl_credits = 2;
  196. sw->ports[7].dual_link_port = &sw->ports[8];
  197. sw->ports[8].config.type = TB_TYPE_PORT;
  198. sw->ports[8].config.max_in_hop_id = 19;
  199. sw->ports[8].config.max_out_hop_id = 19;
  200. sw->ports[8].total_credits = 60;
  201. sw->ports[8].ctl_credits = 2;
  202. sw->ports[8].dual_link_port = &sw->ports[7];
  203. sw->ports[8].link_nr = 1;
  204. sw->ports[9].config.type = TB_TYPE_PCIE_UP;
  205. sw->ports[9].config.max_in_hop_id = 8;
  206. sw->ports[9].config.max_out_hop_id = 8;
  207. sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
  208. sw->ports[10].config.max_in_hop_id = 8;
  209. sw->ports[10].config.max_out_hop_id = 8;
  210. sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
  211. sw->ports[11].config.max_in_hop_id = 8;
  212. sw->ports[11].config.max_out_hop_id = 8;
  213. sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
  214. sw->ports[12].config.max_in_hop_id = 8;
  215. sw->ports[12].config.max_out_hop_id = 8;
  216. sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
  217. sw->ports[13].config.max_in_hop_id = 9;
  218. sw->ports[13].config.max_out_hop_id = 9;
  219. sw->ports[13].cap_adap = -1;
  220. sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
  221. sw->ports[14].config.max_in_hop_id = 9;
  222. sw->ports[14].config.max_out_hop_id = 9;
  223. sw->ports[14].cap_adap = -1;
  224. sw->ports[15].disabled = true;
  225. sw->ports[16].config.type = TB_TYPE_USB3_UP;
  226. sw->ports[16].config.max_in_hop_id = 8;
  227. sw->ports[16].config.max_out_hop_id = 8;
  228. sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
  229. sw->ports[17].config.max_in_hop_id = 8;
  230. sw->ports[17].config.max_out_hop_id = 8;
  231. sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
  232. sw->ports[18].config.max_in_hop_id = 8;
  233. sw->ports[18].config.max_out_hop_id = 8;
  234. sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
  235. sw->ports[19].config.max_in_hop_id = 8;
  236. sw->ports[19].config.max_out_hop_id = 8;
  237. if (!parent)
  238. return sw;
  239. /* Link them */
  240. upstream_port = tb_upstream_port(sw);
  241. port = tb_port_at(route, parent);
  242. port->remote = upstream_port;
  243. upstream_port->remote = port;
  244. if (port->dual_link_port && upstream_port->dual_link_port) {
  245. port->dual_link_port->remote = upstream_port->dual_link_port;
  246. upstream_port->dual_link_port->remote = port->dual_link_port;
  247. if (bonded) {
  248. /* Bonding is used */
  249. port->bonded = true;
  250. port->total_credits *= 2;
  251. port->dual_link_port->bonded = true;
  252. port->dual_link_port->total_credits = 0;
  253. upstream_port->bonded = true;
  254. upstream_port->total_credits *= 2;
  255. upstream_port->dual_link_port->bonded = true;
  256. upstream_port->dual_link_port->total_credits = 0;
  257. }
  258. }
  259. return sw;
  260. }
  261. static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
  262. struct tb_switch *parent,
  263. u64 route, bool bonded)
  264. {
  265. struct tb_switch *sw;
  266. sw = alloc_dev_default(test, parent, route, bonded);
  267. if (!sw)
  268. return NULL;
  269. sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
  270. sw->ports[13].config.max_in_hop_id = 9;
  271. sw->ports[13].config.max_out_hop_id = 9;
  272. sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
  273. sw->ports[14].config.max_in_hop_id = 9;
  274. sw->ports[14].config.max_out_hop_id = 9;
  275. return sw;
  276. }
  277. static struct tb_switch *alloc_dev_usb4(struct kunit *test,
  278. struct tb_switch *parent,
  279. u64 route, bool bonded)
  280. {
  281. struct tb_switch *sw;
  282. sw = alloc_dev_default(test, parent, route, bonded);
  283. if (!sw)
  284. return NULL;
  285. sw->generation = 4;
  286. sw->credit_allocation = true;
  287. sw->max_usb3_credits = 14;
  288. sw->min_dp_aux_credits = 1;
  289. sw->min_dp_main_credits = 18;
  290. sw->max_pcie_credits = 32;
  291. sw->max_dma_credits = 14;
  292. return sw;
  293. }
  294. static void tb_test_path_basic(struct kunit *test)
  295. {
  296. struct tb_port *src_port, *dst_port, *p;
  297. struct tb_switch *host;
  298. host = alloc_host(test);
  299. src_port = &host->ports[5];
  300. dst_port = src_port;
  301. p = tb_next_port_on_path(src_port, dst_port, NULL);
  302. KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
  303. p = tb_next_port_on_path(src_port, dst_port, p);
  304. KUNIT_EXPECT_TRUE(test, !p);
  305. }
  306. static void tb_test_path_not_connected_walk(struct kunit *test)
  307. {
  308. struct tb_port *src_port, *dst_port, *p;
  309. struct tb_switch *host, *dev;
  310. host = alloc_host(test);
  311. /* No connection between host and dev */
  312. dev = alloc_dev_default(test, NULL, 3, true);
  313. src_port = &host->ports[12];
  314. dst_port = &dev->ports[16];
  315. p = tb_next_port_on_path(src_port, dst_port, NULL);
  316. KUNIT_EXPECT_PTR_EQ(test, p, src_port);
  317. p = tb_next_port_on_path(src_port, dst_port, p);
  318. KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
  319. p = tb_next_port_on_path(src_port, dst_port, p);
  320. KUNIT_EXPECT_TRUE(test, !p);
  321. /* Other direction */
  322. p = tb_next_port_on_path(dst_port, src_port, NULL);
  323. KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
  324. p = tb_next_port_on_path(dst_port, src_port, p);
  325. KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
  326. p = tb_next_port_on_path(dst_port, src_port, p);
  327. KUNIT_EXPECT_TRUE(test, !p);
  328. }
  329. struct port_expectation {
  330. u64 route;
  331. u8 port;
  332. enum tb_port_type type;
  333. };
  334. static void tb_test_path_single_hop_walk(struct kunit *test)
  335. {
  336. /*
  337. * Walks from Host PCIe downstream port to Device #1 PCIe
  338. * upstream port.
  339. *
  340. * [Host]
  341. * 1 |
  342. * 1 |
  343. * [Device]
  344. */
  345. static const struct port_expectation test_data[] = {
  346. { .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
  347. { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
  348. { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
  349. { .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
  350. };
  351. struct tb_port *src_port, *dst_port, *p;
  352. struct tb_switch *host, *dev;
  353. int i;
  354. host = alloc_host(test);
  355. dev = alloc_dev_default(test, host, 1, true);
  356. src_port = &host->ports[8];
  357. dst_port = &dev->ports[9];
  358. /* Walk both directions */
  359. i = 0;
  360. tb_for_each_port_on_path(src_port, dst_port, p) {
  361. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  362. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  363. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  364. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  365. test_data[i].type);
  366. i++;
  367. }
  368. KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
  369. i = ARRAY_SIZE(test_data) - 1;
  370. tb_for_each_port_on_path(dst_port, src_port, p) {
  371. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  372. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  373. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  374. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  375. test_data[i].type);
  376. i--;
  377. }
  378. KUNIT_EXPECT_EQ(test, i, -1);
  379. }
  380. static void tb_test_path_daisy_chain_walk(struct kunit *test)
  381. {
  382. /*
  383. * Walks from Host DP IN to Device #2 DP OUT.
  384. *
  385. * [Host]
  386. * 1 |
  387. * 1 |
  388. * [Device #1]
  389. * 3 /
  390. * 1 /
  391. * [Device #2]
  392. */
  393. static const struct port_expectation test_data[] = {
  394. { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
  395. { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
  396. { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
  397. { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
  398. { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
  399. { .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
  400. };
  401. struct tb_port *src_port, *dst_port, *p;
  402. struct tb_switch *host, *dev1, *dev2;
  403. int i;
  404. host = alloc_host(test);
  405. dev1 = alloc_dev_default(test, host, 0x1, true);
  406. dev2 = alloc_dev_default(test, dev1, 0x301, true);
  407. src_port = &host->ports[5];
  408. dst_port = &dev2->ports[13];
  409. /* Walk both directions */
  410. i = 0;
  411. tb_for_each_port_on_path(src_port, dst_port, p) {
  412. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  413. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  414. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  415. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  416. test_data[i].type);
  417. i++;
  418. }
  419. KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
  420. i = ARRAY_SIZE(test_data) - 1;
  421. tb_for_each_port_on_path(dst_port, src_port, p) {
  422. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  423. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  424. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  425. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  426. test_data[i].type);
  427. i--;
  428. }
  429. KUNIT_EXPECT_EQ(test, i, -1);
  430. }
  431. static void tb_test_path_simple_tree_walk(struct kunit *test)
  432. {
  433. /*
  434. * Walks from Host DP IN to Device #3 DP OUT.
  435. *
  436. * [Host]
  437. * 1 |
  438. * 1 |
  439. * [Device #1]
  440. * 3 / | 5 \ 7
  441. * 1 / | \ 1
  442. * [Device #2] | [Device #4]
  443. * | 1
  444. * [Device #3]
  445. */
  446. static const struct port_expectation test_data[] = {
  447. { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
  448. { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
  449. { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
  450. { .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
  451. { .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
  452. { .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
  453. };
  454. struct tb_port *src_port, *dst_port, *p;
  455. struct tb_switch *host, *dev1, *dev3;
  456. int i;
  457. host = alloc_host(test);
  458. dev1 = alloc_dev_default(test, host, 0x1, true);
  459. alloc_dev_default(test, dev1, 0x301, true);
  460. dev3 = alloc_dev_default(test, dev1, 0x501, true);
  461. alloc_dev_default(test, dev1, 0x701, true);
  462. src_port = &host->ports[5];
  463. dst_port = &dev3->ports[13];
  464. /* Walk both directions */
  465. i = 0;
  466. tb_for_each_port_on_path(src_port, dst_port, p) {
  467. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  468. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  469. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  470. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  471. test_data[i].type);
  472. i++;
  473. }
  474. KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
  475. i = ARRAY_SIZE(test_data) - 1;
  476. tb_for_each_port_on_path(dst_port, src_port, p) {
  477. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  478. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  479. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  480. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  481. test_data[i].type);
  482. i--;
  483. }
  484. KUNIT_EXPECT_EQ(test, i, -1);
  485. }
  486. static void tb_test_path_complex_tree_walk(struct kunit *test)
  487. {
  488. /*
  489. * Walks from Device #3 DP IN to Device #9 DP OUT.
  490. *
  491. * [Host]
  492. * 1 |
  493. * 1 |
  494. * [Device #1]
  495. * 3 / | 5 \ 7
  496. * 1 / | \ 1
  497. * [Device #2] | [Device #5]
  498. * 5 | | 1 \ 7
  499. * 1 | [Device #4] \ 1
  500. * [Device #3] [Device #6]
  501. * 3 /
  502. * 1 /
  503. * [Device #7]
  504. * 3 / | 5
  505. * 1 / |
  506. * [Device #8] | 1
  507. * [Device #9]
  508. */
  509. static const struct port_expectation test_data[] = {
  510. { .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
  511. { .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
  512. { .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
  513. { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
  514. { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
  515. { .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
  516. { .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
  517. { .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
  518. { .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
  519. { .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
  520. { .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
  521. { .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
  522. { .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
  523. { .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
  524. };
  525. struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
  526. struct tb_port *src_port, *dst_port, *p;
  527. int i;
  528. host = alloc_host(test);
  529. dev1 = alloc_dev_default(test, host, 0x1, true);
  530. dev2 = alloc_dev_default(test, dev1, 0x301, true);
  531. dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
  532. alloc_dev_default(test, dev1, 0x501, true);
  533. dev5 = alloc_dev_default(test, dev1, 0x701, true);
  534. dev6 = alloc_dev_default(test, dev5, 0x70701, true);
  535. dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
  536. alloc_dev_default(test, dev7, 0x303070701, true);
  537. dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
  538. src_port = &dev3->ports[13];
  539. dst_port = &dev9->ports[14];
  540. /* Walk both directions */
  541. i = 0;
  542. tb_for_each_port_on_path(src_port, dst_port, p) {
  543. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  544. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  545. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  546. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  547. test_data[i].type);
  548. i++;
  549. }
  550. KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
  551. i = ARRAY_SIZE(test_data) - 1;
  552. tb_for_each_port_on_path(dst_port, src_port, p) {
  553. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  554. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  555. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  556. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  557. test_data[i].type);
  558. i--;
  559. }
  560. KUNIT_EXPECT_EQ(test, i, -1);
  561. }
  562. static void tb_test_path_max_length_walk(struct kunit *test)
  563. {
  564. struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
  565. struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
  566. struct tb_port *src_port, *dst_port, *p;
  567. int i;
  568. /*
  569. * Walks from Device #6 DP IN to Device #12 DP OUT.
  570. *
  571. * [Host]
  572. * 1 / \ 3
  573. * 1 / \ 1
  574. * [Device #1] [Device #7]
  575. * 3 | | 3
  576. * 1 | | 1
  577. * [Device #2] [Device #8]
  578. * 3 | | 3
  579. * 1 | | 1
  580. * [Device #3] [Device #9]
  581. * 3 | | 3
  582. * 1 | | 1
  583. * [Device #4] [Device #10]
  584. * 3 | | 3
  585. * 1 | | 1
  586. * [Device #5] [Device #11]
  587. * 3 | | 3
  588. * 1 | | 1
  589. * [Device #6] [Device #12]
  590. */
  591. static const struct port_expectation test_data[] = {
  592. { .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
  593. { .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
  594. { .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
  595. { .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
  596. { .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
  597. { .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
  598. { .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
  599. { .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
  600. { .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
  601. { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
  602. { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
  603. { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
  604. { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
  605. { .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
  606. { .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
  607. { .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
  608. { .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
  609. { .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
  610. { .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
  611. { .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
  612. { .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
  613. { .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
  614. { .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
  615. { .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
  616. { .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
  617. { .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
  618. };
  619. host = alloc_host(test);
  620. dev1 = alloc_dev_default(test, host, 0x1, true);
  621. dev2 = alloc_dev_default(test, dev1, 0x301, true);
  622. dev3 = alloc_dev_default(test, dev2, 0x30301, true);
  623. dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
  624. dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
  625. dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
  626. dev7 = alloc_dev_default(test, host, 0x3, true);
  627. dev8 = alloc_dev_default(test, dev7, 0x303, true);
  628. dev9 = alloc_dev_default(test, dev8, 0x30303, true);
  629. dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
  630. dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
  631. dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
  632. src_port = &dev6->ports[13];
  633. dst_port = &dev12->ports[13];
  634. /* Walk both directions */
  635. i = 0;
  636. tb_for_each_port_on_path(src_port, dst_port, p) {
  637. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  638. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  639. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  640. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  641. test_data[i].type);
  642. i++;
  643. }
  644. KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
  645. i = ARRAY_SIZE(test_data) - 1;
  646. tb_for_each_port_on_path(dst_port, src_port, p) {
  647. KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
  648. KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
  649. KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
  650. KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
  651. test_data[i].type);
  652. i--;
  653. }
  654. KUNIT_EXPECT_EQ(test, i, -1);
  655. }
  656. static void tb_test_path_not_connected(struct kunit *test)
  657. {
  658. struct tb_switch *host, *dev1, *dev2;
  659. struct tb_port *down, *up;
  660. struct tb_path *path;
  661. host = alloc_host(test);
  662. dev1 = alloc_dev_default(test, host, 0x3, false);
  663. /* Not connected to anything */
  664. dev2 = alloc_dev_default(test, NULL, 0x303, false);
  665. down = &dev1->ports[10];
  666. up = &dev2->ports[9];
  667. path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
  668. KUNIT_ASSERT_TRUE(test, path == NULL);
  669. path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
  670. KUNIT_ASSERT_TRUE(test, path == NULL);
  671. }
  672. struct hop_expectation {
  673. u64 route;
  674. u8 in_port;
  675. enum tb_port_type in_type;
  676. u8 out_port;
  677. enum tb_port_type out_type;
  678. };
  679. static void tb_test_path_not_bonded_lane0(struct kunit *test)
  680. {
  681. /*
  682. * PCIe path from host to device using lane 0.
  683. *
  684. * [Host]
  685. * 3 |: 4
  686. * 1 |: 2
  687. * [Device]
  688. */
  689. static const struct hop_expectation test_data[] = {
  690. {
  691. .route = 0x0,
  692. .in_port = 9,
  693. .in_type = TB_TYPE_PCIE_DOWN,
  694. .out_port = 3,
  695. .out_type = TB_TYPE_PORT,
  696. },
  697. {
  698. .route = 0x3,
  699. .in_port = 1,
  700. .in_type = TB_TYPE_PORT,
  701. .out_port = 9,
  702. .out_type = TB_TYPE_PCIE_UP,
  703. },
  704. };
  705. struct tb_switch *host, *dev;
  706. struct tb_port *down, *up;
  707. struct tb_path *path;
  708. int i;
  709. host = alloc_host(test);
  710. dev = alloc_dev_default(test, host, 0x3, false);
  711. down = &host->ports[9];
  712. up = &dev->ports[9];
  713. path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
  714. KUNIT_ASSERT_TRUE(test, path != NULL);
  715. KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
  716. for (i = 0; i < ARRAY_SIZE(test_data); i++) {
  717. const struct tb_port *in_port, *out_port;
  718. in_port = path->hops[i].in_port;
  719. out_port = path->hops[i].out_port;
  720. KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
  721. KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
  722. KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
  723. test_data[i].in_type);
  724. KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
  725. KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
  726. KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
  727. test_data[i].out_type);
  728. }
  729. tb_path_free(path);
  730. }
  731. static void tb_test_path_not_bonded_lane1(struct kunit *test)
  732. {
  733. /*
  734. * DP Video path from host to device using lane 1. Paths like
  735. * these are only used with Thunderbolt 1 devices where lane
  736. * bonding is not possible. USB4 specifically does not allow
  737. * paths like this (you either use lane 0 where lane 1 is
  738. * disabled or both lanes are bonded).
  739. *
  740. * [Host]
  741. * 1 :| 2
  742. * 1 :| 2
  743. * [Device]
  744. */
  745. static const struct hop_expectation test_data[] = {
  746. {
  747. .route = 0x0,
  748. .in_port = 5,
  749. .in_type = TB_TYPE_DP_HDMI_IN,
  750. .out_port = 2,
  751. .out_type = TB_TYPE_PORT,
  752. },
  753. {
  754. .route = 0x1,
  755. .in_port = 2,
  756. .in_type = TB_TYPE_PORT,
  757. .out_port = 13,
  758. .out_type = TB_TYPE_DP_HDMI_OUT,
  759. },
  760. };
  761. struct tb_switch *host, *dev;
  762. struct tb_port *in, *out;
  763. struct tb_path *path;
  764. int i;
  765. host = alloc_host(test);
  766. dev = alloc_dev_default(test, host, 0x1, false);
  767. in = &host->ports[5];
  768. out = &dev->ports[13];
  769. path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
  770. KUNIT_ASSERT_TRUE(test, path != NULL);
  771. KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
  772. for (i = 0; i < ARRAY_SIZE(test_data); i++) {
  773. const struct tb_port *in_port, *out_port;
  774. in_port = path->hops[i].in_port;
  775. out_port = path->hops[i].out_port;
  776. KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
  777. KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
  778. KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
  779. test_data[i].in_type);
  780. KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
  781. KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
  782. KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
  783. test_data[i].out_type);
  784. }
  785. tb_path_free(path);
  786. }
  787. static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
  788. {
  789. /*
  790. * DP Video path from host to device 3 using lane 1.
  791. *
  792. * [Host]
  793. * 1 :| 2
  794. * 1 :| 2
  795. * [Device #1]
  796. * 7 :| 8
  797. * 1 :| 2
  798. * [Device #2]
  799. * 5 :| 6
  800. * 1 :| 2
  801. * [Device #3]
  802. */
  803. static const struct hop_expectation test_data[] = {
  804. {
  805. .route = 0x0,
  806. .in_port = 5,
  807. .in_type = TB_TYPE_DP_HDMI_IN,
  808. .out_port = 2,
  809. .out_type = TB_TYPE_PORT,
  810. },
  811. {
  812. .route = 0x1,
  813. .in_port = 2,
  814. .in_type = TB_TYPE_PORT,
  815. .out_port = 8,
  816. .out_type = TB_TYPE_PORT,
  817. },
  818. {
  819. .route = 0x701,
  820. .in_port = 2,
  821. .in_type = TB_TYPE_PORT,
  822. .out_port = 6,
  823. .out_type = TB_TYPE_PORT,
  824. },
  825. {
  826. .route = 0x50701,
  827. .in_port = 2,
  828. .in_type = TB_TYPE_PORT,
  829. .out_port = 13,
  830. .out_type = TB_TYPE_DP_HDMI_OUT,
  831. },
  832. };
  833. struct tb_switch *host, *dev1, *dev2, *dev3;
  834. struct tb_port *in, *out;
  835. struct tb_path *path;
  836. int i;
  837. host = alloc_host(test);
  838. dev1 = alloc_dev_default(test, host, 0x1, false);
  839. dev2 = alloc_dev_default(test, dev1, 0x701, false);
  840. dev3 = alloc_dev_default(test, dev2, 0x50701, false);
  841. in = &host->ports[5];
  842. out = &dev3->ports[13];
  843. path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
  844. KUNIT_ASSERT_TRUE(test, path != NULL);
  845. KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
  846. for (i = 0; i < ARRAY_SIZE(test_data); i++) {
  847. const struct tb_port *in_port, *out_port;
  848. in_port = path->hops[i].in_port;
  849. out_port = path->hops[i].out_port;
  850. KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
  851. KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
  852. KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
  853. test_data[i].in_type);
  854. KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
  855. KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
  856. KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
  857. test_data[i].out_type);
  858. }
  859. tb_path_free(path);
  860. }
  861. static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
  862. {
  863. /*
  864. * DP Video path from device 3 to host using lane 1.
  865. *
  866. * [Host]
  867. * 1 :| 2
  868. * 1 :| 2
  869. * [Device #1]
  870. * 7 :| 8
  871. * 1 :| 2
  872. * [Device #2]
  873. * 5 :| 6
  874. * 1 :| 2
  875. * [Device #3]
  876. */
  877. static const struct hop_expectation test_data[] = {
  878. {
  879. .route = 0x50701,
  880. .in_port = 13,
  881. .in_type = TB_TYPE_DP_HDMI_IN,
  882. .out_port = 2,
  883. .out_type = TB_TYPE_PORT,
  884. },
  885. {
  886. .route = 0x701,
  887. .in_port = 6,
  888. .in_type = TB_TYPE_PORT,
  889. .out_port = 2,
  890. .out_type = TB_TYPE_PORT,
  891. },
  892. {
  893. .route = 0x1,
  894. .in_port = 8,
  895. .in_type = TB_TYPE_PORT,
  896. .out_port = 2,
  897. .out_type = TB_TYPE_PORT,
  898. },
  899. {
  900. .route = 0x0,
  901. .in_port = 2,
  902. .in_type = TB_TYPE_PORT,
  903. .out_port = 5,
  904. .out_type = TB_TYPE_DP_HDMI_IN,
  905. },
  906. };
  907. struct tb_switch *host, *dev1, *dev2, *dev3;
  908. struct tb_port *in, *out;
  909. struct tb_path *path;
  910. int i;
  911. host = alloc_host(test);
  912. dev1 = alloc_dev_default(test, host, 0x1, false);
  913. dev2 = alloc_dev_default(test, dev1, 0x701, false);
  914. dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
  915. in = &dev3->ports[13];
  916. out = &host->ports[5];
  917. path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
  918. KUNIT_ASSERT_TRUE(test, path != NULL);
  919. KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
  920. for (i = 0; i < ARRAY_SIZE(test_data); i++) {
  921. const struct tb_port *in_port, *out_port;
  922. in_port = path->hops[i].in_port;
  923. out_port = path->hops[i].out_port;
  924. KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
  925. KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
  926. KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
  927. test_data[i].in_type);
  928. KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
  929. KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
  930. KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
  931. test_data[i].out_type);
  932. }
  933. tb_path_free(path);
  934. }
  935. static void tb_test_path_mixed_chain(struct kunit *test)
  936. {
  937. /*
  938. * DP Video path from host to device 4 where first and last link
  939. * is bonded.
  940. *
  941. * [Host]
  942. * 1 |
  943. * 1 |
  944. * [Device #1]
  945. * 7 :| 8
  946. * 1 :| 2
  947. * [Device #2]
  948. * 5 :| 6
  949. * 1 :| 2
  950. * [Device #3]
  951. * 3 |
  952. * 1 |
  953. * [Device #4]
  954. */
  955. static const struct hop_expectation test_data[] = {
  956. {
  957. .route = 0x0,
  958. .in_port = 5,
  959. .in_type = TB_TYPE_DP_HDMI_IN,
  960. .out_port = 1,
  961. .out_type = TB_TYPE_PORT,
  962. },
  963. {
  964. .route = 0x1,
  965. .in_port = 1,
  966. .in_type = TB_TYPE_PORT,
  967. .out_port = 8,
  968. .out_type = TB_TYPE_PORT,
  969. },
  970. {
  971. .route = 0x701,
  972. .in_port = 2,
  973. .in_type = TB_TYPE_PORT,
  974. .out_port = 6,
  975. .out_type = TB_TYPE_PORT,
  976. },
  977. {
  978. .route = 0x50701,
  979. .in_port = 2,
  980. .in_type = TB_TYPE_PORT,
  981. .out_port = 3,
  982. .out_type = TB_TYPE_PORT,
  983. },
  984. {
  985. .route = 0x3050701,
  986. .in_port = 1,
  987. .in_type = TB_TYPE_PORT,
  988. .out_port = 13,
  989. .out_type = TB_TYPE_DP_HDMI_OUT,
  990. },
  991. };
  992. struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
  993. struct tb_port *in, *out;
  994. struct tb_path *path;
  995. int i;
  996. host = alloc_host(test);
  997. dev1 = alloc_dev_default(test, host, 0x1, true);
  998. dev2 = alloc_dev_default(test, dev1, 0x701, false);
  999. dev3 = alloc_dev_default(test, dev2, 0x50701, false);
  1000. dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
  1001. in = &host->ports[5];
  1002. out = &dev4->ports[13];
  1003. path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
  1004. KUNIT_ASSERT_TRUE(test, path != NULL);
  1005. KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
  1006. for (i = 0; i < ARRAY_SIZE(test_data); i++) {
  1007. const struct tb_port *in_port, *out_port;
  1008. in_port = path->hops[i].in_port;
  1009. out_port = path->hops[i].out_port;
  1010. KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
  1011. KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
  1012. KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
  1013. test_data[i].in_type);
  1014. KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
  1015. KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
  1016. KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
  1017. test_data[i].out_type);
  1018. }
  1019. tb_path_free(path);
  1020. }
  1021. static void tb_test_path_mixed_chain_reverse(struct kunit *test)
  1022. {
  1023. /*
  1024. * DP Video path from device 4 to host where first and last link
  1025. * is bonded.
  1026. *
  1027. * [Host]
  1028. * 1 |
  1029. * 1 |
  1030. * [Device #1]
  1031. * 7 :| 8
  1032. * 1 :| 2
  1033. * [Device #2]
  1034. * 5 :| 6
  1035. * 1 :| 2
  1036. * [Device #3]
  1037. * 3 |
  1038. * 1 |
  1039. * [Device #4]
  1040. */
  1041. static const struct hop_expectation test_data[] = {
  1042. {
  1043. .route = 0x3050701,
  1044. .in_port = 13,
  1045. .in_type = TB_TYPE_DP_HDMI_OUT,
  1046. .out_port = 1,
  1047. .out_type = TB_TYPE_PORT,
  1048. },
  1049. {
  1050. .route = 0x50701,
  1051. .in_port = 3,
  1052. .in_type = TB_TYPE_PORT,
  1053. .out_port = 2,
  1054. .out_type = TB_TYPE_PORT,
  1055. },
  1056. {
  1057. .route = 0x701,
  1058. .in_port = 6,
  1059. .in_type = TB_TYPE_PORT,
  1060. .out_port = 2,
  1061. .out_type = TB_TYPE_PORT,
  1062. },
  1063. {
  1064. .route = 0x1,
  1065. .in_port = 8,
  1066. .in_type = TB_TYPE_PORT,
  1067. .out_port = 1,
  1068. .out_type = TB_TYPE_PORT,
  1069. },
  1070. {
  1071. .route = 0x0,
  1072. .in_port = 1,
  1073. .in_type = TB_TYPE_PORT,
  1074. .out_port = 5,
  1075. .out_type = TB_TYPE_DP_HDMI_IN,
  1076. },
  1077. };
  1078. struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
  1079. struct tb_port *in, *out;
  1080. struct tb_path *path;
  1081. int i;
  1082. host = alloc_host(test);
  1083. dev1 = alloc_dev_default(test, host, 0x1, true);
  1084. dev2 = alloc_dev_default(test, dev1, 0x701, false);
  1085. dev3 = alloc_dev_default(test, dev2, 0x50701, false);
  1086. dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
  1087. in = &dev4->ports[13];
  1088. out = &host->ports[5];
  1089. path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
  1090. KUNIT_ASSERT_TRUE(test, path != NULL);
  1091. KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
  1092. for (i = 0; i < ARRAY_SIZE(test_data); i++) {
  1093. const struct tb_port *in_port, *out_port;
  1094. in_port = path->hops[i].in_port;
  1095. out_port = path->hops[i].out_port;
  1096. KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
  1097. KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
  1098. KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
  1099. test_data[i].in_type);
  1100. KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
  1101. KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
  1102. KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
  1103. test_data[i].out_type);
  1104. }
  1105. tb_path_free(path);
  1106. }
  1107. static void tb_test_tunnel_pcie(struct kunit *test)
  1108. {
  1109. struct tb_switch *host, *dev1, *dev2;
  1110. struct tb_tunnel *tunnel1, *tunnel2;
  1111. struct tb_port *down, *up;
  1112. /*
  1113. * Create PCIe tunnel between host and two devices.
  1114. *
  1115. * [Host]
  1116. * 1 |
  1117. * 1 |
  1118. * [Device #1]
  1119. * 5 |
  1120. * 1 |
  1121. * [Device #2]
  1122. */
  1123. host = alloc_host(test);
  1124. dev1 = alloc_dev_default(test, host, 0x1, true);
  1125. dev2 = alloc_dev_default(test, dev1, 0x501, true);
  1126. down = &host->ports[8];
  1127. up = &dev1->ports[9];
  1128. tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
  1129. KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
  1130. KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
  1131. KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
  1132. KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
  1133. KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
  1134. KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
  1135. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
  1136. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
  1137. KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
  1138. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
  1139. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
  1140. down = &dev1->ports[10];
  1141. up = &dev2->ports[9];
  1142. tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
  1143. KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
  1144. KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
  1145. KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
  1146. KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
  1147. KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
  1148. KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
  1149. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
  1150. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
  1151. KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
  1152. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
  1153. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
  1154. tb_tunnel_free(tunnel2);
  1155. tb_tunnel_free(tunnel1);
  1156. }
  1157. static void tb_test_tunnel_dp(struct kunit *test)
  1158. {
  1159. struct tb_switch *host, *dev;
  1160. struct tb_port *in, *out;
  1161. struct tb_tunnel *tunnel;
  1162. /*
  1163. * Create DP tunnel between Host and Device
  1164. *
  1165. * [Host]
  1166. * 1 |
  1167. * 1 |
  1168. * [Device]
  1169. */
  1170. host = alloc_host(test);
  1171. dev = alloc_dev_default(test, host, 0x3, true);
  1172. in = &host->ports[5];
  1173. out = &dev->ports[13];
  1174. tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  1175. KUNIT_ASSERT_TRUE(test, tunnel != NULL);
  1176. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
  1177. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
  1178. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
  1179. KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
  1180. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
  1181. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
  1182. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
  1183. KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
  1184. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
  1185. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
  1186. KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
  1187. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
  1188. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
  1189. tb_tunnel_free(tunnel);
  1190. }
  1191. static void tb_test_tunnel_dp_chain(struct kunit *test)
  1192. {
  1193. struct tb_switch *host, *dev1, *dev4;
  1194. struct tb_port *in, *out;
  1195. struct tb_tunnel *tunnel;
  1196. /*
  1197. * Create DP tunnel from Host DP IN to Device #4 DP OUT.
  1198. *
  1199. * [Host]
  1200. * 1 |
  1201. * 1 |
  1202. * [Device #1]
  1203. * 3 / | 5 \ 7
  1204. * 1 / | \ 1
  1205. * [Device #2] | [Device #4]
  1206. * | 1
  1207. * [Device #3]
  1208. */
  1209. host = alloc_host(test);
  1210. dev1 = alloc_dev_default(test, host, 0x1, true);
  1211. alloc_dev_default(test, dev1, 0x301, true);
  1212. alloc_dev_default(test, dev1, 0x501, true);
  1213. dev4 = alloc_dev_default(test, dev1, 0x701, true);
  1214. in = &host->ports[5];
  1215. out = &dev4->ports[14];
  1216. tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  1217. KUNIT_ASSERT_TRUE(test, tunnel != NULL);
  1218. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
  1219. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
  1220. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
  1221. KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
  1222. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
  1223. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
  1224. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
  1225. KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
  1226. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
  1227. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
  1228. KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
  1229. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
  1230. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
  1231. tb_tunnel_free(tunnel);
  1232. }
  1233. static void tb_test_tunnel_dp_tree(struct kunit *test)
  1234. {
  1235. struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
  1236. struct tb_port *in, *out;
  1237. struct tb_tunnel *tunnel;
  1238. /*
  1239. * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
  1240. *
  1241. * [Host]
  1242. * 3 |
  1243. * 1 |
  1244. * [Device #1]
  1245. * 3 / | 5 \ 7
  1246. * 1 / | \ 1
  1247. * [Device #2] | [Device #4]
  1248. * | 1
  1249. * [Device #3]
  1250. * | 5
  1251. * | 1
  1252. * [Device #5]
  1253. */
  1254. host = alloc_host(test);
  1255. dev1 = alloc_dev_default(test, host, 0x3, true);
  1256. dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
  1257. dev3 = alloc_dev_default(test, dev1, 0x503, true);
  1258. alloc_dev_default(test, dev1, 0x703, true);
  1259. dev5 = alloc_dev_default(test, dev3, 0x50503, true);
  1260. in = &dev2->ports[13];
  1261. out = &dev5->ports[13];
  1262. tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  1263. KUNIT_ASSERT_TRUE(test, tunnel != NULL);
  1264. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
  1265. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
  1266. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
  1267. KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
  1268. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
  1269. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
  1270. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
  1271. KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
  1272. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
  1273. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
  1274. KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
  1275. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
  1276. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
  1277. tb_tunnel_free(tunnel);
  1278. }
  1279. static void tb_test_tunnel_dp_max_length(struct kunit *test)
  1280. {
  1281. struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
  1282. struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
  1283. struct tb_port *in, *out;
  1284. struct tb_tunnel *tunnel;
  1285. /*
  1286. * Creates DP tunnel from Device #6 to Device #12.
  1287. *
  1288. * [Host]
  1289. * 1 / \ 3
  1290. * 1 / \ 1
  1291. * [Device #1] [Device #7]
  1292. * 3 | | 3
  1293. * 1 | | 1
  1294. * [Device #2] [Device #8]
  1295. * 3 | | 3
  1296. * 1 | | 1
  1297. * [Device #3] [Device #9]
  1298. * 3 | | 3
  1299. * 1 | | 1
  1300. * [Device #4] [Device #10]
  1301. * 3 | | 3
  1302. * 1 | | 1
  1303. * [Device #5] [Device #11]
  1304. * 3 | | 3
  1305. * 1 | | 1
  1306. * [Device #6] [Device #12]
  1307. */
  1308. host = alloc_host(test);
  1309. dev1 = alloc_dev_default(test, host, 0x1, true);
  1310. dev2 = alloc_dev_default(test, dev1, 0x301, true);
  1311. dev3 = alloc_dev_default(test, dev2, 0x30301, true);
  1312. dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
  1313. dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
  1314. dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
  1315. dev7 = alloc_dev_default(test, host, 0x3, true);
  1316. dev8 = alloc_dev_default(test, dev7, 0x303, true);
  1317. dev9 = alloc_dev_default(test, dev8, 0x30303, true);
  1318. dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
  1319. dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
  1320. dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
  1321. in = &dev6->ports[13];
  1322. out = &dev12->ports[13];
  1323. tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  1324. KUNIT_ASSERT_TRUE(test, tunnel != NULL);
  1325. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
  1326. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
  1327. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
  1328. KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
  1329. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
  1330. /* First hop */
  1331. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
  1332. /* Middle */
  1333. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
  1334. &host->ports[1]);
  1335. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
  1336. &host->ports[3]);
  1337. /* Last */
  1338. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
  1339. KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
  1340. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
  1341. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
  1342. &host->ports[1]);
  1343. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
  1344. &host->ports[3]);
  1345. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
  1346. KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
  1347. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
  1348. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
  1349. &host->ports[3]);
  1350. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
  1351. &host->ports[1]);
  1352. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
  1353. tb_tunnel_free(tunnel);
  1354. }
  1355. static void tb_test_tunnel_usb3(struct kunit *test)
  1356. {
  1357. struct tb_switch *host, *dev1, *dev2;
  1358. struct tb_tunnel *tunnel1, *tunnel2;
  1359. struct tb_port *down, *up;
  1360. /*
  1361. * Create USB3 tunnel between host and two devices.
  1362. *
  1363. * [Host]
  1364. * 1 |
  1365. * 1 |
  1366. * [Device #1]
  1367. * \ 7
  1368. * \ 1
  1369. * [Device #2]
  1370. */
  1371. host = alloc_host(test);
  1372. dev1 = alloc_dev_default(test, host, 0x1, true);
  1373. dev2 = alloc_dev_default(test, dev1, 0x701, true);
  1374. down = &host->ports[12];
  1375. up = &dev1->ports[16];
  1376. tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
  1377. KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
  1378. KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
  1379. KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
  1380. KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
  1381. KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
  1382. KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
  1383. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
  1384. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
  1385. KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
  1386. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
  1387. KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
  1388. down = &dev1->ports[17];
  1389. up = &dev2->ports[16];
  1390. tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
  1391. KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
  1392. KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
  1393. KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
  1394. KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
  1395. KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
  1396. KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
  1397. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
  1398. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
  1399. KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
  1400. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
  1401. KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
  1402. tb_tunnel_free(tunnel2);
  1403. tb_tunnel_free(tunnel1);
  1404. }
  1405. static void tb_test_tunnel_port_on_path(struct kunit *test)
  1406. {
  1407. struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
  1408. struct tb_port *in, *out, *port;
  1409. struct tb_tunnel *dp_tunnel;
  1410. /*
  1411. * [Host]
  1412. * 3 |
  1413. * 1 |
  1414. * [Device #1]
  1415. * 3 / | 5 \ 7
  1416. * 1 / | \ 1
  1417. * [Device #2] | [Device #4]
  1418. * | 1
  1419. * [Device #3]
  1420. * | 5
  1421. * | 1
  1422. * [Device #5]
  1423. */
  1424. host = alloc_host(test);
  1425. dev1 = alloc_dev_default(test, host, 0x3, true);
  1426. dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
  1427. dev3 = alloc_dev_default(test, dev1, 0x503, true);
  1428. dev4 = alloc_dev_default(test, dev1, 0x703, true);
  1429. dev5 = alloc_dev_default(test, dev3, 0x50503, true);
  1430. in = &dev2->ports[13];
  1431. out = &dev5->ports[13];
  1432. dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
  1433. KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
  1434. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
  1435. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
  1436. port = &host->ports[8];
  1437. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1438. port = &host->ports[3];
  1439. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1440. port = &dev1->ports[1];
  1441. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1442. port = &dev1->ports[3];
  1443. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1444. port = &dev1->ports[5];
  1445. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1446. port = &dev1->ports[7];
  1447. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1448. port = &dev3->ports[1];
  1449. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1450. port = &dev5->ports[1];
  1451. KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1452. port = &dev4->ports[1];
  1453. KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
  1454. tb_tunnel_free(dp_tunnel);
  1455. }
  1456. static void tb_test_tunnel_dma(struct kunit *test)
  1457. {
  1458. struct tb_port *nhi, *port;
  1459. struct tb_tunnel *tunnel;
  1460. struct tb_switch *host;
  1461. /*
  1462. * Create DMA tunnel from NHI to port 1 and back.
  1463. *
  1464. * [Host 1]
  1465. * 1 ^ In HopID 1 -> Out HopID 8
  1466. * |
  1467. * v In HopID 8 -> Out HopID 1
  1468. * ............ Domain border
  1469. * |
  1470. * [Host 2]
  1471. */
  1472. host = alloc_host(test);
  1473. nhi = &host->ports[7];
  1474. port = &host->ports[1];
  1475. tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
  1476. KUNIT_ASSERT_TRUE(test, tunnel != NULL);
  1477. KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
  1478. KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
  1479. KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
  1480. KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
  1481. /* RX path */
  1482. KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
  1483. KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
  1484. KUN