/kernel/2.6.32_froyo_photon_nightly/drivers/ieee1394/csr1212.c

http://photon-android.googlecode.com/

/*
 * csr1212.c -- IEEE 1212 Control and Status Register support for Linux
 *
 * Copyright (C) 2003 Francois Retief <fgretief@sun.ac.za>
 *                    Steve Kinneberg <kinnebergsteve@acmsystems.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *    1. Redistributions of source code must retain the above copyright notice,
 *       this list of conditions and the following disclaimer.
 *    2. Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *    3. The name of the author may not be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* TODO List:
 * - Verify interface consistency: i.e., public functions that take a size
 *   parameter expect size to be in bytes.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/string.h>
#include <asm/bug.h>
#include <asm/byteorder.h>

#include "csr1212.h"


/* Permitted key type for each key id */
#define __I (1 << CSR1212_KV_TYPE_IMMEDIATE)
#define __C (1 << CSR1212_KV_TYPE_CSR_OFFSET)
#define __D (1 << CSR1212_KV_TYPE_DIRECTORY)
#define __L (1 << CSR1212_KV_TYPE_LEAF)
static const u8 csr1212_key_id_type_map[0x30] = {
	__C,			/* used by Apple iSight */
	__D | __L,		/* Descriptor */
	__I | __D | __L,	/* Bus_Dependent_Info */
	__I | __D | __L,	/* Vendor */
	__I,			/* Hardware_Version */
	0, 0,			/* Reserved */
	__D | __L | __I,	/* Module */
	__I, 0, 0, 0,		/* used by Apple iSight, Reserved */
	__I,			/* Node_Capabilities */
	__L,			/* EUI_64 */
	0, 0, 0,		/* Reserved */
	__D,			/* Unit */
	__I,			/* Specifier_ID */
	__I,			/* Version */
	__I | __C | __D | __L,	/* Dependent_Info */
	__L,			/* Unit_Location */
	0,			/* Reserved */
	__I,			/* Model */
	__D,			/* Instance */
	__L,			/* Keyword */
	__D,			/* Feature */
	__L,			/* Extended_ROM */
	__I,			/* Extended_Key_Specifier_ID */
	__I,			/* Extended_Key */
	__I | __C | __D | __L,	/* Extended_Data */
	__L,			/* Modifiable_Descriptor */
	__I,			/* Directory_ID */
	__I,			/* Revision */
};
#undef __I
#undef __C
#undef __D
#undef __L

#define quads_to_bytes(_q) ((_q) * sizeof(u32))
#define bytes_to_quads(_b) DIV_ROUND_UP(_b, sizeof(u32))
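
/*
 * An Extended_ROM leaf's value.leaf.data points into its ROM cache buffer
 * rather than at a separately allocated block, so only ordinary leaf data
 * is freed here.
 */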
static void free_keyval(struct csr1212_keyval *kv)
{
	if ((kv->key.type == CSR1212_KV_TYPE_LEAF) &&
	    (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM))
		CSR1212_FREE(kv->value.leaf.data);

	CSR1212_FREE(kv);
}
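
/*
 * CRC-16 as used for IEEE 1212 / IEEE 1394 Configuration ROMs (the
 * CCITT/ITU-T polynomial x^16 + x^12 + x^5 + 1, processed four bits per
 * iteration over big-endian quadlets).  The result is returned already in
 * big-endian byte order so it can be stored directly into a ROM image.
 */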
static u16 csr1212_crc16(const u32 *buffer, size_t length)
{
	int shift;
	u32 data;
	u16 sum, crc = 0;

	for (; length; length--) {
		data = be32_to_cpu(*buffer);
		buffer++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return cpu_to_be16(crc);
}

/* Microsoft computes the CRC with the bytes in reverse order. */
static u16 csr1212_msft_crc16(const u32 *buffer, size_t length)
{
	int shift;
	u32 data;
	u16 sum, crc = 0;

	for (; length; length--) {
		data = le32_to_cpu(*buffer);
		buffer++;
		for (shift = 28; shift >= 0; shift -= 4) {
			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ (sum);
		}
		crc &= 0xffff;
	}

	return cpu_to_be16(crc);
}

static struct csr1212_dentry *
csr1212_find_keyval(struct csr1212_keyval *dir, struct csr1212_keyval *kv)
{
	struct csr1212_dentry *pos;

	for (pos = dir->value.directory.dentries_head;
	     pos != NULL; pos = pos->next)
		if (pos->kv == kv)
			return pos;
	return NULL;
}
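
/*
 * Look up an already-known keyval by its Config ROM byte offset.  During
 * parsing, keyvals are linked into a circular list headed by the root
 * directory, so the walk stops once it gets back to kv_list.
 */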
static struct csr1212_keyval *
csr1212_find_keyval_offset(struct csr1212_keyval *kv_list, u32 offset)
{
	struct csr1212_keyval *kv;

	for (kv = kv_list->next; kv && (kv != kv_list); kv = kv->next)
		if (kv->offset == offset)
			return kv;
	return NULL;
}

/* Creation Routines */

struct csr1212_csr *csr1212_create_csr(struct csr1212_bus_ops *ops,
				       size_t bus_info_size, void *private)
{
	struct csr1212_csr *csr;

	csr = CSR1212_MALLOC(sizeof(*csr));
	if (!csr)
		return NULL;

	csr->cache_head =
		csr1212_rom_cache_malloc(CSR1212_CONFIG_ROM_SPACE_OFFSET,
					 CSR1212_CONFIG_ROM_SPACE_SIZE);
	if (!csr->cache_head) {
		CSR1212_FREE(csr);
		return NULL;
	}

	/* The keyval key id is not used for the root node, but a valid key id
	 * that can be used for a directory needs to be passed to
	 * csr1212_new_directory(). */
	csr->root_kv = csr1212_new_directory(CSR1212_KV_ID_VENDOR);
	if (!csr->root_kv) {
		CSR1212_FREE(csr->cache_head);
		CSR1212_FREE(csr);
		return NULL;
	}

	csr->bus_info_data = csr->cache_head->data;
	csr->bus_info_len = bus_info_size;
	csr->crc_len = bus_info_size;
	csr->ops = ops;
	csr->private = private;
	csr->cache_tail = csr->cache_head;

	return csr;
}
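
/*
 * Initialize a local (host) CSR from bus info data.  @max_rom is the 2-bit
 * max_rom field of the bus info block; it is mapped to the largest block
 * read request size supported (4, 64, or 1024 bytes; the reserved value 3
 * maps to 0).
 */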
void csr1212_init_local_csr(struct csr1212_csr *csr,
			    const u32 *bus_info_data, int max_rom)
{
	static const int mr_map[] = { 4, 64, 1024, 0 };

	BUG_ON(max_rom & ~0x3);
	csr->max_rom = mr_map[max_rom];
	memcpy(csr->bus_info_data, bus_info_data, csr->bus_info_len);
}
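
/*
 * Allocate a keyval of the given type/key.  Key ids below 0x30 are checked
 * against csr1212_key_id_type_map so that, for example, a Descriptor cannot
 * be created as an immediate entry.  The new keyval starts with a single
 * reference held by the caller.
 */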
static struct csr1212_keyval *csr1212_new_keyval(u8 type, u8 key)
{
	struct csr1212_keyval *kv;

	if (key < 0x30 && ((csr1212_key_id_type_map[key] & (1 << type)) == 0))
		return NULL;

	kv = CSR1212_MALLOC(sizeof(*kv));
	if (!kv)
		return NULL;

	atomic_set(&kv->refcnt, 1);
	kv->key.type = type;
	kv->key.id = key;
	kv->associate = NULL;
	kv->next = NULL;
	kv->prev = NULL;
	kv->offset = 0;
	kv->valid = 0;
	return kv;
}

struct csr1212_keyval *csr1212_new_immediate(u8 key, u32 value)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_IMMEDIATE, key);
	if (!kv)
		return NULL;

	kv->value.immediate = value;
	kv->valid = 1;
	return kv;
}

static struct csr1212_keyval *
csr1212_new_leaf(u8 key, const void *data, size_t data_len)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF, key);
	if (!kv)
		return NULL;

	if (data_len > 0) {
		kv->value.leaf.data = CSR1212_MALLOC(data_len);
		if (!kv->value.leaf.data) {
			CSR1212_FREE(kv);
			return NULL;
		}

		if (data)
			memcpy(kv->value.leaf.data, data, data_len);
	} else {
		kv->value.leaf.data = NULL;
	}

	kv->value.leaf.len = bytes_to_quads(data_len);
	kv->offset = 0;
	kv->valid = 1;

	return kv;
}

static struct csr1212_keyval *
csr1212_new_csr_offset(u8 key, u32 csr_offset)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_CSR_OFFSET, key);
	if (!kv)
		return NULL;

	kv->value.csr_offset = csr_offset;

	kv->offset = 0;
	kv->valid = 1;
	return kv;
}

struct csr1212_keyval *csr1212_new_directory(u8 key)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_keyval(CSR1212_KV_TYPE_DIRECTORY, key);
	if (!kv)
		return NULL;

	kv->value.directory.len = 0;
	kv->offset = 0;
	kv->value.directory.dentries_head = NULL;
	kv->value.directory.dentries_tail = NULL;
	kv->valid = 1;
	return kv;
}
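
/*
 * Chain @associate to @kv so that it is emitted immediately after @kv in a
 * directory.  Only Descriptor, Dependent_Info, Extended_Key/Extended_Data
 * and vendor-dependent keys (id >= 0x30) may serve as associates, and the
 * Extended_Key_Specifier_ID -> Extended_Key -> Extended_Data ordering is
 * enforced by the BUG_ON.  Any previous associate is released and a
 * reference to the new one is taken.
 */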
void csr1212_associate_keyval(struct csr1212_keyval *kv,
			      struct csr1212_keyval *associate)
{
	BUG_ON(!kv || !associate || kv->key.id == CSR1212_KV_ID_DESCRIPTOR ||
	       (associate->key.id != CSR1212_KV_ID_DESCRIPTOR &&
		associate->key.id != CSR1212_KV_ID_DEPENDENT_INFO &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA &&
		associate->key.id < 0x30) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_KEY) ||
	       (kv->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		associate->key.id != CSR1212_KV_ID_EXTENDED_DATA) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_KEY &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) ||
	       (associate->key.id == CSR1212_KV_ID_EXTENDED_DATA &&
		kv->key.id != CSR1212_KV_ID_EXTENDED_KEY));

	if (kv->associate)
		csr1212_release_keyval(kv->associate);

	csr1212_keep_keyval(associate);
	kv->associate = associate;
}

static int __csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
						struct csr1212_keyval *kv,
						bool keep_keyval)
{
	struct csr1212_dentry *dentry;

	BUG_ON(!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY);

	dentry = CSR1212_MALLOC(sizeof(*dentry));
	if (!dentry)
		return -ENOMEM;

	if (keep_keyval)
		csr1212_keep_keyval(kv);
	dentry->kv = kv;

	dentry->next = NULL;
	dentry->prev = dir->value.directory.dentries_tail;

	if (!dir->value.directory.dentries_head)
		dir->value.directory.dentries_head = dentry;

	if (dir->value.directory.dentries_tail)
		dir->value.directory.dentries_tail->next = dentry;

	dir->value.directory.dentries_tail = dentry;

	return CSR1212_SUCCESS;
}

int csr1212_attach_keyval_to_directory(struct csr1212_keyval *dir,
				       struct csr1212_keyval *kv)
{
	return __csr1212_attach_keyval_to_directory(dir, kv, true);
}
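
/*
 * Per IEEE 1212, a descriptor leaf begins with one quadlet holding
 * descriptor_type (upper 8 bits) and specifier_ID (lower 24 bits); the
 * descriptor data follows.  The macros below access and update that first
 * quadlet in place.
 */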
#define CSR1212_DESCRIPTOR_LEAF_DATA(kv) \
	(&((kv)->value.leaf.data[1]))

#define CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, type) \
	((kv)->value.leaf.data[0] = \
	 cpu_to_be32(CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID(kv) | \
		     ((type) << CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT)))
#define CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, spec_id) \
	((kv)->value.leaf.data[0] = \
	 cpu_to_be32((CSR1212_DESCRIPTOR_LEAF_TYPE(kv) << \
		      CSR1212_DESCRIPTOR_LEAF_TYPE_SHIFT) | \
		     ((spec_id) & CSR1212_DESCRIPTOR_LEAF_SPECIFIER_ID_MASK)))

static struct csr1212_keyval *
csr1212_new_descriptor_leaf(u8 dtype, u32 specifier_id,
			    const void *data, size_t data_len)
{
	struct csr1212_keyval *kv;

	kv = csr1212_new_leaf(CSR1212_KV_ID_DESCRIPTOR, NULL,
			      data_len + CSR1212_DESCRIPTOR_LEAF_OVERHEAD);
	if (!kv)
		return NULL;

	kmemcheck_annotate_variable(kv->value.leaf.data[0]);
	CSR1212_DESCRIPTOR_LEAF_SET_TYPE(kv, dtype);
	CSR1212_DESCRIPTOR_LEAF_SET_SPECIFIER_ID(kv, specifier_id);

	if (data)
		memcpy(CSR1212_DESCRIPTOR_LEAF_DATA(kv), data, data_len);

	return kv;
}

/* Check if string conforms to minimal ASCII as per IEEE 1212 clause 7.4 */
static int csr1212_check_minimal_ascii(const char *s)
{
	static const char minimal_ascii_table[] = {
					/*  1   2   4   8  16  32  64  128 */
		128,			/* --, --, --, --, --, --, --, 07, */
		4 + 16 + 32,		/* --, --, 0a, --, 0C, 0D, --, --, */
		0,			/* --, --, --, --, --, --, --, --, */
		0,			/* --, --, --, --, --, --, --, --, */
		255 - 8 - 16,		/* 20, 21, 22, --, --, 25, 26, 27, */
		255,			/* 28, 29, 2a, 2b, 2c, 2d, 2e, 2f, */
		255,			/* 30, 31, 32, 33, 34, 35, 36, 37, */
		255,			/* 38, 39, 3a, 3b, 3c, 3d, 3e, 3f, */
		255,			/* 40, 41, 42, 43, 44, 45, 46, 47, */
		255,			/* 48, 49, 4a, 4b, 4c, 4d, 4e, 4f, */
		255,			/* 50, 51, 52, 53, 54, 55, 56, 57, */
		1 + 2 + 4 + 128,	/* 58, 59, 5a, --, --, --, --, 5f, */
		255 - 1,		/* --, 61, 62, 63, 64, 65, 66, 67, */
		255,			/* 68, 69, 6a, 6b, 6c, 6d, 6e, 6f, */
		255,			/* 70, 71, 72, 73, 74, 75, 76, 77, */
		1 + 2 + 4,		/* 78, 79, 7a, --, --, --, --, --, */
	};
	int i, j;

	for (; *s; s++) {
		i = *s >> 3;		/*  i = *s / 8;		*/
		j = 1 << (*s & 3);	/*  j = 1 << (*s % 8);	*/

		if (i >= ARRAY_SIZE(minimal_ascii_table) ||
		    !(minimal_ascii_table[i] & j))
			return -EINVAL;
	}
	return 0;
}

/* IEEE 1212 clause 7.5.4.1 textual descriptors (English, minimal ASCII) */
struct csr1212_keyval *csr1212_new_string_descriptor_leaf(const char *s)
{
	struct csr1212_keyval *kv;
	u32 *text;
	size_t str_len, quads;

	if (!s || !*s || csr1212_check_minimal_ascii(s))
		return NULL;

	str_len = strlen(s);
	quads = bytes_to_quads(str_len);
	kv = csr1212_new_descriptor_leaf(0, 0, NULL, quads_to_bytes(quads) +
					 CSR1212_TEXTUAL_DESCRIPTOR_LEAF_OVERHEAD);
	if (!kv)
		return NULL;

	kv->value.leaf.data[1] = 0;	/* width, character_set, language */
	text = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(kv);
	text[quads - 1] = 0;		/* padding */
	memcpy(text, s, str_len);

	return kv;
}


/* Destruction Routines */

void csr1212_detach_keyval_from_directory(struct csr1212_keyval *dir,
					  struct csr1212_keyval *kv)
{
	struct csr1212_dentry *dentry;

	if (!kv || !dir || dir->key.type != CSR1212_KV_TYPE_DIRECTORY)
		return;

	dentry = csr1212_find_keyval(dir, kv);
	if (!dentry)
		return;

	if (dentry->prev)
		dentry->prev->next = dentry->next;
	if (dentry->next)
		dentry->next->prev = dentry->prev;
	if (dir->value.directory.dentries_head == dentry)
		dir->value.directory.dentries_head = dentry->next;
	if (dir->value.directory.dentries_tail == dentry)
		dir->value.directory.dentries_tail = dentry->prev;

	CSR1212_FREE(dentry);

	csr1212_release_keyval(kv);
}

/* This function is used to free the memory taken by a keyval.  If the given
 * keyval is a directory type, then any keyvals contained in that directory
 * will be destroyed as well if no one holds a reference on them.  By means of
 * list manipulation, this routine will descend a directory structure in a
 * non-recursive manner. */
void csr1212_release_keyval(struct csr1212_keyval *kv)
{
	struct csr1212_keyval *k, *a;
	struct csr1212_dentry dentry;
	struct csr1212_dentry *head, *tail;

	if (!atomic_dec_and_test(&kv->refcnt))
		return;

	dentry.kv = kv;
	dentry.next = NULL;
	dentry.prev = NULL;

	head = &dentry;
	tail = head;

	while (head) {
		k = head->kv;

		while (k) {
			/* must not dec_and_test kv->refcnt again */
			if (k != kv && !atomic_dec_and_test(&k->refcnt))
				break;

			a = k->associate;

			if (k->key.type == CSR1212_KV_TYPE_DIRECTORY) {
				/* If the current entry is a directory, move all
				 * the entries to the destruction list. */
				if (k->value.directory.dentries_head) {
					tail->next =
					    k->value.directory.dentries_head;
					k->value.directory.dentries_head->prev =
					    tail;
					tail = k->value.directory.dentries_tail;
				}
			}
			free_keyval(k);
			k = a;
		}

		head = head->next;
		if (head) {
			if (head->prev && head->prev != &dentry)
				CSR1212_FREE(head->prev);
			head->prev = NULL;
		} else if (tail != &dentry) {
			CSR1212_FREE(tail);
		}
	}
}

void csr1212_destroy_csr(struct csr1212_csr *csr)
{
	struct csr1212_csr_rom_cache *c, *oc;
	struct csr1212_cache_region *cr, *ocr;

	csr1212_release_keyval(csr->root_kv);

	c = csr->cache_head;
	while (c) {
		oc = c;
		cr = c->filled_head;
		while (cr) {
			ocr = cr;
			cr = cr->next;
			CSR1212_FREE(ocr);
		}
		c = c->next;
		CSR1212_FREE(oc);
	}

	CSR1212_FREE(csr);
}


/* CSR Image Creation */
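
/*
 * Allocate a new Extended ROM cache: reserve an address range on the bus via
 * the bus ops, create an Extended_ROM leaf whose data points at the cache
 * buffer, attach that leaf to the root directory, and link the cache at the
 * tail of the csr's cache list.
 */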
static int csr1212_append_new_cache(struct csr1212_csr *csr, size_t romsize)
{
	struct csr1212_csr_rom_cache *cache;
	u64 csr_addr;

	BUG_ON(!csr || !csr->ops || !csr->ops->allocate_addr_range ||
	       !csr->ops->release_addr || csr->max_rom < 1);

	/* ROM size must be a multiple of csr->max_rom */
	romsize = (romsize + (csr->max_rom - 1)) & ~(csr->max_rom - 1);

	csr_addr = csr->ops->allocate_addr_range(romsize, csr->max_rom,
						 csr->private);
	if (csr_addr == CSR1212_INVALID_ADDR_SPACE)
		return -ENOMEM;

	if (csr_addr < CSR1212_REGISTER_SPACE_BASE) {
		/* Invalid address returned from allocate_addr_range(). */
		csr->ops->release_addr(csr_addr, csr->private);
		return -ENOMEM;
	}

	cache = csr1212_rom_cache_malloc(csr_addr - CSR1212_REGISTER_SPACE_BASE,
					 romsize);
	if (!cache) {
		csr->ops->release_addr(csr_addr, csr->private);
		return -ENOMEM;
	}

	cache->ext_rom = csr1212_new_keyval(CSR1212_KV_TYPE_LEAF,
					    CSR1212_KV_ID_EXTENDED_ROM);
	if (!cache->ext_rom) {
		csr->ops->release_addr(csr_addr, csr->private);
		CSR1212_FREE(cache);
		return -ENOMEM;
	}

	if (csr1212_attach_keyval_to_directory(csr->root_kv, cache->ext_rom) !=
	    CSR1212_SUCCESS) {
		csr1212_release_keyval(cache->ext_rom);
		csr->ops->release_addr(csr_addr, csr->private);
		CSR1212_FREE(cache);
		return -ENOMEM;
	}
	cache->ext_rom->offset = csr_addr - CSR1212_REGISTER_SPACE_BASE;
	cache->ext_rom->value.leaf.len = -1;
	cache->ext_rom->value.leaf.data = cache->data;

	/* Add cache to tail of cache list */
	cache->prev = csr->cache_tail;
	csr->cache_tail->next = cache;
	csr->cache_tail = cache;
	return CSR1212_SUCCESS;
}

static void csr1212_remove_cache(struct csr1212_csr *csr,
				 struct csr1212_csr_rom_cache *cache)
{
	if (csr->cache_head == cache)
		csr->cache_head = cache->next;
	if (csr->cache_tail == cache)
		csr->cache_tail = cache->prev;

	if (cache->prev)
		cache->prev->next = cache->next;
	if (cache->next)
		cache->next->prev = cache->prev;

	if (cache->ext_rom) {
		csr1212_detach_keyval_from_directory(csr->root_kv,
						     cache->ext_rom);
		csr1212_release_keyval(cache->ext_rom);
	}

	CSR1212_FREE(cache);
}
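
/*
 * First pass of image generation: walk a directory's entries (including
 * their associate chains), count how many directory entries it will need,
 * and append every referenced leaf and subdirectory to a flat layout list
 * via *layout_tail.  Repeated Extended_Key_Specifier_ID / Extended_Key
 * immediates and Extended ROM leaves are special-cased.
 */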
static int csr1212_generate_layout_subdir(struct csr1212_keyval *dir,
					  struct csr1212_keyval **layout_tail)
{
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *dkv;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;
	int num_entries = 0;

	for (dentry = dir->value.directory.dentries_head; dentry;
	     dentry = dentry->next) {
		for (dkv = dentry->kv; dkv; dkv = dkv->associate) {
			/* Special Case: Extended Key Specifier_ID */
			if (dkv->key.id ==
			    CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL)
					last_extkey_spec = dkv;
				else if (dkv->value.immediate !=
					 last_extkey_spec->value.immediate)
					last_extkey_spec = dkv;
				else
					continue;

			/* Special Case: Extended Key */
			} else if (dkv->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL)
					last_extkey = dkv;
				else if (dkv->value.immediate !=
					 last_extkey->value.immediate)
					last_extkey = dkv;
				else
					continue;
			}

			num_entries += 1;

			switch (dkv->key.type) {
			default:
			case CSR1212_KV_TYPE_IMMEDIATE:
			case CSR1212_KV_TYPE_CSR_OFFSET:
				break;
			case CSR1212_KV_TYPE_LEAF:
			case CSR1212_KV_TYPE_DIRECTORY:
				/* Remove from list */
				if (dkv->prev && (dkv->prev->next == dkv))
					dkv->prev->next = dkv->next;
				if (dkv->next && (dkv->next->prev == dkv))
					dkv->next->prev = dkv->prev;
				//if (dkv == *layout_tail)
				//	*layout_tail = dkv->prev;

				/* Special case: Extended ROM leafs */
				if (dkv->key.id == CSR1212_KV_ID_EXTENDED_ROM) {
					dkv->value.leaf.len = -1;
					/* Don't add Extended ROM leafs in the
					 * layout list, they are handled
					 * differently. */
					break;
				}

				/* Add to tail of list */
				dkv->next = NULL;
				dkv->prev = *layout_tail;
				(*layout_tail)->next = dkv;
				*layout_tail = dkv;
				break;
			}
		}
	}
	return num_entries;
}

static size_t csr1212_generate_layout_order(struct csr1212_keyval *kv)
{
	struct csr1212_keyval *ltail = kv;
	size_t agg_size = 0;

	while (kv) {
		switch (kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.leaf.len + 1;
			break;
		case CSR1212_KV_TYPE_DIRECTORY:
			kv->value.directory.len =
				csr1212_generate_layout_subdir(kv, &ltail);
			/* Add 1 quadlet for crc/len field */
			agg_size += kv->value.directory.len + 1;
			break;
		}
		kv = kv->next;
	}
	return quads_to_bytes(agg_size);
}
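
/*
 * Second pass: assign a Config ROM byte offset to each keyval on the layout
 * list, packing as many as fit into @cache starting at @start_pos.  Returns
 * the first keyval that did not fit (to be placed in the next cache), or
 * NULL once the whole list has been positioned.
 */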
static struct csr1212_keyval *
csr1212_generate_positions(struct csr1212_csr_rom_cache *cache,
			   struct csr1212_keyval *start_kv, int start_pos)
{
	struct csr1212_keyval *kv = start_kv;
	struct csr1212_keyval *okv = start_kv;
	int pos = start_pos;
	int kv_len = 0, okv_len = 0;

	cache->layout_head = kv;

	while (kv && pos < cache->size) {
		/* Special case: Extended ROM leafs */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
			kv->offset = cache->offset + pos;

		switch (kv->key.type) {
		case CSR1212_KV_TYPE_LEAF:
			kv_len = kv->value.leaf.len;
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			kv_len = kv->value.directory.len;
			break;

		default:
			/* Should never get here */
			WARN_ON(1);
			break;
		}

		pos += quads_to_bytes(kv_len + 1);

		if (pos <= cache->size) {
			okv = kv;
			okv_len = kv_len;
			kv = kv->next;
		}
	}

	cache->layout_tail = okv;
	cache->len = okv->offset - cache->offset + quads_to_bytes(okv_len + 1);

	return kv;
}
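
/*
 * A directory entry quadlet is laid out as:
 *   bits 31..30  key type (immediate, CSR offset, leaf, directory)
 *   bits 29..24  key id
 *   bits 23..0   value (immediate value, CSR offset, or quadlet distance to
 *                the referenced leaf/directory)
 */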
#define CSR1212_KV_KEY_SHIFT		24
#define CSR1212_KV_KEY_TYPE_SHIFT	6
#define CSR1212_KV_KEY_ID_MASK		0x3f
#define CSR1212_KV_KEY_TYPE_MASK	0x3	/* after shift */

static void
csr1212_generate_tree_subdir(struct csr1212_keyval *dir, u32 *data_buffer)
{
	struct csr1212_dentry *dentry;
	struct csr1212_keyval *last_extkey_spec = NULL;
	struct csr1212_keyval *last_extkey = NULL;
	int index = 0;

	for (dentry = dir->value.directory.dentries_head;
	     dentry;
	     dentry = dentry->next) {
		struct csr1212_keyval *a;

		for (a = dentry->kv; a; a = a->associate) {
			u32 value = 0;

			/* Special Case: Extended Key Specifier_ID */
			if (a->key.id ==
			    CSR1212_KV_ID_EXTENDED_KEY_SPECIFIER_ID) {
				if (last_extkey_spec == NULL)
					last_extkey_spec = a;
				else if (a->value.immediate !=
					 last_extkey_spec->value.immediate)
					last_extkey_spec = a;
				else
					continue;

			/* Special Case: Extended Key */
			} else if (a->key.id == CSR1212_KV_ID_EXTENDED_KEY) {
				if (last_extkey == NULL)
					last_extkey = a;
				else if (a->value.immediate !=
					 last_extkey->value.immediate)
					last_extkey = a;
				else
					continue;
			}

			switch (a->key.type) {
			case CSR1212_KV_TYPE_IMMEDIATE:
				value = a->value.immediate;
				break;
			case CSR1212_KV_TYPE_CSR_OFFSET:
				value = a->value.csr_offset;
				break;
			case CSR1212_KV_TYPE_LEAF:
				value = a->offset;
				value -= dir->offset + quads_to_bytes(1+index);
				value = bytes_to_quads(value);
				break;
			case CSR1212_KV_TYPE_DIRECTORY:
				value = a->offset;
				value -= dir->offset + quads_to_bytes(1+index);
				value = bytes_to_quads(value);
				break;
			default:
				/* Should never get here */
				WARN_ON(1);
				break;
			}

			value |= (a->key.id & CSR1212_KV_KEY_ID_MASK) <<
				 CSR1212_KV_KEY_SHIFT;
			value |= (a->key.type & CSR1212_KV_KEY_TYPE_MASK) <<
				 (CSR1212_KV_KEY_SHIFT +
				  CSR1212_KV_KEY_TYPE_SHIFT);
			data_buffer[index] = cpu_to_be32(value);
			index++;
		}
	}
}

struct csr1212_keyval_img {
	u16 length;
	u16 crc;

	/* Must be last */
	u32 data[0];	/* older gcc can't handle [] which is standard */
};
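
/*
 * Serialize every keyval on this cache's layout list into the cache buffer:
 * each leaf or directory is written as a csr1212_keyval_img (16-bit length
 * in quadlets, 16-bit CRC, then data) and is then unlinked from the layout
 * list.  Extended ROM leaf payloads are not copied because their data
 * already lives in the cache buffer of their own cache.
 */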
static void csr1212_fill_cache(struct csr1212_csr_rom_cache *cache)
{
	struct csr1212_keyval *kv, *nkv;
	struct csr1212_keyval_img *kvi;

	for (kv = cache->layout_head;
	     kv != cache->layout_tail->next;
	     kv = nkv) {
		kvi = (struct csr1212_keyval_img *)(cache->data +
				bytes_to_quads(kv->offset - cache->offset));
		switch (kv->key.type) {
		default:
		case CSR1212_KV_TYPE_IMMEDIATE:
		case CSR1212_KV_TYPE_CSR_OFFSET:
			/* Should never get here */
			WARN_ON(1);
			break;

		case CSR1212_KV_TYPE_LEAF:
			/* Don't copy over Extended ROM areas, they are
			 * already filled out! */
			if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
				memcpy(kvi->data, kv->value.leaf.data,
				       quads_to_bytes(kv->value.leaf.len));

			kvi->length = cpu_to_be16(kv->value.leaf.len);
			kvi->crc = csr1212_crc16(kvi->data, kv->value.leaf.len);
			break;

		case CSR1212_KV_TYPE_DIRECTORY:
			csr1212_generate_tree_subdir(kv, kvi->data);

			kvi->length = cpu_to_be16(kv->value.directory.len);
			kvi->crc = csr1212_crc16(kvi->data,
						 kv->value.directory.len);
			break;
		}

		nkv = kv->next;
		if (kv->prev)
			kv->prev->next = NULL;
		if (kv->next)
			kv->next->prev = NULL;
		kv->prev = NULL;
		kv->next = NULL;
	}
}

/* This size is arbitrarily chosen.
 * The struct overhead is subtracted for more economic allocations. */
#define CSR1212_EXTENDED_ROM_SIZE	(2048 - sizeof(struct csr1212_csr_rom_cache))
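
/*
 * Build the complete Config ROM image: compute the bus info block header and
 * CRC, lay out all keyvals across the available caches (appending Extended
 * ROM caches on demand and dropping unused ones), then serialize each cache,
 * walking the cache list backwards so that the Extended ROM areas end up
 * with correct length and CRC headers.
 */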
int csr1212_generate_csr_image(struct csr1212_csr *csr)
{
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_csr_rom_cache *cache;
	struct csr1212_keyval *kv;
	size_t agg_size;
	int ret;
	int init_offset;

	BUG_ON(!csr);

	cache = csr->cache_head;

	bi = (struct csr1212_bus_info_block_img *)cache->data;

	bi->length = bytes_to_quads(csr->bus_info_len) - 1;
	bi->crc_length = bi->length;
	bi->crc = csr1212_crc16(bi->data, bi->crc_length);

	csr->root_kv->next = NULL;
	csr->root_kv->prev = NULL;

	agg_size = csr1212_generate_layout_order(csr->root_kv);

	init_offset = csr->bus_info_len;

	for (kv = csr->root_kv, cache = csr->cache_head;
	     kv;
	     cache = cache->next) {
		if (!cache) {
			/* Estimate approximate number of additional cache
			 * regions needed (it assumes that the cache holding
			 * the first 1K Config ROM space always exists). */
			int est_c = agg_size / (CSR1212_EXTENDED_ROM_SIZE -
						(2 * sizeof(u32))) + 1;

			/* Add additional cache regions, extras will be
			 * removed later */
			for (; est_c; est_c--) {
				ret = csr1212_append_new_cache(csr,
						CSR1212_EXTENDED_ROM_SIZE);
				if (ret != CSR1212_SUCCESS)
					return ret;
			}
			/* Need to re-layout for additional cache regions */
			agg_size = csr1212_generate_layout_order(csr->root_kv);
			kv = csr->root_kv;
			cache = csr->cache_head;
			init_offset = csr->bus_info_len;
		}
		kv = csr1212_generate_positions(cache, kv, init_offset);
		agg_size -= cache->len;
		init_offset = sizeof(u32);
	}

	/* Remove unused, excess cache regions */
	while (cache) {
		struct csr1212_csr_rom_cache *oc = cache;

		cache = cache->next;
		csr1212_remove_cache(csr, oc);
	}

	/* Go through the list backward so that when done, the correct CRC
	 * will be calculated for the Extended ROM areas. */
	for (cache = csr->cache_tail; cache; cache = cache->prev) {
		/* Only Extended ROM caches should have this set. */
		if (cache->ext_rom) {
			int leaf_size;

			/* Make sure the Extended ROM leaf is a multiple of
			 * max_rom in size. */
			BUG_ON(csr->max_rom < 1);
			leaf_size = (cache->len + (csr->max_rom - 1)) &
				~(csr->max_rom - 1);

			/* Zero out the unused ROM region */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       leaf_size - cache->len);

			/* Subtract leaf header */
			leaf_size -= sizeof(u32);

			/* Update the Extended ROM leaf length */
			cache->ext_rom->value.leaf.len =
				bytes_to_quads(leaf_size);
		} else {
			/* Zero out the unused ROM region */
			memset(cache->data + bytes_to_quads(cache->len), 0x00,
			       cache->size - cache->len);
		}

		/* Copy the data into the cache buffer */
		csr1212_fill_cache(cache);

		if (cache != csr->cache_head) {
			/* Set the length and CRC of the extended ROM. */
			struct csr1212_keyval_img *kvi =
				(struct csr1212_keyval_img *)cache->data;
			u16 len = bytes_to_quads(cache->len) - 1;

			kvi->length = cpu_to_be16(len);
			kvi->crc = csr1212_crc16(kvi->data, len);
		}
	}

	return CSR1212_SUCCESS;
}
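
/*
 * Copy @len bytes starting at Config ROM offset @offset out of the generated
 * image.  The requested range must lie entirely within one cache; otherwise
 * -ENOENT is returned.
 */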
int csr1212_read(struct csr1212_csr *csr, u32 offset, void *buffer, u32 len)
{
	struct csr1212_csr_rom_cache *cache;

	for (cache = csr->cache_head; cache; cache = cache->next)
		if (offset >= cache->offset &&
		    (offset + len) <= (cache->offset + cache->size)) {
			memcpy(buffer,
			       &cache->data[bytes_to_quads(offset - cache->offset)],
			       len);
			return CSR1212_SUCCESS;
		}

	return -ENOENT;
}

/*
 * Apparently there are many different wrong implementations of the CRC
 * algorithm.  We don't fail, we just warn... approximately once per GUID.
 */
static void
csr1212_check_crc(const u32 *buffer, size_t length, u16 crc, __be32 *guid)
{
	static u64 last_bad_eui64;
	u64 eui64 = ((u64)be32_to_cpu(guid[0]) << 32) | be32_to_cpu(guid[1]);

	if (csr1212_crc16(buffer, length) == crc ||
	    csr1212_msft_crc16(buffer, length) == crc ||
	    eui64 == last_bad_eui64)
		return;

	printk(KERN_DEBUG "ieee1394: config ROM CRC error\n");
	last_bad_eui64 = eui64;
}

/* Parse a chunk of data as a Config ROM */
static int csr1212_parse_bus_info_block(struct csr1212_csr *csr)
{
	struct csr1212_bus_info_block_img *bi;
	struct csr1212_cache_region *cr;
	int i;
	int ret;

	for (i = 0; i < csr->bus_info_len; i += sizeof(u32)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
					 &csr->cache_head->data[bytes_to_quads(i)],
					 csr->private);
		if (ret != CSR1212_SUCCESS)
			return ret;

		/* check ROM header's info_length */
		if (i == 0 &&
		    be32_to_cpu(csr->cache_head->data[0]) >> 24 !=
		    bytes_to_quads(csr->bus_info_len) - 1)
			return -EINVAL;
	}

	bi = (struct csr1212_bus_info_block_img *)csr->cache_head->data;
	csr->crc_len = quads_to_bytes(bi->crc_length);

	/* IEEE 1212 recommends that crc_len be equal to bus_info_len, but that
	 * is not always the case, so read the rest of the crc area 1 quadlet at
	 * a time. */
	for (i = csr->bus_info_len; i <= csr->crc_len; i += sizeof(u32)) {
		ret = csr->ops->bus_read(csr, CSR1212_CONFIG_ROM_SPACE_BASE + i,
					 &csr->cache_head->data[bytes_to_quads(i)],
					 csr->private);
		if (ret != CSR1212_SUCCESS)
			return ret;
	}

	csr1212_check_crc(bi->data, bi->crc_length, bi->crc,
			  &csr->bus_info_data[3]);

	cr = CSR1212_MALLOC(sizeof(*cr));
	if (!cr)
		return -ENOMEM;

	cr->next = NULL;
	cr->prev = NULL;
	cr->offset_start = 0;
	cr->offset_end = csr->crc_len + 4;

	csr->cache_head->filled_head = cr;
	csr->cache_head->filled_tail = cr;

	return CSR1212_SUCCESS;
}

#define CSR1212_KV_KEY(q)	(be32_to_cpu(q) >> CSR1212_KV_KEY_SHIFT)
#define CSR1212_KV_KEY_TYPE(q)	(CSR1212_KV_KEY(q) >> CSR1212_KV_KEY_TYPE_SHIFT)
#define CSR1212_KV_KEY_ID(q)	(CSR1212_KV_KEY(q) & CSR1212_KV_KEY_ID_MASK)
#define CSR1212_KV_VAL_MASK	0xffffff
#define CSR1212_KV_VAL(q)	(be32_to_cpu(q) & CSR1212_KV_VAL_MASK)
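
/*
 * Parse one directory entry quadlet @ki found at ROM position @kv_pos.
 * Immediate and CSR-offset entries become new keyvals right away; leaf and
 * directory entries are resolved against keyvals already seen at the same
 * offset, or created as empty placeholders (valid = 0) and linked into the
 * circular list of pending keyvals so they can be read later.  The resulting
 * keyval is then attached to @dir.
 */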
static int
csr1212_parse_dir_entry(struct csr1212_keyval *dir, u32 ki, u32 kv_pos)
{
	int ret = CSR1212_SUCCESS;
	struct csr1212_keyval *k = NULL;
	u32 offset;
	bool keep_keyval = true;

	switch (CSR1212_KV_KEY_TYPE(ki)) {
	case CSR1212_KV_TYPE_IMMEDIATE:
		k = csr1212_new_immediate(CSR1212_KV_KEY_ID(ki),
					  CSR1212_KV_VAL(ki));
		if (!k) {
			ret = -ENOMEM;
			goto out;
		}
		/* Don't keep local reference when parsing. */
		keep_keyval = false;
		break;

	case CSR1212_KV_TYPE_CSR_OFFSET:
		k = csr1212_new_csr_offset(CSR1212_KV_KEY_ID(ki),
					   CSR1212_KV_VAL(ki));
		if (!k) {
			ret = -ENOMEM;
			goto out;
		}
		/* Don't keep local reference when parsing. */
		keep_keyval = false;
		break;

	default:
		/* Compute the offset from 0xffff f000 0000. */
		offset = quads_to_bytes(CSR1212_KV_VAL(ki)) + kv_pos;
		if (offset == kv_pos) {
			/* Uh-oh.  Can't have a relative offset of 0 for Leaves
			 * or Directories.  The Config ROM image is most likely
			 * messed up, so we'll just abort here. */
			ret = -EIO;
			goto out;
		}

		k = csr1212_find_keyval_offset(dir, offset);

		if (k)
			break;	/* Found it. */

		if (CSR1212_KV_KEY_TYPE(ki) == CSR1212_KV_TYPE_DIRECTORY)
			k = csr1212_new_directory(CSR1212_KV_KEY_ID(ki));
		else
			k = csr1212_new_leaf(CSR1212_KV_KEY_ID(ki), NULL, 0);

		if (!k) {
			ret = -ENOMEM;
			goto out;
		}
		/* Don't keep local reference when parsing. */
		keep_keyval = false;
		/* Contents not read yet so it's not valid. */
		k->valid = 0;
		k->offset = offset;

		k->prev = dir;
		k->next = dir->next;
		dir->next->prev = k;
		dir->next = k;
	}
	ret = __csr1212_attach_keyval_to_directory(dir, k, keep_keyval);
out:
	if (ret != CSR1212_SUCCESS && k != NULL)
		free_keyval(k);
	return ret;
}
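
/*
 * Interpret the raw image data behind @kv (already present in @cache):
 * directories get each non-null entry parsed via csr1212_parse_dir_entry(),
 * ordinary leaves get their payload copied out, and the keyval is marked
 * valid.  Extended ROM leaf payloads stay in the cache buffer.
 */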
int csr1212_parse_keyval(struct csr1212_keyval *kv,
			 struct csr1212_csr_rom_cache *cache)
{
	struct csr1212_keyval_img *kvi;
	int i;
	int ret = CSR1212_SUCCESS;
	int kvi_len;

	kvi = (struct csr1212_keyval_img *)
		&cache->data[bytes_to_quads(kv->offset - cache->offset)];
	kvi_len = be16_to_cpu(kvi->length);

	/* GUID is wrong in here in case of extended ROM.  We don't care. */
	csr1212_check_crc(kvi->data, kvi_len, kvi->crc, &cache->data[3]);

	switch (kv->key.type) {
	case CSR1212_KV_TYPE_DIRECTORY:
		for (i = 0; i < kvi_len; i++) {
			u32 ki = kvi->data[i];

			/* Some devices put null entries in their unit
			 * directories.  If we come across such an entry,
			 * then skip it. */
			if (ki == 0x0)
				continue;
			ret = csr1212_parse_dir_entry(kv, ki,
					kv->offset + quads_to_bytes(i + 1));
		}
		kv->value.directory.len = kvi_len;
		break;

	case CSR1212_KV_TYPE_LEAF:
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM) {
			size_t size = quads_to_bytes(kvi_len);

			kv->value.leaf.data = CSR1212_MALLOC(size);
			if (!kv->value.leaf.data) {
				ret = -ENOMEM;
				goto out;
			}

			kv->value.leaf.len = kvi_len;
			memcpy(kv->value.leaf.data, kvi->data, size);
		}
		break;
	}

	kv->valid = 1;
out:
	return ret;
}
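
/*
 * Fetch the raw data behind @kv from the device if it is not already cached,
 * then parse it.  Extended ROM leaves get a cache of their own, created on
 * first access; for everything else the relevant byte range is read into the
 * owning cache max_rom bytes at a time, with the filled_head/filled_tail
 * region list tracking (and coalescing) the ranges already read.
 */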
static int
csr1212_read_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
{
	struct csr1212_cache_region *cr, *ncr, *newcr = NULL;
	struct csr1212_keyval_img *kvi = NULL;
	struct csr1212_csr_rom_cache *cache;
	int cache_index;
	u64 addr;
	u32 *cache_ptr;
	u16 kv_len = 0;

	BUG_ON(!csr || !kv || csr->max_rom < 1);

	/* First find which cache the data should be in (or go in if not read
	 * yet). */
	for (cache = csr->cache_head; cache; cache = cache->next)
		if (kv->offset >= cache->offset &&
		    kv->offset < (cache->offset + cache->size))
			break;

	if (!cache) {
		u32 q, cache_size;

		/* Only create a new cache for Extended ROM leaves. */
		if (kv->key.id != CSR1212_KV_ID_EXTENDED_ROM)
			return -EINVAL;

		if (csr->ops->bus_read(csr,
				       CSR1212_REGISTER_SPACE_BASE + kv->offset,
				       &q, csr->private))
			return -EIO;

		kv->value.leaf.len = be32_to_cpu(q) >> 16;

		cache_size = (quads_to_bytes(kv->value.leaf.len + 1) +
			      (csr->max_rom - 1)) & ~(csr->max_rom - 1);

		cache = csr1212_rom_cache_malloc(kv->offset, cache_size);
		if (!cache)
			return -ENOMEM;

		kv->value.leaf.data = &cache->data[1];
		csr->cache_tail->next = cache;
		cache->prev = csr->cache_tail;
		cache->next = NULL;
		csr->cache_tail = cache;
		cache->filled_head =
			CSR1212_MALLOC(sizeof(*cache->filled_head));
		if (!cache->filled_head)
			return -ENOMEM;

		cache->filled_head->offset_start = 0;
		cache->filled_head->offset_end = sizeof(u32);
		cache->filled_tail = cache->filled_head;
		cache->filled_head->next = NULL;
		cache->filled_head->prev = NULL;
		cache->data[0] = q;

		/* Don't read the entire extended ROM now.  Pieces of it will
		 * be read when entries inside it are read. */
		return csr1212_parse_keyval(kv, cache);
	}

	cache_index = kv->offset - cache->offset;

	/* Now search read portions of the cache to see if it is there. */
	for (cr = cache->filled_head; cr; cr = cr->next) {
		if (cache_index < cr->offset_start) {
			newcr = CSR1212_MALLOC(sizeof(*newcr));
			if (!newcr)
				return -ENOMEM;

			newcr->offset_start = cache_index & ~(csr->max_rom - 1);
			newcr->offset_end = newcr->offset_start;
			newcr->next = cr;
			newcr->prev = cr->prev;
			cr->prev = newcr;
			cr = newcr;
			break;
		} else if ((cache_index >= cr->offset_start) &&
			   (cache_index < cr->offset_end)) {
			kvi = (struct csr1212_keyval_img *)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
			break;
		} else if (cache_index == cr->offset_end) {
			break;
		}
	}

	if (!cr) {
		cr = cache->filled_tail;
		newcr = CSR1212_MALLOC(sizeof(*newcr));
		if (!newcr)
			return -ENOMEM;

		newcr->offset_start = cache_index & ~(csr->max_rom - 1);
		newcr->offset_end = newcr->offset_start;
		newcr->prev = cr;
		newcr->next = cr->next;
		cr->next = newcr;
		cr = newcr;
		cache->filled_tail = newcr;
	}

	while (!kvi || cr->offset_end < cache_index + kv_len) {
		cache_ptr = &cache->data[bytes_to_quads(cr->offset_end &
							~(csr->max_rom - 1))];

		addr = (CSR1212_CSR_ARCH_REG_SPACE_BASE + cache->offset +
			cr->offset_end) & ~(csr->max_rom - 1);

		if (csr->ops->bus_read(csr, addr, cache_ptr, csr->private))
			return -EIO;

		cr->offset_end += csr->max_rom - (cr->offset_end &
						  (csr->max_rom - 1));

		if (!kvi && (cr->offset_end > cache_index)) {
			kvi = (struct csr1212_keyval_img *)
				(&cache->data[bytes_to_quads(cache_index)]);
			kv_len = quads_to_bytes(be16_to_cpu(kvi->length) + 1);
		}

		if ((kv_len + (kv->offset - cache->offset)) > cache->size) {
			/* The Leaf or Directory claims its length extends
			 * beyond the ConfigROM image region and thus beyond the
			 * end of our cache region.  Therefore, we abort now
			 * rather than seg faulting later. */
			return -EIO;
		}

		ncr = cr->next;

		if (ncr && (cr->offset_end >= ncr->offset_start)) {
			/* consolidate region entries */
			ncr->offset_start = cr->offset_start;

			if (cr->prev)
				cr->prev->next = cr->next;
			ncr->prev = cr->prev;
			if (cache->filled_head == cr)
				cache->filled_head = ncr;
			CSR1212_FREE(cr);
			cr = ncr;
		}
	}

	return csr1212_parse_keyval(kv, cache);
}
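
/*
 * Return @kv with its contents loaded, reading and parsing it on demand if
 * it has not been fetched from the device yet; returns NULL on read failure.
 */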
struct csr1212_keyval *
csr1212_get_keyval(struct csr1212_csr *csr, struct csr1212_keyval *kv)
{
	if (!kv)
		return NULL;
	if (!kv->valid)
		if (csr1212_read_keyval(csr, kv) != CSR1212_SUCCESS)
			return NULL;
	return kv;
}

int csr1212_parse_csr(struct csr1212_csr *csr)
{
	struct csr1212_dentry *dentry;
	int ret;

	BUG_ON(!csr || !csr->ops || !csr->ops->bus_read);

	ret = csr1212_parse_bus_info_block(csr);
	if (ret != CSR1212_SUCCESS)
		return ret;

	/*
	 * There has been a buggy firmware with bus_info_block.max_rom > 0
	 * spotted which actually only supported quadlet read requests to the
	 * config ROM.  Therefore read everything quadlet by quadlet regardless
	 * of what the bus info block says.
	 */
	csr->max_rom = 4;

	csr->cache_head->layout_head = csr->root_kv;
	csr->cache_head->layout_tail = csr->root_kv;

	csr->root_kv->offset = (CSR1212_CONFIG_ROM_SPACE_BASE & 0xffff) +
		csr->bus_info_len;

	csr->root_kv->valid = 0;
	csr->root_kv->next = csr->root_kv;
	csr->root_kv->prev = csr->root_kv;
	ret = csr1212_read_keyval(csr, csr->root_kv);
	if (ret != CSR1212_SUCCESS)
		return ret;

	/* Scan through the Root directory finding all extended ROM regions
	 * and make cache regions for them */
	for (dentry = csr->root_kv->value.directory.dentries_head;
	     dentry; dentry = dentry->next) {
		if (dentry->kv->key.id == CSR1212_KV_ID_EXTENDED_ROM &&
		    !dentry->kv->valid) {
			ret = csr1212_read_keyval(csr, dentry->kv);
			if (ret != CSR1212_SUCCESS)
				return ret;
		}
	}

	return CSR1212_SUCCESS;
}