PageRenderTime 71ms CodeModel.GetById 18ms RepoModel.GetById 0ms app.codeStats 0ms

/stubdom/vtpmmgr/disk_write.c

https://gitlab.com/martyros/xen
C | 419 lines | 289 code | 78 blank | 52 comment | 39 complexity | b8431fb8741ec174c2eff9067627e515 MD5 | raw file
  1. #include <console.h>
  2. #include <unistd.h>
  3. #include <errno.h>
  4. #include <string.h>
  5. #include <inttypes.h>
  6. #include <stdlib.h>
  7. #include <stdbool.h>
  8. #include <mini-os/byteorder.h>
  9. #include "vtpm_manager.h"
  10. #include "log.h"
  11. #include "uuid.h"
  12. #include "vtpmmgr.h"
  13. #include "vtpm_disk.h"
  14. #include "disk_tpm.h"
  15. #include "disk_io.h"
  16. #include "disk_crypto.h"
  17. #include "disk_format.h"
  18. #include "mgmt_authority.h"
  19. static void disk_write_crypt_sector(sector_t *dst, const void *data, size_t size, const struct mem_tpm_mgr *mgr)
  20. {
  21. struct disk_crypt_sector_plain *sector = disk_write_buf();
  22. *dst = disk_find_free(mgr);
  23. aes_encrypt_ctr(sector->iv_data, sizeof(sector->iv_data), data, size, &mgr->tm_key_e);
  24. aes_cmac(&sector->mac, sector->data, sizeof(sector->data), &mgr->tm_key_e);
  25. disk_write_sector(*dst, sector, sizeof(*sector));
  26. }
  27. /*
  28. * Mark unchanged sectors on disk as being used
  29. */
  30. static void disk_populate_used_vtpm(const struct mem_vtpm_page *src, const struct mem_tpm_mgr *mgr)
  31. {
  32. if (be32_native(src->disk_loc) != 0)
  33. disk_set_used(src->disk_loc, mgr);
  34. }
  35. /*
  36. * Write out a vTPM page to disk, doing nothing if the existing copy is valid
  37. */
  38. static void disk_write_vtpm_page(struct mem_vtpm_page *dst, const aes_context *auth_key,
  39. const struct mem_tpm_mgr *mgr)
  40. {
  41. struct disk_vtpm_sector pt;
  42. int i;
  43. memset(&pt, 0, sizeof(pt));
  44. if (be32_native(dst->disk_loc) != 0)
  45. return;
  46. for(i=0; i < dst->size; i++) {
  47. memcpy(pt.header[i].uuid, dst->vtpms[i]->uuid, 16);
  48. memcpy(pt.data[i].data, dst->vtpms[i]->data, 64);
  49. pt.header[i].flags = native_be32(dst->vtpms[i]->flags & VTPM_FLAG_DISK_MASK);
  50. }
  51. aes_encrypt_ctr(&pt.iv, sizeof(pt.data) + 16, &pt.data, sizeof(pt.data), auth_key);
  52. sha256(&dst->disk_hash, &pt, sizeof(pt));
  53. disk_write_crypt_sector(&dst->disk_loc, &pt, sizeof(pt), mgr);
  54. }
  55. /*
  56. * Generate TPM seal blobs for a group's keys; do nothing if existing copy is valid
  57. */
  58. static void generate_group_seals(struct mem_group *src, const struct mem_tpm_mgr *parent)
  59. {
  60. int i;
  61. struct disk_group_sealed_data sblob;
  62. // previous seals are still valid, skip talking to the TPM
  63. if (src->flags & MEM_GROUP_FLAG_SEAL_VALID)
  64. return;
  65. memcpy(&sblob.magic, DISK_GROUP_BOUND_MAGIC, 4);
  66. memcpy(sblob.tpm_manager_uuid, parent->uuid, 16);
  67. memcpy(&sblob.aik_authdata, &src->aik_authdata, 20);
  68. memcpy(&sblob.group_key, &src->group_key, 16);
  69. memcpy(&sblob.rollback_mac_key, &src->rollback_mac_key, 16);
  70. /* TODO support for more than NR_SEALS_PER_GROUP seals */
  71. if (src->nr_seals > NR_SEALS_PER_GROUP)
  72. abort();
  73. for(i=0; i < src->nr_seals; i++) {
  74. struct disk_seal_entry *dst = &src->seal_bits.entry[i];
  75. dst->pcr_selection = src->seals[i].pcr_selection;
  76. memcpy(&dst->digest_release, &src->seals[i].digest_release, 20);
  77. TPM_pcr_digest(&dst->digest_at_seal, dst->pcr_selection);
  78. /*TPM 2.0 bind | TPM 1.x seal*/
  79. if (hw_is_tpm2())
  80. TPM2_disk_bind(dst, &sblob, sizeof(sblob));
  81. else
  82. TPM_disk_seal(dst, &sblob, sizeof(sblob));
  83. }
  84. src->seal_bits.nr_cfgs = native_be32(src->nr_seals);
  85. src->flags |= MEM_GROUP_FLAG_SEAL_VALID;
  86. }
  87. /*
  88. * Mark unchanged sectors on disk as being used
  89. */
  90. static void disk_populate_used_group(const struct mem_group_hdr *src, const struct mem_tpm_mgr *mgr)
  91. {
  92. int i;
  93. struct mem_group *group = src->v;
  94. if (be32_native(src->disk_loc) != 0) {
  95. // entire group is unchanged - mark group, itree, and vtpm sectors
  96. // TODO mark other children (seal)
  97. disk_set_used(src->disk_loc, mgr);
  98. for(i = 0; i < src->disk_nr_inuse; i++)
  99. disk_set_used(src->disk_inuse[i], mgr);
  100. return;
  101. }
  102. // unopened groups should never have been invalidated
  103. if (!group)
  104. abort();
  105. for (i = 0; i < group->nr_pages; i++)
  106. disk_populate_used_vtpm(&group->data[i], mgr);
  107. }
  108. static void disk_write_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
  109. struct hash256 *hash, sector_t *loc, int hsize,
  110. const aes_context *group_key, const struct mem_tpm_mgr *mgr);
  111. static void disk_write_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
  112. struct hash256 *hash, sector_t *loc, int hsize,
  113. const aes_context *group_key, const struct mem_tpm_mgr *mgr)
  114. {
  115. int i, incr = 1, inuse_base, lsize;
  116. while (nr_entries > incr * hsize)
  117. incr *= NR_ENTRIES_PER_ITREE;
  118. if (nr_entries <= hsize) {
  119. struct mem_group *group = hdr->v;
  120. for (i = 0; i < nr_entries; i++) {
  121. struct mem_vtpm_page *page = group->data + base + i;
  122. disk_write_vtpm_page(page, group_key, mgr);
  123. loc[i] = page->disk_loc;
  124. hash[i] = page->disk_hash;
  125. }
  126. } else {
  127. for (i = 0; i * incr < nr_entries; i++) {
  128. struct disk_itree_sector pt;
  129. int child_entries = incr;
  130. // the last sector is not completely full
  131. if (nr_entries - i * incr < incr)
  132. child_entries = nr_entries - i * incr;
  133. disk_write_vtpm_itree(hdr, base, child_entries, pt.hash, pt.location,
  134. NR_ENTRIES_PER_ITREE, group_key, mgr);
  135. sha256(&hash[i], &pt.hash, sizeof(pt.hash));
  136. disk_write_crypt_sector(&loc[i], &pt, sizeof(pt), mgr);
  137. base += incr;
  138. }
  139. }
  140. // save the list of used sectors (itree and vtpm) in the header
  141. inuse_base = hdr->disk_nr_inuse;
  142. lsize = 1 + (nr_entries - 1) / incr;
  143. hdr->disk_nr_inuse += lsize;
  144. hdr->disk_inuse = realloc(hdr->disk_inuse, hdr->disk_nr_inuse * sizeof(sector_t));
  145. memcpy(&hdr->disk_inuse[inuse_base], loc, lsize * sizeof(sector_t));
  146. }
/*
 * Write out a vTPM group sector and its children (vTPM itree + pages,
 * and the group's TPM seal blobs).  Does nothing if the existing
 * on-disk copy is valid (disk_loc != 0).  Fills src->disk_hash and
 * src->disk_loc as side effects.
 */
static void disk_write_group_sector(struct mem_group_hdr *src,
		const struct mem_tpm_mgr *mgr)
{
	struct disk_group_sector disk;
	struct mem_group *group = src->v;
	aes_context key_e;

	/* Don't write if the data hasn't changed */
	if (be32_native(src->disk_loc) != 0)
		return;

	// if the group was not opened, it should not have been changed
	if (!group)
		abort();

	memset(&disk, 0, sizeof(disk));
	memcpy(&disk.v.id_data, &group->id_data, sizeof(disk.v.id_data));
	memcpy(&disk.v.details, &group->details, sizeof(disk.v.details));

	/* Children are encrypted/MACed under the group key, not the
	 * manager key */
	aes_setup(&key_e, &group->group_key);

	disk.v.nr_vtpms = native_be32(group->nr_vtpms);

	// regenerated by disk_write_vtpm_itree below
	src->disk_nr_inuse = 0;

	/* Write the vTPM pages and their index tree first so their
	 * locations/hashes can be embedded in this sector */
	disk_write_vtpm_itree(src, 0, group->nr_pages, disk.v.vtpm_hash, disk.vtpm_location,
			NR_ENTRIES_PER_GROUP_BASE, &key_e, mgr);

	/* (Re)seal the group keys before copying the seal bits in */
	generate_group_seals(group, mgr);
	memcpy(&disk.v.boot_configs, &group->seal_bits, sizeof(group->seal_bits));

	/* MAC covers disk.v only; the parent's hash covers disk.v + the MAC */
	aes_cmac(&disk.group_mac, &disk.v, sizeof(disk.v), &key_e);
	sha256(&src->disk_hash, &disk.v, sizeof(disk.v) + sizeof(disk.group_mac));
	disk_write_crypt_sector(&src->disk_loc, &disk, sizeof(disk), mgr);
}
/*
 * Write TPM seal blobs for the manager's keys, using the given group's list
 * of valid configurations.  The sealed blob carries everything needed to
 * reopen the disk: the manager UUID, NVRAM/counter slots and auth values,
 * and the master key.
 */
static void disk_write_seal_list(struct mem_tpm_mgr *mgr, struct mem_group *group)
{
	int i;
	struct disk_seal_list *seal = disk_write_buf();
	struct disk_root_sealed_data sblob;

	/* Skip if the seal list for the active root slot is already valid.
	 * NOTE(review): this assumes active_root is 0 or 1, so
	 * (1 + active_root) selects bit 0x1 or 0x2 of root_seals_valid;
	 * confirm before adding more root slots. */
	if (mgr->root_seals_valid & (1 + mgr->active_root))
		return;

	memcpy(&sblob.magic, DISK_ROOT_BOUND_MAGIC, 4);
	memcpy(sblob.tpm_manager_uuid, mgr->uuid, 16);
	memcpy(&sblob.nvram_slot, &mgr->nvram_slot, 4);
	memcpy(&sblob.nvram_auth, &mgr->nvram_auth, 20);
	memcpy(&sblob.counter_index, &mgr->counter_index, 4);
	memcpy(&sblob.counter_auth, &mgr->counter_auth, 20);

	// TODO when an NV slot in the physical TPM is used to populate nv_key,
	// that value should be used to mask the master key so that the value
	// can be changed to revoke old disk state
#if 0
	aes_encrypt_one(&sblob.tm_key, &mgr->tm_key, &mgr->nv_key);
#else
	memcpy(&sblob.tm_key, &mgr->tm_key, 16);
#endif

	memset(seal, 0, sizeof(*seal));
	seal->length = native_be32(group->nr_seals);

	// TODO support for more entries
	if (group->nr_seals > SEALS_PER_ROOT_SEAL_LIST)
		abort();

	/* Seal (or bind, for TPM 2.0) the blob once per valid PCR config */
	for(i=0; i < group->nr_seals; i++) {
		struct mem_seal *src = &group->seals[i];
		struct disk_seal_entry *dst = &seal->entry[i];
		dst->pcr_selection = src->pcr_selection;
		memcpy(&dst->digest_release, &src->digest_release, 20);
		TPM_pcr_digest(&dst->digest_at_seal, dst->pcr_selection);

		/*TPM 2.0 bind / TPM 1.x seal*/
		if (hw_is_tpm2())
			TPM2_disk_bind(dst, &sblob, sizeof(sblob));
		else
			TPM_disk_seal(dst, &sblob, sizeof(sblob));
	}

	/* The seal list sector doubles as the disk's magic header */
	memcpy(seal->hdr.magic, TPM_MGR_MAGIC, 12);
	seal->hdr.version = native_be32(TPM_MGR_VERSION);

	disk_write_sector(seal_loc(mgr), seal, sizeof(*seal));
	mgr->root_seals_valid |= 1 + mgr->active_root;
}
  224. /*
  225. * Mark unchanged sectors on disk as being used
  226. */
  227. static void disk_populate_used_mgr(const struct mem_tpm_mgr *mgr)
  228. {
  229. int i;
  230. // TODO walk the linked lists for seals, rb_macs here (when supported)
  231. for(i=0; i < mgr->nr_groups; i++)
  232. disk_populate_used_group(&mgr->groups[i], mgr);
  233. }
  234. static void disk_write_group_itree(struct mem_tpm_mgr *mgr, int base, int nr_entries,
  235. struct hash256 *hash, sector_t *loc, int hsize);
  236. static void disk_write_group_itree(struct mem_tpm_mgr *mgr, int base, int nr_entries,
  237. struct hash256 *hash, sector_t *loc, int hsize)
  238. {
  239. int i, incr = 1;
  240. if (nr_entries <= hsize) {
  241. for(i=0; i < mgr->nr_groups; i++) {
  242. struct mem_group_hdr *group = mgr->groups + base + i;
  243. disk_write_group_sector(group, mgr);
  244. loc[i] = group->disk_loc;
  245. hash[i] = group->disk_hash;
  246. }
  247. return;
  248. }
  249. while (nr_entries > incr * hsize)
  250. incr *= NR_ENTRIES_PER_ITREE;
  251. for (i = 0; i * incr < nr_entries; i++) {
  252. struct disk_itree_sector pt;
  253. int child_entries = incr;
  254. // the last sector is not completely full
  255. if (nr_entries - i * incr < incr)
  256. child_entries = nr_entries - i * incr;
  257. disk_write_group_itree(mgr, base, child_entries, pt.hash, pt.location, NR_ENTRIES_PER_ITREE);
  258. sha256(&hash[i], &pt.hash, sizeof(pt.hash));
  259. disk_write_crypt_sector(&loc[i], &pt, sizeof(pt), mgr);
  260. base += incr;
  261. }
  262. }
/*
 * Write out the root TPM Manager sector and its children (the group
 * itree and group sectors), then the encrypted root sector itself.
 * Each opened, non-rolled-back group contributes a rollback MAC over
 * the root's authenticated region under its own rollback key.
 */
static void disk_write_root_sector(struct mem_tpm_mgr *mgr)
{
	int i, j;
	struct disk_root_sector root;
	memset(&root, 0, sizeof(root));

	root.v.sequence = native_be64(mgr->sequence);
	root.v.tpm_counter_value = mgr->counter_value;

	root.v.nr_groups = native_be32(mgr->nr_groups);

	/* Must happen before the MAC loop: it fills root.v.group_hash,
	 * which is inside the region the rollback MACs cover */
	disk_write_group_itree(mgr, 0, mgr->nr_groups, root.v.group_hash, root.group_loc, NR_ENTRIES_PER_ROOT);

	/* i walks groups, j walks the (possibly sparser) rb_macs array */
	i = 0;
	j = 0;
	while (i < mgr->nr_groups) {
		aes_context key_e;
		struct mem_group_hdr *group = &mgr->groups[i];
		struct mem_group *groupv = group->v;

		/* Unopened groups have no in-memory rollback key to MAC with */
		if (!groupv) {
			i++;
			continue;
		}
		/* Don't vouch for groups flagged as rolled back */
		if (groupv->details.flags.value & FLAG_ROLLBACK_DETECTED) {
			i++;
			continue;
		}

		if (j >= NR_RB_MACS_PER_ROOT)
			break; // TODO support for nr_rb_macs > 128

		aes_setup(&key_e, &groupv->rollback_mac_key);
		root.rb_macs[j].id = native_be32(i);
		aes_cmac(&root.rb_macs[j].mac, &root.v, sizeof(root.v), &key_e);
		i++; j++;
	}
	root.nr_rb_macs = native_be32(j);

	/* Encrypt and MAC the root sector under the manager key and write
	 * it to the fixed root location (not an allocated free sector) */
	struct disk_crypt_sector_plain *root_sect = disk_write_buf();
	aes_encrypt_ctr(root_sect->iv_data, sizeof(root_sect->iv_data), &root, sizeof(root), &mgr->tm_key_e);
	aes_cmac(&root_sect->mac, &root_sect->data, sizeof(root_sect->data), &mgr->tm_key_e);
	disk_write_sector(root_loc(mgr), root_sect, sizeof(*root_sect));
}
/*
 * Write out all changes to disk.  The sequence is order-sensitive:
 * flush the write slot, preserve unchanged sectors, write the root
 * sector tree, refresh the root seal list (using group 0's valid
 * configurations), and finally issue a write barrier so the update
 * becomes durable as a unit.
 */
void disk_write_all(struct mem_tpm_mgr *mgr)
{
	disk_flush_slot(mgr);
	disk_populate_used_mgr(mgr);
	disk_write_root_sector(mgr);
	disk_write_seal_list(mgr, mgr->groups[0].v);
	disk_write_barrier();
}
  313. /*
  314. * Create a new (blank) TPM Manager disk image.
  315. *
  316. * Does not actually write anything to disk.
  317. */
  318. int vtpm_new_disk(void)
  319. {
  320. int rc;
  321. struct mem_tpm_mgr *mgr = calloc(1, sizeof(*mgr));
  322. do_random(mgr->uuid, 16);
  323. do_random(&mgr->tm_key, 16);
  324. do_random(&mgr->nvram_auth, 20);
  325. do_random(&mgr->counter_auth, 20);
  326. do_random(&mgr->nv_key, 16);
  327. aes_setup(&mgr->tm_key_e, &mgr->tm_key);
  328. // TODO postpone these allocs until first write?
  329. rc = TPM_disk_nvalloc(&mgr->nvram_slot, mgr->nvram_auth);
  330. if (rc)
  331. return rc;
  332. rc = TPM_disk_alloc_counter(&mgr->counter_index, mgr->counter_auth, &mgr->counter_value);
  333. if (rc)
  334. return rc;
  335. mgr->nr_groups = 1;
  336. mgr->groups = calloc(1, sizeof(mgr->groups[0]));
  337. mgr->groups[0].v = vtpm_new_group(NULL);
  338. TPM_disk_nvwrite(&mgr->nv_key, 16, mgr->nvram_slot, mgr->nvram_auth);
  339. g_mgr = mgr;
  340. return 0;
  341. }