
/lib/open-vm-tools/include/x86cpuid.h

https://github.com/raphaeldias/vmware
C Header | 967 lines | 613 code | 104 blank | 250 comment
Possible License(s): LGPL-2.1
  1. /*********************************************************
  2. * Copyright (C) 1998-2008 VMware, Inc. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms of the GNU Lesser General Public License as published
  6. * by the Free Software Foundation version 2.1 and no later version.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  10. * or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser GNU General Public
  11. * License for more details.
  12. *
  13. * You should have received a copy of the GNU Lesser General Public License
  14. * along with this program; if not, write to the Free Software Foundation, Inc.,
  15. * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  16. *
  17. *********************************************************/
  18. #ifndef _X86CPUID_H_
  19. #define _X86CPUID_H_
  20. /* http://www.sandpile.org/ia32/cpuid.htm */
  21. #define INCLUDE_ALLOW_USERLEVEL
  22. #define INCLUDE_ALLOW_VMX
  23. #define INCLUDE_ALLOW_VMMEXT
  24. #define INCLUDE_ALLOW_VMKERNEL
  25. #define INCLUDE_ALLOW_MODULE
  26. #define INCLUDE_ALLOW_VMNIXMOD
  27. #define INCLUDE_ALLOW_DISTRIBUTE
  28. #define INCLUDE_ALLOW_VMK_MODULE
  29. #define INCLUDE_ALLOW_VMCORE
  30. #define INCLUDE_ALLOW_VMMON
  31. #include "includeCheck.h"
  32. #include "vm_basic_types.h"
  33. /*
34. * The Linux kernel's ptrace.h stupidly defines the bare
35. * EAX/EBX/ECX/EDX, which wreaks havoc with our preprocessor tricks.
  36. */
  37. #undef EAX
  38. #undef EBX
  39. #undef ECX
  40. #undef EDX
  41. typedef struct CPUIDRegs {
  42. uint32 eax, ebx, ecx, edx;
  43. } CPUIDRegs;
  44. typedef union CPUIDRegsUnion {
  45. uint32 array[4];
  46. CPUIDRegs regs;
  47. } CPUIDRegsUnion;
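/*
 * Illustrative sketch (not part of the original header): one way a caller
 * might fill a CPUIDRegs with GCC-style inline assembly and then index the
 * same storage as an array via CPUIDRegsUnion. The helper name
 * CPUID_GetRegsExample is hypothetical, and the "=b" constraint is shown
 * without worrying about 32-bit PIC %ebx restrictions.
 *
 *    static INLINE void
 *    CPUID_GetRegsExample(uint32 eax, uint32 ecx, CPUIDRegsUnion *u)
 *    {
 *       __asm__ __volatile__("cpuid"
 *                            : "=a" (u->regs.eax), "=b" (u->regs.ebx),
 *                              "=c" (u->regs.ecx), "=d" (u->regs.edx)
 *                            : "a" (eax), "c" (ecx));
 *       // u->array[0..3] now aliases eax/ebx/ecx/edx in declaration order.
 *    }
 */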
  48. /*
49. * Results of calling cpuid(eax, ecx) on all host logical CPUs.
  50. */
  51. #ifdef _MSC_VER
  52. #pragma warning (disable :4200) // non-std extension: zero-sized array in struct
  53. #endif
  54. typedef
  55. #include "vmware_pack_begin.h"
  56. struct CPUIDReply {
  57. /*
  58. * Unique host logical CPU identifier. It does not change across queries, so
  59. * we use it to correlate the replies of multiple queries.
  60. */
  61. uint64 tag; // OUT
  62. CPUIDRegs regs; // OUT
  63. }
  64. #include "vmware_pack_end.h"
  65. CPUIDReply;
  66. typedef
  67. #include "vmware_pack_begin.h"
  68. struct CPUIDQuery {
  69. uint32 eax; // IN
  70. uint32 ecx; // IN
  71. uint32 numLogicalCPUs; // IN/OUT
  72. CPUIDReply logicalCPUs[0]; // OUT
  73. }
  74. #include "vmware_pack_end.h"
  75. CPUIDQuery;
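/*
 * Illustrative sketch (not part of the original header): because
 * logicalCPUs[] is a zero-length trailing array, a CPUIDQuery for N logical
 * CPUs is typically allocated as a single block. The malloc-style allocator
 * below is an assumption for illustration only.
 *
 *    CPUIDQuery *q = (CPUIDQuery *)
 *       malloc(sizeof *q + numCPUs * sizeof q->logicalCPUs[0]);
 *    q->eax = 1;                  // level to query
 *    q->ecx = 0;                  // sub-level
 *    q->numLogicalCPUs = numCPUs; // IN: capacity; OUT: replies filled in
 */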
  76. /*
  77. * CPUID levels the monitor caches and ones that are not cached, but
  78. * have fields defined below (short name and actual value).
  79. *
  80. * The first parameter defines whether the level is masked/tested
  81. * during power-on/migration. Any level which is marked as FALSE here
  82. * *must* have all field masks defined as IGNORE in CPUID_FIELD_DATA.
  83. * A static assert in lib/cpuidcompat/cpuidcompat.c will check this.
  84. *
  85. * IMPORTANT: WHEN ADDING A NEW FIELD TO THE CACHED LEVELS, make sure
  86. * you update vmcore/vmm/cpu/priv.c:Priv_CPUID() and vmcore/vmm64/bt/
  87. * cpuid_shared.S (and geninfo) to include the new level.
  88. */
  89. #define CPUID_CACHED_LEVELS \
  90. CPUIDLEVEL(TRUE, 0, 0) \
  91. CPUIDLEVEL(TRUE, 1, 1) \
  92. CPUIDLEVEL(FALSE,400, 0x40000000) \
  93. CPUIDLEVEL(FALSE,410, 0x40000010) \
  94. CPUIDLEVEL(FALSE, 80, 0x80000000) \
  95. CPUIDLEVEL(TRUE, 81, 0x80000001) \
  96. CPUIDLEVEL(FALSE, 88, 0x80000008) \
  97. CPUIDLEVEL(TRUE, 8A, 0x8000000A)
  98. #define CPUID_UNCACHED_LEVELS \
  99. CPUIDLEVEL(FALSE, 4, 4) \
  100. CPUIDLEVEL(FALSE, 5, 5) \
  101. CPUIDLEVEL(FALSE, 6, 6) \
  102. CPUIDLEVEL(FALSE, A, 0xA) \
  103. CPUIDLEVEL(FALSE, 86, 0x80000006) \
104. CPUIDLEVEL(FALSE, 87, 0x80000007)
  105. #define CPUID_ALL_LEVELS \
  106. CPUID_CACHED_LEVELS \
  107. CPUID_UNCACHED_LEVELS
  108. /* Define cached CPUID levels in the form: CPUID_LEVEL_<ShortName> */
  109. typedef enum {
  110. #define CPUIDLEVEL(t, s, v) CPUID_LEVEL_##s,
  111. CPUID_CACHED_LEVELS
  112. #undef CPUIDLEVEL
  113. CPUID_NUM_LEVELS
  114. } CpuidLevels;
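/*
 * Illustrative sketch (not part of the original header): the same
 * CPUIDLEVEL X-macro list can be expanded again to build, for example, a
 * table mapping the CPUID_LEVEL_* enumerators above to their raw level
 * numbers:
 *
 *    static const uint32 cpuidCachedLevelValues[] = {
 *    #define CPUIDLEVEL(t, s, v) v,
 *       CPUID_CACHED_LEVELS
 *    #undef CPUIDLEVEL
 *    };
 *    // cpuidCachedLevelValues[CPUID_LEVEL_81] == 0x80000001
 */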
  115. /*
  116. * CPUID result registers
  117. */
  118. #define CPUID_REGS \
  119. CPUIDREG(EAX, eax) \
  120. CPUIDREG(EBX, ebx) \
  121. CPUIDREG(ECX, ecx) \
  122. CPUIDREG(EDX, edx)
  123. typedef enum {
  124. #define CPUIDREG(uc, lc) CPUID_REG_##uc,
  125. CPUID_REGS
  126. #undef CPUIDREG
  127. CPUID_NUM_REGS
  128. } CpuidRegs;
  129. /*
  130. * CPU vendors
  131. */
  132. typedef enum {
  133. CPUID_VENDOR_UNKNOWN,
  134. CPUID_VENDOR_COMMON,
  135. CPUID_VENDOR_INTEL,
  136. CPUID_VENDOR_AMD,
  137. CPUID_VENDOR_CYRIX,
  138. CPUID_NUM_VENDORS
  139. } CpuidVendors;
  140. #define CPUID_INTEL_VENDOR_STRING "GenuntelineI"
  141. #define CPUID_AMD_VENDOR_STRING "AuthcAMDenti"
  142. #define CPUID_CYRIX_VENDOR_STRING "CyriteadxIns"
  143. #define CPUID_HYPERV_HYPERVISOR_VENDOR_STRING "Microsoft Hv"
  144. #define CPUID_INTEL_VENDOR_STRING_FIXED "GenuineIntel"
  145. #define CPUID_AMD_VENDOR_STRING_FIXED "AuthenticAMD"
  146. #define CPUID_CYRIX_VENDOR_STRING_FIXED "CyrixInstead"
  147. #define CPUID_HYPERV_HYPERVISOR_VENDOR_STRING "Microsoft Hv"
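/*
 * Note: the non-_FIXED strings above are not typos. CPUID level 0 returns
 * the vendor string in the register order EBX, EDX, ECX; the strings here
 * store the three 4-byte chunks in EBX, ECX, EDX order so that
 * CPUID_IsRawVendor() below can compare them directly against id0->ebx,
 * id0->ecx and id0->edx. The *_FIXED variants are the human-readable forms.
 */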
  148. /*
  149. * FIELDDEF can be defined to process the CPUID information provided
  150. * in the following CPUID_FIELD_DATA macro. The first parameter is
151. * the CPUID level of the feature (must be defined in CPUID_*_LEVELS).
  152. * The second parameter is the register the field is contained in
  153. * (defined in CPUID_REGS). The third field is the vendor this
  154. * feature applies to. "COMMON" means all vendors apply. UNKNOWN may
  155. * not be used here. The fourth and fifth parameters are the bit
  156. * position of the field and the width, respectively. The sixth is
  157. * the text name of the field.
  158. *
159. * The seventh parameter ("MON SUPP") takes a CpuidFieldSupported value
160. * indicating monitor support for the field. The eighth and ninth parameters
161. * specify the default CPUID behavior for power-on, guest view, and migration
162. * tests (cpt/rsm & vmotion). The ninth parameter is ignored for mask types other than MASK & TEST, and must be zero in that case.
  163. *
  164. * When adding a new field, be sure to consider its purpose. The
  165. * following list of types is provided in order of likely use.
  166. *
  167. * NOTE: this form of representation is separate from the masking
  168. * system specified via the config file. That is because this
  169. * representation must take into account multi-bit fields.
  170. *
  171. * HOST - Passthrough host value and cannot change during migration.
  172. * MASK, 0 - Hide from the guest, because we don't support it or we
  173. * don't want the guest to know that it exists.
  174. * IGNORE - Ignore this field for all tests
  175. *
  176. * (Think twice before using the below mask types/combinations)
  177. *
  178. * MASK, x - Force the guest to always see x, and don't compare for
  179. * migration -- only APIC as of today; it is controlled by
  180. * software and we know how to toggle it
  181. * TEST, x - Require host CPUID field to be x for power-on
  182. * RSVD - Hidden from the guest, but compared during migration
  183. *
  184. *
  185. * Table to explain mask type meanings:
  186. *
187. *                            IGNR   MASK   TEST   HOST   RSVD
188. * ----------------------------------------------------------------
189. * Req'd val for power-on       -      -      x      -      -
190. * Value guest sees             *      x      *      *      0
191. * Checked on migration?        N      N      Y      Y      Y
  192. *
  193. * * - initial host's power-on CPUID value
  194. *
195. * FIELDDEFA takes one additional, final parameter: the name used when
196. * creating accessor functions in lib/public/cpuidInfoFuncs.h.
  197. *
  198. * FLAGDEF/FLAGDEFA is defined identically to fields, but their
  199. * accessors are more appropriate for 1-bit flags.
  200. */
  201. typedef enum {
  202. CPUID_FIELD_MASK_IGNORE,
  203. CPUID_FIELD_MASK_MASK,
  204. CPUID_FIELD_MASK_TEST,
  205. CPUID_FIELD_MASK_HOST,
  206. CPUID_FIELD_MASK_RSVD,
  207. CPUID_NUM_FIELD_MASKS
  208. } CpuidFieldMasks;
  209. typedef enum {
  210. CPUID_FIELD_SUPPORTED_NO,
  211. CPUID_FIELD_SUPPORTED_YES,
  212. CPUID_FIELD_SUPPORTED_ANY,
  213. CPUID_FIELD_SUPPORTED_NA,
  214. CPUID_NUM_FIELD_SUPPORTEDS
  215. } CpuidFieldSupported;
  216. /* LEVEL, REG, VENDOR, POS, SIZE, NAME, MON SUPP, MASK TYPE, SET TO, CPL3, [FUNC] */
  217. #define CPUID_FIELD_DATA_LEVEL_0 \
  218. FIELDDEF( 0, EAX, COMMON, 0, 32, NUMLEVELS, ANY, IGNORE, 0, FALSE) \
  219. FIELDDEF( 0, EBX, COMMON, 0, 32, VENDOR1, YES, HOST, 0, TRUE) \
  220. FIELDDEF( 0, ECX, COMMON, 0, 32, VENDOR3, YES, HOST, 0, TRUE) \
  221. FIELDDEF( 0, EDX, COMMON, 0, 32, VENDOR2, YES, HOST, 0, TRUE)
  222. /* LEVEL, REG, VENDOR, POS, SIZE, NAME, MON SUPP, MASK TYPE, SET TO, CPL3, [FUNC] */
  223. #define CPUID_FIELD_DATA_LEVEL_1 \
  224. FIELDDEFA( 1, EAX, COMMON, 0, 4, STEPPING, ANY, IGNORE, 0, FALSE, STEPPING) \
  225. FIELDDEFA( 1, EAX, COMMON, 4, 4, MODEL, ANY, IGNORE, 0, FALSE, MODEL) \
  226. FIELDDEFA( 1, EAX, COMMON, 8, 4, FAMILY, YES, HOST, 0, FALSE, FAMILY) \
  227. FIELDDEF( 1, EAX, COMMON, 12, 2, TYPE, ANY, IGNORE, 0, FALSE) \
  228. FIELDDEFA( 1, EAX, COMMON, 16, 4, EXTMODEL, ANY, IGNORE, 0, FALSE, EXT_MODEL) \
  229. FIELDDEFA( 1, EAX, COMMON, 20, 8, EXTFAMILY, YES, HOST, 0, FALSE, EXT_FAMILY) \
  230. FIELDDEF( 1, EBX, COMMON, 0, 8, BRAND_ID, ANY, IGNORE, 0, FALSE) \
  231. FIELDDEF( 1, EBX, COMMON, 8, 8, CLFL_SIZE, ANY, IGNORE, 0, FALSE) \
  232. FIELDDEFA( 1, EBX, COMMON, 16, 8, LCPU_COUNT, ANY, IGNORE, 0, FALSE, LCPU_COUNT) \
  233. FIELDDEFA( 1, EBX, COMMON, 24, 8, APICID, ANY, IGNORE, 0, FALSE, APICID) \
  234. FLAGDEFA( 1, ECX, COMMON, 0, 1, SSE3, YES, HOST, 0, TRUE, SSE3) \
  235. FLAGDEF( 1, ECX, INTEL, 2, 1, NDA2, NO, MASK, 0, FALSE) \
  236. FLAGDEFA( 1, ECX, COMMON, 3, 1, MWAIT, NO, MASK, 0, FALSE, MWAIT) \
  237. FLAGDEFA( 1, ECX, INTEL, 4, 1, DSCPL, NO, MASK, 0, FALSE, DSCPL) \
  238. FLAGDEFA( 1, ECX, INTEL, 5, 1, VMX, NO, MASK, 0, FALSE, VMX) \
  239. FLAGDEF( 1, ECX, INTEL, 6, 1, SMX, NO, MASK, 0, FALSE) \
  240. FLAGDEF( 1, ECX, INTEL, 7, 1, EST, NO, MASK, 0, FALSE) \
  241. FLAGDEF( 1, ECX, INTEL, 8, 1, TM2, NO, MASK, 0, FALSE) \
  242. FLAGDEFA( 1, ECX, COMMON, 9, 1, SSSE3, YES, HOST, 0, TRUE, SSSE3) \
  243. FLAGDEF( 1, ECX, INTEL, 10, 1, HTCACHE, NO, MASK, 0, FALSE) \
  244. FLAGDEFA( 1, ECX, COMMON, 13, 1, CMPX16, YES, HOST, 0, TRUE, CMPX16) \
  245. FLAGDEF( 1, ECX, INTEL, 14, 1, xPPR, NO, MASK, 0, FALSE) \
  246. FLAGDEF( 1, ECX, INTEL, 15, 1, PERF_MSR, NO, MASK, 0, FALSE) \
  247. FLAGDEF( 1, ECX, INTEL, 18, 1, DCA, NO, MASK, 0, FALSE) \
  248. FLAGDEFA( 1, ECX, INTEL, 19, 1, SSE41, YES, HOST, 0, TRUE, SSE41) \
  249. FLAGDEFA( 1, ECX, INTEL, 20, 1, SSE42, YES, HOST, 0, TRUE, SSE42) \
  250. FLAGDEF( 1, ECX, INTEL, 21, 1, X2APIC, NO, MASK, 0, FALSE) \
  251. FLAGDEF( 1, ECX, INTEL, 22, 1, MOVBE, NO, RSVD, 0, TRUE) \
  252. FLAGDEFA( 1, ECX, COMMON, 23, 1, POPCNT, YES, HOST, 0, TRUE, POPCNT) \
  253. FLAGDEF( 1, ECX, INTEL, 24, 1, ULE, NO, RSVD, 0, TRUE) \
  254. FLAGDEF( 1, ECX, INTEL, 26, 1, XSAVE, NO, MASK, 0, FALSE) \
  255. FLAGDEF( 1, ECX, INTEL, 27, 1, OSXSAVE, NO, RSVD, 0, TRUE) \
  256. FLAGDEFA( 1, ECX, COMMON, 31, 1, HYPERVISOR, ANY, IGNORE, 0, FALSE, HYPERVISOR)\
  257. FLAGDEFA( 1, EDX, COMMON, 0, 1, FPU, YES, HOST, 0, TRUE, FPU) \
  258. FLAGDEFA( 1, EDX, COMMON, 1, 1, VME, YES, HOST, 0, FALSE, VME) \
  259. FLAGDEF( 1, EDX, COMMON, 2, 1, DBGE, YES, HOST, 0, FALSE) \
  260. FLAGDEF( 1, EDX, COMMON, 3, 1, PGSZE, YES, HOST, 0, FALSE) \
  261. FLAGDEFA( 1, EDX, COMMON, 4, 1, TSC, YES, HOST, 0, TRUE, TSC) \
  262. FLAGDEF( 1, EDX, COMMON, 5, 1, MSR, YES, HOST, 0, FALSE) \
  263. FLAGDEFA( 1, EDX, COMMON, 6, 1, PAE, YES, HOST, 0, FALSE, PAE) \
  264. FLAGDEF( 1, EDX, COMMON, 7, 1, MCK, YES, HOST, 0, FALSE) \
  265. FLAGDEF( 1, EDX, COMMON, 8, 1, CPMX, YES, HOST, 0, TRUE) \
  266. FLAGDEFA( 1, EDX, COMMON, 9, 1, APIC, ANY, MASK, 1, FALSE, APIC) \
  267. FLAGDEFA( 1, EDX, COMMON, 11, 1, SEP, YES, HOST, 0, TRUE, SEP) \
  268. FLAGDEFA( 1, EDX, COMMON, 12, 1, MTRR, YES, HOST, 0, FALSE, MTRR) \
  269. FLAGDEFA( 1, EDX, COMMON, 13, 1, PGE, YES, HOST, 0, FALSE, PGE) \
  270. FLAGDEFA( 1, EDX, COMMON, 14, 1, MCA, YES, HOST, 0, FALSE, MCA) \
  271. FLAGDEFA( 1, EDX, COMMON, 15, 1, CMOV, YES, HOST, 0, TRUE, CMOV) \
  272. FLAGDEFA( 1, EDX, COMMON, 16, 1, PAT, YES, HOST, 0, FALSE, PAT) \
  273. FLAGDEF( 1, EDX, COMMON, 17, 1, 36PG, YES, HOST, 0, FALSE) \
  274. FLAGDEF( 1, EDX, INTEL, 18, 1, PSN, YES, HOST, 0, FALSE) \
  275. FLAGDEFA( 1, EDX, COMMON, 19, 1, CLFL, YES, HOST, 0, TRUE, CLFL) \
  276. FLAGDEF( 1, EDX, INTEL, 21, 1, DTES, YES, HOST, 0, FALSE) \
  277. FLAGDEF( 1, EDX, INTEL, 22, 1, ACPI, YES, HOST, 0, FALSE) \
  278. FLAGDEFA( 1, EDX, COMMON, 23, 1, MMX, YES, HOST, 0, TRUE, MMX) \
  279. FLAGDEFA( 1, EDX, COMMON, 24, 1, FXSAVE, YES, HOST, 0, TRUE, FXSAVE) \
  280. FLAGDEFA( 1, EDX, COMMON, 25, 1, SSE, YES, HOST, 0, TRUE, SSE) \
  281. FLAGDEFA( 1, EDX, COMMON, 26, 1, SSE2, YES, HOST, 0, TRUE, SSE2) \
  282. FLAGDEF( 1, EDX, INTEL, 27, 1, SS, YES, HOST, 0, FALSE) \
  283. FLAGDEFA( 1, EDX, COMMON, 28, 1, HT, NO, MASK, 0, FALSE, HT) \
  284. FLAGDEF( 1, EDX, INTEL, 29, 1, TM, NO, MASK, 0, FALSE) \
  285. FLAGDEF( 1, EDX, INTEL, 30, 1, IA64, NO, MASK, 0, FALSE) \
  286. FLAGDEF( 1, EDX, INTEL, 31, 1, PBE, NO, MASK, 0, FALSE)
  287. /* LEVEL, REG, VENDOR, POS, SIZE, NAME, MON SUPP, MASK TYPE, SET TO, CPL3, [FUNC] */
  288. #define CPUID_FIELD_DATA_LEVEL_4 \
  289. FIELDDEF( 4, EAX, INTEL, 0, 5, CACHE_TYPE, NA, IGNORE, 0, FALSE) \
  290. FIELDDEF( 4, EAX, INTEL, 5, 3, CACHE_LEVEL, NA, IGNORE, 0, FALSE) \
  291. FIELDDEF( 4, EAX, INTEL, 14, 12, CACHE_NUMHT_SHARING, NA, IGNORE, 0, FALSE) \
  292. FIELDDEFA( 4, EAX, INTEL, 26, 6, CORE_COUNT, NA, IGNORE, 0, FALSE, INTEL_CORE_COUNT) \
  293. FIELDDEF( 4, EBX, INTEL, 0, 12, CACHE_LINE, NA, IGNORE, 0, FALSE) \
  294. FIELDDEF( 4, EBX, INTEL, 12, 10, CACHE_PART, NA, IGNORE, 0, FALSE) \
  295. FIELDDEF( 4, EBX, INTEL, 22, 10, CACHE_WAYS, NA, IGNORE, 0, FALSE)
  296. /* LEVEL, REG, VENDOR, POS, SIZE, NAME, MON SUPP, MASK TYPE, SET TO, CPL3, [FUNC] */
  297. #define CPUID_FIELD_DATA_LEVEL_5 \
  298. FIELDDEF( 5, EAX, COMMON, 0, 16, MWAIT_MIN_SIZE, NA, IGNORE, 0, FALSE) \
  299. FIELDDEF( 5, EBX, COMMON, 0, 16, MWAIT_MAX_SIZE, NA, IGNORE, 0, FALSE) \
  300. FLAGDEF( 5, ECX, COMMON, 0, 1, MWAIT_EXTENSIONS, NA, IGNORE, 0, FALSE) \
  301. FLAGDEF( 5, ECX, COMMON, 1, 1, MWAIT_INTR_BREAK, NA, IGNORE, 0, FALSE) \
  302. FIELDDEF( 5, EDX, INTEL, 0, 4, MWAIT_C0_SUBSTATE, NA, IGNORE, 0, FALSE) \
  303. FIELDDEF( 5, EDX, INTEL, 4, 4, MWAIT_C1_SUBSTATE, NA, IGNORE, 0, FALSE) \
  304. FIELDDEF( 5, EDX, INTEL, 8, 4, MWAIT_C2_SUBSTATE, NA, IGNORE, 0, FALSE) \
  305. FIELDDEF( 5, EDX, INTEL, 12, 4, MWAIT_C3_SUBSTATE, NA, IGNORE, 0, FALSE) \
  306. FIELDDEF( 5, EDX, INTEL, 16, 4, MWAIT_C4_SUBSTATE, NA, IGNORE, 0, FALSE)
  307. /* LEVEL, REG, VENDOR, POS, SIZE, NAME, MON SUPP, MASK TYPE, SET TO, CPL3, [FUNC] */
  308. #define CPUID_FIELD_DATA_LEVEL_6 \
  309. FLAGDEF( 6, EAX, INTEL, 0, 1, THERMAL_SENSOR, NA, IGNORE, 0, FALSE) \
  310. FLAGDEF( 6, EAX, INTEL, 1, 1, TURBO_MODE, NA, IGNORE, 0, FALSE) \
  311. FIELDDEF( 6, EBX, INTEL, 0, 4, NUM_INTR_THRESHOLDS, NA, IGNORE, 0, FALSE) \
  312. FLAGDEF( 6, ECX, INTEL, 0, 1, HW_COORD_FEEDBACK, NA, IGNORE, 0, FALSE)
  313. /* LEVEL, REG, VENDOR, POS, SIZE, NAME, MON SUPP, MASK TYPE, SET TO, CPL3, [FUNC] */
  314. #define CPUID_FIELD_DATA_LEVEL_A \
  315. FIELDDEFA( A, EAX, INTEL, 0, 8, PMC_VERSION, NA, IGNORE, 0, FALSE, PMC_VERSION) \
  316. FIELDDEFA( A, EAX, INTEL, 8, 8, NUM_PMCS, NA, IGNORE, 0, FALSE, NUM_PMCS) \
  317. FIELDDEF( A, EAX, INTEL, 16, 8, PMC_BIT_WIDTH, NA, IGNORE, 0, FALSE) \
  318. FIELDDEFA( A, EAX, INTEL, 24, 8, PMC_EBX_LENGTH, NA, IGNORE, 0, FALSE, PMC_EBX_LENGTH) \
  319. FLAGDEF( A, EBX, INTEL, 0, 1, PMC_CORE_CYCLE, NA, IGNORE, 0, FALSE) \
  320. FLAGDEF( A, EBX, INTEL, 1, 1, PMC_INSTR_RETIRED, NA, IGNORE, 0, FALSE) \
  321. FLAGDEF( A, EBX, INTEL, 2, 1, PMC_REF_CYCLES, NA, IGNORE, 0, FALSE) \
  322. FLAGDEF( A, EBX, INTEL, 3, 1, PMC_LAST_LVL_CREF, NA, IGNORE, 0, FALSE) \
  323. FLAGDEF( A, EBX, INTEL, 4, 1, PMC_LAST_LVL_CMISS, NA, IGNORE, 0, FALSE) \
  324. FLAGDEF( A, EBX, INTEL, 5, 1, PMC_BR_INST_RETIRED, NA, IGNORE, 0, FALSE) \
  325. FLAGDEF( A, EBX, INTEL, 6, 1, PMC_BR_MISS_RETIRED, NA, IGNORE, 0, FALSE)
  326. /* LEVEL, REG, VENDOR, POS, SIZE, NAME, MON SUPP, MASK TYPE, SET TO, CPL3, [FUNC] */
  327. #define CPUID_FIELD_DATA_LEVEL_80 \
  328. FIELDDEF( 80, EAX, COMMON, 0, 32, NUM_EXT_LEVELS, NA, IGNORE, 0, FALSE) \
  329. FIELDDEF( 80, EBX, AMD, 0, 32, AMD_VENDOR1, NA, IGNORE, 0, FALSE) \
  330. FIELDDEF( 80, ECX, AMD, 0, 32, AMD_VENDOR3, NA, IGNORE, 0, FALSE) \
  331. FIELDDEF( 80, EDX, AMD, 0, 32, AMD_VENDOR2, NA, IGNORE, 0, FALSE)
  332. /* LEVEL, REG, VENDOR, POS, SIZE, NAME, MON SUPP, MASK TYPE, SET TO, CPL3, [FUNC] */
  333. #define CPUID_FIELD_DATA_LEVEL_81 \
  334. FIELDDEF( 81, EAX, INTEL, 0, 32, UNKNOWN81EAX, ANY, IGNORE, 0, FALSE) \
  335. FIELDDEF( 81, EAX, AMD, 0, 4, STEPPING, ANY, IGNORE, 0, FALSE) \
  336. FIELDDEF( 81, EAX, AMD, 4, 4, MODEL, ANY, IGNORE, 0, FALSE) \
  337. FIELDDEF( 81, EAX, AMD, 8, 4, FAMILY, ANY, IGNORE, 0, FALSE) \
  338. FIELDDEF( 81, EAX, AMD, 12, 2, TYPE, ANY, IGNORE, 0, FALSE) \
  339. FIELDDEF( 81, EAX, AMD, 16, 4, EXTMODEL, ANY, IGNORE, 0, FALSE) \
  340. FIELDDEF( 81, EAX, AMD, 20, 8, EXTFAMILY, ANY, IGNORE, 0, FALSE) \
  341. FIELDDEF( 81, EBX, INTEL, 0, 32, UNKNOWN81EBX, ANY, IGNORE, 0, FALSE) \
  342. FIELDDEF( 81, EBX, AMD, 0, 16, BRAND_ID, ANY, IGNORE, 0, FALSE) \
  343. FIELDDEF( 81, EBX, AMD, 16, 16, UNDEF, ANY, IGNORE, 0, FALSE) \
  344. FLAGDEFA( 81, ECX, COMMON, 0, 1, LAHF, YES, HOST, 0, TRUE, LAHF64) \
  345. FLAGDEFA( 81, ECX, AMD, 1, 1, CMPLEGACY, NO, MASK, 0, FALSE, CMPLEGACY) \
  346. FLAGDEFA( 81, ECX, AMD, 2, 1, SVM, NO, MASK, 0, FALSE, SVM) \
  347. FLAGDEFA( 81, ECX, AMD, 3, 1, EXTAPICSPC, YES, HOST, 0, FALSE, EXTAPICSPC) \
  348. FLAGDEFA( 81, ECX, AMD, 4, 1, CR8AVAIL, NO, MASK, 0, FALSE, CR8AVAIL) \
  349. FLAGDEFA( 81, ECX, AMD, 5, 1, ABM, YES, HOST, 0, TRUE, ABM) \
  350. FLAGDEFA( 81, ECX, AMD, 6, 1, SSE4A, YES, HOST, 0, TRUE, SSE4A) \
  351. FLAGDEF( 81, ECX, AMD, 7, 1, MISALIGNED_SSE, YES, HOST, 0, TRUE) \
  352. FLAGDEFA( 81, ECX, AMD, 8, 1, 3DNPREFETCH, YES, HOST, 0, TRUE, 3DNPREFETCH) \
  353. FLAGDEF( 81, ECX, AMD, 9, 1, OSVW, NO, MASK, 0, FALSE) \
  354. FLAGDEF( 81, ECX, AMD, 10, 1, IBS, NO, MASK, 0, FALSE) \
  355. FLAGDEF( 81, ECX, AMD, 11, 1, SSE5, NO, RSVD, 0, TRUE) \
  356. FLAGDEF( 81, ECX, AMD, 12, 1, SKINIT, NO, MASK, 0, FALSE) \
  357. FLAGDEF( 81, ECX, AMD, 13, 1, WATCHDOG, NO, MASK, 0, FALSE) \
  358. FLAGDEF( 81, EDX, AMD, 0, 1, FPU, YES, HOST, 0, TRUE) \
  359. FLAGDEF( 81, EDX, AMD, 1, 1, VME, YES, HOST, 0, FALSE) \
  360. FLAGDEF( 81, EDX, AMD, 2, 1, DBGE, YES, HOST, 0, FALSE) \
  361. FLAGDEF( 81, EDX, AMD, 3, 1, PGSZE, YES, HOST, 0, FALSE) \
  362. FLAGDEF( 81, EDX, AMD, 4, 1, TSC, YES, HOST, 0, TRUE) \
  363. FLAGDEF( 81, EDX, AMD, 5, 1, MSR, YES, HOST, 0, FALSE) \
  364. FLAGDEF( 81, EDX, AMD, 6, 1, PAE, YES, HOST, 0, FALSE) \
  365. FLAGDEF( 81, EDX, AMD, 7, 1, MCK, YES, HOST, 0, FALSE) \
  366. FLAGDEF( 81, EDX, AMD, 8, 1, CPMX, YES, HOST, 0, TRUE) \
  367. FLAGDEF( 81, EDX, AMD, 9, 1, APIC, ANY, MASK, 1, FALSE) \
  368. FLAGDEFA( 81, EDX, COMMON, 11, 1, SYSC, ANY, IGNORE, 0, TRUE, SYSC) \
  369. FLAGDEF( 81, EDX, AMD, 12, 1, MTRR, YES, HOST, 0, FALSE) \
  370. FLAGDEF( 81, EDX, AMD, 13, 1, PGE, YES, HOST, 0, FALSE) \
  371. FLAGDEF( 81, EDX, AMD, 14, 1, MCA, YES, HOST, 0, FALSE) \
  372. FLAGDEF( 81, EDX, AMD, 15, 1, CMOV, YES, HOST, 0, TRUE) \
  373. FLAGDEF( 81, EDX, AMD, 16, 1, PAT, YES, HOST, 0, FALSE) \
  374. FLAGDEF( 81, EDX, AMD, 17, 1, 36PG, YES, HOST, 0, FALSE) \
  375. FLAGDEFA( 81, EDX, COMMON, 20, 1, NX, YES, HOST, 0, FALSE, NX) \
  376. FLAGDEFA( 81, EDX, AMD, 22, 1, MMXEXT, YES, HOST, 0, TRUE, MMXEXT) \
  377. FLAGDEF( 81, EDX, AMD, 23, 1, MMX, YES, HOST, 0, TRUE) \
  378. FLAGDEF( 81, EDX, AMD, 24, 1, FXSAVE, YES, HOST, 0, TRUE) \
  379. FLAGDEFA( 81, EDX, AMD, 25, 1, FFXSR, YES, HOST, 0, FALSE, FFXSR) \
  380. FLAGDEF( 81, EDX, AMD, 26, 1, PDPE1GB, NO, MASK, 0, FALSE) \
  381. FLAGDEFA( 81, EDX, COMMON, 27, 1, RDTSCP, YES, HOST, 0, TRUE, RDTSCP) \
  382. FLAGDEFA( 81, EDX, COMMON, 29, 1, LM, YES, TEST, 1, FALSE, LM) \
  383. FLAGDEFA( 81, EDX, AMD, 30, 1, 3DNOWPLUS, YES, HOST, 0, TRUE, 3DNOWPLUS) \
  384. FLAGDEFA( 81, EDX, AMD, 31, 1, 3DNOW, YES, HOST, 0, TRUE, 3DNOW)
  385. /* LEVEL, REG, VENDOR, POS, SIZE, NAME, MON SUPP, MASK TYPE, SET TO, CPL3, [FUNC] */
  386. #define CPUID_FIELD_DATA_LEVEL_8x \
  387. FIELDDEF( 86, ECX, AMD, 0, 8, L2CACHE_LINE, NA, IGNORE, 0, FALSE) \
  388. FIELDDEF( 86, ECX, AMD, 8, 4, L2CACHE_LINE_PER_TAG, NA, IGNORE, 0, FALSE) \
  389. FIELDDEF( 86, ECX, AMD, 12, 4, L2CACHE_WAYS, NA, IGNORE, 0, FALSE) \
  390. FIELDDEF( 86, ECX, AMD, 16, 16, L2CACHE_SIZE, NA, IGNORE, 0, FALSE) \
  391. FIELDDEF( 86, EDX, AMD, 0, 8, L3CACHE_LINE, NA, IGNORE, 0, FALSE) \
  392. FIELDDEF( 86, EDX, AMD, 8, 4, L3CACHE_LINE_PER_TAG,NA, IGNORE, 0, FALSE) \
  393. FIELDDEF( 86, EDX, AMD, 12, 4, L3CACHE_WAYS, NA, IGNORE, 0, FALSE) \
  394. FIELDDEF( 86, EDX, AMD, 18, 14, L3CACHE_SIZE, NA, IGNORE, 0, FALSE) \
  395. FLAGDEF( 87, EDX, AMD, 0, 1, TS, NA, IGNORE, 0, FALSE) \
  396. FLAGDEF( 87, EDX, AMD, 1, 1, FID, NA, IGNORE, 0, FALSE) \
  397. FLAGDEF( 87, EDX, AMD, 2, 1, VID, NA, IGNORE, 0, FALSE) \
  398. FLAGDEF( 87, EDX, AMD, 3, 1, TTP, NA, IGNORE, 0, FALSE) \
  399. FLAGDEF( 87, EDX, AMD, 4, 1, TM, NA, IGNORE, 0, FALSE) \
  400. FLAGDEF( 87, EDX, AMD, 5, 1, STC, NA, IGNORE, 0, FALSE) \
  401. FLAGDEF( 87, EDX, AMD, 6, 1, 100MHZSTEPS, NA, IGNORE, 0, FALSE) \
  402. FLAGDEF( 87, EDX, AMD, 7, 1, HWPSTATE, NA, IGNORE, 0, FALSE) \
  403. FLAGDEF( 87, EDX, AMD, 8, 1, TSC_INVARIANT, NA, IGNORE, 0, FALSE) \
  404. FIELDDEFA(88, EAX, COMMON, 0, 8, PHYSBITS, NA, IGNORE, 0, FALSE, PHYS_BITS) \
  405. FIELDDEFA(88, EAX, COMMON, 8, 8, VIRTBITS, NA, IGNORE, 0, FALSE, VIRT_BITS) \
  406. FIELDDEFA(88, ECX, AMD, 0, 8, CORE_COUNT, NA, IGNORE, 0, FALSE, AMD_CORE_COUNT) \
  407. FIELDDEF( 88, ECX, AMD, 12, 4, APICID_COREID_SIZE, NA, IGNORE, 0, FALSE) \
  408. FIELDDEFA(8A, EAX, AMD, 0, 8, SVM_REVISION, NO, MASK, 0, FALSE, SVM_REVISION) \
  409. FLAGDEF( 8A, EAX, AMD, 8, 1, SVM_HYPERVISOR, NO, MASK, 0, FALSE) \
  410. FIELDDEF( 8A, EAX, AMD, 9, 23, SVMEAX_RSVD, NO, MASK, 0, FALSE) \
  411. FIELDDEF( 8A, EBX, AMD, 0, 32, SVM_N_ASIDS, NO, MASK, 0, FALSE) \
  412. FIELDDEF( 8A, ECX, AMD, 0, 32, SVMECX_RSVD, NO, MASK, 0, FALSE) \
  413. FLAGDEFA( 8A, EDX, AMD, 0, 1, SVM_NP, NO, MASK, 0, FALSE, NPT) \
  414. FLAGDEF( 8A, EDX, AMD, 1, 1, SVM_LBR, NO, MASK, 0, FALSE) \
  415. FLAGDEF( 8A, EDX, AMD, 2, 1, SVM_LOCK, NO, MASK, 0, FALSE) \
  416. FLAGDEF( 8A, EDX, AMD, 3, 1, SVM_NRIP, NO, MASK, 0, FALSE) \
  417. FIELDDEF( 8A, EDX, AMD, 4, 28, SVMEDX_RSVD, NO, MASK, 0, FALSE)
  418. #define CPUID_FIELD_DATA \
  419. CPUID_FIELD_DATA_LEVEL_0 \
  420. CPUID_FIELD_DATA_LEVEL_1 \
  421. CPUID_FIELD_DATA_LEVEL_4 \
  422. CPUID_FIELD_DATA_LEVEL_5 \
  423. CPUID_FIELD_DATA_LEVEL_6 \
  424. CPUID_FIELD_DATA_LEVEL_A \
  425. CPUID_FIELD_DATA_LEVEL_80 \
  426. CPUID_FIELD_DATA_LEVEL_81 \
  427. CPUID_FIELD_DATA_LEVEL_8x
  428. /*
  429. * Define all field and flag values as an enum. The result is a full
  430. * set of values taken from the table above in the form:
  431. *
  432. * CPUID_FEATURE_<vendor>_ID<level><reg>_<name> == mask for feature
  433. * CPUID_<vendor>_ID<level><reg>_<name>_MASK == mask for field
  434. * CPUID_<vendor>_ID<level><reg>_<name>_SHIFT == offset of field
  435. *
  436. * e.g. - CPUID_FEATURE_COMMON_ID1EDX_FPU = 0x1
  437. * - CPUID_COMMON_ID88EAX_VIRTBITS_MASK = 0xff00
  438. * - CPUID_COMMON_ID88EAX_VIRTBITS_SHIFT = 8
  439. *
  440. * Note: The FEATURE/MASK definitions must use some gymnastics to get
  441. * around a warning when shifting left by 32.
  442. */
  443. #define VMW_BIT_MASK(shift) (((1 << (shift - 1)) << 1) - 1)
  444. #define FIELDDEF(lvl, reg, vend, bitpos, size, name, s, m, v, c3) \
  445. CPUID_##vend##_ID##lvl##reg##_##name##_SHIFT = bitpos, \
  446. CPUID_##vend##_ID##lvl##reg##_##name##_MASK = \
  447. VMW_BIT_MASK(size) << bitpos, \
  448. CPUID_FEATURE_##vend##_ID##lvl##reg##_##name = \
  449. CPUID_##vend##_ID##lvl##reg##_##name##_MASK,
  450. /* Before simplifying this take a look at bug 293638... */
  451. #define FIELDDEFA(lvl, reg, vend, bitpos, size, name, s, m, v, c3, f) \
  452. CPUID_##vend##_ID##lvl##reg##_##name##_SHIFT = bitpos, \
  453. CPUID_##vend##_ID##lvl##reg##_##name##_MASK = \
  454. VMW_BIT_MASK(size) << bitpos, \
  455. CPUID_FEATURE_##vend##_ID##lvl##reg##_##name = \
  456. CPUID_##vend##_ID##lvl##reg##_##name##_MASK,
  457. #define FLAGDEFA FIELDDEFA
  458. #define FLAGDEF FIELDDEF
  459. enum {
  460. /* Define data for every CPUID field we have */
  461. CPUID_FIELD_DATA
  462. };
  463. #undef VMW_BIT_MASK
  464. #undef FIELDDEF
  465. #undef FLAGDEF
  466. #undef FIELDDEFA
  467. #undef FLAGDEFA
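/*
 * Illustrative sketch (not part of the original header): with the enum
 * above in place, feature tests and field extraction reduce to simple mask
 * arithmetic on the registers returned for the corresponding level, e.g.:
 *
 *    CPUIDRegs id1;   // assumed to hold the results of CPUID with %eax=1
 *    Bool hasSSE2  = (id1.edx & CPUID_FEATURE_COMMON_ID1EDX_SSE2) != 0;
 *    uint32 apicId = (id1.ebx & CPUID_COMMON_ID1EBX_APICID_MASK)
 *                       >> CPUID_COMMON_ID1EBX_APICID_SHIFT;
 */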
  468. /*
  469. * Legal CPUID config file mask characters. For a description of the
  470. * cpuid masking system, please see:
  471. *
  472. * http://vmweb.vmware.com/~mts/cgi-bin/view.cgi/Apps/CpuMigrationChecks
  473. */
  474. #define CPUID_MASK_HIDE_CHR '0'
  475. #define CPUID_MASK_HIDE_STR "0"
  476. #define CPUID_MASK_FORCE_CHR '1'
  477. #define CPUID_MASK_FORCE_STR "1"
  478. #define CPUID_MASK_PASS_CHR '-'
  479. #define CPUID_MASK_PASS_STR "-"
  480. #define CPUID_MASK_TRUE_CHR 'T'
  481. #define CPUID_MASK_TRUE_STR "T"
  482. #define CPUID_MASK_FALSE_CHR 'F'
  483. #define CPUID_MASK_FALSE_STR "F"
  484. #define CPUID_MASK_IGNORE_CHR 'X'
  485. #define CPUID_MASK_IGNORE_STR "X"
  486. #define CPUID_MASK_HOST_CHR 'H'
  487. #define CPUID_MASK_HOST_STR "H"
  488. #define CPUID_MASK_RSVD_CHR 'R'
  489. #define CPUID_MASK_RSVD_STR "R"
  490. #define CPUID_MASK_INSTALL_CHR 'I'
  491. #define CPUID_MASK_INSTALL_STR "I"
  492. /*
  493. * If a level is listed as not masked/tested in CPUID_LEVELS above,
  494. * use all "don't care" values for its mask.
  495. */
  496. #define CPT_DFLT_UNDEFINED_MASK "XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX"
  497. /*
  498. * When LM is disabled, we overlay the following masks onto the
  499. * guest's default masks. Any level that is not defined below should
  500. * be treated as all "-"s
  501. */
  502. #define CPT_ID1ECX_LM_DISABLED "----:----:----:----:--0-:----:----:----"
  503. #define CPT_ID81EDX_LM_DISABLED "--0-:----:----:----:----:----:----:----"
  504. #define CPT_ID81ECX_LM_DISABLED "----:----:----:----:----:----:----:---0"
  505. #define CPT_GET_LM_DISABLED_MASK(lvl, reg) \
  506. ((lvl == 1 && reg == CPUID_REG_ECX) ? CPT_ID1ECX_LM_DISABLED : \
  507. (lvl == 0x80000001 && reg == CPUID_REG_ECX) ? CPT_ID81ECX_LM_DISABLED : \
  508. (lvl == 0x80000001 && reg == CPUID_REG_EDX) ? CPT_ID81EDX_LM_DISABLED : \
  509. NULL)
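/*
 * Illustrative sketch (not part of the original header): when long mode is
 * disabled, a caller would overlay the returned string, if non-NULL, onto
 * that level/register's default mask, e.g.:
 *
 *    const char *overlay =
 *       CPT_GET_LM_DISABLED_MASK(0x80000001, CPUID_REG_EDX);
 *    // overlay == CPT_ID81EDX_LM_DISABLED; NULL means "leave the default".
 */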
  510. /*
  511. * Macro to define GET and SET functions for various common CPUID
  512. * fields. To create function for a new field, simply name it (CPUID_
  513. * and CPUID_SET_ are automatically prepended), and list the field
  514. * name that it needs to use.
  515. */
  516. #define FIELD_FUNC(name, field) \
  517. static INLINE uint32 CPUID_##name(uint32 reg) \
  518. { \
  519. return (reg & field##_MASK) >> field##_SHIFT; \
  520. } \
  521. static INLINE void CPUID_SET_##name(uint32 *reg, uint32 val) \
  522. { \
  523. *reg = (*reg & ~field##_MASK) | (val << field##_SHIFT); \
  524. }
  525. FIELD_FUNC(STEPPING, CPUID_COMMON_ID1EAX_STEPPING)
  526. FIELD_FUNC(MODEL, CPUID_COMMON_ID1EAX_MODEL)
  527. FIELD_FUNC(FAMILY, CPUID_COMMON_ID1EAX_FAMILY)
  528. FIELD_FUNC(TYPE, CPUID_COMMON_ID1EAX_TYPE)
  529. FIELD_FUNC(EXTENDED_MODEL, CPUID_COMMON_ID1EAX_EXTMODEL)
  530. FIELD_FUNC(EXTENDED_FAMILY, CPUID_COMMON_ID1EAX_EXTFAMILY)
  531. FIELD_FUNC(LCPU_COUNT, CPUID_COMMON_ID1EBX_LCPU_COUNT)
  532. FIELD_FUNC(APICID, CPUID_COMMON_ID1EBX_APICID)
  533. FIELD_FUNC(PA_BITS, CPUID_COMMON_ID88EAX_PHYSBITS)
  534. FIELD_FUNC(VIRT_BITS, CPUID_COMMON_ID88EAX_VIRTBITS)
  535. FIELD_FUNC(SVM_REVISION, CPUID_AMD_ID8AEAX_SVM_REVISION)
  536. FIELD_FUNC(SVM_N_ASIDS, CPUID_AMD_ID8AEBX_SVM_N_ASIDS)
  537. FIELD_FUNC(INTEL_CORE_COUNT, CPUID_INTEL_ID4EAX_CORE_COUNT)
  538. FIELD_FUNC(AMD_CORE_COUNT, CPUID_AMD_ID88ECX_CORE_COUNT)
  539. FIELD_FUNC(AMD_APICID_COREID_SIZE, CPUID_AMD_ID88ECX_APICID_COREID_SIZE)
  540. FIELD_FUNC(AMD_EXTAPICSPC, CPUID_AMD_ID81ECX_EXTAPICSPC)
  541. FIELD_FUNC(NUM_PMCS, CPUID_INTEL_IDAEAX_NUM_PMCS)
  542. FIELD_FUNC(MWAIT_MIN_SIZE, CPUID_COMMON_ID5EAX_MWAIT_MIN_SIZE)
  543. FIELD_FUNC(MWAIT_MAX_SIZE, CPUID_COMMON_ID5EBX_MWAIT_MAX_SIZE)
  544. FIELD_FUNC(MWAIT_C0_SUBSTATE, CPUID_INTEL_ID5EDX_MWAIT_C0_SUBSTATE)
  545. FIELD_FUNC(MWAIT_C1_SUBSTATE, CPUID_INTEL_ID5EDX_MWAIT_C1_SUBSTATE)
  546. FIELD_FUNC(MWAIT_C2_SUBSTATE, CPUID_INTEL_ID5EDX_MWAIT_C2_SUBSTATE)
  547. FIELD_FUNC(MWAIT_C3_SUBSTATE, CPUID_INTEL_ID5EDX_MWAIT_C3_SUBSTATE)
  548. FIELD_FUNC(MWAIT_C4_SUBSTATE, CPUID_INTEL_ID5EDX_MWAIT_C4_SUBSTATE)
  549. #undef FIELD_FUNC
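/*
 * Illustrative sketch (not part of the original header): the generated
 * accessors operate on a single 32-bit register value, e.g. on %eax from
 * CPUID level 1:
 *
 *    uint32 eax1 = id1.eax;               // assumed to hold CPUID.1:EAX
 *    uint32 family = CPUID_FAMILY(eax1);  // extracts bits 11:8
 *    CPUID_SET_MODEL(&eax1, 0xE);         // rewrites the model field
 */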
  550. /*
  551. * Definitions of various fields' values and more complicated
  552. * macros/functions for reading cpuid fields.
  553. */
  554. /* Effective Intel CPU Families */
  555. #define CPUID_FAMILY_486 4
  556. #define CPUID_FAMILY_P5 5
  557. #define CPUID_FAMILY_P6 6
  558. #define CPUID_FAMILY_P4 15
  559. /* Effective AMD CPU Families */
  560. #define CPUID_FAMILY_5x86 4
  561. #define CPUID_FAMILY_K5 5
  562. #define CPUID_FAMILY_K6 5
  563. #define CPUID_FAMILY_K7 6
  564. #define CPUID_FAMILY_K8 15
  565. #define CPUID_FAMILY_K8L 16
  566. #define CPUID_FAMILY_K8MOBILE 17
  567. #define CPUID_FAMILY_EXTENDED 15
  568. /* Intel model information */
  569. #define CPUID_MODEL_PPRO 1
  570. #define CPUID_MODEL_PII_03 3
  571. #define CPUID_MODEL_PII_05 5
  572. #define CPUID_MODEL_CELERON_06 6
  573. #define CPUID_MODEL_PM_09 9
  574. #define CPUID_MODEL_PM_0D 13
  575. #define CPUID_MODEL_PM_0E 14 // Yonah / Sossaman
  576. #define CPUID_MODEL_CORE_0F 15 // Conroe / Merom
  577. #define CPUID_MODEL_CORE_17 0x17 // Penryn
  578. #define CPUID_MODEL_NEHALEM_1A 0x1a // Nehalem / Gainestown
  579. #define CPUID_MODEL_ATOM_1C 0x1c // Silverthorne / Diamondville
  580. #define CPUID_MODEL_CORE_1D 0x1d // Dunnington
  581. #define CPUID_MODEL_PIII_07 7
  582. #define CPUID_MODEL_PIII_08 8
  583. #define CPUID_MODEL_PIII_0A 10
  584. /*
  585. *----------------------------------------------------------------------
  586. *
  587. * CPUID_IsVendor{AMD,Intel} --
  588. *
  589. * Determines if the vendor string in cpuid id0 is from {AMD,Intel}.
  590. *
  591. * Results:
  592. * True iff vendor string is CPUID_{AMD,INTEL}_VENDOR_STRING
  593. *
  594. * Side effects:
  595. * None.
  596. *
  597. *----------------------------------------------------------------------
  598. */
  599. static INLINE Bool
  600. CPUID_IsRawVendor(CPUIDRegs *id0, const char* vendor)
  601. {
  602. // hard to get strcmp() in some environments, so do it in the raw
  603. return (id0->ebx == *(const uint32 *) (vendor + 0) &&
  604. id0->ecx == *(const uint32 *) (vendor + 4) &&
  605. id0->edx == *(const uint32 *) (vendor + 8));
  606. }
  607. static INLINE Bool
  608. CPUID_IsVendorAMD(CPUIDRegs *id0)
  609. {
  610. return CPUID_IsRawVendor(id0, CPUID_AMD_VENDOR_STRING);
  611. }
  612. static INLINE Bool
  613. CPUID_IsVendorIntel(CPUIDRegs *id0)
  614. {
  615. return CPUID_IsRawVendor(id0, CPUID_INTEL_VENDOR_STRING);
  616. }
  617. static INLINE uint32
  618. CPUID_EFFECTIVE_FAMILY(uint32 v) /* %eax from CPUID with %eax=1. */
  619. {
  620. return CPUID_FAMILY(v) +
  621. (CPUID_FAMILY(v) == CPUID_FAMILY_EXTENDED ? CPUID_EXTENDED_FAMILY(v) : 0);
  622. }
  623. /* Normally only used when FAMILY==CPUID_FAMILY_EXTENDED, but Intel is
  624. * now using the extended model field for FAMILY==CPUID_FAMILY_P6 to
  625. * refer to the newer Core2 CPUs
  626. */
  627. static INLINE uint32
  628. CPUID_EFFECTIVE_MODEL(uint32 v) /* %eax from CPUID with %eax=1. */
  629. {
  630. return CPUID_MODEL(v) + (CPUID_EXTENDED_MODEL(v) << 4);
  631. }
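/*
 * Illustrative worked example (not part of the original header): for a
 * CPUID.1:EAX value of 0x000106A5 (family 6, extended model 1, model 0xA,
 * stepping 5):
 *
 *    CPUID_EFFECTIVE_FAMILY(0x000106A5) == 6       (family != 0xF)
 *    CPUID_EFFECTIVE_MODEL(0x000106A5)  == 0x1A    (0xA + (1 << 4))
 *
 * which matches CPUID_MODEL_NEHALEM_1A above.
 */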
  632. /*
  633. * Notice that CPUID families for Intel and AMD overlap. The following macros
  634. * should only be used AFTER the manufacturer has been established (through
  635. * the use of CPUID standard function 0).
  636. */
  637. static INLINE Bool
  638. CPUID_FAMILY_IS_486(uint32 _eax)
  639. {
  640. return CPUID_EFFECTIVE_FAMILY(_eax) == CPUID_FAMILY_486;
  641. }
  642. static INLINE Bool
  643. CPUID_FAMILY_IS_P5(uint32 _eax)
  644. {
  645. return CPUID_EFFECTIVE_FAMILY(_eax) == CPUID_FAMILY_P5;
  646. }
  647. static INLINE Bool
  648. CPUID_FAMILY_IS_P6(uint32 _eax)
  649. {
  650. return CPUID_EFFECTIVE_FAMILY(_eax) == CPUID_FAMILY_P6;
  651. }
  652. static INLINE Bool
  653. CPUID_FAMILY_IS_PENTIUM4(uint32 _eax)
  654. {
  655. return CPUID_EFFECTIVE_FAMILY(_eax) == CPUID_FAMILY_P4;
  656. }
  657. /*
  658. * Intel Pentium M processors are Yonah/Sossaman or an older P-M
  659. */
  660. static INLINE Bool
  661. CPUID_UARCH_IS_PENTIUM_M(uint32 v) // IN: %eax from CPUID with %eax=1.
  662. {
  663. /* Assumes the CPU manufacturer is Intel. */
  664. return CPUID_FAMILY_IS_P6(v) &&
  665. (CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_09 ||
  666. CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_0D ||
  667. CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_0E);
  668. }
  669. /*
  670. * Intel Core processors are Merom, Conroe, Woodcrest, Clovertown,
  671. * Penryn, Dunnington, Kentsfield, Yorktown, Harpertown, ........
  672. */
  673. static INLINE Bool
  674. CPUID_UARCH_IS_CORE(uint32 v) // IN: %eax from CPUID with %eax=1.
  675. {
  676. uint32 model = CPUID_EFFECTIVE_MODEL(v);
  677. /* Assumes the CPU manufacturer is Intel. */
  678. return CPUID_FAMILY_IS_P6(v) &&
  679. model >= CPUID_MODEL_CORE_0F &&
  680. (model < CPUID_MODEL_NEHALEM_1A ||
  681. model == CPUID_MODEL_CORE_1D);
  682. }
  683. /*
  684. * Intel Nehalem processors are: Nehalem, Gainestown.
  685. */
  686. static INLINE Bool
  687. CPUID_UARCH_IS_NEHALEM(uint32 v) // IN: %eax from CPUID with %eax=1.
  688. {
  689. /* Assumes the CPU manufacturer is Intel. */
  690. return CPUID_FAMILY_IS_P6(v) &&
  691. CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_NEHALEM_1A;
  692. }
  693. static INLINE Bool
  694. CPUID_FAMILY_IS_K7(uint32 _eax)
  695. {
  696. return CPUID_EFFECTIVE_FAMILY(_eax) == CPUID_FAMILY_K7;
  697. }
  698. static INLINE Bool
  699. CPUID_FAMILY_IS_K8(uint32 _eax)
  700. {
  701. return CPUID_EFFECTIVE_FAMILY(_eax) == CPUID_FAMILY_K8;
  702. }
  703. static INLINE Bool
  704. CPUID_FAMILY_IS_K8EXT(uint32 _eax)
  705. {
  706. /*
  707. * We check for this pattern often enough that it's
  708. * worth a separate function, for syntactic sugar.
  709. */
  710. return CPUID_FAMILY_IS_K8(_eax) &&
  711. CPUID_EXTENDED_MODEL(_eax) != 0;
  712. }
  713. static INLINE Bool
  714. CPUID_FAMILY_IS_K8L(uint32 _eax)
  715. {
  716. return CPUID_EFFECTIVE_FAMILY(_eax) == CPUID_FAMILY_K8L;
  717. }
  718. static INLINE Bool
  719. CPUID_FAMILY_IS_K8MOBILE(uint32 _eax)
  720. {
  721. /* Essentially a K8 (not K8L) part, but with mobile features. */
  722. return CPUID_EFFECTIVE_FAMILY(_eax) == CPUID_FAMILY_K8MOBILE;
  723. }
  724. static INLINE Bool
  725. CPUID_FAMILY_IS_K8STAR(uint32 _eax)
  726. {
  727. /*
  728. * Read function name as "K8*", as in wildcard.
  729. * Matches K8 or K8L or K8MOBILE
  730. */
  731. return CPUID_FAMILY_IS_K8(_eax) || CPUID_FAMILY_IS_K8L(_eax) ||
  732. CPUID_FAMILY_IS_K8MOBILE(_eax);
  733. }
  734. #define CPUID_TYPE_PRIMARY 0
  735. #define CPUID_TYPE_OVERDRIVE 1
  736. #define CPUID_TYPE_SECONDARY 2
  737. #define CPUID_INTEL_ID4EAX_CACHE_TYPE_NULL 0
  738. #define CPUID_INTEL_ID4EAX_CACHE_TYPE_DATA 1
  739. #define CPUID_INTEL_ID4EAX_CACHE_TYPE_INST 2
  740. #define CPUID_INTEL_ID4EAX_CACHE_TYPE_UNIF 3
  741. #define CPUID_INTEL_ID4EAX_CACHE_SELF_INIT 0x00000100
  742. #define CPUID_INTEL_ID4EAX_CACHE_FULLY_ASSOC 0x00000200
  743. /*
  744. * On AMD chips before Opteron and Intel chips before P4 model 3,
  745. * WRMSR(TSC) clears the upper half of the TSC instead of using %edx.
  746. */
  747. static INLINE Bool
  748. CPUID_FullyWritableTSC(Bool isIntel, // IN
  749. uint32 v) // IN: %eax from CPUID with %eax=1.
  750. {
  751. /*
  752. * Returns FALSE if:
  753. * - Intel && P6 (pre-core) or
  754. * - Intel && P4 (model < 3) or
  755. * - !Intel && pre-K8 Opteron
  756. * Otherwise, returns TRUE.
  757. */
  758. return !((isIntel &&
  759. ((CPUID_FAMILY_IS_P6(v) &&
  760. CPUID_EFFECTIVE_MODEL(v) < CPUID_MODEL_PM_0E) ||
  761. (CPUID_FAMILY_IS_PENTIUM4(v) &&
  762. CPUID_EFFECTIVE_MODEL(v) < 3))) ||
  763. (!isIntel &&
  764. CPUID_FAMILY(v) < CPUID_FAMILY_K8));
  765. }
  766. /*
  767. * For certain AMD processors, an lfence instruction is necessary at various
  768. * places to ensure ordering.
  769. */
  770. static INLINE Bool
  771. CPUID_VendorRequiresFence(CpuidVendors vendor)
  772. {
  773. return vendor == CPUID_VENDOR_AMD;
  774. }
  775. static INLINE Bool
  776. CPUID_VersionRequiresFence(uint32 version)
  777. {
  778. return CPUID_EFFECTIVE_FAMILY(version) == CPUID_FAMILY_K8 &&
  779. CPUID_EFFECTIVE_MODEL(version) < 0x40;
  780. }
  781. static INLINE Bool
  782. CPUID_ID0RequiresFence(CPUIDRegs *id0)
  783. {
  784. if (id0->eax == 0) {
  785. return FALSE;
  786. }
  787. return CPUID_IsVendorAMD(id0);
  788. }
  789. static INLINE Bool
  790. CPUID_ID1RequiresFence(CPUIDRegs *id1)
  791. {
  792. return CPUID_VersionRequiresFence(id1->eax);
  793. }
  794. static INLINE Bool
  795. CPUID_RequiresFence(CpuidVendors vendor, // IN
  796. uint32 version) // IN: %eax from CPUID with %eax=1.
  797. {
  798. return CPUID_VendorRequiresFence(vendor) &&
  799. CPUID_VersionRequiresFence(version);
  800. }
  801. /*
  802. *----------------------------------------------------------------------
  803. *
  804. * CPUID_CountsCPUIDAsBranch --
  805. *
  806. * Returns TRUE iff the cpuid given counts CPUID as a branch
  807. * (i.e. is a pre-Merom E CPU).
  808. *
  809. *----------------------------------------------------------------------
  810. */
  811. static INLINE Bool
  812. CPUID_CountsCPUIDAsBranch(uint32 v) /* %eax from CPUID with %eax=1 */
  813. {
  814. /*
  815. * CPUID no longer a branch starting with Merom E. Bug 148411.
  816. * Penryn (Extended Model: 1) also has this fixed.
  817. *
  818. * Merom E is: CPUID.1.eax & 0xfff = 0x6f9
  819. */
  820. return !(CPUID_FAMILY_IS_P6(v) &&
  821. (CPUID_EFFECTIVE_MODEL(v) > CPUID_MODEL_CORE_0F ||
  822. (CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_CORE_0F &&
  823. CPUID_STEPPING(v) >= 9)));
  824. }
  825. /*
  826. * On Merom and later Intel chips, not present PDPTEs with reserved bits
  827. * set do not fault with a #GP. See PR# 109120.
  828. */
  829. static INLINE Bool
  830. CPUID_FaultOnNPReservedPDPTE(uint32 v) // IN: %eax from CPUID with %eax=1.
  831. {
  832. return !(CPUID_FAMILY_IS_P6(v) &&
  833. (CPUID_EFFECTIVE_MODEL(v) >= CPUID_MODEL_CORE_0F));
  834. }
  835. /*
  836. * The following low-level functions compute the number of
  837. * cores per cpu. They should be used cautiously because
  838. * they do not necessarily work on all types of CPUs.
  839. * High-level functions that are correct for all CPUs are
  840. * available elsewhere: see lib/cpuidInfo/cpuidInfo.c.
  841. */
  842. static INLINE uint32
  843. CPUID_IntelCoresPerPackage(uint32 v) /* %eax from CPUID with %eax=4 and %ecx=0. */
  844. {
  845. // Note: This is not guaranteed to work on older Intel CPUs.
  846. return 1 + CPUID_INTEL_CORE_COUNT(v);
  847. }
  848. static INLINE uint32
  849. CPUID_AMDCoresPerPackage(uint32 v) /* %ecx from CPUID with %eax=0x80000008. */
  850. {
  851. // Note: This is not guaranteed to work on older AMD CPUs.
  852. return 1 + CPUID_AMD_CORE_COUNT(v);
  853. }
  854. /*
  855. * Hypervisor CPUID space is 0x400000XX.
  856. */
  857. static INLINE Bool
  858. CPUID_IsHypervisorLevel(uint32 level, uint32 *offset)
  859. {
  860. *offset = level & 0xff;
  861. return (level & 0xffffff00) == 0x40000000;
  862. }
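/*
 * Illustrative sketch (not part of the original header): for example,
 * CPUID_IsHypervisorLevel(0x40000010, &off) returns TRUE with off == 0x10,
 * while CPUID_IsHypervisorLevel(0x80000001, &off) returns FALSE.
 */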
  863. #endif