Chameleon

Chameleon Svn Source Tree

Root/trunk/i386/libsaio/cpu.c

1/*
2 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
3 * AsereBLN: 2009: cleanup and bugfix
4 * Bronya: 2015 Improve AMD support, cleanup and bugfix
5 */
6
7#include "config.h"
8#include "libsaio.h"
9#include "platform.h"
10#include "cpu.h"
11#include "bootstruct.h"
12#include "boot.h"
13
#if DEBUG_CPU
#define DBG(x...)	printf(x)
#else
#define DBG(x...)
#endif

/* Round the user-visible CPU frequency to a multiple of 10 MHz. */
#define UI_CPUFREQ_ROUNDING_FACTOR	10000000
/* Global clock-frequency table; cpu_clock_rate_hz / cpu_frequency_hz are
 * filled in by rtc_set_cyc_per_sec() below. */
clock_frequency_info_t gPEClockFrequencyInfo;

/* Dead code kept for reference: 32-bit rdtsc read, superseded by getCycles(). */
//static __unused uint64_t rdtsc32(void)
//{
//	unsigned int lo,hi;
//	__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
//	return ((uint64_t)hi << 32) | lo;
//}
30
31uint64_t getCycles(void)
32{
33#if defined(__ARM_ARCH_7A__)
34 uint32_t r;
35 asm volatile("mrc p15, 0, %0, c9, c13, 0\t\n" : "=r" (r)); /* Read PMCCNTR */
36 return ((uint64_t)r) << 6; /* 1 tick = 64 clocks */
37#elif defined(__x86_64__)
38 unsigned a, d;
39 asm volatile("rdtsc" : "=a" (a), "=d" (d));
40 return ((uint64_t)a) | (((uint64_t)d) << 32);
41#elif defined(__i386__)
42 uint64_t ret;
43 asm volatile("rdtsc": "=A" (ret));
44 return ret;
45#else
46 return 0;
47#endif
48}
49
50/*
51 * timeRDTSC()
52 * This routine sets up PIT counter 2 to count down 1/20 of a second.
53 * It pauses until the value is latched in the counter
54 * and then reads the time stamp counter to return to the caller.
55 */
56static uint64_t timeRDTSC(void)
57{
58intattempts = 0;
59uint32_t latchTime;
60uint64_tsaveTime,intermediate;
61unsigned inttimerValue, lastValue;
62//boolean_tint_enabled;
63/*
64 * Table of correction factors to account for
65 * - timer counter quantization errors, and
66 * - undercounts 0..5
67 */
68#define SAMPLE_CLKS_EXACT(((double) CLKNUM) / 20.0)
69#define SAMPLE_CLKS_INT((int) CLKNUM / 20)
70#define SAMPLE_NSECS(2000000000LL)
71#define SAMPLE_MULTIPLIER(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
72#define ROUND64(x)((uint64_t)((x) + 0.5))
73uint64_tscale[6] = {
74ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
75ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
76ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
77ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
78ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
79ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
80};
81
82//int_enabled = ml_set_interrupts_enabled(false);
83
84restart:
85if (attempts >= 3) // increase to up to 9 attempts.
86{
87// This will flash-reboot. TODO: Use tscPanic instead.
88//printf("Timestamp counter calibation failed with %d attempts\n", attempts);
89}
90attempts++;
91enable_PIT2();// turn on PIT2
92set_PIT2(0);// reset timer 2 to be zero
93latchTime = getCycles();// get the time stamp to time
94latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
95set_PIT2(SAMPLE_CLKS_INT);// set up the timer for (almost) 1/20th a second
96saveTime = getCycles();// now time how long a 20th a second is...
97get_PIT2(&lastValue);
98get_PIT2(&lastValue);// read twice, first value may be unreliable
99do {
100intermediate = get_PIT2(&timerValue);
101if (timerValue > lastValue)
102{
103// Timer wrapped
104set_PIT2(0);
105disable_PIT2();
106goto restart;
107}
108lastValue = timerValue;
109} while (timerValue > 5);
110//printf("timerValue %d\n",timerValue);
111//printf("intermediate 0x%016llX\n",intermediate);
112//printf("saveTime 0x%016llX\n",saveTime);
113
114intermediate -= saveTime;// raw count for about 1/20 second
115intermediate *= scale[timerValue];// rescale measured time spent
116intermediate /= SAMPLE_NSECS;// so its exactly 1/20 a second
117intermediate += latchTime;// add on our save fudge
118
119set_PIT2(0);// reset timer 2 to be zero
120disable_PIT2();// turn off PIT 2
121
122//ml_set_interrupts_enabled(int_enabled);
123return intermediate;
124}
125
/*
 * DFE: Measures the TSC frequency in Hz (64-bit) using the 8254 PIT.
 * Returns 0 if no run produced a usable measurement.
 */
static uint64_t __unused measure_tsc_frequency(void)
{
	uint64_t tscStart;
	uint64_t tscEnd;
	uint64_t tscDelta = 0xffffffffffffffffULL;	/* running minimum delta */
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop several times (10 here) to make sure
	 * the cache is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of TSC ticks that occur while waiting for the timer to
	 * expire. That theoretically helps avoid inconsistencies when
	 * running under a VM if the TSC is not virtualized and the host
	 * steals time. The TSC is normally virtualized for VMware.
	 */
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		tscStart = getCycles();
		pollCount = poll_PIT2_gate();
		tscEnd = getCycles();
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1)
		{
			continue;
		}
		/* The TSC must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the TSC delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
		{
			continue;
		}
		// tscDelta = MIN(tscDelta, (tscEnd - tscStart))
		if ( (tscEnd - tscStart) < tscDelta )
		{
			tscDelta = tscEnd - tscStart;
		}
	}
	/* tscDelta is now the least number of TSC ticks the processor made in
	 * a timespan of 0.03 s (e.g. 30 milliseconds)
	 * Linux thus divides by 30 which gives the answer in kiloHertz because
	 * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
	 * Hz so we need to convert our milliseconds to seconds. Since we're
	 * dividing by the milliseconds, we simply multiply by 1000.
	 */

	/* Unlike linux, we're not limited to 32-bit, but we do need to take care
	 * that we're going to multiply by 1000 first so we do need at least some
	 * arithmetic headroom. For now, 32-bit should be enough.
	 * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
	 */
	if (tscDelta > (1ULL<<32))
	{
		/* No run succeeded (delta still at sentinel) or CPU absurdly fast. */
		retval = 0;
	}
	else
	{
		retval = tscDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}
197
198static uint64_trtc_set_cyc_per_sec(uint64_t cycles);
199#define RTC_FAST_DENOM0xFFFFFFFF
200
201inline static uint32_t
202create_mul_quant_GHZ(int shift, uint32_t quant)
203{
204return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant);
205}
206
207struct{
208mach_timespec_tcalend_offset;
209boolean_tcalend_is_set;
210
211int64_tcalend_adjtotal;
212int32_tcalend_adjdelta;
213
214uint32_tboottime;
215
216mach_timebase_info_data_ttimebase_const;
217
218decl_simple_lock_data(,lock)/* real-time clock device lock */
219} rtclock;
220
221uint32_trtc_quant_shift;/* clock to nanos right shift */
222uint32_trtc_quant_scale;/* clock to nanos multiplier */
223uint64_trtc_cyc_per_sec;/* processor cycles per sec */
224uint64_trtc_cycle_count;/* clocks in 1/20th second */
225
/*
 * rtc_set_cyc_per_sec() - derive timebase constants and the reported CPU
 * frequency from 'cycles', the TSC count measured over 1/20 second.
 * Returns the full cycles-per-second value (cycles * 20).
 */
static uint64_t rtc_set_cyc_per_sec(uint64_t cycles)
{

	if (cycles > (NSEC_PER_SEC/20))
	{
		// we can use just a "fast" multiply to get nanos
		rtc_quant_shift = 32;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, (uint32_t)cycles);
		rtclock.timebase_const.numer = rtc_quant_scale; // timeRDTSC is 1/20
		rtclock.timebase_const.denom = (uint32_t)RTC_FAST_DENOM;
	}
	else
	{
		// slower CPU: smaller shift keeps the multiplier in 32 bits
		rtc_quant_shift = 26;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, (uint32_t)cycles);
		rtclock.timebase_const.numer = NSEC_PER_SEC/20; // timeRDTSC is 1/20
		rtclock.timebase_const.denom = (uint32_t)cycles;
	}
	rtc_cyc_per_sec = cycles*20;	// multiply it by 20 and we are done..
	// BUT we also want to calculate...

	// round to the nearest UI_CPUFREQ_ROUNDING_FACTOR for display
	cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
		 / UI_CPUFREQ_ROUNDING_FACTOR)
		* UI_CPUFREQ_ROUNDING_FACTOR;

	/*
	 * Set current measured speed.
	 */
	if (cycles >= 0x100000000ULL)
	{
		// saturate the 32-bit clock-rate field
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	}
	else
	{
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	//printf("[RTCLOCK_1] frequency %llu (%llu) %llu\n", cycles, rtc_cyc_per_sec,timeRDTSC() * 20);
	return(rtc_cyc_per_sec);
}
267
268// Bronya C1E fix
269static void post_startup_cpu_fixups(void)
270{
271/*
272 * Some AMD processors support C1E state. Entering this state will
273 * cause the local APIC timer to stop, which we can't deal with at
274 * this time.
275 */
276
277uint64_t reg;
278verbose("\tLooking to disable C1E if is already enabled by the BIOS:\n");
279reg = rdmsr64(MSR_AMD_INT_PENDING_CMP_HALT);
280/* Disable C1E state if it is enabled by the BIOS */
281if ((reg >> AMD_ACTONCMPHALT_SHIFT) & AMD_ACTONCMPHALT_MASK)
282{
283reg &= ~(AMD_ACTONCMPHALT_MASK << AMD_ACTONCMPHALT_SHIFT);
284wrmsr64(MSR_AMD_INT_PENDING_CMP_HALT, reg);
285verbose("\tC1E disabled!\n");
286}
287}
288
/*
 * Large memcpy() into MMIO space can take longer than 1 clock tick (55ms).
 * The timer interrupt must remain responsive when updating VRAM so
 * as not to miss timer interrupts during countdown().
 *
 * If interrupts are enabled, use normal memcpy.
 *
 * If interrupts are disabled, breaks memcpy down
 * into 128K chunks, times itself and makes a bios
 * real-mode call every 25 msec in order to service
 * pending interrupts.
 *
 * -- zenith432, May 22nd, 2016
 */
void *memcpy_interruptible(void *dst, const void *src, size_t len)
{
	uint64_t tscFreq, lastTsc;
	uint32_t eflags, threshold;
	ptrdiff_t offset;
	const size_t chunk = 131072U;	// 128K

	if (len <= chunk)
	{
		/*
		 * Short memcpy - use normal.
		 */
		return memcpy(dst, src, len);
	}

	/* Read EFLAGS (32-bit form; this code path is i386-only). */
	__asm__ volatile("pushfl; popl %0" : "=r"(eflags));
	if (eflags & 0x200U)	/* IF (interrupt-enable flag) set */
	{
		/*
		 * Interrupts are enabled - use normal memcpy.
		 */
		return memcpy(dst, src, len);
	}

	tscFreq = Platform.CPU.TSCFrequency;
	if ((uint32_t) (tscFreq >> 32))
	{
		/*
		 * If TSC Frequency >= 2 ** 32, use a default time threshold.
		 */
		threshold = (~0U) / 40U;	/* ~25 ms worth of ticks */
	}
	else if (!(uint32_t) tscFreq)
	{
		/*
		 * If early on and TSC Frequency hasn't been estimated yet,
		 * use normal memcpy.
		 */
		return memcpy(dst, src, len);
	}
	else
	{
		threshold = ((uint32_t) tscFreq) / 40U;	/* ticks per 25 ms */
	}

	/*
	 * Do the work: copy a chunk at a time; after each 25 ms of elapsed
	 * TSC time, visit real mode so pending interrupts get serviced.
	 */
	offset = 0;
	lastTsc = getCycles();
	do
	{
		(void) memcpy((char*) dst + offset, (const char*) src + offset, chunk);
		offset += (ptrdiff_t) chunk;
		len -= chunk;
		if ((getCycles() - lastTsc) < threshold)
		{
			continue;
		}
		(void) readKeyboardStatus();	// visit real-mode
		lastTsc = getCycles();
	}
	while (len > chunk);
	/* Copy the final partial chunk (loop exits with 0 < len <= chunk). */
	if (len)
	{
		(void) memcpy((char*) dst + offset, (const char*) src + offset, len);
	}
	return dst;
}
372
373/*
374 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
375 * - multi. is read from a specific MSR. In the case of Intel, there is:
376 * a max multi. (used to calculate the FSB freq.),
377 * and a current multi. (used to calculate the CPU freq.)
378 * - busFrequency = tscFrequency / multi
379 * - cpuFrequency = busFrequency * multi
380 */
381
/* Decimal powers: */
#define kilo (1000ULL)
#define Mega (kilo * kilo)
#define Giga (kilo * Mega)
#define Tera (kilo * Giga)
#define Peta (kilo * Tera)

/* Assemble a 64-bit value from two 32-bit halves. */
#define quad(hi,lo)	(((uint64_t)(hi)) << 32 | (lo))
390
391void get_cpuid(PlatformInfo_t *p)
392{
393
394charstr[128];
395uint32_treg[4];
396char*s= 0;
397
398do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]); // MaxFn, Vendor
399do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]); // Signature, stepping, features
400do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]); // TLB/Cache/Prefetch
401
402do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]); // S/N
403do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]); // Get the max extended cpuid
404
405if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8)
406{
407do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
408do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
409}
410else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1)
411{
412do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
413}
414
415// ==============================================================
416
417/* get BrandString (if supported) */
418/* Copyright: from Apple's XNU cpuid.c */
419if (p->CPU.CPUID[CPUID_80][0] > 0x80000004)
420{
421bzero(str, 128);
422/*
423 * The BrandString 48 bytes (max), guaranteed to
424 * be NULL terminated.
425 */
426do_cpuid(0x80000002, reg); // Processor Brand String
427memcpy(&str[0], (char *)reg, 16);
428
429
430do_cpuid(0x80000003, reg); // Processor Brand String
431memcpy(&str[16], (char *)reg, 16);
432do_cpuid(0x80000004, reg); // Processor Brand String
433memcpy(&str[32], (char *)reg, 16);
434for (s = str; *s != '\0'; s++)
435{
436if (*s != ' ')
437{
438break;
439}
440}
441strlcpy(p->CPU.BrandString, s, 48);
442
443if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), (unsigned)strlen(CPU_STRING_UNKNOWN) + 1)))
444{
445/*
446 * This string means we have a firmware-programmable brand string,
447 * and the firmware couldn't figure out what sort of CPU we have.
448 */
449p->CPU.BrandString[0] = '\0';
450}
451p->CPU.BrandString[47] = '\0';
452//DBG("\tBrandstring = %s\n", p->CPU.BrandString);
453}
454
455// ==============================================================
456
457switch(p->CPU.BrandString[0])
458{
459case 'A':
460/* AMD Processors */
461// The cache information is only in ecx and edx so only save
462// those registers
463
464do_cpuid(5, p->CPU.CPUID[CPUID_5]); // Monitor/Mwait
465
466do_cpuid(0x80000005, p->CPU.CPUID[CPUID_85]); // TLB/Cache/Prefetch
467do_cpuid(0x80000006, p->CPU.CPUID[CPUID_86]); // TLB/Cache/Prefetch
468do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
469do_cpuid(0x8000001E, p->CPU.CPUID[CPUID_81E]);
470
471break;
472
473case 'G':
474/* Intel Processors */
475do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]); // Cache Index for Inte
476
477if (p->CPU.CPUID[CPUID_0][0] >= 0x5)// Monitor/Mwait
478{
479do_cpuid(5, p->CPU.CPUID[CPUID_5]);
480}
481
482if (p->CPU.CPUID[CPUID_0][0] >= 6)// Thermal/Power
483{
484do_cpuid(6, p->CPU.CPUID[CPUID_6]);
485}
486
487break;
488}
489}
490
491void scan_cpu(PlatformInfo_t *p)
492{
493verbose("[ CPU INFO ]\n");
494get_cpuid(p);
495
496uint64_tbusFCvtt2n;
497uint64_ttscFCvtt2n;
498uint64_ttscFreq= 0;
499uint64_tbusFrequency= 0;
500uint64_tcpuFrequency= 0;
501uint64_tmsr= 0;
502uint64_tflex_ratio= 0;
503uint64_tcpuid_features;
504
505uint32_tmax_ratio= 0;
506uint32_tmin_ratio= 0;
507uint32_treg[4];
508uint32_tcores_per_package= 0;
509uint32_tlogical_per_package= 1;
510uint32_tthreads_per_core= 1;
511
512uint8_tbus_ratio_max= 0;
513uint8_tbus_ratio_min= 0;
514uint32_tcurrdiv= 0;
515uint32_tcurrcoef= 0;
516uint8_tmaxdiv= 0;
517uint8_tmaxcoef= 0;
518uint8_tpic0_mask= 0;
519uint32_tcpuMultN2= 0;
520
521const char*newratio;
522
523intlen= 0;
524intmyfsb= 0;
525inti= 0;
526
527
528/* http://www.flounder.com/cpuid_explorer2.htm
529 EAX (Intel):
530 31 28 27 20 19 16 1514 1312 11 8 7 4 3 0
531 +--------+----------------+--------+----+----+--------+--------+--------+
532 |########|Extended family |Extmodel|####|type|familyid| model |stepping|
533 +--------+----------------+--------+----+----+--------+--------+--------+
534
535 EAX (AMD):
536 31 28 27 20 19 16 1514 1312 11 8 7 4 3 0
537 +--------+----------------+--------+----+----+--------+--------+--------+
538 |########|Extended family |Extmodel|####|####|familyid| model |stepping|
539 +--------+----------------+--------+----+----+--------+--------+--------+
540*/
541///////////////////-- MaxFn,Vendor --////////////////////////
542p->CPU.Vendor= p->CPU.CPUID[CPUID_0][1];
543
544///////////////////-- Signature, stepping, features -- //////
545cpuid_features = quad(p->CPU.CPUID[CPUID_1][ecx], p->CPU.CPUID[CPUID_1][edx]);
546if (bit(28) & p->CPU.CPUID[CPUID_1][edx]) // HTT/Multicore
547{
548logical_per_package = bitfield(p->CPU.CPUID[CPUID_1][ebx], 23, 16);
549}
550else
551{
552logical_per_package = 1;
553}
554
555p->CPU.Signature= p->CPU.CPUID[CPUID_1][0];
556p->CPU.Stepping= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);// stepping = cpu_feat_eax & 0xF;
557p->CPU.Model= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);// model = (cpu_feat_eax >> 4) & 0xF;
558p->CPU.Family= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);// family = (cpu_feat_eax >> 8) & 0xF;
559//p->CPU.Type= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 13, 12);// type = (cpu_feat_eax >> 12) & 0x3;
560p->CPU.ExtModel= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);// ext_model = (cpu_feat_eax >> 16) & 0xF;
561p->CPU.ExtFamily= (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);// ext_family = (cpu_feat_eax >> 20) & 0xFF;
562
563if (p->CPU.Family == 0x0f)
564{
565p->CPU.Family += p->CPU.ExtFamily;
566}
567
568if (p->CPU.Family == 0x0f || p->CPU.Family == 0x06)
569{
570p->CPU.Model += (p->CPU.ExtModel << 4);
571}
572
573switch (p->CPU.Vendor)
574{
575case CPUID_VENDOR_INTEL:
576{
577/* Based on Apple's XNU cpuid.c - Deterministic cache parameters */
578if ((p->CPU.CPUID[CPUID_0][eax] > 3) && (p->CPU.CPUID[CPUID_0][eax] < 0x80000000))
579{
580for (i = 0; i < 0xFF; i++) // safe loop
581{
582do_cpuid2(0x00000004, i, reg); // AX=4: Fn, CX=i: cache index
583if (bitfield(reg[eax], 4, 0) == 0)
584{
585break;
586}
587cores_per_package = bitfield(reg[eax], 31, 26) + 1;
588}
589}
590
591if (i > 0)
592{
593cores_per_package = bitfield(p->CPU.CPUID[CPUID_4][eax], 31, 26) + 1; // i = cache index
594threads_per_core = bitfield(p->CPU.CPUID[CPUID_4][eax], 25, 14) + 1;
595}
596
597if (cores_per_package == 0)
598{
599cores_per_package = 1;
600}
601
602switch (p->CPU.Model)
603{
604case CPUID_MODEL_NEHALEM: // Intel Core i7 LGA1366 (45nm)
605case CPUID_MODEL_FIELDS: // Intel Core i5, i7 LGA1156 (45nm)
606case CPUID_MODEL_CLARKDALE: // Intel Core i3, i5, i7 LGA1156 (32nm)
607case CPUID_MODEL_NEHALEM_EX:
608case CPUID_MODEL_JAKETOWN:
609case CPUID_MODEL_SANDYBRIDGE:
610case CPUID_MODEL_IVYBRIDGE:
611case CPUID_MODEL_IVYBRIDGE_XEON:
612case CPUID_MODEL_HASWELL_U5:
613case CPUID_MODEL_HASWELL:
614case CPUID_MODEL_HASWELL_SVR:
615case CPUID_MODEL_HASWELL_ULT:
616case CPUID_MODEL_HASWELL_ULX:
617case CPUID_MODEL_BROADWELL_HQ:
618case CPUID_MODEL_BRASWELL:
619case CPUID_MODEL_AVOTON:
620case CPUID_MODEL_SKYLAKE:
621case CPUID_MODEL_BRODWELL_SVR:
622case CPUID_MODEL_BRODWELL_MSVR:
623case CPUID_MODEL_KNIGHT:
624case CPUID_MODEL_ANNIDALE:
625case CPUID_MODEL_GOLDMONT:
626case CPUID_MODEL_VALLEYVIEW:
627case CPUID_MODEL_SKYLAKE_S:
628case CPUID_MODEL_SKYLAKE_AVX:
629case CPUID_MODEL_CANNONLAKE:
630msr = rdmsr64(MSR_CORE_THREAD_COUNT); // 0x35
631p->CPU.NoCores= (uint32_t)bitfield((uint32_t)msr, 31, 16);
632p->CPU.NoThreads= (uint32_t)bitfield((uint32_t)msr, 15, 0);
633break;
634
635case CPUID_MODEL_DALES:
636case CPUID_MODEL_WESTMERE: // Intel Core i7 LGA1366 (32nm) 6 Core
637case CPUID_MODEL_WESTMERE_EX:
638msr = rdmsr64(MSR_CORE_THREAD_COUNT);
639p->CPU.NoCores= (uint32_t)bitfield((uint32_t)msr, 19, 16);
640p->CPU.NoThreads= (uint32_t)bitfield((uint32_t)msr, 15, 0);
641break;
642case CPUID_MODEL_ATOM_3700:
643p->CPU.NoCores= 4;
644p->CPU.NoThreads= 4;
645break;
646case CPUID_MODEL_ATOM:
647p->CPU.NoCores= 2;
648p->CPU.NoThreads= 2;
649break;
650default:
651p->CPU.NoCores= 0;
652break;
653}
654
655// workaround for Xeon Harpertown and Yorkfield
656if ((p->CPU.Model == CPUID_MODEL_PENRYN) &&
657(p->CPU.NoCores== 0))
658{
659if ((strstr(p->CPU.BrandString, "X54")) ||
660(strstr(p->CPU.BrandString, "E54")) ||
661(strstr(p->CPU.BrandString, "W35")) ||
662(strstr(p->CPU.BrandString, "X34")) ||
663(strstr(p->CPU.BrandString, "X33")) ||
664(strstr(p->CPU.BrandString, "L33")) ||
665(strstr(p->CPU.BrandString, "X32")) ||
666(strstr(p->CPU.BrandString, "L3426")) ||
667(strstr(p->CPU.BrandString, "L54")))
668{
669p->CPU.NoCores= 4;
670p->CPU.NoThreads= 4;
671} else if (strstr(p->CPU.BrandString, "W36")) {
672p->CPU.NoCores= 6;
673p->CPU.NoThreads= 6;
674} else { //other Penryn and Wolfdale
675p->CPU.NoCores= 0;
676p->CPU.NoThreads= 0;
677}
678}
679
680if (p->CPU.NoCores == 0)
681{
682p->CPU.NoCores= cores_per_package;
683p->CPU.NoThreads= logical_per_package;
684}
685
686// MSR is *NOT* available on the Intel Atom CPU
687// workaround for N270. I don't know why it detected wrong
688if ((p->CPU.Model == CPUID_MODEL_ATOM) && (strstr(p->CPU.BrandString, "270")))
689{
690p->CPU.NoCores= 1;
691p->CPU.NoThreads= 2;
692}
693
694// workaround for Quad
695if ( strstr(p->CPU.BrandString, "Quad") )
696{
697p->CPU.NoCores= 4;
698p->CPU.NoThreads= 4;
699}
700}
701
702break;
703
704case CPUID_VENDOR_AMD:
705{
706post_startup_cpu_fixups();
707
708if (p->CPU.ExtFamily < 0x8)
709{
710cores_per_package = bitfield(p->CPU.CPUID[CPUID_88][ecx], 7, 0) + 1;
711//threads_per_core = cores_per_package;
712}
713else
714
715// Bronya : test for SMT
716// Properly calculate number of cores on AMD Zen
717// TODO: Check MSR for SMT
718if (p->CPU.ExtFamily >= 0x8)
719{
720uint64_t cores = 0;
721uint64_t logical = 0;
722
723cores = bitfield(p->CPU.CPUID[CPUID_81E][ebx], 7, 0); // cores
724logical = bitfield(p->CPU.CPUID[CPUID_81E][ebx], 15, 8) + 1; // 2
725
726cores_per_package = (bitfield(p->CPU.CPUID[CPUID_88][ecx], 7, 0) + 1) / logical; //8 cores
727
728//threads_per_core = cores_per_package;
729
730}
731
732if (cores_per_package == 0)
733{
734cores_per_package = 1;
735}
736
737p->CPU.NoCores= cores_per_package;
738p->CPU.NoThreads= logical_per_package;
739
740if (p->CPU.NoCores == 0)
741{
742p->CPU.NoCores = 1;
743p->CPU.NoThreads= 1;
744}
745}
746break;
747
748default :
749stop("Unsupported CPU detected! System halted.");
750}
751
752/* setup features */
753if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0)
754{
755p->CPU.Features |= CPU_FEATURE_MMX;
756}
757
758if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0)
759{
760p->CPU.Features |= CPU_FEATURE_SSE;
761}
762
763if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0)
764{
765p->CPU.Features |= CPU_FEATURE_SSE2;
766}
767
768if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0)
769{
770p->CPU.Features |= CPU_FEATURE_SSE3;
771}
772
773if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0)
774{
775p->CPU.Features |= CPU_FEATURE_SSE41;
776}
777
778if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0)
779{
780p->CPU.Features |= CPU_FEATURE_SSE42;
781}
782
783if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0)
784{
785p->CPU.Features |= CPU_FEATURE_MSR;
786}
787
788if ((p->CPU.NoThreads > p->CPU.NoCores))
789{
790p->CPU.Features |= CPU_FEATURE_HTT;
791}
792
793if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0)
794{
795p->CPU.Features |= CPU_FEATURE_EM64T;
796}
797
798pic0_mask = inb(0x21U);
799outb(0x21U, 0xFFU); // mask PIC0 interrupts for duration of timing tests
800
801uint64_t cycles;
802cycles = timeRDTSC();
803tscFreq = rtc_set_cyc_per_sec(cycles);
804DBG("cpu freq classic = 0x%016llx\n", tscFreq);
805// if usual method failed
806if ( tscFreq < 1000 )//TEST
807{
808tscFreq = measure_tsc_frequency();//timeRDTSC() * 20;//measure_tsc_frequency();
809// DBG("cpu freq timeRDTSC = 0x%016llx\n", tscFrequency);
810}
811
812if (p->CPU.Vendor==CPUID_VENDOR_INTEL && ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)))
813{
814int intelCPU = p->CPU.Model;
815if (p->CPU.Family == 0x06)
816{
817/* Nehalem CPU model */
818switch (p->CPU.Model)
819{
820case CPUID_MODEL_NEHALEM:
821case CPUID_MODEL_FIELDS:
822case CPUID_MODEL_CLARKDALE:
823case CPUID_MODEL_DALES:
824case CPUID_MODEL_WESTMERE:
825case CPUID_MODEL_NEHALEM_EX:
826case CPUID_MODEL_WESTMERE_EX:
827/* --------------------------------------------------------- */
828case CPUID_MODEL_SANDYBRIDGE:
829case CPUID_MODEL_JAKETOWN:
830case CPUID_MODEL_IVYBRIDGE_XEON:
831case CPUID_MODEL_IVYBRIDGE:
832case CPUID_MODEL_ATOM_3700:
833case CPUID_MODEL_HASWELL:
834case CPUID_MODEL_HASWELL_U5:
835case CPUID_MODEL_HASWELL_SVR:
836
837case CPUID_MODEL_HASWELL_ULT:
838case CPUID_MODEL_HASWELL_ULX:
839case CPUID_MODEL_BROADWELL_HQ:
840case CPUID_MODEL_SKYLAKE_S:
841/* --------------------------------------------------------- */
842msr = rdmsr64(MSR_PLATFORM_INFO);
843DBG("msr(%d): platform_info %08llx\n", __LINE__, bitfield(msr, 31, 0));
844bus_ratio_max = bitfield(msr, 15, 8);
845bus_ratio_min = bitfield(msr, 47, 40); //valv: not sure about this one (Remarq.1)
846msr = rdmsr64(MSR_FLEX_RATIO);
847DBG("msr(%d): flex_ratio %08llx\n", __LINE__, bitfield(msr, 31, 0));
848if (bitfield(msr, 16, 16))
849{
850flex_ratio = bitfield(msr, 15, 8);
851// bcc9: at least on the gigabyte h67ma-ud2h,
852// where the cpu multipler can't be changed to
853// allow overclocking, the flex_ratio msr has unexpected (to OSX)
854// contents.These contents cause mach_kernel to
855// fail to compute the bus ratio correctly, instead
856// causing the system to crash since tscGranularity
857// is inadvertently set to 0.
858
859if (flex_ratio == 0)
860{
861// Clear bit 16 (evidently the presence bit)
862wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
863msr = rdmsr64(MSR_FLEX_RATIO);
864DBG("CPU: Unusable flex ratio detected. Patched MSR now %08llx\n", bitfield(msr, 31, 0));
865}
866else
867{
868if (bus_ratio_max > flex_ratio)
869{
870bus_ratio_max = flex_ratio;
871}
872}
873}
874
875if (bus_ratio_max)
876{
877busFrequency = (tscFreq / bus_ratio_max);
878}
879
880//valv: Turbo Ratio Limit
881if ((intelCPU != 0x2e) && (intelCPU != 0x2f))
882{
883msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
884
885cpuFrequency = bus_ratio_max * busFrequency;
886max_ratio = bus_ratio_max * 10;
887}
888else
889{
890cpuFrequency = tscFreq;
891}
892
893if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4))
894{
895max_ratio = atoi(newratio);
896max_ratio = (max_ratio * 10);
897if (len >= 3)
898{
899max_ratio = (max_ratio + 5);
900}
901
902verbose("\tBus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);
903
904// extreme overclockers may love 320 ;)
905if ((max_ratio >= min_ratio) && (max_ratio <= 320))
906{
907cpuFrequency = (busFrequency * max_ratio) / 10;
908if (len >= 3)
909{
910maxdiv = 1;
911}
912else
913{
914maxdiv = 0;
915}
916}
917else
918{
919max_ratio = (bus_ratio_max * 10);
920}
921}
922//valv: to be uncommented if Remarq.1 didn't stick
923//if (bus_ratio_max > 0) bus_ratio = flex_ratio;
924p->CPU.MaxRatio = max_ratio;
925p->CPU.MinRatio = min_ratio;
926
927myfsb = busFrequency / 1000000;
928verbose("\tSticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio/10); // Bungo: fixed wrong Bus-Ratio readout
929currcoef = bus_ratio_max;
930
931break;
932
933default:
934msr = rdmsr64(MSR_IA32_PERF_STATUS);
935DBG("msr(%d): ia32_perf_stat 0x%08llx\n", __LINE__, bitfield(msr, 31, 0));
936currcoef = bitfield(msr, 12, 8); // Bungo: reverted to 2263 state because of wrong old CPUs freq. calculating
937// Non-integer bus ratio for the max-multi
938maxdiv = bitfield(msr, 46, 46);
939// Non-integer bus ratio for the current-multi (undocumented)
940currdiv = bitfield(msr, 14, 14);
941
942// This will always be model >= 3
943if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
944{
945/* On these models, maxcoef defines TSC freq */
946maxcoef = bitfield(msr, 44, 40);
947}
948else
949{
950// On lower models, currcoef defines TSC freq
951// XXX
952maxcoef = currcoef;
953}
954
955if (!currcoef)
956{
957currcoef = maxcoef;
958}
959
960if (maxcoef)
961{
962if (maxdiv)
963{
964busFrequency = ((tscFreq * 2) / ((maxcoef * 2) + 1));
965}
966else
967{
968busFrequency = (tscFreq / maxcoef);
969}
970
971if (currdiv)
972{
973cpuFrequency = (busFrequency * ((currcoef * 2) + 1) / 2);
974}
975else
976{
977cpuFrequency = (busFrequency * currcoef);
978}
979
980DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
981}
982break;
983}
984}
985// Mobile CPU
986if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28))
987{
988p->CPU.Features |= CPU_FEATURE_MOBILE;
989}
990}
991
992else if (p->CPU.Vendor==CPUID_VENDOR_AMD)
993{
994switch(p->CPU.Family)
995{
996case 0xF: /* K8 */
997{
998uint64_t fidvid = 0;
999uint64_t cpuMult;
1000uint64_t cpuFid;
1001
1002fidvid = rdmsr64(AMD_K8_PERF_STS);
1003cpuFid = bitfield(fidvid, 5, 0);
1004
1005cpuMult = (cpuFid + 0x8) * 10 / 2;
1006currcoef = cpuMult;
1007
1008cpuMultN2 = (fidvid & (uint64_t)bit(0));
1009currdiv = cpuMultN2;
1010/****** Addon END ******/
1011}
1012break;
1013
1014case 0x10: /*** AMD Family 10h ***/
1015{
1016
1017uint64_t prfsts = 0;
1018uint64_t cpuMult;
1019uint64_t divisor = 0;
1020uint64_t cpuDid;
1021uint64_t cpuFid;
1022
1023prfsts = rdmsr64(AMD_COFVID_STS);
1024cpuDid = bitfield(prfsts, 8, 6);
1025cpuFid = bitfield(prfsts, 5, 0);
1026if (cpuDid == 0) divisor = 2;
1027else if (cpuDid == 1) divisor = 4;
1028else if (cpuDid == 2) divisor = 8;
1029else if (cpuDid == 3) divisor = 16;
1030else if (cpuDid == 4) divisor = 32;
1031
1032cpuMult = ((cpuFid + 0x10) * 10) / (2^cpuDid);
1033currcoef = cpuMult;
1034
1035cpuMultN2 = (prfsts & (uint64_t)bit(0));
1036currdiv = cpuMultN2;
1037
1038/****** Addon END ******/
1039}
1040break;
1041
1042case 0x11: /*** AMD Family 11h ***/
1043{
1044
1045uint64_t prfsts;
1046uint64_t cpuMult;
1047uint64_t divisor = 0;
1048uint64_t cpuDid;
1049uint64_t cpuFid;
1050
1051prfsts = rdmsr64(AMD_COFVID_STS);
1052
1053cpuDid = bitfield(prfsts, 8, 6);
1054cpuFid = bitfield(prfsts, 5, 0);
1055if (cpuDid == 0) divisor = 2;
1056else if (cpuDid == 1) divisor = 4;
1057else if (cpuDid == 2) divisor = 8;
1058else if (cpuDid == 3) divisor = 16;
1059else if (cpuDid == 4) divisor = 0;
1060cpuMult = ((cpuFid + 0x8) * 10 ) / divisor;
1061currcoef = cpuMult;
1062
1063cpuMultN2 = (prfsts & (uint64_t)bit(0));
1064currdiv = cpuMultN2;
1065
1066/****** Addon END ******/
1067}
1068 break;
1069
1070case 0x12: /*** AMD Family 12h ***/
1071{
1072// 8:4 CpuFid: current CPU core frequency ID
1073// 3:0 CpuDid: current CPU core divisor ID
1074uint64_t prfsts,CpuFid,CpuDid;
1075prfsts = rdmsr64(AMD_COFVID_STS);
1076
1077CpuDid = bitfield(prfsts, 3, 0) ;
1078CpuFid = bitfield(prfsts, 8, 4) ;
1079uint64_t divisor;
1080switch (CpuDid)
1081{
1082case 0: divisor = 1; break;
1083case 1: divisor = (3/2); break;
1084case 2: divisor = 2; break;
1085case 3: divisor = 3; break;
1086case 4: divisor = 4; break;
1087case 5: divisor = 6; break;
1088case 6: divisor = 8; break;
1089case 7: divisor = 12; break;
1090case 8: divisor = 16; break;
1091default: divisor = 1; break;
1092}
1093currcoef = ((CpuFid + 0x10) * 10) / divisor;
1094
1095cpuMultN2 = (prfsts & (uint64_t)bit(0));
1096currdiv = cpuMultN2;
1097
1098}
1099break;
1100
1101case 0x14: /* K14 */
1102
1103{
1104// 8:4: current CPU core divisor ID most significant digit
1105// 3:0: current CPU core divisor ID least significant digit
1106uint64_t prfsts;
1107prfsts = rdmsr64(AMD_COFVID_STS);
1108
1109uint64_t CpuDidMSD,CpuDidLSD;
1110CpuDidMSD = bitfield(prfsts, 8, 4) ;
1111CpuDidLSD = bitfield(prfsts, 3, 0) ;
1112
1113uint64_t frequencyId = tscFreq/Mega;
1114currcoef = (((frequencyId + 5) / 100) + 0x10) * 10 /
1115(CpuDidMSD + (CpuDidLSD * 0.25) + 1);
1116currdiv = ((CpuDidMSD) + 1) << 2;
1117currdiv += bitfield(prfsts, 3, 0);
1118
1119cpuMultN2 = (prfsts & (uint64_t)bit(0));
1120currdiv = cpuMultN2;
1121}
1122
1123break;
1124
1125case 0x15: /*** AMD Family 15h ***/
1126case 0x06: /*** AMD Family 06h ***/
1127{
1128
1129uint64_t prfsts = 0;
1130uint64_t cpuMult;
1131//uint64_t divisor = 0;
1132uint64_t cpuDid;
1133uint64_t cpuFid;
1134
1135prfsts = rdmsr64(AMD_COFVID_STS);
1136cpuDid = bitfield(prfsts, 8, 6);
1137cpuFid = bitfield(prfsts, 5, 0);
1138
1139cpuMult = ((cpuFid + 0x10) * 10) / (2^cpuDid);
1140currcoef = cpuMult;
1141
1142cpuMultN2 = (prfsts & 0x01) * 1;//(prfsts & (uint64_t)bit(0));
1143currdiv = cpuMultN2;
1144}
1145break;
1146
1147case 0x16: /*** AMD Family 16h kabini ***/
1148{
1149uint64_t prfsts = 0;
1150uint64_t cpuMult;
1151uint64_t divisor = 0;
1152uint64_t cpuDid;
1153uint64_t cpuFid;
1154prfsts = rdmsr64(AMD_COFVID_STS);
1155cpuDid = bitfield(prfsts, 8, 6);
1156cpuFid = bitfield(prfsts, 5, 0);
1157if (cpuDid == 0) divisor = 1;
1158else if (cpuDid == 1) divisor = 2;
1159else if (cpuDid == 2) divisor = 4;
1160else if (cpuDid == 3) divisor = 8;
1161else if (cpuDid == 4) divisor = 16;
1162
1163cpuMult = ((cpuFid + 0x10) * 10) / divisor;
1164currcoef = cpuMult;
1165
1166cpuMultN2 = (prfsts & (uint64_t)bit(0));
1167currdiv = cpuMultN2;
1168
1169/****** Addon END ******/
1170}
1171break;
1172
1173case 0x17: /*** AMD Family 17h Ryzen ***/
1174{
1175uint64_t cpuMult;
1176uint64_t CpuDfsId;
1177uint64_t CpuFid;
1178uint64_t fid = 0;
1179uint64_t prfsts = 0;
1180
1181prfsts = rdmsr64(AMD_PSTATE0_STS);
1182
1183CpuDfsId = bitfield(prfsts, 13, 8);
1184CpuFid = bitfield(prfsts, 7, 0);
1185
1186cpuMult = (CpuFid * 10 / CpuDfsId) * 2;
1187
1188currcoef = cpuMult;
1189
1190fid = (int)(cpuMult / 10);
1191
1192uint8_t fdiv = cpuMult - (fid * 10);
1193if (fdiv > 0) {
1194currdiv = 1;
1195}
1196
1197/****** Addon END ******/
1198}
1199break;
1200
1201default:
1202{
1203currcoef = tscFreq / (200 * Mega);
1204}
1205}
1206
1207#define nya(x) x/10,x%10
1208
1209if (currcoef)
1210{
1211if (currdiv)
1212{
1213currcoef = nya(currcoef);
1214
1215busFrequency = ((tscFreq * 2) / ((currcoef * 2) + 1));
1216busFCvtt2n = ((1 * Giga) << 32) / busFrequency;
1217tscFCvtt2n = busFCvtt2n * 2 / (1 + (2 * currcoef));
1218cpuFrequency = ((1 * Giga) << 32) / tscFCvtt2n;
1219
1220DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
1221}
1222else
1223{
1224currcoef = nya(currcoef);
1225
1226busFrequency = (tscFreq / currcoef);
1227busFCvtt2n = ((1 * Giga) << 32) / busFrequency;
1228tscFCvtt2n = busFCvtt2n / currcoef;
1229cpuFrequency = ((1 * Giga) << 32) / tscFCvtt2n;
1230DBG("%d\n", currcoef);
1231}
1232}
1233else if (!cpuFrequency)
1234{
1235cpuFrequency = tscFreq;
1236}
1237}
1238
1239#if 0
1240if (!busFrequency)
1241{
1242busFrequency = (DEFAULT_FSB * 1000);
1243DBG("\tCPU: busFrequency = 0! using the default value for FSB!\n");
1244cpuFrequency = tscFreq;
1245}
1246
1247DBG("\tcpu freq = 0x%016llxn", timeRDTSC() * 20);
1248
1249#endif
1250
1251outb(0x21U, pic0_mask); // restore PIC0 interrupts
1252
1253p->CPU.MaxCoef = maxcoef = currcoef;
1254p->CPU.MaxDiv = maxdiv = currdiv;
1255p->CPU.CurrCoef = currcoef;
1256p->CPU.CurrDiv = currdiv;
1257p->CPU.TSCFrequency = tscFreq;
1258p->CPU.FSBFrequency = busFrequency;
1259p->CPU.CPUFrequency = cpuFrequency;
1260
1261// keep formatted with spaces instead of tabs
1262
1263DBG("\tCPUID Raw Values:\n");
1264for (i = 0; i < CPUID_MAX; i++)
1265{
1266DBG("\t%02d: %08X-%08X-%08X-%08X\n", i, p->CPU.CPUID[i][eax], p->CPU.CPUID[i][ebx], p->CPU.CPUID[i][ecx], p->CPU.CPUID[i][edx]);
1267}
1268DBG("\n");
1269DBG("\tBrand String: %s\n",p->CPU.BrandString);// Processor name (BIOS)
1270DBG("\tVendor: 0x%X\n",p->CPU.Vendor);// Vendor ex: GenuineIntel
1271DBG("\tFamily: 0x%X\n",p->CPU.Family);// Family ex: 6 (06h)
1272DBG("\tExtFamily: 0x%X\n",p->CPU.ExtFamily);
1273DBG("\tSignature: 0x%08X\n",p->CPU.Signature);// CPUID signature
1274/*switch (p->CPU.Type) {
1275case PT_OEM:
1276DBG("\tProcessor type: Intel Original OEM Processor\n");
1277break;
1278case PT_OD:
1279DBG("\tProcessor type: Intel Over Drive Processor\n");
1280break;
1281case PT_DUAL:
1282DBG("\tProcessor type: Intel Dual Processor\n");
1283break;
1284case PT_RES:
1285DBG("\tProcessor type: Intel Reserved\n");
1286break;
1287default:
1288break;
1289}*/
1290DBG("\tModel: 0x%X\n",p->CPU.Model);// Model ex: 37 (025h)
1291DBG("\tExtModel: 0x%X\n",p->CPU.ExtModel);
1292DBG("\tStepping: 0x%X\n",p->CPU.Stepping);// Stepping ex: 5 (05h)
1293DBG("\tMaxCoef: %d\n",p->CPU.MaxCoef);
1294DBG("\tCurrCoef: %d\n",p->CPU.CurrCoef);
1295DBG("\tMaxDiv: %d\n",p->CPU.MaxDiv);
1296DBG("\tCurrDiv: %d\n",p->CPU.CurrDiv);
1297DBG("\tTSCFreq: %dMHz\n",p->CPU.TSCFrequency / 1000000);
1298DBG("\tFSBFreq: %dMHz\n",p->CPU.FSBFrequency / 1000000);
1299DBG("\tCPUFreq: %dMHz\n",p->CPU.CPUFrequency / 1000000);
1300DBG("\tCores: %d\n",p->CPU.NoCores);// Cores
1301DBG("\tLogical processor: %d\n",p->CPU.NoThreads);// Logical procesor
1302DBG("\tFeatures: 0x%08x\n",p->CPU.Features);
1303//DBG("\tMicrocode version: %d\n",p->CPU.MCodeVersion);// CPU microcode version
1304
1305verbose("\n");
1306#if DEBUG_CPU
1307pause();
1308#endif
1309}
1310

Archive Download this file

Revision: 2899