
Chameleon Svn Source Tree

Root/trunk/i386/libsaio/cpu.c

/*
 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
 * AsereBLN: 2009: cleanup and bugfix
 * Bronya: 2015 Improve AMD support, cleanup and bugfix
 */

#include "libsaio.h"
#include "platform.h"
#include "cpu.h"
#include "bootstruct.h"
#include "boot.h"

#ifndef DEBUG_CPU
#define DEBUG_CPU 0
#endif

#if DEBUG_CPU
#define DBG(x...) printf(x)
#else
#define DBG(x...)
#endif


#define UI_CPUFREQ_ROUNDING_FACTOR 10000000

clock_frequency_info_t gPEClockFrequencyInfo;

static __unused uint64_t rdtsc32(void)
{
    unsigned int lo, hi;
    __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
    return ((uint64_t)hi << 32) | lo;
}

/*
 * timeRDTSC()
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
static uint64_t timeRDTSC(void)
{
    int          attempts = 0;
    uint32_t     latchTime;
    uint64_t     saveTime, intermediate;
    unsigned int timerValue, lastValue;
    //boolean_t  int_enabled;
    /*
     * Table of correction factors to account for
     * - timer counter quantization errors, and
     * - undercounts 0..5
     */
#define SAMPLE_CLKS_EXACT  (((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT    ((int) CLKNUM / 20)
#define SAMPLE_NSECS       (2000000000LL)
#define SAMPLE_MULTIPLIER  (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)         ((uint64_t)((x) + 0.5))
    uint64_t scale[6] = {
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
    };

    //int_enabled = ml_set_interrupts_enabled(false);

restart:
    if (attempts >= 3) // increase to up to 9 attempts.
    {
        // This will flash-reboot. TODO: Use tscPanic instead.
        //printf("Timestamp counter calibration failed with %d attempts\n", attempts);
    }
    attempts++;
    enable_PIT2();                  // turn on PIT2
    set_PIT2(0);                    // reset timer 2 to be zero
    latchTime = rdtsc32();          // get the time stamp to time
    latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
    set_PIT2(SAMPLE_CLKS_INT);      // set up the timer for (almost) 1/20th a second
    saveTime = rdtsc32();           // now time how long a 20th a second is...
    get_PIT2(&lastValue);
    get_PIT2(&lastValue);           // read twice, first value may be unreliable
    do {
        intermediate = get_PIT2(&timerValue);
        if (timerValue > lastValue)
        {
            // Timer wrapped
            set_PIT2(0);
            disable_PIT2();
            goto restart;
        }
        lastValue = timerValue;
    } while (timerValue > 5);
    //printf("timerValue %d\n", timerValue);
    //printf("intermediate 0x%016llX\n", intermediate);
    //printf("saveTime 0x%016llX\n", saveTime);

    intermediate -= saveTime;           // raw count for about 1/20 second
    intermediate *= scale[timerValue];  // rescale measured time spent
    intermediate /= SAMPLE_NSECS;       // so it's exactly 1/20 of a second
    intermediate += latchTime;          // add on our save fudge

    set_PIT2(0);                    // reset timer 2 to be zero
    disable_PIT2();                 // turn off PIT 2

    //ml_set_interrupts_enabled(int_enabled);
    return intermediate;
}

/*
 * DFE: Measures the TSC frequency in Hz (64-bit) using the 8254 PIT
 */
static uint64_t __unused measure_tsc_frequency(void)
{
    uint64_t tscStart;
    uint64_t tscEnd;
    uint64_t tscDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of TSC ticks that occur while waiting for the timer to
     * expire. That theoretically helps avoid inconsistencies when
     * running under a VM if the TSC is not virtualized and the host
     * steals time. The TSC is normally virtualized for VMware.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        tscStart = rdtsc64();
        pollCount = poll_PIT2_gate();
        tscEnd = rdtsc64();
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
        {
            continue;
        }
        /* The TSC must increment at LEAST once every millisecond.
         * We should have waited exactly 30 msec so the TSC delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
        {
            continue;
        }
        // tscDelta = MIN(tscDelta, (tscEnd - tscStart))
        if ( (tscEnd - tscStart) < tscDelta )
        {
            tscDelta = tscEnd - tscStart;
        }
    }
    /* tscDelta is now the least number of TSC ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds).
     * Linux thus divides by 30, which gives the answer in kilohertz because
     * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
     * Hz so we need to convert our milliseconds to seconds. Since we're
     * dividing by the milliseconds, we simply multiply by 1000.
     */

    /* Unlike Linux, we're not limited to 32-bit, but we do need to take care
     * that we're going to multiply by 1000 first so we do need at least some
     * arithmetic headroom. For now, 32-bit should be enough.
     * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
     */
    if (tscDelta > (1ULL<<32))
    {
        retval = 0;
    }
    else
    {
        retval = tscDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}

static uint64_t rtc_set_cyc_per_sec(uint64_t cycles);
#define RTC_FAST_DENOM 0xFFFFFFFF

inline static uint32_t
create_mul_quant_GHZ(int shift, uint32_t quant)
{
    return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant);
}
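/* Illustrative note (an assumption, not part of the original source): quant is
 * the number of cycles counted in 1/20 s, so the value returned above is
 * (nanoseconds-per-cycle << shift). Elapsed nanoseconds can then be recovered
 * with a single multiply and shift, e.g.
 *     nanos = (cycles * rtc_quant_scale) >> rtc_quant_shift;
 */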

struct {
    mach_timespec_t             calend_offset;
    boolean_t                   calend_is_set;

    int64_t                     calend_adjtotal;
    int32_t                     calend_adjdelta;

    uint32_t                    boottime;

    mach_timebase_info_data_t   timebase_const;

    decl_simple_lock_data(,lock)    /* real-time clock device lock */
} rtclock;

uint32_t rtc_quant_shift;   /* clock to nanos right shift */
uint32_t rtc_quant_scale;   /* clock to nanos multiplier */
uint64_t rtc_cyc_per_sec;   /* processor cycles per sec */
uint64_t rtc_cycle_count;   /* clocks in 1/20th second */

static uint64_t rtc_set_cyc_per_sec(uint64_t cycles)
{

    if (cycles > (NSEC_PER_SEC/20))
    {
        // we can use just a "fast" multiply to get nanos
        rtc_quant_shift = 32;
        rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, (uint32_t)cycles);
        rtclock.timebase_const.numer = rtc_quant_scale; // timeRDTSC is 1/20
        rtclock.timebase_const.denom = (uint32_t)RTC_FAST_DENOM;
    }
    else
    {
        rtc_quant_shift = 26;
        rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, (uint32_t)cycles);
        rtclock.timebase_const.numer = NSEC_PER_SEC/20; // timeRDTSC is 1/20
        rtclock.timebase_const.denom = (uint32_t)cycles;
    }
    rtc_cyc_per_sec = cycles*20;    // multiply it by 20 and we are done..
    // BUT we also want to calculate...

    cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
              / UI_CPUFREQ_ROUNDING_FACTOR)
             * UI_CPUFREQ_ROUNDING_FACTOR;
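    /* Illustrative arithmetic (not part of the original source): with
     * UI_CPUFREQ_ROUNDING_FACTOR = 10,000,000, a measured rtc_cyc_per_sec of
     * 2,394,712,345 Hz becomes (2,394,712,345 + 5,000,000) / 10,000,000 = 239,
     * then 239 * 10,000,000 = 2,390,000,000 Hz, i.e. rounded to the nearest 10 MHz.
     */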

    /*
     * Set current measured speed.
     */
    if (cycles >= 0x100000000ULL)
    {
        gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
    }
    else
    {
        gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
    }
    gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

    //printf("[RTCLOCK_1] frequency %llu (%llu) %llu\n", cycles, rtc_cyc_per_sec, timeRDTSC() * 20);
    return (rtc_cyc_per_sec);
}

// Bronya C1E fix
static void post_startup_cpu_fixups(void)
{
    /*
     * Some AMD processors support C1E state. Entering this state will
     * cause the local APIC timer to stop, which we can't deal with at
     * this time.
     */

    uint64_t reg;
    verbose("\tLooking to disable C1E if it is already enabled by the BIOS:\n");
    reg = rdmsr64(MSR_AMD_INT_PENDING_CMP_HALT);
    /* Disable C1E state if it is enabled by the BIOS */
    if ((reg >> AMD_ACTONCMPHALT_SHIFT) & AMD_ACTONCMPHALT_MASK)
    {
        reg &= ~(AMD_ACTONCMPHALT_MASK << AMD_ACTONCMPHALT_SHIFT);
        wrmsr64(MSR_AMD_INT_PENDING_CMP_HALT, reg);
        verbose("\tC1E disabled!\n");
    }
}

/*
 * Large memcpy() into MMIO space can take longer than 1 clock tick (55ms).
 * The timer interrupt must remain responsive when updating VRAM so
 * as not to miss timer interrupts during countdown().
 *
 * If interrupts are enabled, use normal memcpy.
 *
 * If interrupts are disabled, break the memcpy down
 * into 128K chunks, time the copy and make a BIOS
 * real-mode call every 25 msec in order to service
 * pending interrupts.
 *
 * -- zenith432, May 22nd, 2016
 */
void* memcpy_interruptible(void* dst, const void* src, size_t len)
{
    uint64_t tscFreq, lastTsc;
    uint32_t eflags, threshold;
    ptrdiff_t offset;
    const size_t chunk = 131072U;   // 128K

    if (len <= chunk)
    {
        /*
         * Short memcpy - use normal.
         */
        return memcpy(dst, src, len);
    }

    __asm__ volatile("pushfl; popl %0" : "=r"(eflags));
    if (eflags & 0x200U)
    {
        /*
         * Interrupts are enabled - use normal memcpy.
         */
        return memcpy(dst, src, len);
    }

    tscFreq = Platform.CPU.TSCFrequency;
    if ((uint32_t) (tscFreq >> 32))
    {
        /*
         * If TSC Frequency >= 2 ** 32, use a default time threshold.
         */
        threshold = (~0U) / 40U;
    }
    else if (!(uint32_t) tscFreq)
    {
        /*
         * If early on and TSC Frequency hasn't been estimated yet,
         * use normal memcpy.
         */
        return memcpy(dst, src, len);
    }
    else
    {
        threshold = ((uint32_t) tscFreq) / 40U;
    }
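    /* Illustrative note (not part of the original source): tscFreq / 40 is the
     * number of TSC ticks in 1/40 s = 25 ms; e.g. a 2 GHz TSC gives a
     * threshold of 50,000,000 ticks between real-mode calls.
     */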

    /*
     * Do the work
     */
    offset = 0;
    lastTsc = rdtsc64();
    do
    {
        (void) memcpy((char*) dst + offset, (const char*) src + offset, chunk);
        offset += (ptrdiff_t) chunk;
        len -= chunk;
        if ((rdtsc64() - lastTsc) < threshold)
        {
            continue;
        }
        (void) readKeyboardStatus();    // visit real-mode
        lastTsc = rdtsc64();
    }
    while (len > chunk);
    if (len)
    {
        (void) memcpy((char*) dst + offset, (const char*) src + offset, len);
    }
    return dst;
}

/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
 * - multi. is read from a specific MSR. In the case of Intel, there is:
 *   a max multi. (used to calculate the FSB freq.),
 *   and a current multi. (used to calculate the CPU freq.)
 * - busFrequency = tscFrequency / multi
 * - cpuFrequency = busFrequency * multi
 */
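/* Illustrative example (values are assumptions, not from the original source):
 * a CPU with tscFrequency = 3,200 MHz and a maximum multiplier of 32 yields
 * busFrequency = 3200 / 32 = 100 MHz; if the current multiplier is 34, then
 * cpuFrequency = 100 * 34 = 3,400 MHz.
 */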

/* Decimal powers: */
#define kilo (1000ULL)
#define Mega (kilo * kilo)
#define Giga (kilo * Mega)
#define Tera (kilo * Giga)
#define Peta (kilo * Tera)

#define quad(hi,lo) (((uint64_t)(hi)) << 32 | (lo))

void get_cpuid(PlatformInfo_t *p)
{

    char     str[128];
    uint32_t reg[4];
    char     *s = 0;


    do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);  // MaxFn, Vendor
    do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);  // Signature, stepping, features
    do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);  // TLB/Cache/Prefetch

    do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);  // S/N
    do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]); // Get the max extended cpuid

    if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8)
    {
        do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }
    else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1)
    {
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }

    // ==============================================================

    /* get BrandString (if supported) */
    /* Copyright: from Apple's XNU cpuid.c */
    if (p->CPU.CPUID[CPUID_80][0] > 0x80000004)
    {
        bzero(str, 128);
        /*
         * The BrandString is 48 bytes (max), guaranteed to
         * be NULL terminated.
         */
        do_cpuid(0x80000002, reg);
        memcpy(&str[0], (char *)reg, 16);
        do_cpuid(0x80000003, reg);
        memcpy(&str[16], (char *)reg, 16);
        do_cpuid(0x80000004, reg);
        memcpy(&str[32], (char *)reg, 16);
        for (s = str; *s != '\0'; s++)
        {
            if (*s != ' ')
            {
                break;
            }
        }
        strlcpy(p->CPU.BrandString, s, 48);

        if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), (unsigned)strlen(CPU_STRING_UNKNOWN) + 1)))
        {
            /*
             * This string means we have a firmware-programmable brand string,
             * and the firmware couldn't figure out what sort of CPU we have.
             */
            p->CPU.BrandString[0] = '\0';
        }
        p->CPU.BrandString[47] = '\0';
        //DBG("\tBrandstring = %s\n", p->CPU.BrandString);
    }

    // ==============================================================

    switch (p->CPU.BrandString[0])
    {
        case 'A':
            /* AMD Processors */
            // The cache information is only in ecx and edx so only save
            // those registers

            do_cpuid(5, p->CPU.CPUID[CPUID_5]);              // Monitor/Mwait

            do_cpuid(0x80000005, p->CPU.CPUID[CPUID_85]);    // TLB/Cache/Prefetch
            do_cpuid(0x80000006, p->CPU.CPUID[CPUID_86]);    // TLB/Cache/Prefetch
            do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);

            break;

        case 'G':
            /* Intel Processors */
            do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]); // Cache Index for Intel

            if (p->CPU.CPUID[CPUID_0][0] >= 0x5)             // Monitor/Mwait
            {
                do_cpuid(5, p->CPU.CPUID[CPUID_5]);
            }

            if (p->CPU.CPUID[CPUID_0][0] >= 6)               // Thermal/Power
            {
                do_cpuid(6, p->CPU.CPUID[CPUID_6]);
            }

            break;
    }
}
void scan_cpu(PlatformInfo_t *p)
{
    verbose("[ CPU INFO ]\n");
    get_cpuid(p);

    uint64_t    busFCvtt2n;
    uint64_t    tscFCvtt2n;
    uint64_t    tscFreq             = 0;
    uint64_t    busFrequency        = 0;
    uint64_t    cpuFrequency        = 0;
    uint64_t    msr                 = 0;
    uint64_t    flex_ratio          = 0;
    uint64_t    cpuid_features;

    uint32_t    max_ratio           = 0;
    uint32_t    min_ratio           = 0;
    uint32_t    reg[4];
    uint32_t    cores_per_package   = 0;
    uint32_t    logical_per_package = 1;
    uint32_t    threads_per_core    = 1;

    uint8_t     bus_ratio_max       = 0;
    uint8_t     bus_ratio_min       = 0;
    uint8_t     currdiv             = 0;
    uint8_t     currcoef            = 0;
    uint8_t     maxdiv              = 0;
    uint8_t     maxcoef             = 0;
    uint8_t     pic0_mask;
    uint8_t     cpuMultN2           = 0;

    const char  *newratio;

    int         len                 = 0;
    int         myfsb               = 0;
    int         i                   = 0;


    /* http://www.flounder.com/cpuid_explorer2.htm
       EAX (Intel):
       31    28 27            20 19    16 15 14 13 12 11     8 7      4 3      0
       +--------+----------------+--------+-----+----+--------+--------+--------+
       |########|Extended family |Extmodel|#####|type|familyid| model  |stepping|
       +--------+----------------+--------+-----+----+--------+--------+--------+

       EAX (AMD):
       31    28 27            20 19    16 15 14 13 12 11     8 7      4 3      0
       +--------+----------------+--------+-----+----+--------+--------+--------+
       |########|Extended family |Extmodel|#####|####|familyid| model  |stepping|
       +--------+----------------+--------+-----+----+--------+--------+--------+
    */
    ///////////////////-- MaxFn,Vendor --////////////////////////
    p->CPU.Vendor = p->CPU.CPUID[CPUID_0][1];

    ///////////////////-- Signature, stepping, features -- //////
    cpuid_features = quad(p->CPU.CPUID[CPUID_1][ecx], p->CPU.CPUID[CPUID_1][edx]);
    if (bit(28) & p->CPU.CPUID[CPUID_1][edx]) // HTT/Multicore
    {
        logical_per_package = bitfield(p->CPU.CPUID[CPUID_1][ebx], 23, 16);
    }
    else
    {
        logical_per_package = 1;
    }

    p->CPU.Signature = p->CPU.CPUID[CPUID_1][0];
    p->CPU.Stepping  = (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);   // stepping = cpu_feat_eax & 0xF;
    p->CPU.Model     = (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);   // model = (cpu_feat_eax >> 4) & 0xF;
    p->CPU.Family    = (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);  // family = (cpu_feat_eax >> 8) & 0xF;
    //p->CPU.Type    = (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 13, 12); // type = (cpu_feat_eax >> 12) & 0x3;
    p->CPU.ExtModel  = (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16); // ext_model = (cpu_feat_eax >> 16) & 0xF;
    p->CPU.ExtFamily = (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20); // ext_family = (cpu_feat_eax >> 20) & 0xFF;

    if (p->CPU.Family == 0x0f)
    {
        p->CPU.Family += p->CPU.ExtFamily;
    }

    if (p->CPU.Family == 0x0f || p->CPU.Family == 0x06)
    {
        p->CPU.Model += (p->CPU.ExtModel << 4);
    }
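    /* Illustrative example (not part of the original source): a Sandy Bridge
     * part reports Family = 0x6, Model = 0xA, ExtModel = 0x2 in CPUID leaf 1,
     * so the effective model becomes 0xA + (0x2 << 4) = 0x2A.
     */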

    switch (p->CPU.Vendor)
    {
        case CPUID_VENDOR_INTEL:
        {
            /* Based on Apple's XNU cpuid.c - Deterministic cache parameters */
            if ((p->CPU.CPUID[CPUID_0][eax] > 3) && (p->CPU.CPUID[CPUID_0][eax] < 0x80000000))
            {
                for (i = 0; i < 0xFF; i++) // safe loop
                {
                    do_cpuid2(0x00000004, i, reg); // AX=4: Fn, CX=i: cache index
                    if (bitfield(reg[eax], 4, 0) == 0)
                    {
                        break;
                    }
                    cores_per_package = bitfield(reg[eax], 31, 26) + 1;
                }
            }

            if (i > 0)
            {
                cores_per_package = bitfield(p->CPU.CPUID[CPUID_4][eax], 31, 26) + 1; // i = cache index
                threads_per_core  = bitfield(p->CPU.CPUID[CPUID_4][eax], 25, 14) + 1;
            }

            if (cores_per_package == 0)
            {
                cores_per_package = 1;
            }

            switch (p->CPU.Model)
            {
                case CPUID_MODEL_NEHALEM:       // Intel Core i7 LGA1366 (45nm)
                case CPUID_MODEL_FIELDS:        // Intel Core i5, i7 LGA1156 (45nm)
                case CPUID_MODEL_CLARKDALE:     // Intel Core i3, i5, i7 LGA1156 (32nm)
                case CPUID_MODEL_NEHALEM_EX:
                case CPUID_MODEL_JAKETOWN:
                case CPUID_MODEL_SANDYBRIDGE:
                case CPUID_MODEL_IVYBRIDGE:
                case CPUID_MODEL_HASWELL_U5:
                case CPUID_MODEL_HASWELL:
                case CPUID_MODEL_HASWELL_SVR:
                //case CPUID_MODEL_HASWELL_H:
                case CPUID_MODEL_HASWELL_ULT:
                case CPUID_MODEL_HASWELL_ULX:
                case CPUID_MODEL_BROADWELL_HQ:
                case CPUID_MODEL_BRODWELL_SVR:
                case CPUID_MODEL_SKYLAKE_S:
                //case CPUID_MODEL_:
                    msr = rdmsr64(MSR_CORE_THREAD_COUNT); // 0x35
                    p->CPU.NoCores   = (uint32_t)bitfield((uint32_t)msr, 31, 16);
                    p->CPU.NoThreads = (uint32_t)bitfield((uint32_t)msr, 15, 0);
                    break;

                case CPUID_MODEL_DALES:
                case CPUID_MODEL_WESTMERE:      // Intel Core i7 LGA1366 (32nm) 6 Core
                case CPUID_MODEL_WESTMERE_EX:
                    msr = rdmsr64(MSR_CORE_THREAD_COUNT);
                    p->CPU.NoCores   = (uint32_t)bitfield((uint32_t)msr, 19, 16);
                    p->CPU.NoThreads = (uint32_t)bitfield((uint32_t)msr, 15, 0);
                    break;
                case CPUID_MODEL_ATOM_3700:
                case CPUID_MODEL_ATOM:
                    p->CPU.NoCores   = 2;
                    p->CPU.NoThreads = 2;
                    break;
                default:
                    p->CPU.NoCores = 0;
                    break;
            }

            if (p->CPU.NoCores == 0)
            {
                p->CPU.NoCores   = cores_per_package;
                p->CPU.NoThreads = logical_per_package;
            }

            // MSR is *NOT* available on the Intel Atom CPU
            // workaround for N270. I don't know why it is detected wrongly
            if ((p->CPU.Model == CPUID_MODEL_ATOM) && (strstr(p->CPU.BrandString, "270")))
            {
                p->CPU.NoCores   = 1;
                p->CPU.NoThreads = 2;
            }


            // workaround for Xeon Harpertown and Yorkfield
            if ((p->CPU.Model == CPUID_MODEL_PENRYN) &&
                (p->CPU.NoCores == 0))
            {
                if ((strstr(p->CPU.BrandString, "X54")) ||
                    (strstr(p->CPU.BrandString, "E54")) ||
                    (strstr(p->CPU.BrandString, "W35")) ||
                    (strstr(p->CPU.BrandString, "X34")) ||
                    (strstr(p->CPU.BrandString, "X33")) ||
                    (strstr(p->CPU.BrandString, "L33")) ||
                    (strstr(p->CPU.BrandString, "X32")) ||
                    (strstr(p->CPU.BrandString, "L3426")) ||
                    (strstr(p->CPU.BrandString, "L54")))
                {
                    p->CPU.NoCores   = 4;
                    p->CPU.NoThreads = 4;
                } else if (strstr(p->CPU.BrandString, "W36")) {
                    p->CPU.NoCores   = 6;
                    p->CPU.NoThreads = 6;
                } else { // other Penryn and Wolfdale
                    p->CPU.NoCores   = 0;
                    p->CPU.NoThreads = 0;
                }
            }

            // workaround for Quad
            if ( strstr(p->CPU.BrandString, "Quad") )
            {
                p->CPU.NoCores   = 4;
                p->CPU.NoThreads = 4;
            }
        }

        break;

        case CPUID_VENDOR_AMD:
        {
            post_startup_cpu_fixups();
            cores_per_package = bitfield(p->CPU.CPUID[CPUID_88][ecx], 7, 0) + 1;
            threads_per_core  = cores_per_package;

            if (cores_per_package == 0)
            {
                cores_per_package = 1;
            }

            p->CPU.NoCores   = cores_per_package;
            p->CPU.NoThreads = logical_per_package;

            if (p->CPU.NoCores == 0)
            {
                p->CPU.NoCores   = 1;
                p->CPU.NoThreads = 1;
            }
        }
        break;

        default:
            stop("Unsupported CPU detected! System halted.");
    }

    /* setup features */
    if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_MMX;
    }

    if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE;
    }

    if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE2;
    }

    if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE3;
    }

    if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE41;
    }

    if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE42;
    }

    if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_EM64T;
    }

    if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_MSR;
    }

    if ((p->CPU.NoThreads > p->CPU.NoCores))
    {
        p->CPU.Features |= CPU_FEATURE_HTT;
    }

    pic0_mask = inb(0x21U);
    outb(0x21U, 0xFFU); // mask PIC0 interrupts for duration of timing tests

    uint64_t cycles;
    cycles = timeRDTSC();
    tscFreq = rtc_set_cyc_per_sec(cycles);
    DBG("cpu freq classic = 0x%016llx\n", tscFreq);
    // if usual method failed
    if ( tscFreq < 1000 ) // TEST
    {
        tscFreq = measure_tsc_frequency(); //timeRDTSC() * 20; //measure_tsc_frequency();
        // DBG("cpu freq timeRDTSC = 0x%016llx\n", tscFrequency);
    }

    if (p->CPU.Vendor==CPUID_VENDOR_INTEL && ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)))
    {
        int intelCPU = p->CPU.Model;
        if (p->CPU.Family == 0x06)
        {
            /* Nehalem CPU model */
            switch (p->CPU.Model)
            {
                case CPUID_MODEL_NEHALEM:
                case CPUID_MODEL_FIELDS:
                case CPUID_MODEL_CLARKDALE:
                case CPUID_MODEL_DALES:
                case CPUID_MODEL_WESTMERE:
                case CPUID_MODEL_NEHALEM_EX:
                case CPUID_MODEL_WESTMERE_EX:
                /* --------------------------------------------------------- */
                case CPUID_MODEL_SANDYBRIDGE:
                case CPUID_MODEL_JAKETOWN:
                case CPUID_MODEL_IVYBRIDGE_XEON:
                case CPUID_MODEL_IVYBRIDGE:
                case CPUID_MODEL_ATOM_3700:
                case CPUID_MODEL_HASWELL:
                case CPUID_MODEL_HASWELL_U5:
                case CPUID_MODEL_HASWELL_SVR:

                case CPUID_MODEL_HASWELL_ULT:
                case CPUID_MODEL_HASWELL_ULX:
                case CPUID_MODEL_BROADWELL_HQ:
                case CPUID_MODEL_SKYLAKE_S:
                /* --------------------------------------------------------- */
                    msr = rdmsr64(MSR_PLATFORM_INFO);
                    DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
                    bus_ratio_max = bitfield(msr, 15, 8);
                    bus_ratio_min = bitfield(msr, 47, 40); //valv: not sure about this one (Remarq.1)
                    msr = rdmsr64(MSR_FLEX_RATIO);
                    DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
                    if (bitfield(msr, 16, 16))
                    {
                        flex_ratio = bitfield(msr, 15, 8);
                        // bcc9: at least on the gigabyte h67ma-ud2h,
                        // where the cpu multiplier can't be changed to
                        // allow overclocking, the flex_ratio msr has unexpected (to OSX)
                        // contents. These contents cause mach_kernel to
                        // fail to compute the bus ratio correctly, instead
                        // causing the system to crash since tscGranularity
                        // is inadvertently set to 0.

                        if (flex_ratio == 0)
                        {
                            // Clear bit 16 (evidently the presence bit)
                            wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
                            msr = rdmsr64(MSR_FLEX_RATIO);
                            DBG("CPU: Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
                        }
                        else
                        {
                            if (bus_ratio_max > flex_ratio)
                            {
                                bus_ratio_max = flex_ratio;
                            }
                        }
                    }

                    if (bus_ratio_max)
                    {
                        busFrequency = (tscFreq / bus_ratio_max);
                    }
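                    /* Illustrative arithmetic (not part of the original source):
                     * with tscFreq = 3,400,000,000 Hz and bus_ratio_max = 34,
                     * busFrequency = 3,400,000,000 / 34 = 100,000,000 Hz (a 100 MHz BCLK).
                     */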

                    //valv: Turbo Ratio Limit
                    if ((intelCPU != 0x2e) && (intelCPU != 0x2f))
                    {
                        msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);

                        cpuFrequency = bus_ratio_max * busFrequency;
                        max_ratio = bus_ratio_max * 10;
                    }
                    else
                    {
                        cpuFrequency = tscFreq;
                    }

                    if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4))
                    {
                        max_ratio = atoi(newratio);
                        max_ratio = (max_ratio * 10);
                        if (len >= 3)
                        {
                            max_ratio = (max_ratio + 5);
                        }

                        verbose("\tBus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

                        // extreme overclockers may love 320 ;)
                        if ((max_ratio >= min_ratio) && (max_ratio <= 320))
                        {
                            cpuFrequency = (busFrequency * max_ratio) / 10;
                            if (len >= 3)
                            {
                                maxdiv = 1;
                            }
                            else
                            {
                                maxdiv = 0;
                            }
                        }
                        else
                        {
                            max_ratio = (bus_ratio_max * 10);
                        }
                    }
                    //valv: to be uncommented if Remarq.1 didn't stick
                    //if (bus_ratio_max > 0) bus_ratio = flex_ratio;
                    p->CPU.MaxRatio = max_ratio;
                    p->CPU.MinRatio = min_ratio;

                    myfsb = busFrequency / 1000000;
                    verbose("\tSticking with [BCLK: %dMHz, Bus-Ratio: %d]\n", myfsb, max_ratio/10); // Bungo: fixed wrong Bus-Ratio readout
                    currcoef = bus_ratio_max;

                    break;

                default:
                    msr = rdmsr64(MSR_IA32_PERF_STATUS);
                    DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
                    currcoef = bitfield(msr, 12, 8); // Bungo: reverted to 2263 state because of wrong old CPUs freq. calculating
                    // Non-integer bus ratio for the max-multi
                    maxdiv = bitfield(msr, 46, 46);
                    // Non-integer bus ratio for the current-multi (undocumented)
                    currdiv = bitfield(msr, 14, 14);

                    // This will always be model >= 3
                    if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
                    {
                        /* On these models, maxcoef defines TSC freq */
                        maxcoef = bitfield(msr, 44, 40);
                    }
                    else
                    {
                        // On lower models, currcoef defines TSC freq
                        // XXX
                        maxcoef = currcoef;
                    }

                    if (!currcoef)
                    {
                        currcoef = maxcoef;
                    }

                    if (maxcoef)
                    {
                        if (maxdiv)
                        {
                            busFrequency = ((tscFreq * 2) / ((maxcoef * 2) + 1));
                        }
                        else
                        {
                            busFrequency = (tscFreq / maxcoef);
                        }
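                        /* Illustrative note (not part of the original source): a set
                         * maxdiv bit means the effective maximum multiplier is
                         * maxcoef + 0.5; e.g. maxcoef = 16 with maxdiv set gives
                         * busFrequency = (tscFreq * 2) / 33, i.e. a 16.5x ratio.
                         */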

                        if (currdiv)
                        {
                            cpuFrequency = (busFrequency * ((currcoef * 2) + 1) / 2);
                        }
                        else
                        {
                            cpuFrequency = (busFrequency * currcoef);
                        }

                        DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "", currcoef, currdiv ? ".5" : "");
                    }
                    break;
            }
        }
        // Mobile CPU
        if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28))
        {
            p->CPU.Features |= CPU_FEATURE_MOBILE;
        }
    }

    else if (p->CPU.Vendor==CPUID_VENDOR_AMD)
    {
        switch (p->CPU.Family)
        {
            case 0xF: /* K8 */
            {
                uint64_t fidvid = 0;
                uint64_t cpuMult;
                uint64_t fid;

                fidvid = rdmsr64(K8_FIDVID_STATUS);
                fid = bitfield(fidvid, 5, 0);

                cpuMult = (fid + 8) / 2;
                currcoef = cpuMult;

                cpuMultN2 = (fidvid & (uint64_t)bit(0));
                currdiv = cpuMultN2;
                /****** Addon END ******/
            }
            break;

            case 0x10: /*** AMD Family 10h ***/
            {
                uint64_t cofvid = 0;
                uint64_t cpuMult;
                uint64_t divisor = 0;
                uint64_t did;
                uint64_t fid;

                cofvid = rdmsr64(K10_COFVID_STATUS);
                did = bitfield(cofvid, 8, 6);
                fid = bitfield(cofvid, 5, 0);
                if (did == 0) divisor = 2;
                else if (did == 1) divisor = 4;
                else if (did == 2) divisor = 8;
                else if (did == 3) divisor = 16;
                else if (did == 4) divisor = 32;
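                /* Illustrative note (an assumption based on AMD's published
                 * CoreCOF formula, not part of the original source):
                 * CoreCOF = 100 MHz * (CpuFid + 10h) / 2^CpuDid, so relative to
                 * the 200 MHz reference clock the multiplier works out to
                 * (fid + 16) / 2^(did + 1), which the divisor table above encodes.
                 */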

                cpuMult = (fid + 16) / divisor;
                currcoef = cpuMult;

                cpuMultN2 = (cofvid & (uint64_t)bit(0));
                currdiv = cpuMultN2;

                /****** Addon END ******/
            }
            break;

            case 0x11: /*** AMD Family 11h ***/
            {
                uint64_t cofvid = 0;
                uint64_t cpuMult;
                uint64_t divisor = 0;
                uint64_t did;
                uint64_t fid;

                cofvid = rdmsr64(K10_COFVID_STATUS);
                did = bitfield(cofvid, 8, 6);
                fid = bitfield(cofvid, 5, 0);
                if (did == 0) divisor = 2;
                else if (did == 1) divisor = 4;
                else if (did == 2) divisor = 8;
                else if (did == 3) divisor = 16;
                else if (did == 4) divisor = 32;

                cpuMult = (fid + 8) / divisor;
                currcoef = cpuMult;

                cpuMultN2 = (cofvid & (uint64_t)bit(0));
                currdiv = cpuMultN2;

                /****** Addon END ******/
            }
            break;

            case 0x12: /*** AMD Family 12h ***/
            {
                // 8:4 CpuFid: current CPU core frequency ID
                // 3:0 CpuDid: current CPU core divisor ID
                uint64_t prfsts, CpuFid, CpuDid;
                prfsts = rdmsr64(K10_COFVID_STATUS);

                CpuDid = bitfield(prfsts, 3, 0);
                CpuFid = bitfield(prfsts, 8, 4);
                uint64_t divisor;
                switch (CpuDid)
                {
                    case 0: divisor = 1; break;
                    case 1: divisor = (3/2); break; // NB: integer division truncates 1.5 to 1
                    case 2: divisor = 2; break;
                    case 3: divisor = 3; break;
                    case 4: divisor = 4; break;
                    case 5: divisor = 6; break;
                    case 6: divisor = 8; break;
                    case 7: divisor = 12; break;
                    case 8: divisor = 16; break;
                    default: divisor = 1; break;
                }
                currcoef = (CpuFid + 0x10) / divisor;

                cpuMultN2 = (prfsts & (uint64_t)bit(0));
                currdiv = cpuMultN2;

            }
            break;

            case 0x14: /* K14 */
            {
                // 8:4: current CPU core divisor ID most significant digit
                // 3:0: current CPU core divisor ID least significant digit
                uint64_t prfsts;
                prfsts = rdmsr64(K10_COFVID_STATUS);

                uint64_t CpuDidMSD, CpuDidLSD;
                CpuDidMSD = bitfield(prfsts, 8, 4);
                CpuDidLSD = bitfield(prfsts, 3, 0);

                uint64_t frequencyId = 0x10;
                currcoef = (frequencyId + 0x10) /
                           (CpuDidMSD + (CpuDidLSD * 0.25) + 1);
                currdiv = ((CpuDidMSD) + 1) << 2;
                currdiv += bitfield(msr, 3, 0);

                cpuMultN2 = (prfsts & (uint64_t)bit(0));
                currdiv = cpuMultN2;
            }

            break;

            case 0x15: /*** AMD Family 15h ***/
            case 0x06: /*** AMD Family 06h ***/
            {

                uint64_t cofvid = 0;
                uint64_t cpuMult;
                uint64_t divisor = 0;
                uint64_t did;
                uint64_t fid;

                cofvid = rdmsr64(K10_COFVID_STATUS);
                did = bitfield(cofvid, 8, 6);
                fid = bitfield(cofvid, 5, 0);
                if (did == 0) divisor = 2;
                else if (did == 1) divisor = 4;
                else if (did == 2) divisor = 8;
                else if (did == 3) divisor = 16;
                else if (did == 4) divisor = 32;

                cpuMult = (fid + 16) / divisor;
                currcoef = cpuMult;

                cpuMultN2 = (cofvid & (uint64_t)bit(0));
                currdiv = cpuMultN2;
            }
            break;

            case 0x16: /*** AMD Family 16h kabini ***/
            {
                uint64_t cofvid = 0;
                uint64_t cpuMult;
                uint64_t divisor = 0;
                uint64_t did;
                uint64_t fid;

                cofvid = rdmsr64(K10_COFVID_STATUS);
                did = bitfield(cofvid, 8, 6);
                fid = bitfield(cofvid, 5, 0);
                if (did == 0) divisor = 1;
                else if (did == 1) divisor = 2;
                else if (did == 2) divisor = 4;
                else if (did == 3) divisor = 8;
                else if (did == 4) divisor = 16;

                cpuMult = (fid + 16) / divisor;
                currcoef = cpuMult;

                cpuMultN2 = (cofvid & (uint64_t)bit(0));
                currdiv = cpuMultN2;

                /****** Addon END ******/
            }
            break;

            default:
            {
                typedef unsigned long long vlong;
                uint64_t prfsts;
                prfsts = rdmsr64(K10_COFVID_STATUS);
                uint64_t r;
                vlong hz;
                r = (prfsts >> 6) & 0x07;
                hz = (((prfsts & 0x3f) + 0x10) * 100000000ll) / (1 << r);

                currcoef = hz / (200 * Mega);
            }
        }

        if (currcoef)
        {
            if (currdiv)
            {
                busFrequency = ((tscFreq * 2) / ((currcoef * 2) + 1));
                busFCvtt2n = ((1 * Giga) << 32) / busFrequency;
                tscFCvtt2n = busFCvtt2n * 2 / (1 + (2 * currcoef));
                cpuFrequency = ((1 * Giga) << 32) / tscFCvtt2n;

                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            }
            else
            {
                busFrequency = (tscFreq / currcoef);
                busFCvtt2n = ((1 * Giga) << 32) / busFrequency;
                tscFCvtt2n = busFCvtt2n / currcoef;
                cpuFrequency = ((1 * Giga) << 32) / tscFCvtt2n;
                DBG("%d\n", currcoef);
            }
        }
        else if (!cpuFrequency)
        {
            cpuFrequency = tscFreq;
        }
    }

#if 0
    if (!busFrequency)
    {
        busFrequency = (DEFAULT_FSB * 1000);
        DBG("\tCPU: busFrequency = 0! using the default value for FSB!\n");
        cpuFrequency = tscFreq;
    }

    DBG("\tcpu freq = 0x%016llx\n", timeRDTSC() * 20);

#endif

    outb(0x21U, pic0_mask); // restore PIC0 interrupts

    p->CPU.MaxCoef = maxcoef = currcoef;
    p->CPU.MaxDiv = maxdiv = currdiv;
    p->CPU.CurrCoef = currcoef;
    p->CPU.CurrDiv = currdiv;
    p->CPU.TSCFrequency = tscFreq;
    p->CPU.FSBFrequency = busFrequency;
    p->CPU.CPUFrequency = cpuFrequency;

    // keep formatted with spaces instead of tabs

    DBG("\tCPUID Raw Values:\n");
    for (i = 0; i < CPUID_MAX; i++)
    {
        DBG("\t%02d: %08X-%08X-%08X-%08X\n", i, p->CPU.CPUID[i][eax], p->CPU.CPUID[i][ebx], p->CPU.CPUID[i][ecx], p->CPU.CPUID[i][edx]);
    }
    DBG("\n");
    DBG("\tBrand String: %s\n", p->CPU.BrandString);    // Processor name (BIOS)
    DBG("\tVendor: 0x%X\n", p->CPU.Vendor);             // Vendor ex: GenuineIntel
    DBG("\tFamily: 0x%X\n", p->CPU.Family);             // Family ex: 6 (06h)
    DBG("\tExtFamily: 0x%X\n", p->CPU.ExtFamily);
    DBG("\tSignature: 0x%08X\n", p->CPU.Signature);     // CPUID signature
    /*switch (p->CPU.Type) {
        case PT_OEM:
            DBG("\tProcessor type: Intel Original OEM Processor\n");
            break;
        case PT_OD:
            DBG("\tProcessor type: Intel Over Drive Processor\n");
            break;
        case PT_DUAL:
            DBG("\tProcessor type: Intel Dual Processor\n");
            break;
        case PT_RES:
            DBG("\tProcessor type: Intel Reserved\n");
            break;
        default:
            break;
    }*/
    DBG("\tModel: 0x%X\n", p->CPU.Model);               // Model ex: 37 (025h)
    DBG("\tExtModel: 0x%X\n", p->CPU.ExtModel);
    DBG("\tStepping: 0x%X\n", p->CPU.Stepping);         // Stepping ex: 5 (05h)
    DBG("\tMaxCoef: %d\n", p->CPU.MaxCoef);
    DBG("\tCurrCoef: %d\n", p->CPU.CurrCoef);
    DBG("\tMaxDiv: %d\n", p->CPU.MaxDiv);
    DBG("\tCurrDiv: %d\n", p->CPU.CurrDiv);
    DBG("\tTSCFreq: %dMHz\n", p->CPU.TSCFrequency / 1000000);
    DBG("\tFSBFreq: %dMHz\n", p->CPU.FSBFrequency / 1000000);
    DBG("\tCPUFreq: %dMHz\n", p->CPU.CPUFrequency / 1000000);
    DBG("\tCores: %d\n", p->CPU.NoCores);               // Cores
    DBG("\tLogical processor: %d\n", p->CPU.NoThreads); // Logical processors
    DBG("\tFeatures: 0x%08x\n", p->CPU.Features);

    verbose("\n");
#if DEBUG_CPU
    pause();
#endif
}


Revision: 2824