Chameleon Svn Source Tree

Root/branches/ErmaC/Enoch/i386/libsaio/cpu.c

/*
 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
 * AsereBLN: 2009: cleanup and bugfix
 */

#include "libsaio.h"
#include "platform.h"
#include "cpu.h"
#include "bootstruct.h"
#include "boot.h"

#ifndef DEBUG_CPU
#define DEBUG_CPU 0
#endif

#if DEBUG_CPU
#define DBG(x...)	printf(x)
#else
#define DBG(x...)	msglog(x)
#endif

/*
 * DFE: Measures the TSC frequency in Hz (64-bit) using the 8254 PIT
 */
static uint64_t measure_tsc_frequency(void)
{
	uint64_t tscStart;
	uint64_t tscEnd;
	uint64_t tscDelta = 0xffffffffffffffffULL;
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of TSC ticks that occur while waiting for the timer to
	 * expire. That theoretically helps avoid inconsistencies when
	 * running under a VM if the TSC is not virtualized and the host
	 * steals time. The TSC is normally virtualized for VMware.
	 */
	for (i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		tscStart = rdtsc64();
		pollCount = poll_PIT2_gate();
		tscEnd = rdtsc64();
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1)
		{
			continue;
		}
		/* The TSC must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the TSC delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
		{
			continue;
		}
		// tscDelta = MIN(tscDelta, (tscEnd - tscStart))
		if ( (tscEnd - tscStart) < tscDelta )
		{
			tscDelta = tscEnd - tscStart;
		}
	}
	/* tscDelta is now the least number of TSC ticks the processor made in
	 * a timespan of 0.03 s (i.e. 30 milliseconds).
	 * Linux thus divides by 30, which gives the answer in kilohertz because
	 * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
	 * Hz so we need to convert our milliseconds to seconds. Since we're
	 * dividing by the milliseconds, we simply multiply by 1000.
	 */

	/* Unlike Linux, we're not limited to 32-bit, but we do need to take care
	 * that we're going to multiply by 1000 first so we do need at least some
	 * arithmetic headroom. For now, 32-bit should be enough.
	 * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
	 */
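	/* Worked example (hypothetical numbers): a tscDelta of 80,000,000 ticks
	 * in the 30 msec window gives 80,000,000 * 1000 / 30 = 2,666,666,666 Hz,
	 * i.e. roughly a 2.67 GHz TSC.
	 */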
	if (tscDelta > (1ULL<<32))
	{
		retval = 0;
	}
	else
	{
		retval = tscDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}

/*
 * timeRDTSC()
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
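/*
 * The returned value is the number of TSC ticks in (roughly) 1/20 second;
 * callers convert it to Hz by multiplying by 20 (see the fallback path in
 * scan_cpu(): tscFrequency = timeRDTSC() * 20).
 */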
static uint64_t timeRDTSC(void)
{
	int		attempts = 0;
	uint64_t	latchTime;
	uint64_t	saveTime, intermediate;
	unsigned int	timerValue, lastValue;
	//boolean_t	int_enabled;
	/*
	 * Table of correction factors to account for
	 *	- timer counter quantization errors, and
	 *	- undercounts 0..5
	 */
#define SAMPLE_CLKS_EXACT	(((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT		((int) CLKNUM / 20)
#define SAMPLE_NSECS		(2000000000LL)
#define SAMPLE_MULTIPLIER	(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)		((uint64_t)((x) + 0.5))
	uint64_t	scale[6] = {
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
	};
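	/* In effect, scale[i] = SAMPLE_NSECS * SAMPLE_CLKS_EXACT / (SAMPLE_CLKS_INT - i),
	 * so multiplying a raw TSC count by scale[timerValue] and dividing by
	 * SAMPLE_NSECS rescales a run that actually spanned
	 * (SAMPLE_CLKS_INT - timerValue) PIT clocks to the count it would have
	 * reached over exactly CLKNUM/20 PIT clocks, i.e. 1/20 second.
	 */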

	//int_enabled = ml_set_interrupts_enabled(FALSE);

restart:
	if (attempts >= 9) // increase to up to 9 attempts.
	{
		// This will flash-reboot. TODO: Use tscPanic instead.
		printf("Timestamp counter calibration failed with %d attempts\n", attempts);
	}
	attempts++;
	enable_PIT2();		// turn on PIT2
	set_PIT2(0);		// reset timer 2 to be zero
	latchTime = rdtsc64();	// get the time stamp to time
	latchTime = get_PIT2(&timerValue) - latchTime;	// time how long this takes
	set_PIT2(SAMPLE_CLKS_INT);	// set up the timer for (almost) 1/20th a second
	saveTime = rdtsc64();	// now time how long a 20th a second is...
	get_PIT2(&lastValue);
	get_PIT2(&lastValue);	// read twice, first value may be unreliable
	do {
		intermediate = get_PIT2(&timerValue);
		if (timerValue > lastValue)
		{
			// Timer wrapped
			set_PIT2(0);
			disable_PIT2();
			goto restart;
		}
		lastValue = timerValue;
	} while (timerValue > 5);
	printf("timerValue   %d\n", timerValue);
	printf("intermediate 0x%016llX\n", intermediate);
	printf("saveTime     0x%016llX\n", saveTime);

	intermediate -= saveTime;		// raw count for about 1/20 second
	intermediate *= scale[timerValue];	// rescale measured time spent
	intermediate /= SAMPLE_NSECS;		// so it's exactly 1/20 a second
	intermediate += latchTime;		// add on our save fudge

	set_PIT2(0);		// reset timer 2 to be zero
	disable_PIT2();		// turn off PIT 2

	//ml_set_interrupts_enabled(int_enabled);
	return intermediate;
}

/*
 * Original comment/code:
 * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"
 *
 * Measures the Actual Performance Frequency in Hz (64-bit)
 * (just a naming change, mperf --> aperf )
 */
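/*
 * Note: this is only used for AMD K10 below, and only when CPUID
 * Fn0000_0006 ECX[0] reports the effective frequency interface
 * (see the APERF handling in scan_cpu()).
 */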
static uint64_t measure_aperf_frequency(void)
{
	uint64_t aperfStart;
	uint64_t aperfEnd;
	uint64_t aperfDelta = 0xffffffffffffffffULL;
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of APERF ticks that occur while waiting for the timer to
	 * expire.
	 */
	for (i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		aperfStart = rdmsr64(MSR_AMD_APERF);
		pollCount = poll_PIT2_gate();
		aperfEnd = rdmsr64(MSR_AMD_APERF);
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1)
		{
			continue;
		}
		/* The APERF count must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the APERF delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
		{
			continue;
		}
		// aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
		if ( (aperfEnd - aperfStart) < aperfDelta )
		{
			aperfDelta = aperfEnd - aperfStart;
		}
	}
	/* aperfDelta is now the least number of APERF ticks the processor made in
	 * a timespan of 0.03 s (i.e. 30 milliseconds)
	 */

	if (aperfDelta > (1ULL<<32))
	{
		retval = 0;
	}
	else
	{
		retval = aperfDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}

/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
 * - multi. is read from a specific MSR. In the case of Intel, there is:
 *	a max multi. (used to calculate the FSB freq.),
 *	and a current multi. (used to calculate the CPU freq.)
 * - fsbFrequency = tscFrequency / multi
 * - cpuFrequency = fsbFrequency * multi
 */
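/*
 * Worked example (hypothetical numbers): with a measured tscFrequency of
 * 3,400,000,000 Hz and a max multiplier of 34, fsbFrequency is
 * 3,400,000,000 / 34 = 100,000,000 Hz (a 100 MHz BCLK); with a current
 * multiplier of 34, cpuFrequency is 100,000,000 * 34 = 3.4 GHz.
 */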
void scan_cpu(PlatformInfo_t *p)
{
	uint64_t	tscFrequency = 0;
	uint64_t	fsbFrequency = 0;
	uint64_t	cpuFrequency = 0;
	uint64_t	msr = 0;
	uint64_t	flex_ratio = 0;

	uint32_t	max_ratio = 0;
	uint32_t	min_ratio = 0;
	uint8_t		bus_ratio_max = 0;
	uint8_t		bus_ratio_min = 0;
	uint8_t		currdiv = 0;
	uint8_t		currcoef = 0;
	uint8_t		maxdiv = 0;
	uint8_t		maxcoef = 0;
	const char	*newratio;
	int		len = 0;
	int		myfsb = 0;

	/* get cpuid values */
	do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
	do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);

	do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
	do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
	do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);

	do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
	if (p->CPU.CPUID[CPUID_0][0] >= 0x5)
	{
		do_cpuid(5, p->CPU.CPUID[CPUID_5]);
	}
	if (p->CPU.CPUID[CPUID_0][0] >= 6)
	{
		do_cpuid(6, p->CPU.CPUID[CPUID_6]);
	}
	if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8)
	{
		do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}
	else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1)
	{
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}

// #if DEBUG_CPU
	{
		int	i;
		DBG("CPUID Raw Values:\n");
		for (i = 0; i < CPUID_MAX; i++) {
			DBG("%02d: %08x-%08x-%08x-%08x\n", i,
				p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
				p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
		}
	}
// #endif

/* http://www.flounder.com/cpuid_explorer2.htm
   EAX (Intel):
   31    28 27            20 19    16 15 14 13 12 11     8 7      4 3      0
   +--------+----------------+--------+----+----+--------+--------+--------+
   |########|Extended family |Extmodel|####|type|familyid| model  |stepping|
   +--------+----------------+--------+----+----+--------+--------+--------+

   EAX (AMD):
   31    28 27            20 19    16 15 14 13 12 11     8 7      4 3      0
   +--------+----------------+--------+----+----+--------+--------+--------+
   |########|Extended family |Extmodel|####|####|familyid| model  |stepping|
   +--------+----------------+--------+----+----+--------+--------+--------+
*/
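/* For example, an Intel signature of 0x000306A9 decodes to stepping 9,
 * model 0xA, family 6, extended model 3; after the ExtModel adjustment
 * below, the effective model is 0x3A (Ivy Bridge).
 */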
	p->CPU.MCodeVersion	= (uint32_t)(rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
	p->CPU.Vendor		= p->CPU.CPUID[CPUID_0][1];
	p->CPU.Signature	= p->CPU.CPUID[CPUID_1][0];
	p->CPU.Stepping		= bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);	// stepping = cpu_feat_eax & 0xF;
	p->CPU.Model		= bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);	// model = (cpu_feat_eax >> 4) & 0xF;
	p->CPU.Family		= bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);	// family = (cpu_feat_eax >> 8) & 0xF;
	//p->CPU.Type		= bitfield(p->CPU.CPUID[CPUID_1][0], 13, 12);	// type = (cpu_feat_eax >> 12) & 0x3;
	p->CPU.ExtModel		= bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);	// ext_model = (cpu_feat_eax >> 16) & 0xF;
	p->CPU.ExtFamily	= bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);	// ext_family = (cpu_feat_eax >> 20) & 0xFF;

	p->CPU.Model += (p->CPU.ExtModel << 4);

	if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
		p->CPU.Family == 0x06 &&
		p->CPU.Model >= CPUID_MODEL_NEHALEM &&
		p->CPU.Model != CPUID_MODEL_ATOM	// MSR is *NOT* available on the Intel Atom CPU
		)
	{
		/*
		 * Find the number of enabled cores and threads
		 * (which determines whether SMT/Hyperthreading is active).
		 */
		switch (p->CPU.Model)
		{
			case CPUID_MODEL_NEHALEM:
			case CPUID_MODEL_FIELDS:
			case CPUID_MODEL_DALES:
			case CPUID_MODEL_NEHALEM_EX:
			case CPUID_MODEL_JAKETOWN:
			case CPUID_MODEL_SANDYBRIDGE:
			case CPUID_MODEL_IVYBRIDGE:
			case CPUID_MODEL_HASWELL:
			case CPUID_MODEL_HASWELL_SVR:
			//case CPUID_MODEL_HASWELL_H:
			case CPUID_MODEL_HASWELL_ULT:
			case CPUID_MODEL_CRYSTALWELL:
				msr = rdmsr64(MSR_CORE_THREAD_COUNT);
				p->CPU.NoCores		= (uint8_t)bitfield((uint32_t)msr, 31, 16);
				p->CPU.NoThreads	= (uint8_t)bitfield((uint32_t)msr, 15, 0);
				break;

			case CPUID_MODEL_DALES_32NM:
			case CPUID_MODEL_WESTMERE:
			case CPUID_MODEL_WESTMERE_EX:
				msr = rdmsr64(MSR_CORE_THREAD_COUNT);
				p->CPU.NoCores		= (uint8_t)bitfield((uint32_t)msr, 19, 16);
				p->CPU.NoThreads	= (uint8_t)bitfield((uint32_t)msr, 15, 0);
				break;

			default:
				p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
				p->CPU.NoThreads	= (uint8_t)(p->CPU.LogicalPerPackage & 0xff);
				// Workaround for the Atom N270; it is unclear why it is detected incorrectly.
				if ((p->CPU.Model == CPUID_MODEL_ATOM) && (p->CPU.Stepping == 2))
				{
					p->CPU.NoCores = 1;
				}
				break;

		} // end switch

	}
	else if (p->CPU.Vendor == CPUID_VENDOR_AMD)
	{
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
	}
	else
	{
		// Use previous method for Cores and Threads
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
	}

	/* get BrandString (if supported) */
	/* Copyright: from Apple's XNU cpuid.c */
	if (p->CPU.CPUID[CPUID_80][0] > 0x80000004)
	{
		uint32_t	reg[4];
		char		str[128], *s;
		/*
		 * The BrandString is 48 bytes (max), guaranteed to
		 * be NULL terminated.
		 */
		do_cpuid(0x80000002, reg);
		bcopy((char *)reg, &str[0], 16);
		do_cpuid(0x80000003, reg);
		bcopy((char *)reg, &str[16], 16);
		do_cpuid(0x80000004, reg);
		bcopy((char *)reg, &str[32], 16);
		for (s = str; *s != '\0'; s++)
		{
			if (*s != ' ')
			{
				break;
			}
		}
		/* Copy the trimmed string into the platform record (assumes the
		 * strlcpy() provided elsewhere in the boot loader is available here). */
		strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));

		if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1)))
		{
			/*
			 * This string means we have a firmware-programmable brand string,
			 * and the firmware couldn't figure out what sort of CPU we have.
			 */
			p->CPU.BrandString[0] = '\0';
		}
	}

	/* setup features */
	if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_MMX;
	}

	if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE;
	}

	if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE2;
	}

	if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE3;
	}

	if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE41;
	}

	if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_SSE42;
	}

	if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_EM64T;
	}

	if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0)
	{
		p->CPU.Features |= CPU_FEATURE_MSR;
	}

	//if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {

	if (p->CPU.NoThreads > p->CPU.NoCores)
	{
		p->CPU.Features |= CPU_FEATURE_HTT;
	}

	tscFrequency = measure_tsc_frequency();
	DBG("cpu freq classic = 0x%016llx\n", tscFrequency);
	/* if usual method failed */
	if ( tscFrequency < 1000 )	// TEST
	{
		tscFrequency = timeRDTSC() * 20;	// measure_tsc_frequency();
		// DBG("cpu freq timeRDTSC = 0x%016llx\n", tscFrequency);
	}
	else
	{
		// DBG("cpu freq timeRDTSC = 0x%016llx\n", timeRDTSC() * 20);
	}

	fsbFrequency = 0;
	cpuFrequency = 0;

	if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f)))
	{
		int intelCPU = p->CPU.Model;
		if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03))
		{
			/* Nehalem CPU model */
			switch (p->CPU.Model)
			{
				case CPUID_MODEL_NEHALEM:
				case CPUID_MODEL_FIELDS:
				case CPUID_MODEL_DALES:
				case CPUID_MODEL_DALES_32NM:
				case CPUID_MODEL_WESTMERE:
				case CPUID_MODEL_NEHALEM_EX:
				case CPUID_MODEL_WESTMERE_EX:
				/* --------------------------------------------------------- */
				case CPUID_MODEL_SANDYBRIDGE:
				case CPUID_MODEL_JAKETOWN:
				case CPUID_MODEL_IVYBRIDGE_XEON:
				case CPUID_MODEL_IVYBRIDGE:
				case CPUID_MODEL_HASWELL:
				case CPUID_MODEL_HASWELL_SVR:

				case CPUID_MODEL_HASWELL_ULT:
				case CPUID_MODEL_CRYSTALWELL:
				/* --------------------------------------------------------- */
					msr = rdmsr64(MSR_PLATFORM_INFO);
					DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
					bus_ratio_max = bitfield(msr, 15, 8);
					bus_ratio_min = bitfield(msr, 47, 40); //valv: not sure about this one (Remarq.1)
					msr = rdmsr64(MSR_FLEX_RATIO);
					DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
					if (bitfield(msr, 16, 16))
					{
						flex_ratio = bitfield(msr, 15, 8);
						/* bcc9: at least on the Gigabyte H67MA-UD2H,
						   where the CPU multiplier can't be changed to
						   allow overclocking, the flex_ratio MSR has unexpected (to OS X)
						   contents. These contents cause mach_kernel to
						   fail to compute the bus ratio correctly, instead
						   causing the system to crash since tscGranularity
						   is inadvertently set to 0.
						 */
						if (flex_ratio == 0)
						{
							/* Clear bit 16 (evidently the presence bit) */
							wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
							msr = rdmsr64(MSR_FLEX_RATIO);
							DBG("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
						}
						else
						{
							if (bus_ratio_max > flex_ratio)
							{
								bus_ratio_max = flex_ratio;
							}
						}
					}

					if (bus_ratio_max)
					{
						fsbFrequency = (tscFrequency / bus_ratio_max);
					}

					//valv: Turbo Ratio Limit
					if ((intelCPU != 0x2e) && (intelCPU != 0x2f))
					{
						msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);

						cpuFrequency = bus_ratio_max * fsbFrequency;
						max_ratio = bus_ratio_max * 10;
					}
					else
					{
						cpuFrequency = tscFrequency;
					}
					if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4))
					{
						max_ratio = atoi(newratio);
						max_ratio = (max_ratio * 10);
						if (len >= 3)
						{
							max_ratio = (max_ratio + 5);
						}

						verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

						// extreme overclockers may love 320 ;)
						if ((max_ratio >= min_ratio) && (max_ratio <= 320))
						{
							cpuFrequency = (fsbFrequency * max_ratio) / 10;
							if (len >= 3)
							{
								maxdiv = 1;
							}
							else
							{
								maxdiv = 0;
							}
						}
						else
						{
							max_ratio = (bus_ratio_max * 10);
						}
					}
					//valv: to be uncommented if Remarq.1 didn't stick
					/*if (bus_ratio_max > 0) bus_ratio = flex_ratio;*/
					p->CPU.MaxRatio = max_ratio;
					p->CPU.MinRatio = min_ratio;

					myfsb = fsbFrequency / 1000000;
					verbose("Sticking with [BCLK: %dMHz, Bus-Ratio: %d]\n", myfsb, max_ratio / 10); // Bungo: fixed wrong Bus-Ratio readout
					currcoef = bus_ratio_max;

					break;

				default:
					msr = rdmsr64(MSR_IA32_PERF_STATUS);
					DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
					currcoef = bitfield(msr, 12, 8); // Bungo: reverted to 2263 state because of wrong old CPUs freq. calculating
					/* Non-integer bus ratio for the max-multi */
					maxdiv = bitfield(msr, 46, 46);
					/* Non-integer bus ratio for the current-multi (undocumented) */
					currdiv = bitfield(msr, 14, 14);

					// This will always be model >= 3
					if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
					{
						/* On these models, maxcoef defines TSC freq */
						maxcoef = bitfield(msr, 44, 40);
					}
					else
					{
						/* On lower models, currcoef defines TSC freq */
						/* XXX */
						maxcoef = currcoef;
					}

					if (!currcoef)
					{
						currcoef = maxcoef;
					}

					if (maxcoef)
					{
						if (maxdiv)
						{
							fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
						}
						else
						{
							fsbFrequency = (tscFrequency / maxcoef);
						}

						if (currdiv)
						{
							cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
						}
						else
						{
							cpuFrequency = (fsbFrequency * currcoef);
						}

						DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "", currcoef, currdiv ? ".5" : "");
					}
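					/* Worked example (hypothetical numbers): with maxcoef = 9 and
					 * maxdiv = 1 (a 9.5 max multiplier), a 1.9 GHz TSC gives
					 * fsbFrequency = (1,900,000,000 * 2) / 19 = 200,000,000 Hz;
					 * with currcoef = 6 and currdiv = 0 the reported cpuFrequency
					 * is 200,000,000 * 6 = 1.2 GHz.
					 */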
					break;
			}
		}
		/* Mobile CPU */
		if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28))
		{
			p->CPU.Features |= CPU_FEATURE_MOBILE;
		}
	}
	else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f))
	{
		switch (p->CPU.ExtFamily)
		{
			case 0x00: /* K8 */
				msr = rdmsr64(K8_FIDVID_STATUS);
				maxcoef = bitfield(msr, 21, 16) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) / 2 + 4;
				break;

			case 0x01: /* K10 */
				msr = rdmsr64(K10_COFVID_STATUS);
				do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
				// EffFreq: effective frequency interface
				if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1)
				{
					//uint64_t mperf = measure_mperf_frequency();
					uint64_t aperf = measure_aperf_frequency();
					cpuFrequency = aperf;
				}
				// NOTE: the TSC runs at the maxcoef (non-turbo) frequency,
				// *not* at the turbo frequency.
				maxcoef = bitfield(msr, 54, 49) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) + 0x10;
				currdiv = 2 << bitfield(msr, 8, 6);

				break;

			case 0x05: /* K14 */
				msr = rdmsr64(K10_COFVID_STATUS);
				currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
				currdiv = (bitfield(msr, 8, 4) + 1) << 2;
				currdiv += bitfield(msr, 3, 0);

				break;

			case 0x02: /* K11 */
				// not implemented
				break;
		}

		if (maxcoef)
		{
			if (currdiv)
			{
				if (!currcoef)
				{
					currcoef = maxcoef;
				}

				if (!cpuFrequency)
				{
					fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				}
				else
				{
					fsbFrequency = ((cpuFrequency * currdiv) / currcoef);
				}
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			}
			else
			{
				if (!cpuFrequency)
				{
					fsbFrequency = (tscFrequency / maxcoef);
				}
				else
				{
					fsbFrequency = (cpuFrequency / maxcoef);
				}
				DBG("%d\n", currcoef);
			}
		}
		else if (currcoef)
		{
			if (currdiv)
			{
				fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			}
			else
			{
				fsbFrequency = (tscFrequency / currcoef);
				DBG("%d\n", currcoef);
			}
		}
		if (!cpuFrequency) cpuFrequency = tscFrequency;
	}

#if 0
	if (!fsbFrequency)
	{
		fsbFrequency = (DEFAULT_FSB * 1000);
		cpuFrequency = tscFrequency;
		DBG("0 ! using the default value for FSB !\n");
	}

	DBG("cpu freq = 0x%016llx\n", timeRDTSC() * 20);

#endif

	p->CPU.MaxCoef = maxcoef;
	p->CPU.MaxDiv = maxdiv;
	p->CPU.CurrCoef = currcoef;
	p->CPU.CurrDiv = currdiv;
	p->CPU.TSCFrequency = tscFrequency;
	p->CPU.FSBFrequency = fsbFrequency;
	p->CPU.CPUFrequency = cpuFrequency;

	// keep formatted with spaces instead of tabs
	DBG("\n---------------------------------------------\n");
	DBG("------------------ CPU INFO -----------------\n");
	DBG("---------------------------------------------\n");
	DBG("Brand String:       %s\n",   p->CPU.BrandString);   // Processor name (BIOS)
	DBG("Vendor:             0x%x\n", p->CPU.Vendor);        // Vendor ex: GenuineIntel
	DBG("Family:             0x%x\n", p->CPU.Family);        // Family ex: 6 (06h)
	DBG("ExtFamily:          0x%x\n", p->CPU.ExtFamily);
	DBG("Signature:          %x\n",   p->CPU.Signature);     // CPUID signature
	/*switch (p->CPU.Type) {
		case PT_OEM:
			DBG("Processor type:     Intel Original OEM Processor\n");
			break;
		case PT_OD:
			DBG("Processor type:     Intel Over Drive Processor\n");
			break;
		case PT_DUAL:
			DBG("Processor type:     Intel Dual Processor\n");
			break;
		case PT_RES:
			DBG("Processor type:     Intel Reserved\n");
			break;
		default:
			break;
	}*/
	DBG("Model:              0x%x\n", p->CPU.Model);         // Model ex: 37 (025h)
	DBG("ExtModel:           0x%x\n", p->CPU.ExtModel);
	DBG("Stepping:           0x%x\n", p->CPU.Stepping);      // Stepping ex: 5 (05h)
	DBG("MaxCoef:            0x%x\n", p->CPU.MaxCoef);
	DBG("CurrCoef:           0x%x\n", p->CPU.CurrCoef);
	DBG("MaxDiv:             0x%x\n", p->CPU.MaxDiv);
	DBG("CurrDiv:            0x%x\n", p->CPU.CurrDiv);
	DBG("TSCFreq:            %dMHz\n", p->CPU.TSCFrequency / 1000000);
	DBG("FSBFreq:            %dMHz\n", (p->CPU.FSBFrequency + 500000) / 1000000);
	DBG("CPUFreq:            %dMHz\n", p->CPU.CPUFrequency / 1000000);
	DBG("Cores:              %d\n",   p->CPU.NoCores);       // Cores
	DBG("Logical processor:  %d\n",   p->CPU.NoThreads);     // Logical processors
	DBG("Features:           0x%08x\n", p->CPU.Features);
	DBG("Microcode version:  %d\n",   p->CPU.MCodeVersion);  // CPU microcode version
	DBG("\n---------------------------------------------\n");
#if DEBUG_CPU
	pause();
#endif
}
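/*
 * Typical use (sketch of the usual Chameleon flow): scan_cpu(&Platform) is
 * called once during platform setup, and the populated Platform.CPU.* fields
 * (frequencies, core/thread counts, feature flags) are read later, e.g. when
 * the SMBIOS tables are generated.
 */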


Revision: 2542