Chameleon Svn Source Tree

Root/branches/ErmaC/Enoch/i386/libsaio/cpu.c

/*
 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
 * AsereBLN: 2009: cleanup and bugfix
 */

#include "libsaio.h"
#include "platform.h"
#include "cpu.h"
#include "bootstruct.h"
#include "boot.h"

#ifndef DEBUG_CPU
#define DEBUG_CPU 0
#endif

#if DEBUG_CPU
#define DBG(x...)	printf(x)
#else
#define DBG(x...)	msglog(x)
#endif

/*
 * timeRDTSC()
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
uint64_t timeRDTSC(void)
{
	int		attempts = 0;
	uint64_t	latchTime;
	uint64_t	saveTime, intermediate;
	unsigned int	timerValue, lastValue;
	//boolean_t	int_enabled;
	/*
	 * Table of correction factors to account for
	 *	- timer counter quantization errors, and
	 *	- undercounts 0..5
	 */
#define SAMPLE_CLKS_EXACT	(((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT		((int) CLKNUM / 20)
#define SAMPLE_NSECS		(2000000000LL)
#define SAMPLE_MULTIPLIER	(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)		((uint64_t)((x) + 0.5))
	uint64_t	scale[6] = {
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
	};
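	/*
	 * How the table is used (illustrative note, assuming the usual
	 * 1.193182 MHz PIT input clock for CLKNUM, so SAMPLE_CLKS_INT is
	 * about 59659 ticks per 1/20 s): scale[n] is
	 *     SAMPLE_NSECS * SAMPLE_CLKS_EXACT / (SAMPLE_CLKS_INT - n),
	 * so multiplying the raw TSC delta by scale[timerValue] and then
	 * dividing by SAMPLE_NSECS scales it by
	 * SAMPLE_CLKS_EXACT / (SAMPLE_CLKS_INT - n), stretching a sample
	 * that undercounted by n PIT ticks back to exactly 1/20 s.
	 */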

	//int_enabled = ml_set_interrupts_enabled(FALSE);

restart:
	if (attempts >= 9) // allow up to 9 attempts
	{
		// This will flash-reboot. TODO: Use tscPanic instead.
		printf("Timestamp counter calibration failed with %d attempts\n", attempts);
	}
	attempts++;
	enable_PIT2();			// turn on PIT2
	set_PIT2(0);			// reset timer 2 to be zero
	latchTime = rdtsc64();		// get the time stamp to time
	latchTime = get_PIT2(&timerValue) - latchTime;	// time how long this takes
	set_PIT2(SAMPLE_CLKS_INT);	// set up the timer for (almost) 1/20th a second
	saveTime = rdtsc64();		// now time how long a 20th a second is...
	get_PIT2(&lastValue);
	get_PIT2(&lastValue);		// read twice, first value may be unreliable
	do {
		intermediate = get_PIT2(&timerValue);
		if (timerValue > lastValue)
		{
			// Timer wrapped
			set_PIT2(0);
			disable_PIT2();
			goto restart;
		}
		lastValue = timerValue;
	} while (timerValue > 5);
	printf("timerValue    %d\n", timerValue);
	printf("intermediate  0x%016llx\n", intermediate);
	printf("saveTime      0x%016llx\n", saveTime);

	intermediate -= saveTime;		// raw count for about 1/20 second
	intermediate *= scale[timerValue];	// rescale measured time spent
	intermediate /= SAMPLE_NSECS;		// so it's exactly 1/20 a second
	intermediate += latchTime;		// add on our save fudge

	set_PIT2(0);			// reset timer 2 to be zero
	disable_PIT2();			// turn off PIT 2

	//ml_set_interrupts_enabled(int_enabled);
	return intermediate;
}

/*
 * DFE: Measures the TSC frequency in Hz (64-bit) using the 8254 PIT
 */
static uint64_t measure_tsc_frequency(void)
{
	uint64_t tscStart;
	uint64_t tscEnd;
	uint64_t tscDelta = 0xffffffffffffffffULL;
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of TSC ticks that occur while waiting for the timer to
	 * expire. That theoretically helps avoid inconsistencies when
	 * running under a VM if the TSC is not virtualized and the host
	 * steals time. The TSC is normally virtualized for VMware.
	 */
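	/* Rough numbers for orientation (illustrative, assuming the PIT's
	 * standard 1.193182 MHz input clock and a 30 ms CALIBRATE_TIME_MSEC):
	 * CALIBRATE_LATCH would then correspond to roughly 35,795 PIT ticks
	 * per run of the loop below.
	 */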
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		tscStart = rdtsc64();
		pollCount = poll_PIT2_gate();
		tscEnd = rdtsc64();
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1) {
			continue;
		}
		/* The TSC must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the TSC delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC) {
			continue;
		}
		// tscDelta = MIN(tscDelta, (tscEnd - tscStart))
		if ( (tscEnd - tscStart) < tscDelta ) {
			tscDelta = tscEnd - tscStart;
		}
	}
	/* tscDelta is now the least number of TSC ticks the processor made in
	 * a timespan of 0.03 s (i.e. 30 milliseconds)
	 * Linux thus divides by 30 which gives the answer in kiloHertz because
	 * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
	 * Hz so we need to convert our milliseconds to seconds. Since we're
	 * dividing by the milliseconds, we simply multiply by 1000.
	 */
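	/* Worked example (illustrative numbers only): if the smallest run
	 * measured tscDelta = 96,000,000 ticks in the 30 ms window, then
	 * 96,000,000 * 1000 / 30 = 3,200,000,000, i.e. a 3.2 GHz TSC.
	 */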

	/* Unlike linux, we're not limited to 32-bit, but we do need to take care
	 * that we're going to multiply by 1000 first so we do need at least some
	 * arithmetic headroom. For now, 32-bit should be enough.
	 * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
	 */
	if (tscDelta > (1ULL<<32)) {
		retval = 0;
	} else {
		retval = tscDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}

/*
 * Original comment/code:
 *  "DFE: Measures the Max Performance Frequency in Hz (64-bit)"
 *
 *  Measures the Actual Performance Frequency in Hz (64-bit)
 *  (just a naming change, mperf --> aperf )
 */
static uint64_t measure_aperf_frequency(void)
{
	uint64_t aperfStart;
	uint64_t aperfEnd;
	uint64_t aperfDelta = 0xffffffffffffffffULL;
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of APERF ticks that occur while waiting for the timer to
	 * expire.
	 */
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		aperfStart = rdmsr64(MSR_AMD_APERF);
		pollCount = poll_PIT2_gate();
		aperfEnd = rdmsr64(MSR_AMD_APERF);
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1) {
			continue;
		}
		/* The APERF counter must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the APERF delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC) {
			continue;
		}
		// aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
		if ( (aperfEnd - aperfStart) < aperfDelta ) {
			aperfDelta = aperfEnd - aperfStart;
		}
	}
	/* aperfDelta is now the least number of APERF ticks the processor made
	 * in a timespan of 0.03 s (i.e. 30 milliseconds)
	 */

	if (aperfDelta > (1ULL<<32)) {
		retval = 0;
	} else {
		retval = aperfDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}

/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
 * - multi. is read from a specific MSR. In the case of Intel, there is:
 *	a max multi. (used to calculate the FSB freq.),
 *	and a current multi. (used to calculate the CPU freq.)
 * - fsbFrequency = tscFrequency / multi
 * - cpuFrequency = fsbFrequency * multi
 */
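/*
 * Worked example (illustrative numbers only): a CPU whose TSC runs at
 * 3,400 MHz with a maximum multiplier of 34 gives
 *	fsbFrequency = 3,400 MHz / 34 = 100 MHz,
 * and, with a current multiplier of 34,
 *	cpuFrequency = 100 MHz * 34 = 3,400 MHz.
 */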
void scan_cpu(PlatformInfo_t *p)
{
	uint64_t	tscFrequency = 0;
	uint64_t	fsbFrequency = 0;
	uint64_t	cpuFrequency = 0;
	uint64_t	msr = 0;
	uint64_t	flex_ratio = 0;
	uint32_t	max_ratio = 0;
	uint32_t	min_ratio = 0;
	uint8_t		bus_ratio_max = 0;
	uint8_t		currdiv = 0;
	uint8_t		currcoef = 0;
	uint8_t		maxdiv = 0;
	uint8_t		maxcoef = 0;
	const char	*newratio;
	int		len = 0;
	int		myfsb = 0;
	uint8_t		bus_ratio_min = 0;

	/* get cpuid values */
	do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
	do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);

	do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
	do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
	do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);

	do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
	if (p->CPU.CPUID[CPUID_0][0] >= 0x5) {
		do_cpuid(5, p->CPU.CPUID[CPUID_5]);
	}
	if (p->CPU.CPUID[CPUID_0][0] >= 6) {
		do_cpuid(6, p->CPU.CPUID[CPUID_6]);
	}
	if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8) {
		do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	} else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1) {
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}

// #if DEBUG_CPU
	{
		int	i;
		DBG("CPUID Raw Values:\n");
		for (i = 0; i < CPUID_MAX; i++) {
			DBG("%02d: %08x-%08x-%08x-%08x\n", i,
				p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
				p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
		}
	}
// #endif

/* http://www.flounder.com/cpuid_explorer2.htm
   EAX (Intel):
   31    28 27            20 19    16 15 14 13 12 11     8 7      4 3      0
   +--------+----------------+--------+----+----+--------+--------+--------+
   |########|Extended family |Extmodel|####|type|familyid| model  |stepping|
   +--------+----------------+--------+----+----+--------+--------+--------+

   EAX (AMD):
   31    28 27            20 19    16 15 14 13 12 11     8 7      4 3      0
   +--------+----------------+--------+----+----+--------+--------+--------+
   |########|Extended family |Extmodel|####|####|familyid| model  |stepping|
   +--------+----------------+--------+----+----+--------+--------+--------+
*/
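/*
 * For example (illustrative, using the layout above): an Intel signature of
 * 0x000306A9 decodes as stepping 9, model 0xA, family 6 and extended model 3,
 * so the code below derives Model = 0xA + (3 << 4) = 0x3A (Ivy Bridge).
 */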
	p->CPU.MCodeVersion	= (uint32_t)(rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
	p->CPU.Vendor		= p->CPU.CPUID[CPUID_0][1];
	p->CPU.Signature	= p->CPU.CPUID[CPUID_1][0];
	p->CPU.Stepping		= bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);	// stepping = cpu_feat_eax & 0xF;
	p->CPU.Model		= bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);	// model = (cpu_feat_eax >> 4) & 0xF;
	p->CPU.Family		= bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);	// family = (cpu_feat_eax >> 8) & 0xF;
	//p->CPU.Type		= bitfield(p->CPU.CPUID[CPUID_1][0], 13, 12);	// type = (cpu_feat_eax >> 12) & 0x3;
	p->CPU.ExtModel		= bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);	// ext_model = (cpu_feat_eax >> 16) & 0xF;
	p->CPU.ExtFamily	= bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);	// ext_family = (cpu_feat_eax >> 20) & 0xFF;

	p->CPU.Model += (p->CPU.ExtModel << 4);

	if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
		p->CPU.Family == 0x06 &&
		p->CPU.Model >= CPU_MODEL_NEHALEM &&
		p->CPU.Model != CPU_MODEL_ATOM	// MSR is *NOT* available on the Intel Atom CPU
		) {
		/*
		 * Find the number of enabled cores and threads
		 * (which determines whether SMT/Hyperthreading is active).
		 */
		switch (p->CPU.Model) {
			case CPU_MODEL_NEHALEM:
			case CPU_MODEL_FIELDS:
			case CPU_MODEL_DALES:
			case CPU_MODEL_NEHALEM_EX:
			case CPU_MODEL_IVYBRIDGE:
			case CPU_MODEL_HASWELL:
			case CPU_MODEL_HASWELL_SVR:
			//case CPU_MODEL_HASWELL_H:
			case CPU_MODEL_HASWELL_ULT:
			case CPU_MODEL_CRYSTALWELL:
				msr = rdmsr64(MSR_CORE_THREAD_COUNT);
				p->CPU.NoCores		= bitfield((uint32_t)msr, 31, 16);
				p->CPU.NoThreads	= bitfield((uint32_t)msr, 15, 0);
				break;

			case CPU_MODEL_DALES_32NM:
			case CPU_MODEL_WESTMERE:
			case CPU_MODEL_WESTMERE_EX:
			case CPU_MODEL_SANDYBRIDGE:
			case CPU_MODEL_JAKETOWN:
				// 0x2A 0x2D
				msr = rdmsr64(MSR_CORE_THREAD_COUNT);
				p->CPU.NoCores		= bitfield((uint32_t)msr, 19, 16);
				p->CPU.NoThreads	= bitfield((uint32_t)msr, 15, 0);
				break;

			default:
				p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
				p->CPU.NoThreads	= (uint8_t)(p->CPU.LogicalPerPackage & 0xff);
				// workaround for N270. I don't know why it is detected wrong
				if ((p->CPU.Model == CPU_MODEL_ATOM) &&
					(p->CPU.Stepping == 2)) {
					p->CPU.NoCores = 1;
				}
				break;

		} // end switch

	} else if (p->CPU.Vendor == CPUID_VENDOR_AMD) {
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
	} else {
		// Use previous method for Cores and Threads
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
	}

	/* get brand string (if supported) */
	/* Copyright: from Apple's XNU cpuid.c */
	if (p->CPU.CPUID[CPUID_80][0] > 0x80000004) {
		uint32_t	reg[4];
		char		str[128], *s;
		/*
		 * The brand string is 48 bytes (max), guaranteed to
		 * be NULL terminated.
		 */
		do_cpuid(0x80000002, reg);
		bcopy((char *)reg, &str[0], 16);
		do_cpuid(0x80000003, reg);
		bcopy((char *)reg, &str[16], 16);
		do_cpuid(0x80000004, reg);
		bcopy((char *)reg, &str[32], 16);
		for (s = str; *s != '\0'; s++) {
			if (*s != ' ') {
				break;
			}
		}

		strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));

		if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN,
				MIN(sizeof(p->CPU.BrandString),
					strlen(CPU_STRING_UNKNOWN) + 1))) {
			/*
			 * This string means we have a firmware-programmable brand string,
			 * and the firmware couldn't figure out what sort of CPU we have.
			 */
			p->CPU.BrandString[0] = '\0';
		}
	}

	/* setup features */
	if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MMX;
	}
	if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE;
	}
	if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE2;
	}
	if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE3;
	}
	if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE41;
	}
	if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE42;
	}
	if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_EM64T;
	}
	if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MSR;
	}
	//if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
	if (p->CPU.NoThreads > p->CPU.NoCores) {
		p->CPU.Features |= CPU_FEATURE_HTT;
	}

	tscFrequency = measure_tsc_frequency();
	DBG("cpu freq classic = 0x%016llx\n", tscFrequency);
	/* if usual method failed */
	if ( tscFrequency < 1000 ) { // TEST
		tscFrequency = timeRDTSC() * 20;	// measure_tsc_frequency();
		// DBG("cpu freq timeRDTSC = 0x%016llx\n", tscFrequency);
	} else {
		// DBG("cpu freq timeRDTSC = 0x%016llx\n", timeRDTSC() * 20);
	}
	fsbFrequency = 0;
	cpuFrequency = 0;

	if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f))) {
		int intelCPU = p->CPU.Model;
		if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)) {
			/* Nehalem CPU model */
			switch (p->CPU.Model) {
				case CPU_MODEL_NEHALEM:
				case CPU_MODEL_FIELDS:
				case CPU_MODEL_DALES:
				case CPU_MODEL_DALES_32NM:
				case CPU_MODEL_WESTMERE:
				case CPU_MODEL_NEHALEM_EX:
				case CPU_MODEL_WESTMERE_EX:
				/* --------------------------------------------------------- */
				case CPU_MODEL_SANDYBRIDGE:
				case CPU_MODEL_JAKETOWN:
				case CPU_MODEL_IVYBRIDGE_XEON:
				case CPU_MODEL_IVYBRIDGE:
				case CPU_MODEL_HASWELL:
				case CPU_MODEL_HASWELL_SVR:

				case CPU_MODEL_HASWELL_ULT:
				case CPU_MODEL_CRYSTALWELL:
				/* --------------------------------------------------------- */
					msr = rdmsr64(MSR_PLATFORM_INFO);
					DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
					bus_ratio_max = bitfield(msr, 15, 8);
					bus_ratio_min = bitfield(msr, 47, 40); //valv: not sure about this one (Remarq.1)
					msr = rdmsr64(MSR_FLEX_RATIO);
					DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
					if (bitfield(msr, 16, 16)) {
						flex_ratio = bitfield(msr, 15, 8);
						/* bcc9: at least on the gigabyte h67ma-ud2h,
						   where the cpu multiplier can't be changed to
						   allow overclocking, the flex_ratio msr has unexpected (to OSX)
						   contents. These contents cause mach_kernel to
						   fail to compute the bus ratio correctly, instead
						   causing the system to crash since tscGranularity
						   is inadvertently set to 0.
						 */
						if (flex_ratio == 0) {
							/* Clear bit 16 (evidently the presence bit) */
							wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
							msr = rdmsr64(MSR_FLEX_RATIO);
							DBG("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
						} else {
							if (bus_ratio_max > flex_ratio) {
								bus_ratio_max = flex_ratio;
							}
						}
					}

					if (bus_ratio_max) {
						fsbFrequency = (tscFrequency / bus_ratio_max);
					}
					//valv: Turbo Ratio Limit
					if ((intelCPU != 0x2e) && (intelCPU != 0x2f)) {
						msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);

						cpuFrequency = bus_ratio_max * fsbFrequency;
						max_ratio = bus_ratio_max * 10;
					} else {
						cpuFrequency = tscFrequency;
					}
					if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4)) {
						max_ratio = atoi(newratio);
						max_ratio = (max_ratio * 10);
						if (len >= 3) {
							max_ratio = (max_ratio + 5);
						}

						verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

						// extreme overclockers may love 320 ;)
						if ((max_ratio >= min_ratio) && (max_ratio <= 320)) {
							cpuFrequency = (fsbFrequency * max_ratio) / 10;
							if (len >= 3) {
								maxdiv = 1;
							} else {
								maxdiv = 0;
							}
						} else {
							max_ratio = (bus_ratio_max * 10);
						}
					}
					//valv: to be uncommented if Remarq.1 didn't stick
					/*if (bus_ratio_max > 0) bus_ratio = flex_ratio;*/
					p->CPU.MaxRatio = max_ratio;
					p->CPU.MinRatio = min_ratio;

					myfsb = fsbFrequency / 1000000;
					verbose("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio / 10);	// Bungo: fixed wrong Bus-Ratio readout
					currcoef = bus_ratio_max;

					break;

				default:
					msr = rdmsr64(MSR_IA32_PERF_STATUS);
					DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
					currcoef = bitfield(msr, 12, 8);	// Bungo: reverted to 2263 state because of wrong old CPUs freq. calculating
					/* Non-integer bus ratio for the max-multi */
					maxdiv = bitfield(msr, 46, 46);
					/* Non-integer bus ratio for the current-multi (undocumented) */
					currdiv = bitfield(msr, 14, 14);

					// This will always be model >= 3
					if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f)) {
						/* On these models, maxcoef defines TSC freq */
						maxcoef = bitfield(msr, 44, 40);
					} else {
						/* On lower models, currcoef defines TSC freq */
						/* XXX */
						maxcoef = currcoef;
					}

					if (maxcoef) {
						if (maxdiv) {
							fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
						} else {
							fsbFrequency = (tscFrequency / maxcoef);
						}
						if (currdiv) {
							cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
						} else {
							cpuFrequency = (fsbFrequency * currcoef);
						}
						DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "", currcoef, currdiv ? ".5" : "");
					}
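					/*
					 * Illustrative example (made-up register values): with
					 * maxcoef = 10 and maxdiv set, fsb = tsc * 2 / 21, i.e. a
					 * 10.5x max multiplier; with currcoef = 6 and currdiv set,
					 * cpu = fsb * 13 / 2, i.e. a 6.5x current multiplier.
					 */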
					break;
			}
		}
		/* Mobile CPU */
		if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28)) {
			p->CPU.Features |= CPU_FEATURE_MOBILE;
		}
	} else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f)) {
		switch(p->CPU.ExtFamily) {
			case 0x00: /* K8 */
				msr = rdmsr64(K8_FIDVID_STATUS);
				maxcoef = bitfield(msr, 21, 16) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) / 2 + 4;
				break;

			case 0x01: /* K10 */
				msr = rdmsr64(K10_COFVID_STATUS);
				do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
				// EffFreq: effective frequency interface
				if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1) {
					//uint64_t mperf = measure_mperf_frequency();
					uint64_t aperf = measure_aperf_frequency();
					cpuFrequency = aperf;
				}
				// NOTE: tsc runs at the maxcoef (non-turbo),
				// *not* at the turbo frequency.
				maxcoef = bitfield(msr, 54, 49) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) + 0x10;
				currdiv = 2 << bitfield(msr, 8, 6);
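				// The decoding above appears to follow AMD's documented
				// family 10h core frequency formula,
				//     CoreCOF = 100 MHz * (CpuFid + 10h) / 2^CpuDid,
				// hence the +0x10 on currcoef and the power-of-two divider
				// derived from the DID field.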

				break;

			case 0x05: /* K14 */
				msr = rdmsr64(K10_COFVID_STATUS);
				currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
				currdiv = (bitfield(msr, 8, 4) + 1) << 2;
				currdiv += bitfield(msr, 3, 0);

				break;

			case 0x02: /* K11 */
				// not implemented
				break;
		}

		if (maxcoef) {
			if (currdiv) {
				if (!currcoef) {
					currcoef = maxcoef;
				}

				if (!cpuFrequency) {
					fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				} else {
					fsbFrequency = ((cpuFrequency * currdiv) / currcoef);
				}
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			} else {
				if (!cpuFrequency) {
					fsbFrequency = (tscFrequency / maxcoef);
				} else {
					fsbFrequency = (cpuFrequency / maxcoef);
				}
				DBG("%d\n", currcoef);
			}
		} else if (currcoef) {
			if (currdiv) {
				fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			} else {
				fsbFrequency = (tscFrequency / currcoef);
				DBG("%d\n", currcoef);
			}
		}
		if (!cpuFrequency) cpuFrequency = tscFrequency;
	}

#if 0
	if (!fsbFrequency) {
		fsbFrequency = (DEFAULT_FSB * 1000);
		cpuFrequency = tscFrequency;
		DBG("0 ! using the default value for FSB !\n");
	}

	DBG("cpu freq = 0x%016llx\n", timeRDTSC() * 20);

#endif

	p->CPU.MaxCoef = maxcoef;
	p->CPU.MaxDiv = maxdiv;
	p->CPU.CurrCoef = currcoef;
	p->CPU.CurrDiv = currdiv;
	p->CPU.TSCFrequency = tscFrequency;
	p->CPU.FSBFrequency = fsbFrequency;
	p->CPU.CPUFrequency = cpuFrequency;

	// keep formatted with spaces instead of tabs
	DBG("\n---------------------------------------------\n");
	DBG("--------------- CPU INFO ---------------\n");
	DBG("---------------------------------------------\n");
	DBG("Brand String:       %s\n",    p->CPU.BrandString);	// Processor name (BIOS)
	DBG("Vendor:             0x%x\n",  p->CPU.Vendor);		// Vendor ex: GenuineIntel
	DBG("Family:             0x%x\n",  p->CPU.Family);		// Family ex: 6 (06h)
	DBG("ExtFamily:          0x%x\n",  p->CPU.ExtFamily);
	DBG("Signature:          %x\n",    p->CPU.Signature);		// CPUID signature
	/*switch (p->CPU.Type) {
		case PT_OEM:
			DBG("Processor type:     Intel Original OEM Processor\n");
			break;
		case PT_OD:
			DBG("Processor type:     Intel Over Drive Processor\n");
			break;
		case PT_DUAL:
			DBG("Processor type:     Intel Dual Processor\n");
			break;
		case PT_RES:
			DBG("Processor type:     Intel Reserved\n");
			break;
		default:
			break;
	}*/
	DBG("Model:              0x%x\n",  p->CPU.Model);		// Model ex: 37 (025h)
	DBG("ExtModel:           0x%x\n",  p->CPU.ExtModel);
	DBG("Stepping:           0x%x\n",  p->CPU.Stepping);		// Stepping ex: 5 (05h)
	DBG("MaxCoef:            0x%x\n",  p->CPU.MaxCoef);
	DBG("CurrCoef:           0x%x\n",  p->CPU.CurrCoef);
	DBG("MaxDiv:             0x%x\n",  p->CPU.MaxDiv);
	DBG("CurrDiv:            0x%x\n",  p->CPU.CurrDiv);
	DBG("TSCFreq:            %dMHz\n", p->CPU.TSCFrequency / 1000000);
	DBG("FSBFreq:            %dMHz\n", (p->CPU.FSBFrequency + 500000) / 1000000);
	DBG("CPUFreq:            %dMHz\n", p->CPU.CPUFrequency / 1000000);
	DBG("Cores:              %d\n",    p->CPU.NoCores);		// Cores
	DBG("Logical processors: %d\n",    p->CPU.NoThreads);		// Logical processors
	DBG("Features:           0x%08x\n", p->CPU.Features);
	DBG("Microcode version:  %d\n",    p->CPU.MCodeVersion);	// CPU microcode version
	DBG("\n---------------------------------------------\n");
#if DEBUG_CPU
	pause();
#endif
}



Revision: 2385