Chameleon

Chameleon Svn Source Tree

Root/branches/ErmaC/Enoch/i386/libsaio/cpu.c

/*
 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
 * AsereBLN: 2009: cleanup and bugfix
 */

#include "libsaio.h"
#include "platform.h"
#include "cpu.h"
#include "bootstruct.h"
#include "boot.h"

#ifndef DEBUG_CPU
#define DEBUG_CPU 0
#endif

#if DEBUG_CPU
#define DBG(x...)    printf(x)
#else
#define DBG(x...)    msglog(x)
#endif

/*
 * timeRDTSC()
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
uint64_t timeRDTSC(void)
{
    int          attempts = 0;
    uint64_t     latchTime;
    uint64_t     saveTime, intermediate;
    unsigned int timerValue, lastValue;
    //boolean_t  int_enabled;
    /*
     * Table of correction factors to account for
     *  - timer counter quantization errors, and
     *  - undercounts 0..5
     */
#define SAMPLE_CLKS_EXACT  (((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT    ((int) CLKNUM / 20)
#define SAMPLE_NSECS       (2000000000LL)
#define SAMPLE_MULTIPLIER  (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)         ((uint64_t)((x) + 0.5))
    uint64_t scale[6] = {
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
    };

    //int_enabled = ml_set_interrupts_enabled(FALSE);

restart:
    if (attempts >= 9) // increase to up to 9 attempts.
    {
        // This will flash-reboot. TODO: Use tscPanic instead.
        printf("Timestamp counter calibration failed with %d attempts\n", attempts);
    }
    attempts++;
    enable_PIT2();               // turn on PIT2
    set_PIT2(0);                 // reset timer 2 to be zero
    latchTime = rdtsc64();       // get the time stamp to time
    latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
    set_PIT2(SAMPLE_CLKS_INT);   // set up the timer for (almost) 1/20th a second
    saveTime = rdtsc64();        // now time how long a 20th a second is...
    get_PIT2(&lastValue);
    get_PIT2(&lastValue);        // read twice, first value may be unreliable
    do {
        intermediate = get_PIT2(&timerValue);
        if (timerValue > lastValue)
        {
            // Timer wrapped
            set_PIT2(0);
            disable_PIT2();
            goto restart;
        }
        lastValue = timerValue;
    } while (timerValue > 5);
    printf("timerValue   %d\n", timerValue);
    printf("intermediate 0x%016llx\n", intermediate);
    printf("saveTime     0x%016llx\n", saveTime);

    intermediate -= saveTime;          // raw count for about 1/20 second
    intermediate *= scale[timerValue]; // rescale measured time spent
    intermediate /= SAMPLE_NSECS;      // so it is exactly 1/20 of a second
    intermediate += latchTime;         // add on our save fudge

    set_PIT2(0);                       // reset timer 2 to be zero
    disable_PIT2();                    // turn off PIT 2

    //ml_set_interrupts_enabled(int_enabled);
    return intermediate;
}
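/*
 * Usage sketch: timeRDTSC() returns the TSC tick count for roughly 1/20 of a
 * second, so a caller can estimate the TSC frequency in Hz by scaling the
 * result by 20, which is what scan_cpu() below does as a fallback:
 *
 *     tscFrequency = timeRDTSC() * 20;   // ticks per 1/20 s -> ticks per second
 */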

/*
 * DFE: Measures the TSC frequency in Hz (64-bit) using the 8254 PIT
 */
static uint64_t measure_tsc_frequency(void)
{
    uint64_t tscStart;
    uint64_t tscEnd;
    uint64_t tscDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of TSC ticks that occur while waiting for the timer to
     * expire. That theoretically helps avoid inconsistencies when
     * running under a VM if the TSC is not virtualized and the host
     * steals time. The TSC is normally virtualized for VMware.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        tscStart = rdtsc64();
        pollCount = poll_PIT2_gate();
        tscEnd = rdtsc64();
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
        {
            continue;
        }
        /* The TSC must increment at LEAST once every millisecond.
         * We should have waited exactly 30 msec, so the TSC delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
        {
            continue;
        }
        // tscDelta = MIN(tscDelta, (tscEnd - tscStart))
        if ((tscEnd - tscStart) < tscDelta)
        {
            tscDelta = tscEnd - tscStart;
        }
    }
    /* tscDelta is now the least number of TSC ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds).
     * Linux thus divides by 30, which gives the answer in kilohertz because
     * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
     * Hz, so we need to convert our milliseconds to seconds. Since we're
     * dividing by the milliseconds, we simply multiply by 1000.
     */

    /* Unlike Linux, we're not limited to 32-bit, but we do need to take care
     * that we're going to multiply by 1000 first, so we do need at least some
     * arithmetic headroom. For now, 32-bit should be enough.
     * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
     */
    if (tscDelta > (1ULL<<32))
    {
        retval = 0;
    }
    else
    {
        retval = tscDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}
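/*
 * Worked example (illustrative numbers, not measured): if the smallest run
 * observed 72,000,000 TSC ticks during the 30 ms gate, then
 *
 *     retval = 72,000,000 * 1000 / 30 = 2,400,000,000 Hz   (a 2.4 GHz TSC)
 *
 * The "* 1000 / 30" factor simply converts ticks-per-30-ms into ticks-per-second.
 */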

/*
 * Original comment/code:
 * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"
 *
 * Measures the Actual Performance Frequency in Hz (64-bit)
 * (just a naming change, mperf --> aperf )
 */
static uint64_t measure_aperf_frequency(void)
{
    uint64_t aperfStart;
    uint64_t aperfEnd;
    uint64_t aperfDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of APERF ticks that occur while waiting for the timer to
     * expire.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        aperfStart = rdmsr64(MSR_AMD_APERF);
        pollCount = poll_PIT2_gate();
        aperfEnd = rdmsr64(MSR_AMD_APERF);
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
        {
            continue;
        }
        /* The APERF count must increment at LEAST once every millisecond.
         * We should have waited exactly 30 msec, so the APERF delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
        {
            continue;
        }
        // aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
        if ((aperfEnd - aperfStart) < aperfDelta)
        {
            aperfDelta = aperfEnd - aperfStart;
        }
    }
    /* aperfDelta is now the least number of APERF ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds)
     */

    if (aperfDelta > (1ULL<<32))
    {
        retval = 0;
    }
    else
    {
        retval = aperfDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}
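/*
 * Note: as used in scan_cpu() below, the value returned here is taken
 * directly as cpuFrequency in the AMD K10 path when the CPUID
 * effective-frequency interface (CPUID leaf 6, ECX bit 0) reports that
 * the APERF/MPERF counters are available.
 */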

/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
 * - multi. is read from a specific MSR. In the case of Intel, there is:
 *     a max multi. (used to calculate the FSB freq.),
 *     and a current multi. (used to calculate the CPU freq.)
 * - fsbFrequency = tscFrequency / multi
 * - cpuFrequency = fsbFrequency * multi
 */
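/*
 * Illustrative example (round numbers, assuming a Sandy Bridge-style part):
 * if the measured tscFrequency is ~3,400 MHz and MSR_PLATFORM_INFO reports a
 * maximum non-turbo ratio of 34, then
 *
 *     fsbFrequency = 3,400 MHz / 34 = 100 MHz    (the BCLK)
 *     cpuFrequency = 100 MHz * 34   = 3,400 MHz
 */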
void scan_cpu(PlatformInfo_t *p)
{
    uint64_t tscFrequency = 0;
    uint64_t fsbFrequency = 0;
    uint64_t cpuFrequency = 0;
    uint64_t msr = 0;
    uint64_t flex_ratio = 0;
    uint32_t max_ratio = 0;
    uint32_t min_ratio = 0;
    uint8_t  bus_ratio_max = 0;
    uint8_t  currdiv = 0;
    uint8_t  currcoef = 0;
    uint8_t  maxdiv = 0;
    uint8_t  maxcoef = 0;
    const char *newratio;
    int      len = 0;
    int      myfsb = 0;
    uint8_t  bus_ratio_min = 0;

    /* get cpuid values */
    do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
    do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);
    do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
    do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
    do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
    do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
    if (p->CPU.CPUID[CPUID_0][0] >= 0x5)
    {
        do_cpuid(5, p->CPU.CPUID[CPUID_5]);
    }
    if (p->CPU.CPUID[CPUID_0][0] >= 6)
    {
        do_cpuid(6, p->CPU.CPUID[CPUID_6]);
    }
    if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8)
    {
        do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }
    else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1)
    {
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }

#if DEBUG_CPU
    {
        int i;
        printf("CPUID Raw Values:\n");
        for (i = 0; i < CPUID_MAX; i++)
        {
            printf("%02d: %08x-%08x-%08x-%08x\n", i,
                   p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
                   p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
        }
    }
#endif

/* http://www.flounder.com/cpuid_explorer2.htm
   EAX (Intel):
   31   28 27            20 19    16 15 14 13 12 11     8 7      4 3      0
   +--------+----------------+--------+-----+-----+--------+--------+--------+
   |########|Extended family |Extmodel|#####|type |familyid| model  |stepping|
   +--------+----------------+--------+-----+-----+--------+--------+--------+

   EAX (AMD):
   31   28 27            20 19    16 15 14 13 12 11     8 7      4 3      0
   +--------+----------------+--------+-----+-----+--------+--------+--------+
   |########|Extended family |Extmodel|#####|#####|familyid| model  |stepping|
   +--------+----------------+--------+-----+-----+--------+--------+--------+
*/
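/*
 * Decoding example (a common Sandy Bridge desktop part, CPUID signature
 * 0x000206A7): stepping = 7, model = 0xA, family id = 6, extended model = 2,
 * extended family = 0. After the adjustment below (Model += ExtModel << 4),
 * the effective model becomes 0x2A.
 */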

    p->CPU.Vendor    = p->CPU.CPUID[CPUID_0][1];
    p->CPU.Signature = p->CPU.CPUID[CPUID_1][0];
    p->CPU.Stepping  = bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);
    p->CPU.Model     = bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);
    p->CPU.Family    = bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);
    p->CPU.Type      = bitfield(p->CPU.CPUID[CPUID_1][0], 13, 12);
    p->CPU.ExtModel  = bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);
    p->CPU.ExtFamily = bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);

    p->CPU.Model += (p->CPU.ExtModel << 4);

    if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
        p->CPU.Family == 0x06 &&
        p->CPU.Model >= CPU_MODEL_NEHALEM &&
        p->CPU.Model != CPU_MODEL_ATOM    // MSR is *NOT* available on the Intel Atom CPU
        )
    {
        msr = rdmsr64(MSR_CORE_THREAD_COUNT);               // Undocumented MSR in Nehalem and newer CPUs
        p->CPU.NoCores   = bitfield((uint32_t)msr, 31, 16); // Using undocumented MSR to get actual values
        p->CPU.NoThreads = bitfield((uint32_t)msr, 15, 0);  // Using undocumented MSR to get actual values
    }
    else if (p->CPU.Vendor == CPUID_VENDOR_AMD)
    {
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
    }
    else
    {
        // Use previous method for Cores and Threads
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
    }

    /* get brand string (if supported) */
    /* Copyright: from Apple's XNU cpuid.c */
    if (p->CPU.CPUID[CPUID_80][0] > 0x80000004)
    {
        uint32_t reg[4];
        char     str[128], *s;
        /*
         * The brand string is 48 bytes (max), guaranteed to
         * be NULL terminated.
         */
        do_cpuid(0x80000002, reg);
        bcopy((char *)reg, &str[0], 16);
        do_cpuid(0x80000003, reg);
        bcopy((char *)reg, &str[16], 16);
        do_cpuid(0x80000004, reg);
        bcopy((char *)reg, &str[32], 16);
        for (s = str; *s != '\0'; s++)
        {
            if (*s != ' ')
            {
                break;
            }
        }

        strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));

        if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1)))
        {
            /*
             * This string means we have a firmware-programmable brand string,
             * and the firmware couldn't figure out what sort of CPU we have.
             */
            p->CPU.BrandString[0] = '\0';
        }
    }

    /* setup features */
    if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_MMX;
    }
    if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE;
    }
    if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE2;
    }
    if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE3;
    }
    if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE41;
    }
    if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_SSE42;
    }
    if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_EM64T;
    }
    if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0)
    {
        p->CPU.Features |= CPU_FEATURE_MSR;
    }
    //if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
    if (p->CPU.NoThreads > p->CPU.NoCores)
    {
        p->CPU.Features |= CPU_FEATURE_HTT;
    }

    tscFrequency = measure_tsc_frequency();
    DBG("cpu freq classic = 0x%016llx\n", tscFrequency);
    /* if usual method failed */
    if (tscFrequency < 1000) //TEST
    {
        tscFrequency = timeRDTSC() * 20; //measure_tsc_frequency();
        // DBG("cpu freq timeRDTSC = 0x%016llx\n", tscFrequency);
    }
    else
    {
        // DBG("cpu freq timeRDTSC = 0x%016llx\n", timeRDTSC() * 20);
    }
    fsbFrequency = 0;
    cpuFrequency = 0;

    if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f))) {
        int intelCPU = p->CPU.Model;
        if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)) {
            /* Nehalem CPU model */
            if (p->CPU.Family == 0x06 && (p->CPU.Model == CPU_MODEL_NEHALEM ||
                                          p->CPU.Model == CPU_MODEL_FIELDS ||
                                          p->CPU.Model == CPU_MODEL_DALES ||
                                          p->CPU.Model == CPU_MODEL_DALES_32NM ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE ||
                                          p->CPU.Model == CPU_MODEL_NEHALEM_EX ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE_EX ||
                                          p->CPU.Model == CPU_MODEL_SANDYBRIDGE ||
                                          p->CPU.Model == CPU_MODEL_JAKETOWN ||
                                          p->CPU.Model == CPU_MODEL_IVYBRIDGE_XEON ||
                                          p->CPU.Model == CPU_MODEL_IVYBRIDGE ||
                                          p->CPU.Model == CPU_MODEL_HASWELL ||
                                          p->CPU.Model == CPU_MODEL_HASWELL_MB ||
                                          //p->CPU.Model == CPU_MODEL_HASWELL_H ||
                                          p->CPU.Model == CPU_MODEL_HASWELL_ULT ||
                                          p->CPU.Model == CPU_MODEL_CRYSTALWELL))
            {
                msr = rdmsr64(MSR_PLATFORM_INFO);
                DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
                bus_ratio_max = bitfield(msr, 15, 8);
                bus_ratio_min = bitfield(msr, 47, 40); //valv: not sure about this one (Remarq.1)
                msr = rdmsr64(MSR_FLEX_RATIO);
                DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
                if (bitfield(msr, 16, 16))
                {
                    flex_ratio = bitfield(msr, 15, 8);
                    /* bcc9: at least on the gigabyte h67ma-ud2h,
                       where the cpu multiplier can't be changed to
                       allow overclocking, the flex_ratio msr has unexpected (to OSX)
                       contents. These contents cause mach_kernel to
                       fail to compute the bus ratio correctly, instead
                       causing the system to crash since tscGranularity
                       is inadvertently set to 0.
                     */
                    if (flex_ratio == 0)
                    {
                        /* Clear bit 16 (evidently the presence bit) */
                        wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
                        msr = rdmsr64(MSR_FLEX_RATIO);
                        verbose("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
                    }
                    else
                    {
                        if (bus_ratio_max > flex_ratio)
                        {
                            bus_ratio_max = flex_ratio;
                        }
                    }
                }

                if (bus_ratio_max)
                {
                    fsbFrequency = (tscFrequency / bus_ratio_max);
                }
                //valv: Turbo Ratio Limit
                if ((intelCPU != 0x2e) && (intelCPU != 0x2f))
                {
                    msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
                    cpuFrequency = bus_ratio_max * fsbFrequency;
                    max_ratio = bus_ratio_max * 10;
                }
                else
                {
                    cpuFrequency = tscFrequency;
                }
                if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4))
                {
                    max_ratio = atoi(newratio);
                    max_ratio = (max_ratio * 10);
                    if (len >= 3)
                    {
                        max_ratio = (max_ratio + 5);
                    }

                    verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

                    // extreme overclockers may love 320 ;)
                    if ((max_ratio >= min_ratio) && (max_ratio <= 320))
                    {
                        cpuFrequency = (fsbFrequency * max_ratio) / 10;
                        if (len >= 3)
                        {
                            maxdiv = 1;
                        }
                        else
                        {
                            maxdiv = 0;
                        }
                    }
                    else
                    {
                        max_ratio = (bus_ratio_max * 10);
                    }
                }
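                /*
                 * Example of the bus-ratio override above (key name from the
                 * code; the value format is inferred): a chameleonConfig value
                 * of "20.5" gives atoi() -> 20, so max_ratio = 20 * 10 + 5 = 205
                 * and cpuFrequency = fsbFrequency * 205 / 10, i.e. a 20.5x
                 * multiplier with maxdiv flagged for the half step.
                 */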
                //valv: to be uncommented if Remarq.1 didn't stick
                /*if (bus_ratio_max > 0) bus_ratio = flex_ratio;*/
                p->CPU.MaxRatio = max_ratio;
                p->CPU.MinRatio = min_ratio;

                myfsb = fsbFrequency / 1000000;
                verbose("Sticking with [BCLK: %dMHz, Bus-Ratio: %d]\n", myfsb, max_ratio/10);
                currcoef = bus_ratio_max;
            } else {
                msr = rdmsr64(MSR_IA32_PERF_STATUS);
                DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
                currcoef = bitfield(msr, 12, 8);
                /* Non-integer bus ratio for the max-multi */
                maxdiv = bitfield(msr, 46, 46);
                /* Non-integer bus ratio for the current-multi (undocumented) */
                currdiv = bitfield(msr, 14, 14);

                // This will always be model >= 3
                if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
                {
                    /* On these models, maxcoef defines TSC freq */
                    maxcoef = bitfield(msr, 44, 40);
                }
                else
                {
                    /* On lower models, currcoef defines TSC freq */
                    /* XXX */
                    maxcoef = currcoef;
                }

                if (maxcoef)
                {
                    if (maxdiv)
                    {
                        fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
                    }
                    else
                    {
                        fsbFrequency = (tscFrequency / maxcoef);
                    }
                    if (currdiv)
                    {
                        cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
                    }
                    else
                    {
                        cpuFrequency = (fsbFrequency * currcoef);
                    }
                    DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "", currcoef, currdiv ? ".5" : "");
                }
            }
        }
        /* Mobile CPU */
        if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28)) {
            p->CPU.Features |= CPU_FEATURE_MOBILE;
        }
    } else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f)) {
        switch (p->CPU.ExtFamily) {
            case 0x00: /* K8 */
                msr = rdmsr64(K8_FIDVID_STATUS);
                maxcoef = bitfield(msr, 21, 16) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) / 2 + 4;
                break;

            case 0x01: /* K10 */
                msr = rdmsr64(K10_COFVID_STATUS);
                do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
                // EffFreq: effective frequency interface
                if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1)
                {
                    //uint64_t mperf = measure_mperf_frequency();
                    uint64_t aperf = measure_aperf_frequency();
                    cpuFrequency = aperf;
                }
                // NOTE: the TSC runs at maxcoef (non-turbo),
                // *not* at the turbo frequency.
                maxcoef = bitfield(msr, 54, 49) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) + 0x10;
                currdiv = 2 << bitfield(msr, 8, 6);

                break;

            case 0x05: /* K14 */
                msr = rdmsr64(K10_COFVID_STATUS);
                currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
                currdiv = (bitfield(msr, 8, 4) + 1) << 2;
                currdiv += bitfield(msr, 3, 0);

                break;

            case 0x02: /* K11 */
                // not implemented
                break;
        }

        if (maxcoef) {
            if (currdiv) {
                if (!currcoef) {
                    currcoef = maxcoef;
                }

                if (!cpuFrequency) {
                    fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                } else {
                    fsbFrequency = ((cpuFrequency * currdiv) / currcoef);
                }
                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                if (!cpuFrequency) {
                    fsbFrequency = (tscFrequency / maxcoef);
                } else {
                    fsbFrequency = (cpuFrequency / maxcoef);
                }
                DBG("%d\n", currcoef);
            }
        } else if (currcoef) {
            if (currdiv) {
                fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                fsbFrequency = (tscFrequency / currcoef);
                DBG("%d\n", currcoef);
            }
        }
        if (!cpuFrequency) cpuFrequency = tscFrequency;
    }

#if 0
    if (!fsbFrequency)
    {
        fsbFrequency = (DEFAULT_FSB * 1000);
        cpuFrequency = tscFrequency;
        DBG("0 ! using the default value for FSB !\n");
    }

    DBG("cpu freq = 0x%016llx\n", timeRDTSC() * 20);

#endif

    p->CPU.MaxCoef = maxcoef;
    p->CPU.MaxDiv = maxdiv;
    p->CPU.CurrCoef = currcoef;
    p->CPU.CurrDiv = currdiv;
    p->CPU.TSCFrequency = tscFrequency;
    p->CPU.FSBFrequency = fsbFrequency;
    p->CPU.CPUFrequency = cpuFrequency;

    // keep formatted with spaces instead of tabs
    DBG("\n---------------------------------------------\n");
    DBG("--------------- CPU INFO --------------------\n");
    DBG("---------------------------------------------\n");
    DBG("Brand String: %s\n", p->CPU.BrandString);     // Processor name (BIOS)
    DBG("Vendor: 0x%x\n", p->CPU.Vendor);              // Vendor ex: GenuineIntel
    DBG("Family: 0x%x\n", p->CPU.Family);              // Family ex: 6 (06h)
    DBG("ExtFamily: 0x%x\n", p->CPU.ExtFamily);
    DBG("Signature: %x\n", p->CPU.Signature);          // CPUID signature
    switch (p->CPU.Type)
    {
        case PT_OEM:
            DBG("Processor type: Intel Original OEM Processor\n");
            break;
        case PT_OD:
            DBG("Processor type: Intel OverDrive Processor\n");
            break;
        case PT_DUAL:
            DBG("Processor type: Intel Dual Processor\n");
            break;
        case PT_RES:
            DBG("Processor type: Intel Reserved\n");
            break;
        default:
            break;
    }
    DBG("Model: 0x%x\n", p->CPU.Model);                // Model ex: 37 (025h)
    DBG("ExtModel: 0x%x\n", p->CPU.ExtModel);
    DBG("Stepping: 0x%x\n", p->CPU.Stepping);          // Stepping ex: 5 (05h)
    DBG("MaxCoef/CurrCoef: 0x%x/0x%x\n", p->CPU.MaxCoef, p->CPU.CurrCoef);
    DBG("MaxDiv/CurrDiv: 0x%x/0x%x\n", p->CPU.MaxDiv, p->CPU.CurrDiv);
    DBG("TSCFreq: %dMHz\n", p->CPU.TSCFrequency / 1000000);
    DBG("FSBFreq: %dMHz\n", (p->CPU.FSBFrequency + 500000) / 1000000);
    DBG("CPUFreq: %dMHz\n", p->CPU.CPUFrequency / 1000000);
    DBG("Cores: %d\n", p->CPU.NoCores);                // Cores
    DBG("Logical processor: %d\n", p->CPU.NoThreads);  // Logical processors
    DBG("Features: 0x%08x\n", p->CPU.Features);
    DBG("\n---------------------------------------------\n");
#if DEBUG_CPU
    pause();
#endif
}


Revision: 2323