Chameleon

Chameleon SVN Source Tree

Root/branches/ErmaC/Trunk/i386/libsaio/cpu.c

/*
 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
 * AsereBLN: 2009: cleanup and bugfix
 */

#include "libsaio.h"
#include "platform.h"
#include "cpu.h"
#include "bootstruct.h"
#include "boot.h"

#ifndef DEBUG_CPU
#define DEBUG_CPU 0
#endif

#if DEBUG_CPU
#define DBG(x...)    printf(x)
#else
#define DBG(x...)    msglog(x)
#endif

/*
 * timeRDTSC()
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
uint64_t timeRDTSC(void)
{
    int          attempts = 0;
    uint64_t     latchTime;
    uint64_t     saveTime, intermediate;
    unsigned int timerValue, lastValue;
    //boolean_t  int_enabled;
    /*
     * Table of correction factors to account for
     *   - timer counter quantization errors, and
     *   - undercounts 0..5
     */
#define SAMPLE_CLKS_EXACT   (((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT     ((int) CLKNUM / 20)
#define SAMPLE_NSECS        (2000000000LL)
#define SAMPLE_MULTIPLIER   (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)          ((uint64_t)((x) + 0.5))
    uint64_t scale[6] = {
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
    };
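    /* How the correction factors work (editorial note derived from the
     * definitions above, not part of the original source): the PIT is loaded
     * with SAMPLE_CLKS_INT ticks, so when the measurement loop below exits
     * with a residual count of timerValue, only (SAMPLE_CLKS_INT - timerValue)
     * PIT clocks have actually elapsed. Multiplying the raw TSC delta by
     * scale[timerValue] and then dividing by SAMPLE_NSECS is the same as
     * multiplying by SAMPLE_CLKS_EXACT / (SAMPLE_CLKS_INT - timerValue), which
     * rescales the measurement to exactly 1/20 of a second. Assuming the
     * standard 8254 input clock (CLKNUM ~= 1193182 Hz), SAMPLE_CLKS_INT is
     * 59659, so the correction is at most a few parts in 60000.
     */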

    //int_enabled = ml_set_interrupts_enabled(FALSE);

restart:
    if (attempts >= 9) // increase to up to 9 attempts.
        // This will flash-reboot. TODO: Use tscPanic instead.
        printf("Timestamp counter calibration failed with %d attempts\n", attempts);
    attempts++;
    enable_PIT2();              // turn on PIT2
    set_PIT2(0);                // reset timer 2 to be zero
    latchTime = rdtsc64();      // get the time stamp to time
    latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
    set_PIT2(SAMPLE_CLKS_INT);  // set up the timer for (almost) 1/20th a second
    saveTime = rdtsc64();       // now time how long a 20th a second is...
    get_PIT2(&lastValue);
    get_PIT2(&lastValue);       // read twice, first value may be unreliable
    do {
        intermediate = get_PIT2(&timerValue);
        if (timerValue > lastValue) {
            // Timer wrapped
            set_PIT2(0);
            disable_PIT2();
            goto restart;
        }
        lastValue = timerValue;
    } while (timerValue > 5);
    printf("timerValue   %d\n", timerValue);
    printf("intermediate 0x%016llx\n", intermediate);
    printf("saveTime     0x%016llx\n", saveTime);

    intermediate -= saveTime;          // raw count for about 1/20 second
    intermediate *= scale[timerValue]; // rescale measured time spent
    intermediate /= SAMPLE_NSECS;      // so it's exactly 1/20 a second
    intermediate += latchTime;         // add on our save fudge

    set_PIT2(0);    // reset timer 2 to be zero
    disable_PIT2(); // turn off PIT 2

    //ml_set_interrupts_enabled(int_enabled);
    return intermediate;
}

/*
 * DFE: Measures the TSC frequency in Hz (64-bit) using the 8254 PIT
 */
static uint64_t measure_tsc_frequency(void)
{
    uint64_t tscStart;
    uint64_t tscEnd;
    uint64_t tscDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop several times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of TSC ticks that occur while waiting for the timer to
     * expire. That theoretically helps avoid inconsistencies when
     * running under a VM if the TSC is not virtualized and the host
     * steals time. The TSC is normally virtualized for VMware.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        tscStart = rdtsc64();
        pollCount = poll_PIT2_gate();
        tscEnd = rdtsc64();
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
            continue;
        /* The TSC must increment at LEAST once every millisecond.
         * We should have waited exactly 30 msec so the TSC delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // tscDelta = MIN(tscDelta, (tscEnd - tscStart))
        if ( (tscEnd - tscStart) < tscDelta )
            tscDelta = tscEnd - tscStart;
    }
    /* tscDelta is now the least number of TSC ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds).
     * Linux thus divides by 30 which gives the answer in kiloHertz because
     * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
     * Hz so we need to convert our milliseconds to seconds. Since we're
     * dividing by the milliseconds, we simply multiply by 1000.
     */

    /* Unlike Linux, we're not limited to 32-bit, but we do need to take care
     * that we're going to multiply by 1000 first so we do need at least some
     * arithmetic headroom. For now, 32-bit should be enough.
     * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
     */
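    /* Worked example (illustrative numbers, not measured): if the smallest
     * delta seen over the 30 ms window is tscDelta = 72,000,000 TSC ticks,
     * then retval = 72,000,000 * 1000 / 30 = 2,400,000,000 Hz, i.e. a 2.4 GHz
     * TSC. The guard below simply gives up (returns 0) if tscDelta is so large
     * that the "* 1000" step would exceed the headroom chosen above.
     */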
    if (tscDelta > (1ULL<<32))
        retval = 0;
    else
    {
        retval = tscDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}

/*
 * Original comment/code:
 * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"
 *
 * Measures the Actual Performance Frequency in Hz (64-bit)
 * (just a naming change, mperf --> aperf )
 */
static uint64_t measure_aperf_frequency(void)
{
    uint64_t aperfStart;
    uint64_t aperfEnd;
    uint64_t aperfDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop several times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of APERF ticks that occur while waiting for the timer to
     * expire.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        aperfStart = rdmsr64(MSR_AMD_APERF);
        pollCount = poll_PIT2_gate();
        aperfEnd = rdmsr64(MSR_AMD_APERF);
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
            continue;
        /* The APERF count must increment at LEAST once every millisecond.
         * We should have waited exactly 30 msec so the APERF delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
        if ( (aperfEnd - aperfStart) < aperfDelta )
            aperfDelta = aperfEnd - aperfStart;
    }
    /* aperfDelta is now the least number of APERF ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds)
     */

    if (aperfDelta > (1ULL<<32))
        retval = 0;
    else
    {
        retval = aperfDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}
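/*
 * For reference (editorial note, not from the original source): APERF is a
 * free-running counter that ticks at the core's actual delivered frequency,
 * unlike the TSC, which counts at a fixed nominal rate on modern parts. So
 * aperfDelta * 1000 / 30 approximates the effective core frequency over the
 * 30 ms window, including the effect of throttling or boost during that time.
 */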

/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
 * - multi. is read from a specific MSR. In the case of Intel, there is:
 *     a max multi. (used to calculate the FSB freq.),
 *     and a current multi. (used to calculate the CPU freq.)
 * - fsbFrequency = tscFrequency / multi
 * - cpuFrequency = fsbFrequency * multi
 */
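/*
 * Worked example (illustrative numbers only): on a CPU whose TSC was measured
 * at 2,660,000,000 Hz and whose maximum bus ratio read from the MSR is 20,
 *     fsbFrequency = 2,660,000,000 / 20 = 133,000,000 Hz (133 MHz)
 *     cpuFrequency = 133,000,000 * 20  = 2,660,000,000 Hz (2.66 GHz)
 * With a current multiplier lower than the maximum (e.g. 12 while idle), the
 * reported cpuFrequency would instead be 133,000,000 * 12 ~= 1.6 GHz.
 */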
void scan_cpu(PlatformInfo_t *p)
{
    uint64_t   tscFrequency, fsbFrequency, cpuFrequency;
    uint64_t   msr, flex_ratio;
    uint8_t    maxcoef, maxdiv, currcoef, bus_ratio_max, currdiv;
    const char *newratio;
    int        len, myfsb;
    uint8_t    bus_ratio_min;
    uint32_t   max_ratio, min_ratio;

    max_ratio = min_ratio = myfsb = bus_ratio_min = 0;
    maxcoef = maxdiv = bus_ratio_max = currcoef = currdiv = 0;

    /* get cpuid values */
    do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
    do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);
    do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
    do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
    do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
    do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
    if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8) {
        do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }
    else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1) {
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }

#if DEBUG_CPU
    {
        int i;
        printf("CPUID Raw Values:\n");
        for (i = 0; i < CPUID_MAX; i++) {
            printf("%02d: %08x-%08x-%08x-%08x\n", i,
                   p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
                   p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
        }
    }
#endif

    p->CPU.Vendor    = p->CPU.CPUID[CPUID_0][1];
    p->CPU.Signature = p->CPU.CPUID[CPUID_1][0];
    p->CPU.Stepping  = bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);
    p->CPU.Model     = bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);
    p->CPU.Family    = bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);
    p->CPU.ExtModel  = bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);
    p->CPU.ExtFamily = bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);

    p->CPU.Model += (p->CPU.ExtModel << 4);
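    /* Worked example (illustrative): a Sandy Bridge part reports a CPUID
     * signature of 0x000206A7 in EAX of leaf 1. The bitfields above decode to
     * Stepping 7, Model 0xA, Family 0x6, ExtModel 0x2, ExtFamily 0x0, and the
     * line above folds the extended model in: Model = 0xA + (0x2 << 4) = 0x2A.
     */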

    if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
        p->CPU.Family == 0x06 &&
        p->CPU.Model >= CPUID_MODEL_NEHALEM &&
        p->CPU.Model != CPUID_MODEL_ATOM    // MSR is *NOT* available on the Intel Atom CPU
        )
    {
        msr = rdmsr64(MSR_CORE_THREAD_COUNT);                // Undocumented MSR in Nehalem and newer CPUs
        p->CPU.NoCores   = bitfield((uint32_t)msr, 31, 16);  // Using undocumented MSR to get actual values
        p->CPU.NoThreads = bitfield((uint32_t)msr, 15, 0);   // Using undocumented MSR to get actual values
    }
    else if (p->CPU.Vendor == CPUID_VENDOR_AMD)
    {
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
    }
    else
    {
        // Use previous method for Cores and Threads
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
    }

    /* get brand string (if supported) */
    /* Copyright: from Apple's XNU cpuid.c */
    if (p->CPU.CPUID[CPUID_80][0] > 0x80000004) {
        uint32_t reg[4];
        char     str[128], *s;
        /*
         * The brand string is 48 bytes (max), guaranteed to
         * be NULL terminated.
         */
        do_cpuid(0x80000002, reg);
        bcopy((char *)reg, &str[0], 16);
        do_cpuid(0x80000003, reg);
        bcopy((char *)reg, &str[16], 16);
        do_cpuid(0x80000004, reg);
        bcopy((char *)reg, &str[32], 16);
        for (s = str; *s != '\0'; s++) {
            if (*s != ' ') break;
        }

        strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));

        if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1))) {
            /*
             * This string means we have a firmware-programmable brand string,
             * and the firmware couldn't figure out what sort of CPU we have.
             */
            p->CPU.BrandString[0] = '\0';
        }
    }

    /* setup features */
    if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_MMX;
    }
    if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE;
    }
    if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE2;
    }
    if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE3;
    }
    if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE41;
    }
    if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE42;
    }
    if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_EM64T;
    }
    if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_MSR;
    }
    //if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
    if (p->CPU.NoThreads > p->CPU.NoCores) {
        p->CPU.Features |= CPU_FEATURE_HTT;
    }
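    /* Worked example (illustrative): a typical Intel leaf-1 EDX value of
     * 0xBFEBFBFF has bits 5 (MSR), 23 (MMX), 25 (SSE) and 26 (SSE2) set, so
     * all of those feature flags get ORed in. HTT is derived from the
     * thread/core counts above rather than from the CPUID HTT bit (bit 28),
     * presumably because that bit is also set on multi-core parts that do not
     * have Hyper-Threading.
     */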

    tscFrequency = measure_tsc_frequency();
    DBG("cpu freq classic = 0x%016llx\n", tscFrequency);
    /* if usual method failed */
    if ( tscFrequency < 1000 )  //TEST
    {
        tscFrequency = timeRDTSC() * 20;  //measure_tsc_frequency();
        // DBG("cpu freq timeRDTSC = 0x%016llx\n", tscFrequency);
    }
    else {
        // DBG("cpu freq timeRDTSC = 0x%016llx\n", timeRDTSC() * 20);
    }
    fsbFrequency = 0;
    cpuFrequency = 0;

    if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f))) {
        int intelCPU = p->CPU.Model;
        if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)) {
            /* Nehalem CPU model */
            if (p->CPU.Family == 0x06 && (p->CPU.Model == CPU_MODEL_NEHALEM ||
                                          p->CPU.Model == CPU_MODEL_FIELDS ||
                                          p->CPU.Model == CPU_MODEL_DALES ||
                                          p->CPU.Model == CPU_MODEL_DALES_32NM ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE ||
                                          p->CPU.Model == CPU_MODEL_NEHALEM_EX ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE_EX ||
                                          p->CPU.Model == CPU_MODEL_SANDYBRIDGE ||
                                          p->CPU.Model == CPU_MODEL_JAKETOWN ||
                                          p->CPU.Model == CPU_MODEL_IVYBRIDGE)) {
                msr = rdmsr64(MSR_PLATFORM_INFO);
                DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
                bus_ratio_max = bitfield(msr, 14, 8);
                bus_ratio_min = bitfield(msr, 46, 40); //valv: not sure about this one (Remarq.1)
                msr = rdmsr64(MSR_FLEX_RATIO);
                DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
                if (bitfield(msr, 16, 16)) {
                    flex_ratio = bitfield(msr, 14, 8);
                    /* bcc9: at least on the gigabyte h67ma-ud2h,
                       where the cpu multiplier can't be changed to
                       allow overclocking, the flex_ratio msr has unexpected (to OSX)
                       contents. These contents cause mach_kernel to
                       fail to compute the bus ratio correctly, instead
                       causing the system to crash since tscGranularity
                       is inadvertently set to 0.
                     */
                    if (flex_ratio == 0) {
                        /* Clear bit 16 (evidently the presence bit) */
                        wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
                        msr = rdmsr64(MSR_FLEX_RATIO);
                        verbose("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
                    } else {
                        if (bus_ratio_max > flex_ratio) {
                            bus_ratio_max = flex_ratio;
                        }
                    }
                }

                if (bus_ratio_max) {
                    fsbFrequency = (tscFrequency / bus_ratio_max);
                }
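                /* Worked example (illustrative): on a 3.4 GHz Sandy Bridge
                 * part, MSR_PLATFORM_INFO bits 14:8 typically read 34, so
                 * fsbFrequency = 3,400,000,000 / 34 = 100,000,000 Hz, i.e.
                 * the 100 MHz BCLK that these CPUs use in place of a classic
                 * FSB.
                 */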
                //valv: Turbo Ratio Limit
                if ((intelCPU != 0x2e) && (intelCPU != 0x2f)) {
                    msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
                    cpuFrequency = bus_ratio_max * fsbFrequency;
                    max_ratio = bus_ratio_max * 10;
                } else {
                    cpuFrequency = tscFrequency;
                }
                if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4)) {
                    max_ratio = atoi(newratio);
                    max_ratio = (max_ratio * 10);
                    if (len >= 3) max_ratio = (max_ratio + 5);

                    verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

                    // extreme overclockers may love 320 ;)
                    if ((max_ratio >= min_ratio) && (max_ratio <= 320)) {
                        cpuFrequency = (fsbFrequency * max_ratio) / 10;
                        if (len >= 3) maxdiv = 1;
                        else maxdiv = 0;
                    } else {
                        max_ratio = (bus_ratio_max * 10);
                    }
                }
                //valv: to be uncommented if Remarq.1 didn't stick
                /*if (bus_ratio_max > 0) bus_ratio = flex_ratio;*/
                p->CPU.MaxRatio = max_ratio;
                p->CPU.MinRatio = min_ratio;

                myfsb = fsbFrequency / 1000000;
                verbose("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio);
                currcoef = bus_ratio_max;
            } else {
                msr = rdmsr64(MSR_IA32_PERF_STATUS);
                DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
                currcoef = bitfield(msr, 12, 8);
                /* Non-integer bus ratio for the max-multi */
                maxdiv = bitfield(msr, 46, 46);
                /* Non-integer bus ratio for the current-multi (undocumented) */
                currdiv = bitfield(msr, 14, 14);

                // This will always be model >= 3
                if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
                {
                    /* On these models, maxcoef defines TSC freq */
                    maxcoef = bitfield(msr, 44, 40);
                } else {
                    /* On lower models, currcoef defines TSC freq */
                    /* XXX */
                    maxcoef = currcoef;
                }

                if (maxcoef)
                {
                    if (maxdiv)
                    {
                        fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
                    } else {
                        fsbFrequency = (tscFrequency / maxcoef);
                    }
                    if (currdiv)
                    {
                        cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
                    } else {
                        cpuFrequency = (fsbFrequency * currcoef);
                    }
                    DBG("max: %d%s  current: %d%s\n", maxcoef, maxdiv ? ".5" : "", currcoef, currdiv ? ".5" : "");
                }
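                /* Worked example (illustrative): with maxcoef = 12 and
                 * maxdiv = 1 the effective maximum multiplier is 12.5, so
                 * fsbFrequency = tsc * 2 / 25; on a 2.5 GHz TSC that yields a
                 * 200 MHz FSB. With currcoef = 6 and currdiv = 0, the reported
                 * cpuFrequency would then be 200 MHz * 6 = 1.2 GHz.
                 */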
            }
        }
        /* Mobile CPU */
        if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28))
        {
            p->CPU.Features |= CPU_FEATURE_MOBILE;
        }
    }
    else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f))
    {
        switch (p->CPU.ExtFamily)
        {
            case 0x00: /* K8 */
                msr = rdmsr64(K8_FIDVID_STATUS);
                maxcoef  = bitfield(msr, 21, 16) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) / 2 + 4;
                break;
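                /* Worked example (illustrative, assuming the usual K8 FID
                 * encoding where multiplier = FID/2 + 4): a MaxFid field of
                 * 0x0E gives maxcoef = 14/2 + 4 = 11, i.e. an 11x multiplier
                 * and a 2200 MHz core on a 200 MHz reference clock.
                 */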

            case 0x01: /* K10 */
                msr = rdmsr64(K10_COFVID_STATUS);
                do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
                // EffFreq: effective frequency interface
                if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1)
                {
                    //uint64_t mperf = measure_mperf_frequency();
                    uint64_t aperf = measure_aperf_frequency();
                    cpuFrequency = aperf;
                }
                // NOTE: the TSC runs at maxcoef (non-turbo),
                // *not* at the turbo frequency.
                maxcoef  = bitfield(msr, 54, 49) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) + 0x10;
                currdiv  = 2 << bitfield(msr, 8, 6);

                break;

            case 0x05: /* K14 */
                msr = rdmsr64(K10_COFVID_STATUS);
                currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
                currdiv  = (bitfield(msr, 8, 4) + 1) << 2;
                currdiv += bitfield(msr, 3, 0);

                break;

            case 0x02: /* K11 */
                // not implemented
                break;
        }

        if (maxcoef)
        {
            if (currdiv)
            {
                if (!currcoef) currcoef = maxcoef;
                if (!cpuFrequency)
                    fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                else
                    fsbFrequency = ((cpuFrequency * currdiv) / currcoef);

                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                if (!cpuFrequency)
                    fsbFrequency = (tscFrequency / maxcoef);
                else
                    fsbFrequency = (cpuFrequency / maxcoef);
                DBG("%d\n", currcoef);
            }
        }
        else if (currcoef)
        {
            if (currdiv)
            {
                fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                fsbFrequency = (tscFrequency / currcoef);
                DBG("%d\n", currcoef);
            }
        }
        if (!cpuFrequency) cpuFrequency = tscFrequency;
    }

#if 0
    if (!fsbFrequency)
    {
        fsbFrequency = (DEFAULT_FSB * 1000);
        cpuFrequency = tscFrequency;
        DBG("0 ! using the default value for FSB !\n");
    }

    DBG("cpu freq = 0x%016llx\n", timeRDTSC() * 20);

#endif

    p->CPU.MaxCoef      = maxcoef;
    p->CPU.MaxDiv       = maxdiv;
    p->CPU.CurrCoef     = currcoef;
    p->CPU.CurrDiv      = currdiv;
    p->CPU.TSCFrequency = tscFrequency;
    p->CPU.FSBFrequency = fsbFrequency;
    p->CPU.CPUFrequency = cpuFrequency;

    // keep formatted with spaces instead of tabs
    DBG("\n---------------------------------------------\n");
    DBG("CPU: Brand String:\t\t\t\t %s\n", p->CPU.BrandString);
    DBG("CPU: Vendor/Family/ExtFamily:\t 0x%x/0x%x/0x%x\n", p->CPU.Vendor, p->CPU.Family, p->CPU.ExtFamily);
    DBG("CPU: Model/ExtModel/Stepping:\t 0x%x/0x%x/0x%x\n", p->CPU.Model, p->CPU.ExtModel, p->CPU.Stepping);
    DBG("CPU: MaxCoef/CurrCoef:\t\t\t 0x%x/0x%x\n", p->CPU.MaxCoef, p->CPU.CurrCoef);
    DBG("CPU: MaxDiv/CurrDiv:\t\t\t 0x%x/0x%x\n", p->CPU.MaxDiv, p->CPU.CurrDiv);
    DBG("CPU: TSCFreq:\t\t\t\t %dMHz\n", p->CPU.TSCFrequency / 1000000);
    DBG("CPU: FSBFreq:\t\t\t\t\t %dMHz\n", (p->CPU.FSBFrequency + 500000) / 1000000);
    DBG("CPU: CPUFreq:\t\t\t\t %dMHz\n", p->CPU.CPUFrequency / 1000000);
    DBG("CPU: Number of CPU Cores:\t\t %d\n", p->CPU.NoCores);
    DBG("CPU: Number of CPU Threads:\t %d\n", p->CPU.NoThreads);
    DBG("CPU: Features:\t\t\t\t\t 0x%08x\n", p->CPU.Features);
    DBG("---------------------------------------------\n");
#if DEBUG_CPU
    pause();
#endif
}


Revision: 2026