Chameleon

Chameleon Svn Source Tree

Root/trunk/i386/libsaio/cpu.c

1/*
2 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
3 * AsereBLN: 2009: cleanup and bugfix
4 */
5
6#include "libsaio.h"
7#include "platform.h"
8#include "cpu.h"
9#include "bootstruct.h"
10#include "boot.h"
11
12#ifndef DEBUG_CPU
13#define DEBUG_CPU 0
14#endif
15
16#if DEBUG_CPU
17#define DBG(x...)printf(x)
18#else
19#define DBG(x...)msglog(x)
20#endif
21
22/*
23 * timeRDTSC()
24 * This routine sets up PIT counter 2 to count down 1/20 of a second.
25 * It pauses until the value is latched in the counter
26 * and then reads the time stamp counter to return to the caller.
27 */
28uint64_t timeRDTSC(void)
29{
30intattempts = 0;
31uint64_t latchTime;
32uint64_tsaveTime,intermediate;
33unsigned int timerValue, lastValue;
34//boolean_tint_enabled;
35/*
36 * Table of correction factors to account for
37 * - timer counter quantization errors, and
38 * - undercounts 0..5
39 */
40#define SAMPLE_CLKS_EXACT(((double) CLKNUM) / 20.0)
41#define SAMPLE_CLKS_INT((int) CLKNUM / 20)
42#define SAMPLE_NSECS(2000000000LL)
43#define SAMPLE_MULTIPLIER(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
44#define ROUND64(x)((uint64_t)((x) + 0.5))
45uint64_tscale[6] = {
46ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
47ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
48ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
49ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
50ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
51ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
52};
53
54restart:
55 if (attempts >= 9) // increase to up to 9 attempts.
56 // This will flash-reboot. TODO: Use tscPanic instead.
57 printf("Timestamp counter calibation failed with %d attempts\n", attempts);
58 attempts++;
59 enable_PIT2();// turn on PIT2
60 set_PIT2(0);// reset timer 2 to be zero
61 latchTime = rdtsc64();// get the time stamp to time
62 latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
63 set_PIT2(SAMPLE_CLKS_INT);// set up the timer for (almost) 1/20th a second
64 saveTime = rdtsc64();// now time how long a 20th a second is...
65 get_PIT2(&lastValue);
66 get_PIT2(&lastValue);// read twice, first value may be unreliable
67 do {
68intermediate = get_PIT2(&timerValue);
69if (timerValue > lastValue) {
70// Timer wrapped
71set_PIT2(0);
72disable_PIT2();
73goto restart;
74}
75lastValue = timerValue;
76 } while (timerValue > 5);
77 printf("timerValue %d\n",timerValue);
78 printf("intermediate 0x%016llx\n",intermediate);
79 printf("saveTime 0x%016llx\n",saveTime);
80
81 intermediate -= saveTime;// raw count for about 1/20 second
82 intermediate *= scale[timerValue];// rescale measured time spent
83 intermediate /= SAMPLE_NSECS;// so its exactly 1/20 a second
84 intermediate += latchTime;// add on our save fudge
85
86 set_PIT2(0);// reset timer 2 to be zero
87 disable_PIT2();// turn off PIT 2
88
89 return intermediate;
90}
91
/*
 * DFE: Measures the TSC frequency in Hz (64-bit).
 * NOTE(review): despite the historical "ACPI PM timer" wording, the code
 * below clearly calibrates against 8254 PIT counter 2 (set_PIT2_mode0 /
 * poll_PIT2_gate) — confirm against platform.h constants.
 */
static uint64_t measure_tsc_frequency(void)
{
	uint64_t tscStart;
	uint64_t tscEnd;
	// Start at "infinity" so the first valid delta always wins the MIN test.
	uint64_t tscDelta = 0xffffffffffffffffULL;
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of TSC ticks that occur while waiting for the timer to
	 * expire. That theoretically helps avoid inconsistencies when
	 * running under a VM if the TSC is not virtualized and the host
	 * steals time. The TSC is normally virtualized for VMware.
	 */
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		tscStart = rdtsc64();
		pollCount = poll_PIT2_gate();
		tscEnd = rdtsc64();
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1)
			continue;
		/* The TSC must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the TSC delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
			continue;
		// tscDelta = MIN(tscDelta, (tscEnd - tscStart))
		if ( (tscEnd - tscStart) < tscDelta )
			tscDelta = tscEnd - tscStart;
	}
	/* tscDelta is now the least number of TSC ticks the processor made in
	 * a timespan of 0.03 s (e.g. 30 milliseconds)
	 * Linux thus divides by 30 which gives the answer in kiloHertz because
	 * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
	 * Hz so we need to convert our milliseconds to seconds. Since we're
	 * dividing by the milliseconds, we simply multiply by 1000.
	 */

	/* Unlike linux, we're not limited to 32-bit, but we do need to take care
	 * that we're going to multiply by 1000 first so we do need at least some
	 * arithmetic headroom. For now, 32-bit should be enough.
	 * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
	 */
	// If no loop iteration produced a valid delta (tscDelta untouched),
	// this check also routes us to the retval = 0 failure path.
	if (tscDelta > (1ULL<<32))
		retval = 0;
	else
	{
		retval = tscDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}
155
/*
 * Original comment/code:
 * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"
 *
 * Measures the Actual Performance Frequency in Hz (64-bit)
 * (just a naming change, mperf --> aperf )
 *
 * Same calibration scheme as measure_tsc_frequency(), but counts
 * MSR_AMD_APERF ticks instead of TSC ticks against PIT counter 2.
 */
static uint64_t measure_aperf_frequency(void)
{
	uint64_t aperfStart;
	uint64_t aperfEnd;
	// Start at "infinity" so the first valid delta always wins the MIN test.
	uint64_t aperfDelta = 0xffffffffffffffffULL;
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;

	/* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of APERF ticks that occur while waiting for the timer to
	 * expire.
	 */
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		aperfStart = rdmsr64(MSR_AMD_APERF);
		pollCount = poll_PIT2_gate();
		aperfEnd = rdmsr64(MSR_AMD_APERF);
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1)
			continue;
		/* The TSC must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the APERF delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
			continue;
		// aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
		if ( (aperfEnd - aperfStart) < aperfDelta )
			aperfDelta = aperfEnd - aperfStart;
	}
	/* aperfDelta is now the least number of APERF ticks the processor made in
	 * a timespan of 0.03 s (e.g. 30 milliseconds)
	 */

	// Reject absurd deltas (and the untouched "infinity" sentinel); otherwise
	// convert ticks-per-30ms to Hz by multiplying by 1000/30.
	if (aperfDelta > (1ULL<<32))
		retval = 0;
	else
	{
		retval = aperfDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}
212
/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
 * - multi. is read from a specific MSR. In the case of Intel, there is:
 *     a max multi. (used to calculate the FSB freq.),
 *     and a current multi. (used to calculate the CPU freq.)
 * - fsbFrequency = tscFrequency / multi
 * - cpuFrequency = fsbFrequency * multi
 *
 * Fills in p->CPU: CPUID leaves, vendor/family/model/stepping, core and
 * thread counts, brand string, feature flags, and the TSC/FSB/CPU
 * frequencies plus the coefficient/divisor fields.
 */
void scan_cpu(PlatformInfo_t *p)
{
	uint64_t	tscFrequency, fsbFrequency, cpuFrequency;
	uint64_t	msr, flex_ratio;
	uint8_t		maxcoef, maxdiv, currcoef, bus_ratio_max, currdiv;
	const char	*newratio;
	int		len, myfsb;
	uint8_t		bus_ratio_min;
	uint32_t	max_ratio, min_ratio;

	max_ratio = min_ratio = myfsb = bus_ratio_min = 0;
	maxcoef = maxdiv = bus_ratio_max = currcoef = currdiv = 0;

	/* get cpuid values */
	do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
	do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);
	do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
	do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
	// Leaf 4 takes a sub-leaf index (0 = first cache level).
	do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
	do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
	// Leaves 5/6 only exist if the max basic leaf reaches them.
	if (p->CPU.CPUID[CPUID_0][0] >= 0x5) {
		do_cpuid(5, p->CPU.CPUID[CPUID_5]);
	}
	if (p->CPU.CPUID[CPUID_0][0] >= 6) {
		do_cpuid(6, p->CPU.CPUID[CPUID_6]);
	}
	// Extended leaves, gated on the low nibble of the max extended leaf.
	if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8) {
		do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}
	else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1) {
		do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
	}

#if DEBUG_CPU
	{
		int	i;
		printf("CPUID Raw Values:\n");
		for (i=0; i<CPUID_MAX; i++) {
			printf("%02d: %08x-%08x-%08x-%08x\n", i,
				p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
				p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
		}
	}
#endif

	// Decode leaf 0/1: vendor is EBX of leaf 0; the rest come from EAX of leaf 1.
	p->CPU.Vendor		= p->CPU.CPUID[CPUID_0][1];
	p->CPU.Signature	= p->CPU.CPUID[CPUID_1][0];
	p->CPU.Stepping		= bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);
	p->CPU.Model		= bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);
	p->CPU.Family		= bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);
	p->CPU.ExtModel		= bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);
	p->CPU.ExtFamily	= bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);

	// Combine extended model into the effective model number.
	p->CPU.Model += (p->CPU.ExtModel << 4);

	if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
		p->CPU.Family == 0x06 &&
		p->CPU.Model >= CPUID_MODEL_NEHALEM &&
		p->CPU.Model != CPUID_MODEL_ATOM	// MSR is *NOT* available on the Intel Atom CPU
		)
	{
		msr = rdmsr64(MSR_CORE_THREAD_COUNT);	// Undocumented MSR in Nehalem and newer CPUs
		p->CPU.NoCores		= bitfield((uint32_t)msr, 31, 16);	// Using undocumented MSR to get actual values
		p->CPU.NoThreads	= bitfield((uint32_t)msr, 15, 0);	// Using undocumented MSR to get actual values
	}
	else if (p->CPU.Vendor == CPUID_VENDOR_AMD)
	{
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
	}
	else
	{
		// Use previous method for Cores and Threads
		p->CPU.NoThreads	= bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
		p->CPU.NoCores		= bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
	}

	/* get brand string (if supported) */
	/* Copyright: from Apple's XNU cpuid.c */
	if (p->CPU.CPUID[CPUID_80][0] > 0x80000004) {
		uint32_t	reg[4];
		char		str[128], *s;
		/*
		 * The brand string 48 bytes (max), guaranteed to
		 * be NULL terminated.
		 */
		do_cpuid(0x80000002, reg);
		bcopy((char *)reg, &str[0], 16);
		do_cpuid(0x80000003, reg);
		bcopy((char *)reg, &str[16], 16);
		do_cpuid(0x80000004, reg);
		bcopy((char *)reg, &str[32], 16);
		// Skip leading spaces some firmwares pad the brand string with.
		for (s = str; *s != '\0'; s++) {
			if (*s != ' ') break;
		}

		strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));

		if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1))) {
			/*
			 * This string means we have a firmware-programmable brand string,
			 * and the firmware couldn't figure out what sort of CPU we have.
			 */
			p->CPU.BrandString[0] = '\0';
		}
	}

	/* setup features: map CPUID leaf-1 EDX/ECX (and extended leaf EDX) bits
	 * to the bootloader's CPU_FEATURE_* flags. */
	if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MMX;
	}
	if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE;
	}
	if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE2;
	}
	if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE3;
	}
	if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE41;
	}
	if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0) {
		p->CPU.Features |= CPU_FEATURE_SSE42;
	}
	if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_EM64T;
	}
	if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0) {
		p->CPU.Features |= CPU_FEATURE_MSR;
	}
	//if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
	// HTT inferred from thread count exceeding core count rather than the
	// raw CPUID HTT bit (commented out above).
	if (p->CPU.NoThreads > p->CPU.NoCores) {
		p->CPU.Features |= CPU_FEATURE_HTT;
	}

	tscFrequency = measure_tsc_frequency();
	/* if usual method failed */
	if ( tscFrequency < 1000 )
	{
		// timeRDTSC() measures ticks per 1/20 s, hence * 20 for Hz.
		tscFrequency = timeRDTSC() * 20;
	}
	fsbFrequency = 0;
	cpuFrequency = 0;

	if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f))) {
		int intelCPU = p->CPU.Model;
		if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)) {
			/* Nehalem CPU model */
			if (p->CPU.Family == 0x06 && (p->CPU.Model == CPU_MODEL_NEHALEM ||
				p->CPU.Model == CPU_MODEL_FIELDS ||
				p->CPU.Model == CPU_MODEL_DALES ||
				p->CPU.Model == CPU_MODEL_CLARKDALE ||
				p->CPU.Model == CPU_MODEL_WESTMERE ||
				p->CPU.Model == CPU_MODEL_NEHALEM_EX ||
				p->CPU.Model == CPU_MODEL_WESTMERE_EX ||
				p->CPU.Model == CPU_MODEL_SANDYBRIDGE ||
				p->CPU.Model == CPU_MODEL_SANDYBRIDGE_XEON ||
				p->CPU.Model == CPU_MODEL_IVYBRIDGE_XEON ||
				p->CPU.Model == CPU_MODEL_IVYBRIDGE ||
				p->CPU.Model == CPU_MODEL_HASWELL_DT ||
				p->CPU.Model == CPU_MODEL_HASWELL_MB ||
				//p->CPU.Model == CPU_MODEL_HASWELL_H ||
				p->CPU.Model == CPU_MODEL_HASWELL_ULT ||
				p->CPU.Model == CPU_MODEL_HASWELL_ULX ))
			{
				msr = rdmsr64(MSR_PLATFORM_INFO);
				DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
				bus_ratio_max = bitfield(msr, 14, 8);
				// NOTE(review): bits 46..40 are above bit 31 — assumes bitfield()
				// handles 64-bit operands; confirm its declaration in platform.h.
				bus_ratio_min = bitfield(msr, 46, 40); //valv: not sure about this one (Remarq.1)
				msr = rdmsr64(MSR_FLEX_RATIO);
				DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
				// Bit 16 = flex-ratio enable/presence bit.
				if (bitfield(msr, 16, 16)) {
					flex_ratio = bitfield(msr, 14, 8);
					/* bcc9: at least on the gigabyte h67ma-ud2h,
					 where the cpu multipler can't be changed to
					 allow overclocking, the flex_ratio msr has unexpected (to OSX)
					 contents.These contents cause mach_kernel to
					 fail to compute the bus ratio correctly, instead
					 causing the system to crash since tscGranularity
					 is inadvertently set to 0.
					 */
					if (flex_ratio == 0) {
						/* Clear bit 16 (evidently the presence bit) */
						wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
						msr = rdmsr64(MSR_FLEX_RATIO);
						verbose("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
					} else {
						// Flex ratio caps the maximum bus ratio.
						if (bus_ratio_max > flex_ratio) {
							bus_ratio_max = flex_ratio;
						}
					}
				}

				if (bus_ratio_max) {
					fsbFrequency = (tscFrequency / bus_ratio_max);
				}
				//valv: Turbo Ratio Limit
				// 0x2e/0x2f (Nehalem-EX/Westmere-EX) have no turbo ratio limit MSR.
				if ((intelCPU != 0x2e) && (intelCPU != 0x2f)) {
					msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
					cpuFrequency = bus_ratio_max * fsbFrequency;
					max_ratio = bus_ratio_max * 10;	// ratios kept as tenths below
				} else {
					cpuFrequency = tscFrequency;
				}
				// Optional user override of the bus ratio from the boot config,
				// e.g. "busratio=xy" or "busratio=xy.5" (3+ chars => half step).
				if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4)) {
					max_ratio = atoi(newratio);
					max_ratio = (max_ratio * 10);
					if (len >= 3) max_ratio = (max_ratio + 5);

					verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

					// extreme overclockers may love 320 ;)
					if ((max_ratio >= min_ratio) && (max_ratio <= 320)) {
						cpuFrequency = (fsbFrequency * max_ratio) / 10;
						if (len >= 3) maxdiv = 1;
						else maxdiv = 0;
					} else {
						max_ratio = (bus_ratio_max * 10);
					}
				}
				//valv: to be uncommented if Remarq.1 didn't stick
				/*if (bus_ratio_max > 0) bus_ratio = flex_ratio;*/
				p->CPU.MaxRatio = max_ratio;
				p->CPU.MinRatio = min_ratio;

				myfsb = fsbFrequency / 1000000;
				verbose("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio);
				currcoef = bus_ratio_max;
			} else {
				// Pre-Nehalem path: read multipliers from IA32_PERF_STATUS.
				msr = rdmsr64(MSR_IA32_PERF_STATUS);
				DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
				currcoef = bitfield(msr, 12, 8);
				/* Non-integer bus ratio for the max-multi*/
				maxdiv = bitfield(msr, 46, 46);
				/* Non-integer bus ratio for the current-multi (undocumented)*/
				currdiv = bitfield(msr, 14, 14);

				// This will always be model >= 3
				if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
				{
					/* On these models, maxcoef defines TSC freq */
					maxcoef = bitfield(msr, 44, 40);
				} else {
					/* On lower models, currcoef defines TSC freq */
					/* XXX */
					maxcoef = currcoef;
				}

				if (maxcoef) {
					// maxdiv/currdiv flag a ".5" half-multiplier; the *2/+1
					// arithmetic folds the half step in using integer math.
					if (maxdiv) {
						fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
					} else {
						fsbFrequency = (tscFrequency / maxcoef);
					}
					if (currdiv) {
						cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
					} else {
						cpuFrequency = (fsbFrequency * currcoef);
					}
					DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
				}
			}
		}
		/* Mobile CPU */
		if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28)) {
			p->CPU.Features |= CPU_FEATURE_MOBILE;
		}
	}
	else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f))
	{
		switch(p->CPU.ExtFamily)
		{
			case 0x00: /* K8 */
				msr = rdmsr64(K8_FIDVID_STATUS);
				maxcoef = bitfield(msr, 21, 16) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) / 2 + 4;
				break;

			case 0x01: /* K10 */
				msr = rdmsr64(K10_COFVID_STATUS);
				do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
				// EffFreq: effective frequency interface
				if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1)
				{
					//uint64_t mperf = measure_mperf_frequency();
					uint64_t aperf = measure_aperf_frequency();
					cpuFrequency = aperf;
				}
				// NOTE: tsc runs at the maccoeff (non turbo)
				//	*not* at the turbo frequency.
				maxcoef  = bitfield(msr, 54, 49) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) + 0x10;
				currdiv = 2 << bitfield(msr, 8, 6);

				break;

			case 0x05: /* K14 */
				msr = rdmsr64(K10_COFVID_STATUS);
				currcoef  = (bitfield(msr, 54, 49) + 0x10) << 2;
				currdiv = (bitfield(msr, 8, 4) + 1) << 2;
				currdiv += bitfield(msr, 3, 0);

				break;

			case 0x02: /* K11 */
				// not implimented
				break;
		}

		// Derive FSB from TSC (or the measured CPU frequency, if available)
		// using whichever coefficient pair the switch above populated.
		if (maxcoef)
		{
			if (currdiv)
			{
				if (!currcoef) currcoef = maxcoef;
				if (!cpuFrequency)
					fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				else
					fsbFrequency = ((cpuFrequency * currdiv) / currcoef);

				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			} else {
				if (!cpuFrequency)
					fsbFrequency = (tscFrequency / maxcoef);
				else
					fsbFrequency = (cpuFrequency / maxcoef);
				DBG("%d\n", currcoef);
			}
		}
		else if (currcoef)
		{
			if (currdiv)
			{
				fsbFrequency = ((tscFrequency * currdiv) / currcoef);
				DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
			} else {
				fsbFrequency = (tscFrequency / currcoef);
				DBG("%d\n", currcoef);
			}
		}
		if (!cpuFrequency) cpuFrequency = tscFrequency;
	}

#if 0
	if (!fsbFrequency) {
		fsbFrequency = (DEFAULT_FSB * 1000);
		cpuFrequency = tscFrequency;
		DBG("0 ! using the default value for FSB !\n");
	}
#endif

	// Publish the computed values to the platform structure.
	p->CPU.MaxCoef = maxcoef;
	p->CPU.MaxDiv = maxdiv;
	p->CPU.CurrCoef = currcoef;
	p->CPU.CurrDiv = currdiv;
	p->CPU.TSCFrequency = tscFrequency;
	p->CPU.FSBFrequency = fsbFrequency;
	p->CPU.CPUFrequency = cpuFrequency;

	// keep formatted with spaces instead of tabs
	// NOTE(review): the three *Freq lines pass 64-bit values to a %d
	// conversion — relies on varargs layout; consider %llu. Confirm against
	// the field types in platform.h before changing.
	DBG("CPU: Brand String: %s\n", p->CPU.BrandString);
	DBG("CPU: Vendor/Family/ExtFamily: 0x%x/0x%x/0x%x\n", p->CPU.Vendor, p->CPU.Family, p->CPU.ExtFamily);
	DBG("CPU: Model/ExtModel/Stepping: 0x%x/0x%x/0x%x\n", p->CPU.Model, p->CPU.ExtModel, p->CPU.Stepping);
	DBG("CPU: MaxCoef/CurrCoef: 0x%x/0x%x\n", p->CPU.MaxCoef, p->CPU.CurrCoef);
	DBG("CPU: MaxDiv/CurrDiv: 0x%x/0x%x\n", p->CPU.MaxDiv, p->CPU.CurrDiv);
	DBG("CPU: TSCFreq: %dMHz\n", p->CPU.TSCFrequency / 1000000);
	DBG("CPU: FSBFreq: %dMHz\n", p->CPU.FSBFrequency / 1000000);
	DBG("CPU: CPUFreq: %dMHz\n", p->CPU.CPUFrequency / 1000000);
	DBG("CPU: NoCores/NoThreads: %d/%d\n", p->CPU.NoCores, p->CPU.NoThreads);
	DBG("CPU: Features: 0x%08x\n", p->CPU.Features);
#if DEBUG_CPU
	pause();
#endif
}
597

Archive Download this file

Revision: 2248