Chameleon SVN Source Tree

Root/branches/Chimera/i386/libsaio/cpu.c

/*
 * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>
 * AsereBLN: 2009: cleanup and bugfix
 */

#include "libsaio.h"
#include "platform.h"
#include "cpu.h"
#include "bootstruct.h"
#include "boot.h"

#ifndef DEBUG_CPU
#define DEBUG_CPU 0
#endif

#if DEBUG_CPU
#define DBG(x...)    printf(x)
#else
#define DBG(x...)    msglog(x)
#endif

/*
 * timeRDTSC()
 * This routine sets up PIT counter 2 to count down 1/20 of a second.
 * It pauses until the value is latched in the counter
 * and then reads the time stamp counter to return to the caller.
 */
uint64_t timeRDTSC(void)
{
    int          attempts = 0;
    uint64_t     latchTime;
    uint64_t     saveTime, intermediate;
    unsigned int timerValue, lastValue;
    //boolean_t  int_enabled;
    /*
     * Table of correction factors to account for
     *   - timer counter quantization errors, and
     *   - undercounts 0..5
     */
#define SAMPLE_CLKS_EXACT   (((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT     ((int) CLKNUM / 20)
#define SAMPLE_NSECS        (2000000000LL)
#define SAMPLE_MULTIPLIER   (((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)          ((uint64_t)((x) + 0.5))
    uint64_t scale[6] = {
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
        ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
    };
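    /*
     * Editor's note, derived from the macros above: scale[i] equals
     * SAMPLE_NSECS * SAMPLE_CLKS_EXACT / (SAMPLE_CLKS_INT - i), so the
     * "intermediate *= scale[timerValue]; intermediate /= SAMPLE_NSECS"
     * step below rescales the raw TSC delta by
     * SAMPLE_CLKS_EXACT / (SAMPLE_CLKS_INT - timerValue), correcting for
     * the 0..5 PIT clocks that had not yet elapsed when the loop exited.
     */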

restart:
    if (attempts >= 9) // increase to up to 9 attempts.
        // This will flash-reboot. TODO: Use tscPanic instead.
        printf("Timestamp counter calibration failed with %d attempts\n", attempts);
    attempts++;
    enable_PIT2();              // turn on PIT2
    set_PIT2(0);                // reset timer 2 to be zero
    latchTime = rdtsc64();      // get the time stamp to time
    latchTime = get_PIT2(&timerValue) - latchTime; // time how long this takes
    set_PIT2(SAMPLE_CLKS_INT);  // set up the timer for (almost) 1/20th a second
    saveTime = rdtsc64();       // now time how long a 20th a second is...
    get_PIT2(&lastValue);
    get_PIT2(&lastValue);       // read twice, first value may be unreliable
    do {
        intermediate = get_PIT2(&timerValue);
        if (timerValue > lastValue) {
            // Timer wrapped
            set_PIT2(0);
            disable_PIT2();
            goto restart;
        }
        lastValue = timerValue;
    } while (timerValue > 5);
    printf("timerValue %d\n", timerValue);
    printf("intermediate 0x%016llx\n", intermediate);
    printf("saveTime 0x%016llx\n", saveTime);

    intermediate -= saveTime;           // raw count for about 1/20 second
    intermediate *= scale[timerValue];  // rescale measured time spent
    intermediate /= SAMPLE_NSECS;       // so it's exactly 1/20 a second
    intermediate += latchTime;          // add on our save fudge

    set_PIT2(0);                        // reset timer 2 to be zero
    disable_PIT2();                     // turn off PIT 2

    return intermediate;
}

/*
 * DFE: Measures the TSC frequency in Hz (64-bit) using the 8254 PIT (counter 2)
 */
static uint64_t measure_tsc_frequency(void)
{
    uint64_t tscStart;
    uint64_t tscEnd;
    uint64_t tscDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of TSC ticks that occur while waiting for the timer to
     * expire. That theoretically helps avoid inconsistencies when
     * running under a VM if the TSC is not virtualized and the host
     * steals time. The TSC is normally virtualized for VMware.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        tscStart = rdtsc64();
        pollCount = poll_PIT2_gate();
        tscEnd = rdtsc64();
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
            continue;
        /* The TSC must increment at LEAST once every millisecond.
         * We should have waited exactly 30 msec so the TSC delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // tscDelta = MIN(tscDelta, (tscEnd - tscStart))
        if ( (tscEnd - tscStart) < tscDelta )
            tscDelta = tscEnd - tscStart;
    }
    /* tscDelta is now the least number of TSC ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds).
     * Linux thus divides by 30, which gives the answer in kilohertz because
     * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
     * Hz so we need to convert our milliseconds to seconds. Since we're
     * dividing by the milliseconds, we simply multiply by 1000.
     */

    /* Unlike Linux, we're not limited to 32-bit, but we do need to take care
     * that we're going to multiply by 1000 first so we do need at least some
     * arithmetic headroom. For now, 32-bit should be enough.
     * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
     */
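    /* Worked example (hypothetical numbers): a 2.4 GHz TSC advances about
     * 72,000,000 ticks in 30 ms, and 72,000,000 * 1000 / 30 = 2,400,000,000 Hz.
     */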
    if (tscDelta > (1ULL<<32))
        retval = 0;
    else
    {
        retval = tscDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}

/*
 * Original comment/code:
 * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"
 *
 * Measures the Actual Performance Frequency in Hz (64-bit)
 * (just a naming change, mperf --> aperf )
 */
static uint64_t measure_aperf_frequency(void)
{
    uint64_t aperfStart;
    uint64_t aperfEnd;
    uint64_t aperfDelta = 0xffffffffffffffffULL;
    unsigned long pollCount;
    uint64_t retval = 0;
    int i;

    /* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
     * counter 2. We run this loop 10 times to make sure the cache
     * is hot and we take the minimum delta from all of the runs.
     * That is to say that we're biased towards measuring the minimum
     * number of APERF ticks that occur while waiting for the timer to
     * expire.
     */
    for (i = 0; i < 10; ++i)
    {
        enable_PIT2();
        set_PIT2_mode0(CALIBRATE_LATCH);
        aperfStart = rdmsr64(MSR_AMD_APERF);
        pollCount = poll_PIT2_gate();
        aperfEnd = rdmsr64(MSR_AMD_APERF);
        /* The poll loop must have run at least a few times for accuracy */
        if (pollCount <= 1)
            continue;
        /* The APERF counter must increment at LEAST once every millisecond.
         * We should have waited exactly 30 msec so the APERF delta should
         * be >= 30. Anything less and the processor is way too slow.
         */
        if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
            continue;
        // aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
        if ( (aperfEnd - aperfStart) < aperfDelta )
            aperfDelta = aperfEnd - aperfStart;
    }
    /* aperfDelta is now the least number of APERF ticks the processor made in
     * a timespan of 0.03 s (i.e. 30 milliseconds)
     */

    if (aperfDelta > (1ULL<<32))
        retval = 0;
    else
    {
        retval = aperfDelta * 1000 / 30;
    }
    disable_PIT2();
    return retval;
}

/*
 * Calculates the FSB and CPU frequencies using specific MSRs for each CPU
 * - multi. is read from a specific MSR. In the case of Intel, there is:
 *     a max multi. (used to calculate the FSB freq.),
 *     and a current multi. (used to calculate the CPU freq.)
 * - fsbFrequency = tscFrequency / multi
 * - cpuFrequency = fsbFrequency * multi
 */
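/*
 * Worked example (hypothetical values, for illustration only): if the
 * measured tscFrequency is 3,200 MHz and the max multiplier read from the
 * MSR is 32, then fsbFrequency = 3200 / 32 = 100 MHz; with a current
 * multiplier of 34, cpuFrequency = 100 * 34 = 3,400 MHz.
 */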
void scan_cpu(PlatformInfo_t *p)
{
    uint64_t tscFrequency = 0;
    uint64_t fsbFrequency = 0;
    uint64_t cpuFrequency = 0;
    uint64_t msr = 0;
    uint64_t flex_ratio = 0;
    uint32_t max_ratio = 0;
    uint32_t min_ratio = 0;
    uint8_t  bus_ratio_max = 0;
    uint8_t  bus_ratio_min = 0;
    uint8_t  currdiv = 0;
    uint8_t  currcoef = 0;
    uint8_t  maxdiv = 0;
    uint8_t  maxcoef = 0;

    const char *newratio;
    int len = 0;

    /* get cpuid values */
    do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]);
    do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]);
    do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]);
    do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]);
    do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
    do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
    if (p->CPU.CPUID[CPUID_0][0] >= 0x5) {
        do_cpuid(5, p->CPU.CPUID[CPUID_5]);
    }
    if (p->CPU.CPUID[CPUID_0][0] >= 6) {
        do_cpuid(6, p->CPU.CPUID[CPUID_6]);
    }
    if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8) {
        do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }
    else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1) {
        do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
    }

#if DEBUG_CPU
    {
        int i;
        printf("CPUID Raw Values:\n");
        for (i = 0; i < CPUID_MAX; i++) {
            printf("%02d: %08x-%08x-%08x-%08x\n", i,
                   p->CPU.CPUID[i][0], p->CPU.CPUID[i][1],
                   p->CPU.CPUID[i][2], p->CPU.CPUID[i][3]);
        }
    }
#endif

    p->CPU.Vendor    = p->CPU.CPUID[CPUID_0][1];
    p->CPU.Signature = p->CPU.CPUID[CPUID_1][0];
    p->CPU.Stepping  = bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0);
    p->CPU.Model     = bitfield(p->CPU.CPUID[CPUID_1][0], 7, 4);
    p->CPU.Family    = bitfield(p->CPU.CPUID[CPUID_1][0], 11, 8);
    p->CPU.ExtModel  = bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16);
    p->CPU.ExtFamily = bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20);

    p->CPU.Model += (p->CPU.ExtModel << 4);

    if (p->CPU.Vendor == CPUID_VENDOR_INTEL &&
        p->CPU.Family == 0x06 &&
        p->CPU.Model >= CPU_MODEL_NEHALEM &&
        p->CPU.Model != CPU_MODEL_ATOM  // MSR is *NOT* available on the Intel Atom CPU
        )
    {
        msr = rdmsr64(MSR_CORE_THREAD_COUNT);                // Undocumented MSR in Nehalem and newer CPUs
        p->CPU.NoCores   = bitfield((uint32_t)msr, 31, 16);  // Using undocumented MSR to get actual values
        p->CPU.NoThreads = bitfield((uint32_t)msr, 15, 0);   // Using undocumented MSR to get actual values
    }
    else if (p->CPU.Vendor == CPUID_VENDOR_AMD)
    {
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
    }
    else
    {
        // Use previous method for Cores and Threads
        p->CPU.NoThreads = bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
        p->CPU.NoCores   = bitfield(p->CPU.CPUID[CPUID_4][0], 31, 26) + 1;
    }

    /* get brand string (if supported) */
    /* Copyright: from Apple's XNU cpuid.c */
    if (p->CPU.CPUID[CPUID_80][0] > 0x80000004) {
        uint32_t reg[4];
        char     str[128], *s;
        /*
         * The brand string is 48 bytes (max), guaranteed to
         * be NULL terminated.
         */
        do_cpuid(0x80000002, reg);
        bcopy((char *)reg, &str[0], 16);
        do_cpuid(0x80000003, reg);
        bcopy((char *)reg, &str[16], 16);
        do_cpuid(0x80000004, reg);
        bcopy((char *)reg, &str[32], 16);
        for (s = str; *s != '\0'; s++) {
            if (*s != ' ') break;
        }

        strlcpy(p->CPU.BrandString, s, sizeof(p->CPU.BrandString));

        if (!strncmp(p->CPU.BrandString, CPU_STRING_UNKNOWN, MIN(sizeof(p->CPU.BrandString), strlen(CPU_STRING_UNKNOWN) + 1))) {
            /*
             * This string means we have a firmware-programmable brand string,
             * and the firmware couldn't figure out what sort of CPU we have.
             */
            p->CPU.BrandString[0] = '\0';
        }
    }

    /* setup features */
    if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_MMX;
    }
    if ((bit(25) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE;
    }
    if ((bit(26) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE2;
    }
    if ((bit(0) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE3;
    }
    if ((bit(19) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE41;
    }
    if ((bit(20) & p->CPU.CPUID[CPUID_1][2]) != 0) {
        p->CPU.Features |= CPU_FEATURE_SSE42;
    }
    if ((bit(29) & p->CPU.CPUID[CPUID_81][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_EM64T;
    }
    if ((bit(5) & p->CPU.CPUID[CPUID_1][3]) != 0) {
        p->CPU.Features |= CPU_FEATURE_MSR;
    }
    //if ((bit(28) & p->CPU.CPUID[CPUID_1][3]) != 0) {
    if (p->CPU.NoThreads > p->CPU.NoCores) {
        p->CPU.Features |= CPU_FEATURE_HTT;
    }

    tscFrequency = measure_tsc_frequency();
    ///* if usual method failed */
    //if ( tscFrequency < 1000 )
    //{
    //    tscFrequency = timeRDTSC() * 20;
    //}
    //fsbFrequency = 0;
    //cpuFrequency = 0;

    if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((p->CPU.Family == 0x06) || (p->CPU.Family == 0x0f))) {
        int intelCPU = p->CPU.Model;
        if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)) {
            /* Nehalem CPU model */
            if (p->CPU.Family == 0x06 && (p->CPU.Model == CPU_MODEL_NEHALEM ||
                                          p->CPU.Model == CPU_MODEL_FIELDS ||
                                          p->CPU.Model == CPU_MODEL_DALES ||
                                          p->CPU.Model == CPU_MODEL_DALES_32NM ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE ||
                                          p->CPU.Model == CPU_MODEL_NEHALEM_EX ||
                                          p->CPU.Model == CPU_MODEL_WESTMERE_EX ||
                                          p->CPU.Model == CPU_MODEL_SANDYBRIDGE ||
                                          p->CPU.Model == CPU_MODEL_JAKETOWN ||
                                          p->CPU.Model == CPU_MODEL_IVYBRIDGE )) {
                msr = rdmsr64(MSR_PLATFORM_INFO);
                //DBG("msr(%d): platform_info %08x\n", __LINE__, bitfield(msr, 31, 0));
                bus_ratio_max = bitfield(msr, 15, 8);   //MacMan: Changed bitfield to match Apple tsc.c
                bus_ratio_min = bitfield(msr, 47, 40);  //MacMan: Changed bitfield to match Apple tsc.c
                msr = rdmsr64(MSR_FLEX_RATIO);
                //DBG("msr(%d): flex_ratio %08x\n", __LINE__, bitfield(msr, 31, 0));
                if (bitfield(msr, 16, 16)) {
                    flex_ratio = bitfield(msr, 15, 8);  //MacMan: Changed bitfield to match Apple tsc.c
                    if (flex_ratio == 0) {
                        /* Clear bit 16 (evidently the presence bit) */
                        wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
                        msr = rdmsr64(MSR_FLEX_RATIO);
                        //verbose("Unusable flex ratio detected. Patched MSR now %08x\n", bitfield(msr, 31, 0));
                    } else {
                        if (bus_ratio_max > flex_ratio) {
                            bus_ratio_max = flex_ratio;
                        }
                    }
                }

                if (bus_ratio_max) {
                    fsbFrequency = (tscFrequency / bus_ratio_max);
                }
                //MacMan: Turbo Ratio Limit
                switch (intelCPU)
                {
                    case CPU_MODEL_WESTMERE_EX: // Intel Xeon E7
                    case CPU_MODEL_NEHALEM_EX:  // Intel Xeon X75xx, Xeon X65xx, Xeon E75xx, Xeon E65xx
                    {
                        cpuFrequency = tscFrequency;
                        DBG("cpu.c (%d)CPU_MODEL_NEHALEM_EX or CPU_MODEL_WESTMERE_EX Found\n", __LINE__);
                        break;
                    }
                    case CPU_MODEL_SANDYBRIDGE: // Intel Core i3, i5, i7 LGA1155 (32nm)
                    case CPU_MODEL_IVYBRIDGE:   // Intel Core i3, i5, i7 LGA1155 (22nm)
                    case CPU_MODEL_JAKETOWN:    // Intel Core i7, Xeon E5 LGA2011 (32nm)
                    {
                        msr = rdmsr64(MSR_IA32_PERF_STATUS);
                        currcoef = bitfield(msr, 15, 8);
                        cpuFrequency = currcoef * fsbFrequency;
                        maxcoef = bus_ratio_max;
                        break;
                    }
                    default:
                    {
                        msr = rdmsr64(MSR_IA32_PERF_STATUS);
                        currcoef = bitfield(msr, 7, 0);
                        cpuFrequency = currcoef * fsbFrequency;
                        maxcoef = bus_ratio_max;
                        break;
                    }
                }

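                /*
                 * Example (hypothetical boot-plist value): busratio=20.5 gives
                 * newratio = "20.5" and len = 4, so atoi() yields 20 and
                 * max_ratio becomes 20 * 10 + 5 = 205; the accepted ratio is
                 * then applied as cpuFrequency = fsbFrequency * 205 / 10,
                 * i.e. a 20.5x multiplier, with maxdiv flagging the half step.
                 */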
                if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4)) {
                    max_ratio = atoi(newratio);
                    max_ratio = (max_ratio * 10);
                    if (len >= 3) max_ratio = (max_ratio + 5);

                    verbose("Bus-Ratio: min=%d, max=%s\n", bus_ratio_min, newratio);

                    // extreme overclockers may love 320 ;)
                    if ((max_ratio >= min_ratio) && (max_ratio <= 320)) {
                        cpuFrequency = (fsbFrequency * max_ratio) / 10;
                        if (len >= 3) maxdiv = 1;
                        else maxdiv = 0;
                    } else {
                        max_ratio = (bus_ratio_max * 10);
                    }
                }
                p->CPU.MaxRatio = bus_ratio_max;
                p->CPU.MinRatio = bus_ratio_min;
            } else {
                msr = rdmsr64(MSR_IA32_PERF_STATUS);
                DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, bitfield(msr, 31, 0));
                currcoef = bitfield(msr, 12, 8);
                /* Non-integer bus ratio for the max-multi */
                maxdiv = bitfield(msr, 46, 46);
                /* Non-integer bus ratio for the current-multi (undocumented) */
                currdiv = bitfield(msr, 14, 14);

                // This will always be model >= 3
                if ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0e) || (p->CPU.Family == 0x0f))
                {
                    /* On these models, maxcoef defines TSC freq */
                    maxcoef = bitfield(msr, 44, 40);
                } else {
                    /* On lower models, currcoef defines TSC freq */
                    /* XXX */
                    maxcoef = currcoef;
                }

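                /*
                 * Example (hypothetical MSR contents): with maxcoef = 9 and
                 * maxdiv set, the effective maximum multiplier is 9.5, so
                 * fsbFrequency = (tscFrequency * 2) / 19; with currcoef = 8
                 * and currdiv clear, cpuFrequency = fsbFrequency * 8.
                 */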
                if (maxcoef) {
                    if (maxdiv) {
                        fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
                    } else {
                        fsbFrequency = (tscFrequency / maxcoef);
                    }
                    if (currdiv) {
                        cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
                    } else {
                        cpuFrequency = (fsbFrequency * currcoef);
                    }
                    DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "", currcoef, currdiv ? ".5" : "");
                }
            }
        }
        /* Mobile CPU */
        if (rdmsr64(MSR_IA32_PLATFORM_ID) & (1<<28)) {
            p->CPU.Features |= CPU_FEATURE_MOBILE;
        }
    }
    else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f))
    {
        switch (p->CPU.ExtFamily)
        {
            case 0x00: /* K8 */
                msr = rdmsr64(K8_FIDVID_STATUS);
                maxcoef  = bitfield(msr, 21, 16) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) / 2 + 4;
                break;

            case 0x01: /* K10 */
                msr = rdmsr64(K10_COFVID_STATUS);
                do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
                // EffFreq: effective frequency interface
                if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1)
                {
                    //uint64_t mperf = measure_mperf_frequency();
                    uint64_t aperf = measure_aperf_frequency();
                    cpuFrequency = aperf;
                }
                // NOTE: the TSC runs at the maxcoef (non-turbo) frequency,
                // *not* at the turbo frequency.
                maxcoef  = bitfield(msr, 54, 49) / 2 + 4;
                currcoef = bitfield(msr, 5, 0) + 0x10;
                currdiv  = 2 << bitfield(msr, 8, 6);

                break;

            case 0x05: /* K14 */
                msr = rdmsr64(K10_COFVID_STATUS);
                currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
                currdiv  = (bitfield(msr, 8, 4) + 1) << 2;
                currdiv += bitfield(msr, 3, 0);

                break;

            case 0x02: /* K11 */
                // not implemented
                break;
        }

        if (maxcoef)
        {
            if (currdiv)
            {
                if (!currcoef) currcoef = maxcoef;
                if (!cpuFrequency)
                    fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                else
                    fsbFrequency = ((cpuFrequency * currdiv) / currcoef);

                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                if (!cpuFrequency)
                    fsbFrequency = (tscFrequency / maxcoef);
                else
                    fsbFrequency = (cpuFrequency / maxcoef);
                DBG("%d\n", currcoef);
            }
        }
        else if (currcoef)
        {
            if (currdiv)
            {
                fsbFrequency = ((tscFrequency * currdiv) / currcoef);
                DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
            } else {
                fsbFrequency = (tscFrequency / currcoef);
                DBG("%d\n", currcoef);
            }
        }
        if (!cpuFrequency) cpuFrequency = tscFrequency;
    }

#if 0
    if (!fsbFrequency) {
        fsbFrequency = (DEFAULT_FSB * 1000);
        cpuFrequency = tscFrequency;
        DBG("0 ! using the default value for FSB !\n");
    }
#endif

    p->CPU.MaxCoef = maxcoef;
    if (maxdiv == 0) {
        p->CPU.MaxDiv = bus_ratio_max;
    }
    else {
        p->CPU.MaxDiv = maxdiv;
    }
    p->CPU.CurrCoef = currcoef;
    if (currdiv == 0) {
        p->CPU.CurrDiv = currcoef;
    }
    else {
        p->CPU.CurrDiv = currdiv;
    }
    p->CPU.TSCFrequency = tscFrequency;
    p->CPU.FSBFrequency = fsbFrequency;
    p->CPU.CPUFrequency = cpuFrequency;

    // keep formatted with spaces instead of tabs
    DBG("CPU: Brand String: %s\n", p->CPU.BrandString);
    DBG("CPU: Vendor: 0x%x\n", p->CPU.Vendor);
    DBG("CPU: Family / ExtFamily: 0x%x / 0x%x\n", p->CPU.Family, p->CPU.ExtFamily);
    DBG("CPU: Model / ExtModel / Stepping: 0x%x / 0x%x / 0x%x\n", p->CPU.Model, p->CPU.ExtModel, p->CPU.Stepping);
    DBG("CPU: Number of Cores / Threads: %d / %d\n", p->CPU.NoCores, p->CPU.NoThreads);
    DBG("CPU: Features: 0x%08x\n", p->CPU.Features);
    DBG("CPU: TSC Frequency: %d MHz\n", p->CPU.TSCFrequency / 1000000);
    DBG("CPU: FSB Frequency: %d MHz\n", p->CPU.FSBFrequency / 1000000);
    DBG("CPU: CPU Frequency: %d MHz\n", p->CPU.CPUFrequency / 1000000);
    DBG("CPU: Minimum Bus Ratio: %d\n", p->CPU.MinRatio);
    DBG("CPU: Maximum Bus Ratio: %d\n", p->CPU.MaxRatio);
    DBG("CPU: Current Bus Ratio: %d\n", p->CPU.CurrCoef);
    //DBG("CPU: Maximum Multiplier: %d\n", p->CPU.MaxCoef);
    //DBG("CPU: Maximum Divider: %d\n", p->CPU.MaxDiv);
    //DBG("CPU: Current Divider: %d\n", p->CPU.CurrDiv);

#if DEBUG_CPU
    pause();
#endif
}


Revision: 2225