1 | /*␊ |
2 | * Copyright 2008 Islam Ahmed Zaid. All rights reserved. <azismed@gmail.com>␊ |
3 | * AsereBLN: 2009: cleanup and bugfix␊ |
4 | */␊ |
5 | ␊ |
6 | #include "libsaio.h"␊ |
7 | #include "platform.h"␊ |
8 | #include "cpu.h"␊ |
9 | ␊ |
10 | #ifndef DEBUG_CPU␊ |
11 | #define DEBUG_CPU 0␊ |
12 | #endif␊ |
13 | ␊ |
14 | #if DEBUG_CPU␊ |
15 | #define DBG(x...)␉␉printf(x)␊ |
16 | #else␊ |
17 | #define DBG(x...)␉␉msglog(x)␊ |
18 | #endif␊ |
19 | ␊ |
20 | //#define AMD_SUPPORT ␊ |
21 | ␊ |
22 | #ifndef INTEL_SUPPORT␊ |
23 | #define INTEL_SUPPORT 0 //Default (0: nolegacy, 1 : legacy)␊ |
24 | #endif␊ |
25 | ␊ |
26 | #ifdef AMD_SUPPORT␊ |
27 | #ifdef LEGACY_CPU␊ |
28 | #undef LEGACY_CPU␊ |
29 | #endif␊ |
30 | #ifdef INTEL_SUPPORT␊ |
31 | #undef INTEL_SUPPORT␊ |
32 | #endif␊ |
33 | #define LEGACY_CPU 1␊ |
34 | #endif␊ |
35 | ␊ |
36 | #ifdef INTEL_SUPPORT ␊ |
37 | #ifdef LEGACY_CPU␊ |
38 | #undef LEGACY_CPU␊ |
39 | #endif␊ |
40 | #define LEGACY_CPU INTEL_SUPPORT␊ |
41 | #endif␊ |
42 | // (?) : if AMD_SUPPORT then (LEGACY_CPU = 1 && INTEL_SUPPORT = disabled)␊ |
43 | //␉␉ else LEGACY_CPU = INTEL_SUPPORT␊ |
44 | ␊ |
45 | ␊ |
46 | #if LEGACY_CPU␊ |
47 | static uint64_t measure_tsc_frequency(void);␊ |
48 | ␊ |
49 | // DFE: enable_PIT2 and disable_PIT2 come from older xnu␊ |
50 | ␊ |
51 | /*␊ |
52 | * Enable or disable timer 2.␊ |
53 | * Port 0x61 controls timer 2:␊ |
54 | * bit 0 gates the clock,␊ |
55 | * bit 1 gates output to speaker.␊ |
56 | */␊ |
/* Read port 0x61, clear bits 0-1, then set bit 0: the channel-2 clock
   gate opens while the speaker output stays muted. Only AL is modified,
   as declared in the clobber list. */
static inline void enable_PIT2(void)
{
/* Enable gate, disable speaker */
__asm__ volatile(
					 " inb $0x61,%%al \n\t"
					 " and $0xFC,%%al \n\t" /* & ~0x03 */
					 " or $1,%%al \n\t"
					 " outb %%al,$0x61 \n\t"
					 : : : "%al" );
}
67 | ␊ |
/* Read port 0x61 and clear bits 0-1: closes the channel-2 clock gate and
   keeps the speaker output off. Only AL is modified. */
static inline void disable_PIT2(void)
{
/* Disable gate and output to speaker */
__asm__ volatile(
					 " inb $0x61,%%al \n\t"
					 " and $0xFC,%%al \n\t"	/* & ~0x03 */
					 " outb %%al,$0x61 \n\t"
					 : : : "%al" );
}
77 | ␊ |
78 | // DFE: set_PIT2_mode0, poll_PIT2_gate, and measure_tsc_frequency are␊ |
79 | // roughly based on Linux code␊ |
80 | ␊ |
81 | /* Set the 8254 channel 2 to mode 0 with the specified value.␊ |
82 | In mode 0, the counter will initially set its gate low when the␊ |
83 | timer expires. For this to be useful, you ought to set it high␊ |
84 | before calling this function. The enable_PIT2 function does this.␊ |
85 | */␊ |
static inline void set_PIT2_mode0(uint16_t value)
{
/* Command 0xB0 = select channel 2, lobyte/hibyte access, mode 0.
 * The 16-bit latch value arrives in DX ("d" constraint) and is written
 * to data port 0x42 low byte (DL) first, then high byte (DH).
 * NOTE(review): AL (and AH via the movb's) is modified here but the asm
 * declares no clobbers — appears to have worked historically, but the
 * clobber list looks incomplete; verify before reusing this pattern. */
__asm__ volatile(
					 " movb $0xB0,%%al \n\t"
					 " outb	%%al,$0x43	\n\t"
					 " movb	%%dl,%%al	\n\t"
					 " outb	%%al,$0x42	\n\t"
					 " movb	%%dh,%%al	\n\t"
					 " outb	%%al,$0x42"
					 : : "d"(value) /*: no clobber */ );
}
97 | ␊ |
98 | /* Returns the number of times the loop ran before the PIT2 signaled */␊ |
static inline unsigned long poll_PIT2_gate(void)
{
unsigned long count = 0;
unsigned char nmi_sc_val;
/* Busy-wait on port 0x61 bit 5 (the channel-2 OUT pin, cf. CH2_GATE_OUT
 * below); in mode 0 it goes high when the counter expires. The returned
 * iteration count lets callers sanity-check that the wait really spun. */
do {
++count;
__asm__ volatile(
						 "inb	$0x61,%0"
						 : "=q"(nmi_sc_val) /*:*/ /* no input */ /*:*/ /* no clobber */);
} while( (nmi_sc_val & 0x20) == 0);
return count;
}
111 | /*␊ |
112 | * DFE: Measures the TSC frequency in Hz (64-bit) using the ACPI PM timer␊ |
113 | */␊ |
/* Returns the measured TSC frequency in Hz, or 0 when no calibration pass
 * produced a plausible result. Assumes CALIBRATE_LATCH / CALIBRATE_TIME_MSEC
 * (from cpu.h, presumably) encode a 30 ms PIT interval — TODO confirm. */
static uint64_t measure_tsc_frequency(void)
{
uint64_t tscStart;
uint64_t tscEnd;
/* Sentinel: stays at ~0ULL if every calibration pass is rejected. */
uint64_t tscDelta = 0xffffffffffffffffULL;
unsigned long pollCount;
uint64_t retval = 0;
int i;
	
/* Time how many TSC ticks elapse in 30 msec using the 8254 PIT
 * counter 2. We run this loop 10 times to make sure the cache
 * is hot and we take the minimum delta from all of the runs.
 * That is to say that we're biased towards measuring the minimum
 * number of TSC ticks that occur while waiting for the timer to
 * expire. That theoretically helps avoid inconsistencies when
 * running under a VM if the TSC is not virtualized and the host
 * steals time. The TSC is normally virtualized for VMware.
 */
for(i = 0; i < 10; ++i)
{
enable_PIT2();
set_PIT2_mode0(CALIBRATE_LATCH);
tscStart = rdtsc64();
pollCount = poll_PIT2_gate();
tscEnd = rdtsc64();
/* The poll loop must have run at least a few times for accuracy */
if(pollCount <= 1)
continue;
/* The TSC must increment at LEAST once every millisecond. We
 * should have waited exactly 30 msec so the TSC delta should
 * be >= 30. Anything less and the processor is way too slow.
 */
if((tscEnd - tscStart) <= CALIBRATE_TIME_MSEC)
continue;
// tscDelta = min(tscDelta, (tscEnd - tscStart))
if( (tscEnd - tscStart) < tscDelta )
tscDelta = tscEnd - tscStart;
}
/* tscDelta is now the least number of TSC ticks the processor made in
 * a timespan of 0.03 s (e.g. 30 milliseconds)
 * Linux thus divides by 30 which gives the answer in kiloHertz because
 * 1 / ms = kHz. But we're xnu and most of the rest of the code uses
 * Hz so we need to convert our milliseconds to seconds. Since we're
 * dividing by the milliseconds, we simply multiply by 1000.
 */
	
/* Unlike linux, we're not limited to 32-bit, but we do need to take care
 * that we're going to multiply by 1000 first so we do need at least some
 * arithmetic headroom. For now, 32-bit should be enough.
 * Also unlike Linux, our compiler can do 64-bit integer arithmetic.
 */
/* Also covers the every-pass-rejected case: the sentinel ~0ULL is far
 * above 2^32, so retval stays 0 ("unknown"). */
if(tscDelta > (1ULL<<32))
retval = 0;
else
{
retval = tscDelta * 1000 / 30;
}
disable_PIT2();
return retval;
}
174 | ␊ |
175 | #ifdef AMD_SUPPORT␊ |
176 | #define MSR_AMD_APERF 0x000000E8␊ |
177 | /*␊ |
178 | * Original comment/code:␊ |
179 | * "DFE: Measures the Max Performance Frequency in Hz (64-bit)"␊ |
180 | *␊ |
181 | * Measures the Actual Performance Frequency in Hz (64-bit)␊ |
182 | * (just a naming change, mperf --> aperf )␊ |
183 | */␊ |
/* Returns the actual (effective) core frequency in Hz derived from the AMD
 * APERF MSR, or 0 when no calibration pass produced a plausible result.
 * Same structure as measure_tsc_frequency(), with rdtsc64 swapped for
 * rdmsr64(MSR_AMD_APERF). */
static uint64_t measure_aperf_frequency(void)
{
	uint64_t aperfStart;
	uint64_t aperfEnd;
	/* Sentinel: stays at ~0ULL if every calibration pass is rejected. */
	uint64_t aperfDelta = 0xffffffffffffffffULL;
	unsigned long pollCount;
	uint64_t retval = 0;
	int i;
	
	/* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
	 * counter 2. We run this loop 10 times to make sure the cache
	 * is hot and we take the minimum delta from all of the runs.
	 * That is to say that we're biased towards measuring the minimum
	 * number of APERF ticks that occur while waiting for the timer to
	 * expire.
	 */
	for(i = 0; i < 10; ++i)
	{
		enable_PIT2();
		set_PIT2_mode0(CALIBRATE_LATCH);
		aperfStart = rdmsr64(MSR_AMD_APERF);
		pollCount = poll_PIT2_gate();
		aperfEnd = rdmsr64(MSR_AMD_APERF);
		/* The poll loop must have run at least a few times for accuracy */
		if (pollCount <= 1)
			continue;
		/* The APERF count must increment at LEAST once every millisecond.
		 * We should have waited exactly 30 msec so the APERF delta should
		 * be >= 30. Anything less and the processor is way too slow.
		 */
		if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
			continue;
		// aperfDelta = MIN(aperfDelta, (aperfEnd - aperfStart))
		if ( (aperfEnd - aperfStart) < aperfDelta )
			aperfDelta = aperfEnd - aperfStart;
	}
	/* aperfDelta is now the least number of APERF ticks the processor made in
	 * a timespan of 0.03 s (e.g. 30 milliseconds)
	 */
	
	/* Sentinel/overflow guard: implausible deltas report 0 ("unknown"). */
	if (aperfDelta > (1ULL<<32))
		retval = 0;
	else
	{
		retval = aperfDelta * 1000 / 30;
	}
	disable_PIT2();
	return retval;
}
233 | #endif␊ |
234 | ␊ |
235 | #endif␊ |
236 | ␊ |
237 | /*␊ |
238 | License for x2apic_enabled, get_apicbase, compute_bclk.␊ |
239 | ␊ |
240 | Copyright (c) 2010, Intel Corporation␊ |
241 | All rights reserved.␊ |
242 | ␊ |
243 | Redistribution and use in source and binary forms, with or without␊ |
244 | modification, are permitted provided that the following conditions are met:␊ |
245 | ␊ |
246 | * Redistributions of source code must retain the above copyright notice,␊ |
247 | this list of conditions and the following disclaimer.␊ |
248 | * Redistributions in binary form must reproduce the above copyright notice,␊ |
249 | this list of conditions and the following disclaimer in the documentation␊ |
250 | and/or other materials provided with the distribution.␊ |
251 | * Neither the name of Intel Corporation nor the names of its contributors␊ |
252 | may be used to endorse or promote products derived from this software␊ |
253 | without specific prior written permission.␊ |
254 | ␊ |
255 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND␊ |
256 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED␊ |
257 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE␊ |
258 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR␊ |
259 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES␊ |
260 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;␊ |
261 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON␊ |
262 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT␊ |
263 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS␊ |
264 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.␊ |
265 | */␊ |
266 | static inline __attribute__((always_inline)) void rdmsr32(uint32_t msr, uint32_t * lo_data_addr, uint32_t * hi_data_addr);␊ |
267 | static inline __attribute__((always_inline)) void wrmsr32(uint32_t msr, uint32_t lo_data, uint32_t hi_data);␊ |
268 | static uint32_t x2apic_enabled(void);␊ |
269 | static uint32_t get_apicbase(void);␊ |
270 | static uint32_t compute_bclk(void);␊ |
/* Read MSR `msr` and store the 64-bit result split into its low (EAX)
 * and high (EDX) 32-bit halves at the given addresses. */
static inline __attribute__((always_inline)) void rdmsr32(uint32_t msr, uint32_t * lo_data_addr, uint32_t * hi_data_addr)
{ 
__asm__ volatile(
					 "rdmsr"
					 : "=a" (*lo_data_addr), "=d" (*hi_data_addr)
					 : "c" (msr)
					 ); 
}	
/* Write hi_data:lo_data (EDX:EAX convention) into MSR `msr`. */
static inline __attribute__((always_inline)) void wrmsr32(uint32_t msr, uint32_t lo_data, uint32_t hi_data)
{
__asm__ __volatile__ (
						 "wrmsr"
						 : /* No outputs */
						 : "c" (msr), "a" (lo_data), "d" (hi_data)
						 );
}
287 | #define MSR_APIC_BASE 0x1B␊ |
288 | #define APIC_TMR_INITIAL_CNT 0x380␊ |
289 | #define APIC_TMR_CURRENT_CNT 0x390␊ |
290 | #define APIC_TMR_DIVIDE_CFG 0x3E0␊ |
291 | #define MSR_APIC_TMR_INITIAL_CNT 0x838␊ |
292 | #define MSR_APIC_TMR_CURRENT_CNT 0x839␊ |
293 | #define MSR_APIC_TMR_DIVIDE_CFG 0x83E␊ |
294 | static uint32_t x2apic_enabled(void)␊ |
295 | {␊ |
296 | uint64_t temp64;␊ |
297 | ␉␊ |
298 | temp64 = rdmsr64(MSR_APIC_BASE);␊ |
299 | ␉␊ |
300 | return (uint32_t) (temp64 & (1 << 10)) ? 1 : 0;␊ |
301 | }␊ |
302 | static uint32_t get_apicbase(void)␊ |
303 | {␊ |
304 | uint64_t temp64;␊ |
305 | ␉␊ |
306 | temp64 = rdmsr64(MSR_APIC_BASE);␊ |
307 | ␉␊ |
308 | return (uint32_t) (temp64 & 0xfffff000);␊ |
309 | }␊ |
/* Measure the bus clock (BCLK) in MHz: count local APIC timer ticks
 * (timer configured for divide-by-2) over a fixed ~1 ms delay generated
 * by PIT channel 2 in mode 0. Handles both xAPIC (MMIO registers) and
 * x2APIC (MSR registers) operation. The result is rounded to the nearest
 * multiple of 100/12 MHz. */
static uint32_t compute_bclk(void)
{
uint32_t dummy;
uint32_t start, stop;
uint8_t temp8;
uint16_t delay_count;
uint32_t bclk;
	
#define DELAY_IN_US 1000
	
// Compute fixed delay as time
// delay count = desired time * PIT frequency
// PIT frequency = 1.193182 MHz
delay_count = 1193182 / DELAY_IN_US;
	
// PIT channel 2 gate is controlled by IO port 0x61, bit 0
#define PIT_CH2_LATCH_REG 0x61
#define CH2_SPEAKER (1 << 1) // bit 1 -- 1 = speaker enabled 0 = speaker disabled
#define CH2_GATE_IN (1 << 0) // bit 0 -- 1 = gate enabled, 0 = gate disabled
#define CH2_GATE_OUT (1 << 5) // bit 5 -- 1 = gate latched, 0 = gate not latched
	
// PIT Command register
#define PIT_MODE_COMMAND_REG 0x43
#define SELECT_CH2 (2 << 6)
#define ACCESS_MODE_LOBYTE_HIBYTE (3 << 4)
#define MODE0_INTERRUPT_ON_TERMINAL_COUNT 0 // Despite name, no interrupts on CH2
	
// PIT Channel 2 data port
#define PIT_CH2_DATA 0x42
	
// Disable the PIT channel 2 speaker and gate
temp8 = inb(PIT_CH2_LATCH_REG);
temp8 &= ~(CH2_SPEAKER | CH2_GATE_IN);
outb(PIT_CH2_LATCH_REG, temp8);
	
// Setup command and mode
outb(PIT_MODE_COMMAND_REG, SELECT_CH2 | ACCESS_MODE_LOBYTE_HIBYTE | MODE0_INTERRUPT_ON_TERMINAL_COUNT);
	
// Set time for fixed delay
outb(PIT_CH2_DATA, (uint8_t) (delay_count));
outb(PIT_CH2_DATA, (uint8_t) (delay_count >> 8));
	
// Prepare to enable channel 2 gate but leave the speaker disabled
temp8 = inb(PIT_CH2_LATCH_REG);
temp8 &= ~CH2_SPEAKER;
temp8 |= CH2_GATE_IN;
	
if (x2apic_enabled())
	{
// Set APIC Timer Divide Value as 2
// (divide-configuration encoding 0 selects divide-by-2)
wrmsr32(MSR_APIC_TMR_DIVIDE_CFG, 0, 0);
		
// start APIC timer with a known value
start = ~0UL;
wrmsr32(MSR_APIC_TMR_INITIAL_CNT, start, 0);
}
else
	{
// Set APIC Timer Divide Value as 2
// (divide-configuration encoding 0 selects divide-by-2)
*(volatile uint32_t *)(uint32_t) (get_apicbase() + APIC_TMR_DIVIDE_CFG) = 0UL;
		
// start APIC timer with a known value
start = ~0UL;
*(volatile uint32_t *)(uint32_t) (get_apicbase() + APIC_TMR_INITIAL_CNT) = start;
}
	
// Actually start the PIT channel 2
outb(PIT_CH2_LATCH_REG, temp8);
	
// Wait for the fixed delay
// (busy-poll until the channel 2 OUT pin goes high on terminal count)
while (!(inb(PIT_CH2_LATCH_REG) & CH2_GATE_OUT));
	
if (x2apic_enabled())
	{
// read the APIC timer to determine the change that occurred over this fixed delay
rdmsr32(MSR_APIC_TMR_CURRENT_CNT, &stop, &dummy);
		
// stop APIC timer
wrmsr32(MSR_APIC_TMR_INITIAL_CNT, 0, 0);
		
}
else
	{
// read the APIC timer to determine the change that occurred over this fixed delay
stop = *(volatile uint32_t *)(uint32_t) (get_apicbase() + APIC_TMR_CURRENT_CNT);
		
// stop APIC timer
*(volatile uint32_t *)(uint32_t) (get_apicbase() + APIC_TMR_INITIAL_CNT) = 0UL;
}
	
// Disable channel 2 speaker and gate input
temp8 = inb(PIT_CH2_LATCH_REG);
temp8 &= ~(CH2_SPEAKER | CH2_GATE_IN);
outb(PIT_CH2_LATCH_REG, temp8);
	
// elapsed ticks * 2 (timer divider) per microsecond of delay == MHz
bclk = (start - stop) * 2 / DELAY_IN_US;
	
// Round bclk to the nearest 100/12 integer value
bclk = ((((bclk * 24) + 100) / 200) * 200) / 24;
	
return bclk;
}
412 | ␊ |
413 | ␊ |
414 | /*␊ |
415 | * Calculates the FSB and CPU frequencies using specific MSRs for each CPU␊ |
416 | * - multi. is read from a specific MSR. In the case of Intel, there is:␊ |
417 | * a max multi. (used to calculate the FSB freq.),␊ |
418 | * and a current multi. (used to calculate the CPU freq.)␊ |
419 | * - fsbFrequency = tscFrequency / multi␊ |
420 | * - cpuFrequency = fsbFrequency * multi␊ |
421 | */␊ |
422 | ␊ |
void scan_cpu(void)
{	
	uint64_t	msr = 0; 


    uint64_t	Features = 0;		// CPU Features like MMX, SSE2, VT ...
	uint64_t	ExtFeatures = 0; // CPU Extended Features like SYSCALL, XD, EM64T, LAHF ...
    uint64_t	TSCFreq = 0 ;
    uint64_t FSBFreq = 0 ; 
    uint64_t CPUFreq = 0;

    uint32_t	reg[4];
    uint32_t cores_per_package = 0;
    uint32_t logical_per_package = 0;

    uint32_t	Vendor = 0;			// Vendor
	uint32_t	Signature = 0;		// Signature
	uint8_t Stepping = 0;		// Stepping
	uint8_t Model = 0;			// Model
	uint8_t ExtModel = 0;		// Extended Model
	uint8_t Family = 0;			// Family
	uint8_t ExtFamily = 0;		// Extended Family
	uint32_t	NoCores = 0;		// No Cores per Package
	uint32_t	NoThreads = 0;		// Threads per Package
	uint8_t Brand = 0; 
	uint32_t	MicrocodeVersion = 0; // The microcode version number a.k.a. signature a.k.a. BIOS ID 

	uint8_t isMobile = 0; 
	
	boolean_t	dynamic_acceleration = 0;
	boolean_t	invariant_APIC_timer = 0;
	boolean_t	fine_grain_clock_mod = 0;
	
	uint32_t cpuid_max_basic = 0;
	uint32_t cpuid_max_ext = 0;
	uint32_t	sub_Cstates = 0;
	uint32_t extensions = 0; 

	uint8_t		maxcoef = 0, maxdiv = 0, currcoef = 0, currdiv = 0;
    char		CpuBrandString[48];	// 48 Byte Branding String

	
	/* CPUID leaf 0: vendor signature and the highest supported basic leaf. */
	do_cpuid(0, reg);
	Vendor = reg[ebx];
	cpuid_max_basic = reg[eax];

#ifndef AMD_SUPPORT
    do_cpuid2(0x00000004, 0, reg);
    cores_per_package		= bitfield(reg[eax], 31, 26) + 1;
#endif
	
    /* get extended cpuid results */
	do_cpuid(0x80000000, reg);
	cpuid_max_ext = reg[eax];

	/* Begin of Copyright: from Apple's XNU cpuid.c */
	
	/* get brand string (if supported) */
	if (cpuid_max_ext > 0x80000004)
	{		
        char str[128], *s;
		/*
		 * The brand string 48 bytes (max), guaranteed to
		 * be NUL terminated.
		 */
		do_cpuid(0x80000002, reg);
		bcopy((char *)reg, &str[0], 16);
		do_cpuid(0x80000003, reg);
		bcopy((char *)reg, &str[16], 16);
		do_cpuid(0x80000004, reg);
		bcopy((char *)reg, &str[32], 16);
		for (s = str; *s != '\0'; s++)
		{
			if (*s != ' ') break;
		}
		
		strlcpy(CpuBrandString,	s, sizeof(CpuBrandString));
		
		if (!strncmp(CpuBrandString, CPUID_STRING_UNKNOWN, min(sizeof(CpuBrandString), (unsigned)strlen(CPUID_STRING_UNKNOWN) + 1)))
		{
            /*
             * This string means we have a firmware-programmable brand string,
             * and the firmware couldn't figure out what sort of CPU we have.
             */
            CpuBrandString[0] = '\0';
        }
	} 
	
    /*
	 * Get processor signature and decode
	 * and bracket this with the approved procedure for reading the
	 * the microcode version number a.k.a. signature a.k.a. BIOS ID
	 */
#ifndef AMD_SUPPORT
	wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0);
	do_cpuid(1, reg);
	MicrocodeVersion = (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32); 
#else
	do_cpuid(1, reg);
#endif	
	Signature = reg[eax];
	Stepping = bitfield(reg[eax], 3, 0);
	Model = bitfield(reg[eax], 7, 4);
	Family = bitfield(reg[eax], 11, 8);
	ExtModel = bitfield(reg[eax], 19, 16);
	ExtFamily = bitfield(reg[eax], 27, 20);
	Brand = bitfield(reg[ebx], 7, 0);
	Features = quad(reg[ecx], reg[edx]);

    /* Fold extensions into family/model */
	if (Family == 0x0f)
		Family += ExtFamily;
	if (Family == 0x0f || Family == 0x06)
		Model += (ExtModel << 4);

    if (Features & CPUID_FEATURE_HTT)
		logical_per_package =
        bitfield(reg[ebx], 23, 16);
	else
		logical_per_package = 1;	 
	
	
	if (cpuid_max_ext >= 0x80000001)
	{
		do_cpuid(0x80000001, reg);
		ExtFeatures =
        quad(reg[ecx], reg[edx]);
		
	}
	
	if (cpuid_max_ext >= 0x80000007)
	{
		do_cpuid(0x80000007, reg); 
		
		/* Fold in the Invariant TSC feature bit, if present */
		ExtFeatures |=
        reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
		
#ifdef AMD_SUPPORT
		/* Fold in the Hardware P-State control feature bit, if present */
		ExtFeatures |=
        reg[edx] & (uint32_t)_Bit(7);
		
		/* Fold in the read-only effective frequency interface feature bit, if present */
		ExtFeatures |=
        reg[edx] & (uint32_t)_Bit(10);
#endif
	} 
	
#ifdef AMD_SUPPORT
	if (cpuid_max_ext >= 0x80000008)
	{
		if (Features & CPUID_FEATURE_HTT) 
		{
			do_cpuid(0x80000008, reg);
			cores_per_package		= bitfield(reg[ecx], 7 , 0) + 1; // NC + 1
		}
	}		
#endif
	
    if (cpuid_max_basic >= 0x5) { 
		/*
		 * Extract the Monitor/Mwait Leaf info:
		 */
		do_cpuid(5, reg);
#ifndef AMD_SUPPORT
        sub_Cstates = reg[edx];
#endif
        extensions = reg[ecx];	
	}
	
#ifndef AMD_SUPPORT 
    if (cpuid_max_basic >= 0x6)
    { 
		/*
		 * The thermal and Power Leaf:
		 */
		do_cpuid(6, reg);
		dynamic_acceleration = bitfield(reg[eax], 1, 1); // "Dynamic Acceleration Technology (Turbo Mode)"
		invariant_APIC_timer = bitfield(reg[eax], 2, 2); // "Invariant APIC Timer"
        fine_grain_clock_mod = bitfield(reg[eax], 4, 4);
	}
	
    if ((Vendor == CPUID_VENDOR_INTEL) && 
		(Family == 0x06))
	{
		/*
		 * Find the number of enabled cores and threads
		 * (which determines whether SMT/Hyperthreading is active).
		 */
		switch (Model)
		{
				
			case CPUID_MODEL_DALES_32NM:
			case CPUID_MODEL_WESTMERE:
			case CPUID_MODEL_WESTMERE_EX:
			{
				msr = rdmsr64(MSR_CORE_THREAD_COUNT);
				NoThreads = bitfield((uint32_t)msr, 15, 0);
				NoCores = bitfield((uint32_t)msr, 19, 16); 
				break;
			}
				
			case CPUID_MODEL_NEHALEM:
			case CPUID_MODEL_FIELDS:
			case CPUID_MODEL_DALES:
			case CPUID_MODEL_NEHALEM_EX:
			case CPUID_MODEL_SANDYBRIDGE:
			case CPUID_MODEL_JAKETOWN:
			{
				msr = rdmsr64(MSR_CORE_THREAD_COUNT);
				NoThreads = bitfield((uint32_t)msr, 15, 0);
				NoCores = bitfield((uint32_t)msr, 31, 16); 
				break;
			} 
		}
    }
#endif
    if (NoCores == 0)
	{
#ifdef AMD_SUPPORT		
		if (!cores_per_package) {
			//legacy method
			if ((ExtFeatures & _HBit(1)/* CmpLegacy */) && ( Features & CPUID_FEATURE_HTT) )
				cores_per_package = logical_per_package; 
			else 
				cores_per_package = 1;
		}		
#endif
		NoThreads = logical_per_package;
		NoCores = cores_per_package ? cores_per_package : 1 ;
	}
	
	/* End of Copyright: from Apple's XNU cpuid.c */

	// compute_bclk() reports the bus clock in MHz; scale to Hz here.
	FSBFreq = (uint64_t)(compute_bclk() * 1000000);

#if LEGACY_CPU
	TSCFreq = measure_tsc_frequency();
#endif	
	
#ifdef AMD_SUPPORT
#define K8_FIDVID_STATUS		0xC0010042
#define K10_COFVID_STATUS		0xC0010071
	if (ExtFeatures & _Bit(10))
	{		
		CPUFreq = measure_aperf_frequency();
	}
	
    if ((Vendor == CPUID_VENDOR_AMD) && (Family == 0x0f))
	{
		switch(ExtFamily)
		{
			case 0x00: /* K8 */
				msr = rdmsr64(K8_FIDVID_STATUS);
				maxcoef = bitfield(msr, 21, 16) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) / 2 + 4;
				break;
				
			case 0x01: /* K10 */
            {
                //uint32_t reg[4];
				msr = rdmsr64(K10_COFVID_STATUS);
				/*
                 do_cpuid2(0x00000006, 0, reg);
				 EffFreq: effective frequency interface
                 if (bitfield(reg[ecx], 0, 0) == 1)
                 {
                 uint64_t aperf = measure_aperf_frequency();
                 CPUFreq = aperf;
                 }
                 */				 
				// NOTE: tsc runs at the maccoeff (non turbo)
				//			*not* at the turbo frequency.
				maxcoef	 = bitfield(msr, 54, 49) / 2 + 4;
				currcoef = bitfield(msr, 5, 0) + 0x10;
				currdiv = 2 << bitfield(msr, 8, 6);
				
				break;
			}	
			case 0x05: /* K14 */
				msr = rdmsr64(K10_COFVID_STATUS);
				currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
				currdiv = (bitfield(msr, 8, 4) + 1) << 2;
				currdiv += bitfield(msr, 3, 0);
				
				break;
				
			case 0x02: /* K11 */
				DBG("K11 detected, but not supported !!!\n");
				// not implemented
				break;
		}
		
		if (!FSBFreq)
		{
			if (maxcoef)
			{
				if (currdiv)
				{
					if (!currcoef) currcoef = maxcoef;
					if (!CPUFreq)
						FSBFreq = ((TSCFreq * currdiv) / currcoef);
					else
						FSBFreq = ((CPUFreq * currdiv) / currcoef);
					
					DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
				} else {
					if (!CPUFreq)
						FSBFreq = (TSCFreq / maxcoef);
					else 
						FSBFreq = (CPUFreq / maxcoef);
					DBG("%d\n", currcoef);
				}
			}
			else if (currcoef)
			{
				if (currdiv)
				{
					FSBFreq = ((TSCFreq * currdiv) / currcoef);
					DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
				} else {
					FSBFreq = (TSCFreq / currcoef);
					DBG("%d\n", currcoef);
				}
			}
		}
		
	}
	
	// NOTE: This is not the approved method,
	// the method provided by AMD is: 
	// if ((PowerNow == enabled (p->cpu->cpuid_max_ext >= 0x80000007)) && (StartupFID(??) != MaxFID(??))) then "mobile processor present"
	
	if (strstr(CpuBrandString, "obile")) 
		isMobile = 1;
	else 
		isMobile = 0;
	
	DBG("%s platform detected.\n", isMobile?"Mobile":"Desktop");
#else
    if ((Vendor == CPUID_VENDOR_INTEL) && 
		((Family == 0x06) || 
		 (Family == 0x0f)))
	{
		if ((Family == 0x06 && Model >= 0x0c) || 
			(Family == 0x0f && Model >= 0x03))
		{
			/* Nehalem CPU model */
			if (Family == 0x06 && (Model == CPUID_MODEL_NEHALEM || 
                                   Model == CPUID_MODEL_FIELDS || 
                                   Model == CPUID_MODEL_DALES || 
                                   Model == CPUID_MODEL_DALES_32NM || 
                                   Model == CPUID_MODEL_WESTMERE ||
                                   Model == CPUID_MODEL_NEHALEM_EX ||
                                   Model == CPUID_MODEL_WESTMERE_EX ||
                                   Model == CPUID_MODEL_SANDYBRIDGE ||
                                   Model == CPUID_MODEL_JAKETOWN)) 
			{
				uint8_t		bus_ratio_max = 0;
				uint64_t	flex_ratio = 0;
				msr = rdmsr64(MSR_PLATFORM_INFO);
#if DEBUG_CPU
                uint32_t	max_ratio = 0, bus_ratio_min = 0;

				DBG("msr(%d): platform_info %08x\n", __LINE__, msr & 0xffffffff);
#endif
				bus_ratio_max = (msr >> 8) & 0xff;
				//bus_ratio_min = (msr >> 40) & 0xff; 
				msr = rdmsr64(MSR_FLEX_RATIO);
#if DEBUG_CPU
				DBG("msr(%d): flex_ratio %08x\n", __LINE__, msr & 0xffffffff);
#endif
				if ((msr >> 16) & 0x01)
				{
					flex_ratio = (msr >> 8) & 0xff;
					/* bcc9: at least on the gigabyte h67ma-ud2h,
					 where the cpu multipler can't be changed to
					 allow overclocking, the flex_ratio msr has unexpected (to OSX)
					 contents. These contents cause mach_kernel to
					 fail to compute the bus ratio correctly, instead
					 causing the system to crash since tscGranularity
					 is inadvertently set to 0.
					 */
					if (flex_ratio == 0)
					{
						/* Clear bit 16 (evidently the
						 presence bit) */
						wrmsr64(MSR_FLEX_RATIO, (msr & 0xFFFFFFFFFFFEFFFFULL));
#if DEBUG_CPU
                        msr = rdmsr64(MSR_FLEX_RATIO);

						DBG("Unusable flex ratio detected. MSR Patched to %08x\n", msr & 0xffffffff);
#endif
					}
					else
					{
						if (bus_ratio_max > flex_ratio)
						{
							bus_ratio_max = flex_ratio;
						}
					}
				}
#if LEGACY_CPU
				if (bus_ratio_max)
				{
					FSBFreq = (TSCFreq / bus_ratio_max);
				}
#endif
				//valv: Turbo Ratio Limit
				if ((Model != 0x2e) && (Model != 0x2f))
				{
					//msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
					CPUFreq = bus_ratio_max * FSBFreq;
					//max_ratio = bus_ratio_max * 10;
				}
				else
				{
#if LEGACY_CPU
					CPUFreq = TSCFreq;
#else
					CPUFreq = bus_ratio_max * FSBFreq;
#endif
				}								
#if DEBUG_CPU
				DBG("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", FSBFreq / 1000000, max_ratio);
#endif
				currcoef = bus_ratio_max;
                
                TSCFreq = CPUFreq;
			} 
			else
			{
				msr = rdmsr64(MSR_IA32_PERF_STATUS);
#if DEBUG_CPU
				DBG("msr(%d): ia32_perf_stat 0x%08x\n", __LINE__, msr & 0xffffffff);
#endif
				currcoef = (msr >> 8) & 0x1f;
				/* Non-integer bus ratio for the max-multi*/
				maxdiv = (msr >> 46) & 0x01;
				/* Non-integer bus ratio for the current-multi (undocumented)*/
				currdiv = (msr >> 14) & 0x01;

				if ((Family == 0x06 && Model >= 0x0e) || 
					(Family == 0x0f)) // This will always be model >= 3
				{
					/* On these models, maxcoef defines TSC freq */
					maxcoef = (msr >> 40) & 0x1f;
				} 
				else 
				{
					/* On lower models, currcoef defines TSC freq */
					/* XXX */
					maxcoef = currcoef;
				}
				if (!currcoef) currcoef = maxcoef;
#if LEGACY_CPU
				if (maxcoef) 
				{					
					
					if (maxdiv)
					{
						FSBFreq = ((TSCFreq * 2) / ((maxcoef * 2) + 1));
					}
					else 
					{
						FSBFreq = (TSCFreq / maxcoef);
					}
					
					if (currdiv) 
					{
						CPUFreq = (FSBFreq * ((currcoef * 2) + 1) / 2);
					}
					else 
					{
						CPUFreq = (FSBFreq * currcoef);
					}
#if DEBUG_CPU
					DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
#endif
				}
#else
				
				
				if (currdiv) 
				{
					CPUFreq = (FSBFreq * ((currcoef * 2) + 1) / 2);
				}
				else 
				{
					CPUFreq = (FSBFreq * currcoef);
				}
				
				if (maxcoef) 
				{
					if (maxdiv)
					{
						TSCFreq = (FSBFreq * ((maxcoef * 2) + 1)) / 2;
					}
					else 
					{
						TSCFreq = FSBFreq * maxcoef;
					}
				}								
#if DEBUG_CPU
				DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
#endif

#endif // LEGACY_CPU
				
			}
		}
        /* Mobile CPU ? */ 
		//Slice 
	    //isMobile = 0;
		switch (Model)
		{
			case 0x0D:
				isMobile = 1; 
				break;			
			case 0x02:
			case 0x03:
			case 0x04:
			case 0x06:	
				isMobile = (rdmsr64(0x2C) & (1 << 21))? 1 : 0;
				break;
			default:
				isMobile = (rdmsr64(0x17) & (1 << 28)) ? 1 : 0;
				break;
		}

		DBG("%s platform detected.\n", isMobile?"Mobile":"Desktop");
	}
#endif
	/* Whichever frequency could not be measured falls back to the other. */
	if (!CPUFreq) CPUFreq = TSCFreq;
    if (!TSCFreq) TSCFreq = CPUFreq;

	set_env(envVendor, Vendor);
    set_env(envModel, Model); 
    set_env(envExtModel, ExtModel); 

	set_env(envCPUIDMaxBasic, cpuid_max_basic);
	// NOTE(review): envCPUIDMaxBasic is reused here for cpuid_max_ext —
	// looks like a copy/paste slip that overwrites the basic-leaf value;
	// presumably an envCPUIDMaxExt key was intended. Verify against the
	// env key list in platform.h before changing.
	set_env(envCPUIDMaxBasic, cpuid_max_ext);
#ifndef AMD_SUPPORT
	set_env(envMicrocodeVersion, MicrocodeVersion); 
#endif
    set_env_copy(envBrandString, CpuBrandString, sizeof(CpuBrandString));
	set_env(envSignature, Signature); 
	set_env(envStepping, Stepping); 
	set_env(envFamily,	 Family); 
	// NOTE(review): envExtModel was already set above (after envModel);
	// this second call is redundant but harmless.
	set_env(envExtModel, ExtModel); 
	set_env(envExtFamily, ExtFamily); 
	set_env(envBrand,	 Brand); 
	set_env(envFeatures, Features);
    set_env(envExtFeatures, ExtFeatures);
#ifndef AMD_SUPPORT
	set_env(envSubCstates, sub_Cstates); 
#endif
	set_env(envExtensions, extensions); 
#ifndef AMD_SUPPORT 
	set_env(envDynamicAcceleration, dynamic_acceleration); 
	set_env(envInvariantAPICTimer,	 invariant_APIC_timer); 
	set_env(envFineGrainClockMod, fine_grain_clock_mod);
#endif
	set_env(envNoThreads,	 NoThreads); 
	set_env(envNoCores,		 NoCores);
	set_env(envIsMobile,		 isMobile);
	
	set_env(envMaxCoef,		 maxcoef); 
	set_env(envMaxDiv,		 maxdiv);
	set_env(envCurrCoef,		 currcoef);
	set_env(envCurrDiv,	 currdiv); 
	set_env(envTSCFreq,	 TSCFreq);
	set_env(envFSBFreq,	 FSBFreq);
	set_env(envCPUFreq,	 CPUFreq);
	
#ifdef AMD_SUPPORT
    msglog("AMD CPU Detection Enabled\n");
#endif
	
}