Chameleon

Chameleon Commit Details

Date: 2015-04-14 17:52:53 (9 years 9 days ago)
Author: ErmaC
Commit: 2647
Parents: 2646
Message: Implement Bronya's AMD support (credits to Bronya). More info here: http://www.insanelymac.com/forum/forum/318-amd-development/
Changes:
M /trunk/i386/libsaio/cpu.c
M /trunk/i386/libsaio/efi.h
M /trunk/i386/libsaio/fake_efi.c
M /trunk/i386/libsaio/platform.h
M /trunk/i386/modules/AcpiCodec/acpi_codec.c
M /trunk/i386/libsaio/cpu.h

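The core of the change is CPUID-based vendor detection: leaf 0 returns the vendor string across EBX, EDX and ECX, and the new IsAmdCPU()/IsIntelCPU() helpers added to cpu.c and acpi_codec.c compare those registers against "AuthenticAMD" / "GenuineIntel". A minimal standalone sketch of the same check (the local cpuid() helper and the main() wrapper are illustrative, not the bootloader's do_cpuid()):

/* Editor's sketch, not part of the commit. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void cpuid(uint32_t fn, uint32_t r[4])
{
	__asm__ volatile ("cpuid"
	                  : "=a" (r[0]), "=b" (r[1]), "=c" (r[2]), "=d" (r[3])
	                  : "a" (fn), "c" (0));
}

int main(void)
{
	uint32_t r[4];
	char vendor[13];

	cpuid(0, r);
	/* The 12-byte vendor string is laid out EBX, EDX, ECX. */
	memcpy(vendor + 0, &r[1], 4);
	memcpy(vendor + 4, &r[3], 4);
	memcpy(vendor + 8, &r[2], 4);
	vendor[12] = '\0';

	printf("vendor: %s -> %s\n", vendor,
	       strcmp(vendor, "AuthenticAMD") == 0 ? "AMD path" : "Intel/other path");
	return 0;
}

Build with any GCC/Clang targeting x86; on an AMD machine it takes the "AMD path" branch, which is the condition the rest of this diff keys off.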
File differences

trunk/i386/libsaio/efi.h
// physical memory protection on range
#define EFI_MEMORY_WP 0x0000000000001000ULL
#define EFI_MEMORY_RP 0x0000000000002000ULL
#define EFI_MEMORY_XP 0x0000000000004000ULL
#define EFI_MEMORY_WP 0x0000000000001000ULL /* write-protect */
#define EFI_MEMORY_RP 0x0000000000002000ULL /* read-protect */
#define EFI_MEMORY_XP 0x0000000000004000ULL /* execute-protect */
// range requires a runtime mapping
#define EFI_MEMORY_RUNTIME 0x8000000000000000ULL
#define EFI_SYSTEM_TABLE_SIGNATURE 0x5453595320494249ULL
#define EFI_SYSTEM_TABLE_REVISION ((EFI_SPECIFICATION_MAJOR_REVISION << 16) | (EFI_SPECIFICATION_MINOR_REVISION))
#define EFI_2_00_SYSTEM_TABLE_REVISION ((2 << 16) | 00)
#define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | 02)
#define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | 10)
#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20))
#define EFI_2_10_SYSTEM_TABLE_REVISION ((2 << 16) | (10))
#define EFI_2_00_SYSTEM_TABLE_REVISION ((2 << 16) | (00))
#define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | (10))
#define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | (02))
typedef struct EFI_SYSTEM_TABLE_32 {
EFI_TABLE_HEADER Hdr;
trunk/i386/libsaio/cpu.c
#define DBG(x...)
#endif
boolean_t ForceAmdCpu = false;
/* For AMD CPU's */
boolean_t IsAmdCPU(void)
{
if (ForceAmdCpu)
{
return true;
}
uint32_t ourcpuid[4];
do_cpuid(0, ourcpuid);
if (
/* This spells out "AuthenticAMD". */
ourcpuid[ebx] == 0x68747541 && // Auth
ourcpuid[ecx] == 0x444D4163 && // cAMD
ourcpuid[edx] == 0x69746E65) // enti
{
return true;
}
return false;
};
/* For Intel CPU's */
boolean_t IsIntelCPU(void)
{
uint32_t ourcpuid[4];
do_cpuid(0, ourcpuid);
if (
/* This spells out "GenuineIntel". */
ourcpuid[ebx] == 0x756E6547 && // Genu
ourcpuid[ecx] == 0x6C65746E && // ntel
ourcpuid[edx] == 0x49656E69) // ineI
{
return true;
}
if (!IsAmdCPU())
{
return true;
}
return false;
}
#define UI_CPUFREQ_ROUNDING_FACTOR 10000000
clock_frequency_info_t gPEClockFrequencyInfo;
static inline uint32_t __unused clockspeed_rdtsc(void)
{
uint32_t out;
__asm__ volatile (
"rdtsc\n"
"shl $32,%%edx\n"
"or %%edx,%%eax\n"
: "=a" (out)
:
: "%edx"
);
return out;
}
/*
* timeRDTSC()
* This routine sets up PIT counter 2 to count down 1/20 of a second.
//int_enabled = ml_set_interrupts_enabled(false);
restart:
if (attempts >= 9) // increase to up to 9 attempts.
if (attempts >= 3) // increase to up to 9 attempts.
{
// This will flash-reboot. TODO: Use tscPanic instead.
printf("Timestamp counter calibation failed with %d attempts\n", attempts);
//printf("Timestamp counter calibation failed with %d attempts\n", attempts);
}
attempts++;
enable_PIT2();// turn on PIT2
}
lastValue = timerValue;
} while (timerValue > 5);
printf("timerValue %d\n",timerValue);
printf("intermediate 0x%016llX\n",intermediate);
printf("saveTime 0x%016llX\n",saveTime);
//printf("timerValue %d\n",timerValue);
//printf("intermediate 0x%016llX\n",intermediate);
//printf("saveTime 0x%016llX\n",saveTime);
intermediate -= saveTime;// raw count for about 1/20 second
intermediate *= scale[timerValue];// rescale measured time spent
/*
* DFE: Measures the TSC frequency in Hz (64-bit) using the ACPI PM timer
*/
static uint64_t measure_tsc_frequency(void)
static uint64_t __unused measure_tsc_frequency(void)
{
uint64_t tscStart;
uint64_t tscEnd;
return retval;
}
/*
* Original comment/code:
* "DFE: Measures the Max Performance Frequency in Hz (64-bit)"
*
* Measures the Actual Performance Frequency in Hz (64-bit)
* (just a naming change, mperf --> aperf )
*/
static uint64_t measure_aperf_frequency(void)
static uint64_t rtc_set_cyc_per_sec(uint64_t cycles);
#define RTC_FAST_DENOM 0xFFFFFFFF
inline static uint32_t
create_mul_quant_GHZ(int shift, uint32_t quant)
{
uint64_t aperfStart;
uint64_t aperfEnd;
uint64_t aperfDelta = 0xffffffffffffffffULL;
unsigned long pollCount;
uint64_t retval = 0;
int i;
return (uint32_t)((((uint64_t)NSEC_PER_SEC/20) << shift) / quant);
}
/* Time how many APERF ticks elapse in 30 msec using the 8254 PIT
* counter 2. We run this loop 3 times to make sure the cache
* is hot and we take the minimum delta from all of the runs.
* That is to say that we're biased towards measuring the minimum
* number of APERF ticks that occur while waiting for the timer to
* expire.
*/
for(i = 0; i < 10; ++i)
struct {
mach_timespec_t calend_offset;
boolean_t calend_is_set;
int64_t calend_adjtotal;
int32_t calend_adjdelta;
uint32_t boottime;
mach_timebase_info_data_t timebase_const;
decl_simple_lock_data(,lock) /* real-time clock device lock */
} rtclock;
uint32_t rtc_quant_shift; /* clock to nanos right shift */
uint32_t rtc_quant_scale; /* clock to nanos multiplier */
uint64_t rtc_cyc_per_sec; /* processor cycles per sec */
uint64_t rtc_cycle_count; /* clocks in 1/20th second */
//uint64_t cpuFreq;
static uint64_t rtc_set_cyc_per_sec(uint64_t cycles)
{
if (cycles > (NSEC_PER_SEC/20))
{
enable_PIT2();
set_PIT2_mode0(CALIBRATE_LATCH);
aperfStart = rdmsr64(MSR_AMD_APERF);
pollCount = poll_PIT2_gate();
aperfEnd = rdmsr64(MSR_AMD_APERF);
/* The poll loop must have run at least a few times for accuracy */
if (pollCount <= 1)
{
continue;
}
/* The TSC must increment at LEAST once every millisecond.
* We should have waited exactly 30 msec so the APERF delta should
* be >= 30. Anything less and the processor is way too slow.
*/
if ((aperfEnd - aperfStart) <= CALIBRATE_TIME_MSEC)
{
continue;
}
// tscDelta = MIN(tscDelta, (tscEnd - tscStart))
if ( (aperfEnd - aperfStart) < aperfDelta )
{
aperfDelta = aperfEnd - aperfStart;
}
// we can use just a "fast" multiply to get nanos
rtc_quant_shift = 32;
rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, (uint32_t)cycles);
rtclock.timebase_const.numer = rtc_quant_scale; // timeRDTSC is 1/20
rtclock.timebase_const.denom = (uint32_t)RTC_FAST_DENOM;
}
/* mperfDelta is now the least number of MPERF ticks the processor made in
* a timespan of 0.03 s (e.g. 30 milliseconds)
else
{
rtc_quant_shift = 26;
rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, (uint32_t)cycles);
rtclock.timebase_const.numer = NSEC_PER_SEC/20; // timeRDTSC is 1/20
rtclock.timebase_const.denom = (uint32_t)cycles;
}
rtc_cyc_per_sec = cycles*20;// multiply it by 20 and we are done..
// BUT we also want to calculate...
cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
/ UI_CPUFREQ_ROUNDING_FACTOR)
* UI_CPUFREQ_ROUNDING_FACTOR;
/*
* Set current measured speed.
*/
if (aperfDelta > (1ULL<<32))
if (cycles >= 0x100000000ULL)
{
retval = 0;
gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
}
else
{
retval = aperfDelta * 1000 / 30;
gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
}
disable_PIT2();
return retval;
gPEClockFrequencyInfo.cpu_frequency_hz = cycles;
//printf("[RTCLOCK_1] frequency %llu (%llu) %llu\n", cycles, rtc_cyc_per_sec,timeRDTSC() * 20);
return(rtc_cyc_per_sec);
}
/*
* - multi. is read from a specific MSR. In the case of Intel, there is:
* a max multi. (used to calculate the FSB freq.),
* and a current multi. (used to calculate the CPU freq.)
* - fsbFrequency = tscFrequency / multi
* - cpuFrequency = fsbFrequency * multi
* - busFrequency = tscFrequency / multi
* - cpuFrequency = busFrequency * multi
*/
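/*
 * Worked example of the relation above (illustrative numbers, not from this
 * commit): a part whose TSC measures 3200 MHz with a maximum multiplier of 16
 * gives busFrequency = 3200 / 16 = 200 MHz and cpuFrequency = 200 * 16 = 3200 MHz.
 */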
/* Decimal powers: */
#define kilo (1000ULL)
#define Mega (kilo * kilo)
#define Giga (kilo * Mega)
#define Tera (kilo * Giga)
#define Peta (kilo * Tera)
#define quad(hi,lo) (((uint64_t)(hi)) << 32 | (lo))
void scan_cpu(PlatformInfo_t *p)
{
uint64_t tscFrequency = 0;
uint64_t fsbFrequency = 0;
uint64_t busFCvtt2n;
uint64_t tscFCvtt2n;
uint64_t tscFreq = 0;
uint64_t busFrequency = 0;
uint64_t cpuFrequency = 0;
uint64_t msr = 0;
uint64_t flex_ratio = 0;
uint64_t cpuid_features;
uint32_t max_ratio = 0;
uint32_t min_ratio = 0;
uint32_t reg[4]; //= {0, 0, 0, 0};
uint32_t reg[4];
uint32_t cores_per_package = 0;
uint32_t logical_per_package = 1;
uint32_t threads_per_core = 1;
uint8_t currcoef = 0;
uint8_t maxdiv = 0;
uint8_t maxcoef = 0;
uint8_t pic0_mask;
uint8_t cpuMultN2 = 0;
const char *newratio;
char str[128];
int myfsb = 0;
int i = 0;
/* get cpuid values */
do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]); // MaxFn, Vendor
p->CPU.Vendor = p->CPU.CPUID[CPUID_0][ebx];
do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]); // Signature, stepping, features
if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && ((bit(28) & p->CPU.CPUID[CPUID_1][edx]) != 0)) // Intel && HTT/Multicore
if(IsIntelCPU())
{
logical_per_package = bitfield(p->CPU.CPUID[CPUID_1][ebx], 23, 16);
}
do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]); // TLB/Cache/Prefetch
do_cpuid(0x00000002, p->CPU.CPUID[CPUID_2]); // TLB/Cache/Prefetch
do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]); // S/N
do_cpuid(0x00000003, p->CPU.CPUID[CPUID_3]); // S/N
/* Based on Apple's XNU cpuid.c - Deterministic cache parameters */
if ((p->CPU.CPUID[CPUID_0][eax] > 3) && (p->CPU.CPUID[CPUID_0][eax] < 0x80000000))
{
for (i = 0; i < 0xFF; i++) // safe loop
/* Based on Apple's XNU cpuid.c - Deterministic cache parameters */
if ((p->CPU.CPUID[CPUID_0][eax] > 3) && (p->CPU.CPUID[CPUID_0][eax] < 0x80000000))
{
do_cpuid2(0x00000004, i, reg); // AX=4: Fn, CX=i: cache index
if (bitfield(reg[eax], 4, 0) == 0)
for (i = 0; i < 0xFF; i++) // safe loop
{
break;
do_cpuid2(0x00000004, i, reg); // AX=4: Fn, CX=i: cache index
if (bitfield(reg[eax], 4, 0) == 0)
{
break;
}
cores_per_package = bitfield(reg[eax], 31, 26) + 1;
}
//cores_per_package = bitfield(reg[eax], 31, 26) + 1;
}
do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
if (i > 0)
{
cores_per_package = bitfield(p->CPU.CPUID[CPUID_4][eax], 31, 26) + 1; // i = cache index
threads_per_core = bitfield(p->CPU.CPUID[CPUID_4][eax], 25, 14) + 1;
}
}
do_cpuid2(0x00000004, 0, p->CPU.CPUID[CPUID_4]);
if (cores_per_package == 0)
{
cores_per_package = 1;
}
if (i > 0)
{
cores_per_package = bitfield(p->CPU.CPUID[CPUID_4][eax], 31, 26) + 1; // i = cache index
threads_per_core = bitfield(p->CPU.CPUID[CPUID_4][eax], 25, 14) + 1;
}
if (p->CPU.CPUID[CPUID_0][0] >= 0x5)// Monitor/Mwait
{
do_cpuid(5, p->CPU.CPUID[CPUID_5]);
}
if (cores_per_package == 0)
{
cores_per_package = 1;
}
if (p->CPU.CPUID[CPUID_0][0] >= 6)// Thermal/Power
{
do_cpuid(6, p->CPU.CPUID[CPUID_6]);
}
if (p->CPU.CPUID[CPUID_0][0] >= 0x5)// Monitor/Mwait
{
do_cpuid(5, p->CPU.CPUID[CPUID_5]);
do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8)
{
do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
}
else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1)
{
do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
}
}
if (p->CPU.CPUID[CPUID_0][0] >= 6)// Thermal/Power
else if(IsAmdCPU())
{
do_cpuid(6, p->CPU.CPUID[CPUID_6]);
}
do_cpuid(5, p->CPU.CPUID[CPUID_5]); // Monitor/Mwait
do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
do_cpuid(0x80000000, p->CPU.CPUID[CPUID_80]);
if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8)
{
do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
}
if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 8)
{
if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1)
{
do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
}
do_cpuid(0x80000005, p->CPU.CPUID[CPUID_85]); // TLB/Cache/Prefetch
do_cpuid(0x80000006, p->CPU.CPUID[CPUID_86]); // TLB/Cache/Prefetch
do_cpuid(0x80000008, p->CPU.CPUID[CPUID_88]);
do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
cores_per_package = bitfield(p->CPU.CPUID[CPUID_88][ecx], 7, 0) + 1;
threads_per_core = cores_per_package;
if (cores_per_package == 0)
{
cores_per_package = 1;
}
p->CPU.NoThreads = logical_per_package;
p->CPU.NoCores = cores_per_package;
if (p->CPU.NoCores == 0)
{
p->CPU.NoCores = 1;
p->CPU.NoThreads = 1;
}
}
else if ((p->CPU.CPUID[CPUID_80][0] & 0x0000000f) >= 1)
else
{
do_cpuid(0x80000001, p->CPU.CPUID[CPUID_81]);
stop("Unsupported CPU detected! System halted.");
}
/* http://www.flounder.com/cpuid_explorer2.htm
|########|Extended family |Extmodel|####|####|familyid| model |stepping|
+--------+----------------+--------+----+----+--------+--------+--------+
*/
do_cpuid(0x00000001, p->CPU.CPUID[CPUID_1]); // Signature, stepping, features
cpuid_features = quad(p->CPU.CPUID[CPUID_1][ecx],p->CPU.CPUID[CPUID_1][edx]);
if (bit(28) & cpuid_features) // HTT/Multicore
{
logical_per_package = bitfield(p->CPU.CPUID[CPUID_1][ebx], 23, 16);
}
else
{
logical_per_package = 1;
}
do_cpuid(0x00000000, p->CPU.CPUID[CPUID_0]); // MaxFn, Vendor
p->CPU.Vendor = p->CPU.CPUID[CPUID_0][1];
p->CPU.Signature = p->CPU.CPUID[CPUID_1][0];
p->CPU.Stepping = (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 3, 0); // stepping = cpu_feat_eax & 0xF;
p->CPU.ExtModel = (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 19, 16); // ext_model = (cpu_feat_eax >> 16) & 0xF;
p->CPU.ExtFamily = (uint8_t)bitfield(p->CPU.CPUID[CPUID_1][0], 27, 20); // ext_family = (cpu_feat_eax >> 20) & 0xFF;
p->CPU.Model += (p->CPU.ExtModel << 4);
if (p->CPU.Family == 0x0f)
{
p->CPU.Family += p->CPU.ExtFamily;
}
if (p->CPU.Family == 0x0f || p->CPU.Family == 0x06)
{
p->CPU.Model += (p->CPU.ExtModel << 4);
}
/* get BrandString (if supported) */
/* Copyright: from Apple's XNU cpuid.c */
if (p->CPU.CPUID[CPUID_80][0] > 0x80000004)
* Find the number of enabled cores and threads
* (which determines whether SMT/Hyperthreading is active).
*/
switch (p->CPU.Vendor)
if(IsIntelCPU())
{
case CPUID_VENDOR_INTEL:
switch (p->CPU.Model)
{
case CPUID_MODEL_NEHALEM:
case CPUID_MODEL_FIELDS:
case CPUID_MODEL_DALES:
case CPUID_MODEL_NEHALEM_EX:
case CPUID_MODEL_JAKETOWN:
case CPUID_MODEL_SANDYBRIDGE:
case CPUID_MODEL_IVYBRIDGE:
switch (p->CPU.Model)
{
case CPUID_MODEL_NEHALEM:
case CPUID_MODEL_FIELDS:
case CPUID_MODEL_DALES:
case CPUID_MODEL_NEHALEM_EX:
case CPUID_MODEL_JAKETOWN:
case CPUID_MODEL_SANDYBRIDGE:
case CPUID_MODEL_IVYBRIDGE:
case CPUID_MODEL_HASWELL:
case CPUID_MODEL_HASWELL_SVR:
//case CPUID_MODEL_HASWELL_H:
case CPUID_MODEL_HASWELL_ULT:
case CPUID_MODEL_CRYSTALWELL:
//case CPUID_MODEL_:
msr = rdmsr64(MSR_CORE_THREAD_COUNT);
p->CPU.NoCores = (uint32_t)bitfield((uint32_t)msr, 31, 16);
p->CPU.NoThreads = (uint32_t)bitfield((uint32_t)msr, 15, 0);
break;
case CPUID_MODEL_HASWELL:
case CPUID_MODEL_HASWELL_SVR:
//case CPUID_MODEL_HASWELL_H:
case CPUID_MODEL_HASWELL_ULT:
case CPUID_MODEL_CRYSTALWELL:
//case CPUID_MODEL_:
msr = rdmsr64(MSR_CORE_THREAD_COUNT);
p->CPU.NoCores = (uint32_t)bitfield((uint32_t)msr, 31, 16);
p->CPU.NoThreads = (uint32_t)bitfield((uint32_t)msr, 15, 0);
break;
case CPUID_MODEL_DALES_32NM:
case CPUID_MODEL_WESTMERE:
case CPUID_MODEL_WESTMERE_EX:
msr = rdmsr64(MSR_CORE_THREAD_COUNT);
p->CPU.NoCores = (uint32_t)bitfield((uint32_t)msr, 19, 16);
p->CPU.NoThreads = (uint32_t)bitfield((uint32_t)msr, 15, 0);
break;
}
case CPUID_MODEL_DALES_32NM:
case CPUID_MODEL_WESTMERE:
case CPUID_MODEL_WESTMERE_EX:
msr = rdmsr64(MSR_CORE_THREAD_COUNT);
p->CPU.NoCores = (uint32_t)bitfield((uint32_t)msr, 19, 16);
p->CPU.NoThreads = (uint32_t)bitfield((uint32_t)msr, 15, 0);
break;
}
if (p->CPU.NoCores == 0)
{
p->CPU.NoCores = cores_per_package;
p->CPU.NoThreads = logical_per_package;
}
break;
if (p->CPU.NoCores == 0)
{
p->CPU.NoCores = cores_per_package;
p->CPU.NoThreads = logical_per_package;
}
case CPUID_VENDOR_AMD:
p->CPU.NoCores = (uint32_t)bitfield(p->CPU.CPUID[CPUID_88][2], 7, 0) + 1;
p->CPU.NoThreads = (uint32_t)bitfield(p->CPU.CPUID[CPUID_1][1], 23, 16);
if (p->CPU.NoCores == 0)
{
p->CPU.NoCores = 1;
}
// MSR is *NOT* available on the Intel Atom CPU
//workaround for N270. I don't know why it detected wrong
if ((p->CPU.Model == CPUID_MODEL_ATOM) && (strstr(p->CPU.BrandString, "270")))
{
p->CPU.NoCores = 1;
p->CPU.NoThreads = 2;
}
if (p->CPU.NoThreads < p->CPU.NoCores)
{
p->CPU.NoThreads = p->CPU.NoCores;
}
break;
default:
stop("Unsupported CPU detected! System halted.");
//workaround for Quad
if ( strstr(p->CPU.BrandString, "Quad") )
{
p->CPU.NoCores = 4;
p->CPU.NoThreads = 4;
}
}
//workaround for N270. I don't know why it detected wrong
// MSR is *NOT* available on the Intel Atom CPU
if ((p->CPU.Model == CPUID_MODEL_ATOM) && (strstr(p->CPU.BrandString, "270")))
{
p->CPU.NoCores = 1;
p->CPU.NoThreads = 2;
}
/* setup features */
if ((bit(23) & p->CPU.CPUID[CPUID_1][3]) != 0)
{
p->CPU.Features |= CPU_FEATURE_MSR;
}
if ((p->CPU.Vendor == CPUID_VENDOR_INTEL) && (p->CPU.NoThreads > p->CPU.NoCores))
if ((p->CPU.NoThreads > p->CPU.NoCores))
{
p->CPU.Features |= CPU_FEATURE_HTT;
}
pic0_mask = inb(0x21U);
outb(0x21U, 0xFFU); // mask PIC0 interrupts for duration of timing tests
tscFrequency = measure_tsc_frequency();
DBG("cpu freq classic = 0x%016llx\n", tscFrequency);
uint64_t cycles;
cycles = timeRDTSC();
tscFreq = rtc_set_cyc_per_sec(cycles);
DBG("cpu freq classic = 0x%016llx\n", tscFreq);
// if usual method failed
if ( tscFrequency < 1000 )//TEST
if ( tscFreq < 1000 )//TEST
{
tscFrequency = timeRDTSC() * 20;//measure_tsc_frequency();
tscFreq = timeRDTSC() * 20;//measure_tsc_frequency();
// DBG("cpu freq timeRDTSC = 0x%016llx\n", tscFrequency);
}
else
{
// DBG("cpu freq timeRDTSC = 0x%016llxn", timeRDTSC() * 20);
}
fsbFrequency = 0;
cpuFrequency = 0;
if (p->CPU.Vendor == CPUID_VENDOR_INTEL && ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)))
if (IsIntelCPU() && ((p->CPU.Family == 0x06 && p->CPU.Model >= 0x0c) || (p->CPU.Family == 0x0f && p->CPU.Model >= 0x03)))
{
int intelCPU = p->CPU.Model;
if (p->CPU.Family == 0x06)
if (bus_ratio_max)
{
fsbFrequency = (tscFrequency / bus_ratio_max);
busFrequency = (tscFreq / bus_ratio_max);
}
//valv: Turbo Ratio Limit
{
msr = rdmsr64(MSR_TURBO_RATIO_LIMIT);
cpuFrequency = bus_ratio_max * fsbFrequency;
cpuFrequency = bus_ratio_max * busFrequency;
max_ratio = bus_ratio_max * 10;
}
else
{
cpuFrequency = tscFrequency;
cpuFrequency = tscFreq;
}
if ((getValueForKey(kbusratio, &newratio, &len, &bootInfo->chameleonConfig)) && (len <= 4))
{
// extreme overclockers may love 320 ;)
if ((max_ratio >= min_ratio) && (max_ratio <= 320))
{
cpuFrequency = (fsbFrequency * max_ratio) / 10;
cpuFrequency = (busFrequency * max_ratio) / 10;
if (len >= 3)
{
maxdiv = 1;
p->CPU.MaxRatio = max_ratio;
p->CPU.MinRatio = min_ratio;
myfsb = fsbFrequency / 1000000;
myfsb = busFrequency / 1000000;
verbose("Sticking with [BCLK: %dMhz, Bus-Ratio: %d]\n", myfsb, max_ratio/10); // Bungo: fixed wrong Bus-Ratio readout
currcoef = bus_ratio_max;
{
if (maxdiv)
{
fsbFrequency = ((tscFrequency * 2) / ((maxcoef * 2) + 1));
busFrequency = ((tscFreq * 2) / ((maxcoef * 2) + 1));
}
else
{
fsbFrequency = (tscFrequency / maxcoef);
busFrequency = (tscFreq / maxcoef);
}
if (currdiv)
{
cpuFrequency = (fsbFrequency * ((currcoef * 2) + 1) / 2);
cpuFrequency = (busFrequency * ((currcoef * 2) + 1) / 2);
}
else
{
cpuFrequency = (fsbFrequency * currcoef);
cpuFrequency = (busFrequency * currcoef);
}
DBG("max: %d%s current: %d%s\n", maxcoef, maxdiv ? ".5" : "",currcoef, currdiv ? ".5" : "");
p->CPU.Features |= CPU_FEATURE_MOBILE;
}
}
else if ((p->CPU.Vendor == CPUID_VENDOR_AMD) && (p->CPU.Family == 0x0f))
else if (IsAmdCPU())
{
switch(p->CPU.ExtFamily)
switch(p->CPU.Family)
{
case 0x00: //* K8 *//
msr = rdmsr64(K8_FIDVID_STATUS);
maxcoef = bitfield(msr, 21, 16) / 2 + 4;
currcoef = bitfield(msr, 5, 0) / 2 + 4;
case 0xF: /* K8 */
{
uint64_t fidvid = 0;
uint64_t cpuMult;
uint64_t fid;
fidvid = rdmsr64(K8_FIDVID_STATUS);
fid = bitfield(fidvid, 5, 0);
cpuMult = (fid + 8) / 2;
currcoef = cpuMult;
cpuMultN2 = (fidvid & (uint64_t)bit(0));
currdiv = cpuMultN2;
/****** Addon END ******/
}
break;
case 0x01: //* K10 *//
msr = rdmsr64(K10_COFVID_STATUS);
do_cpuid2(0x00000006, 0, p->CPU.CPUID[CPUID_6]);
// EffFreq: effective frequency interface
if (bitfield(p->CPU.CPUID[CPUID_6][2], 0, 0) == 1)
case 0x10: /*** AMD Family 10h ***/
{
uint64_t cofvid = 0;
uint64_t cpuMult;
uint64_t divisor = 0;
uint64_t did;
uint64_t fid;
cofvid = rdmsr64(K10_COFVID_STATUS);
did = bitfield(cofvid, 8, 6);
fid = bitfield(cofvid, 5, 0);
if (did == 0) divisor = 2;
else if (did == 1) divisor = 4;
else if (did == 2) divisor = 8;
else if (did == 3) divisor = 16;
else if (did == 4) divisor = 32;
cpuMult = (fid + 16) / divisor;
currcoef = cpuMult;
cpuMultN2 = (cofvid & (uint64_t)bit(0));
currdiv = cpuMultN2;
/****** Addon END ******/
}
break;
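/*
 * Worked example for the Family 10h decode above (illustrative register values,
 * not from this commit): did = 0 selects divisor 2, and fid = 14 gives
 * cpuMult = (14 + 16) / 2 = 15, i.e. a 15x core multiplier.
 */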
case 0x11: /*** AMD Family 11h ***/
{
uint64_t cofvid = 0;
uint64_t cpuMult;
uint64_t divisor = 0;
uint64_t did;
uint64_t fid;
cofvid = rdmsr64(K10_COFVID_STATUS);
did = bitfield(cofvid, 8, 6);
fid = bitfield(cofvid, 5, 0);
if (did == 0) divisor = 2;
else if (did == 1) divisor = 4;
else if (did == 2) divisor = 8;
else if (did == 3) divisor = 16;
else if (did == 4) divisor = 32;
cpuMult = (fid + 8) / divisor;
currcoef = cpuMult;
cpuMultN2 = (cofvid & (uint64_t)bit(0));
currdiv = cpuMultN2;
/****** Addon END ******/
}
break;
case 0x12: /*** AMD Family 12h ***/
{
// 8:4 CpuFid: current CPU core frequency ID
// 3:0 CpuDid: current CPU core divisor ID
uint64_t prfsts,CpuFid,CpuDid;
prfsts = rdmsr64(K10_COFVID_STATUS);
CpuDid = bitfield(prfsts, 3, 0) ;
CpuFid = bitfield(prfsts, 8, 4) ;
uint64_t divisor;
switch (CpuDid)
{
//uint64_t mperf = measure_mperf_frequency();
uint64_t aperf = measure_aperf_frequency();
cpuFrequency = aperf;
case 0: divisor = 1; break;
case 1: divisor = (3/2); break;
case 2: divisor = 2; break;
case 3: divisor = 3; break;
case 4: divisor = 4; break;
case 5: divisor = 6; break;
case 6: divisor = 8; break;
case 7: divisor = 12; break;
case 8: divisor = 16; break;
default: divisor = 1; break;
}
// NOTE: tsc runs at the maccoeff (non turbo)
//*not* at the turbo frequency.
maxcoef = bitfield(msr, 54, 49) / 2 + 4;
currcoef = bitfield(msr, 5, 0) + 0x10;
currdiv = 2 << bitfield(msr, 8, 6);
currcoef = (CpuFid + 0x10) / divisor;
cpuMultN2 = (prfsts & (uint64_t)bit(0));
currdiv = cpuMultN2;
}
break;
case 0x05: //* K14 *//
msr = rdmsr64(K10_COFVID_STATUS);
currcoef = (bitfield(msr, 54, 49) + 0x10) << 2;
currdiv = (bitfield(msr, 8, 4) + 1) << 2;
case 0x14: /* K14 */
{
// 8:4: current CPU core divisor ID most significant digit
// 3:0: current CPU core divisor ID least significant digit
uint64_t prfsts;
prfsts = rdmsr64(K10_COFVID_STATUS);
uint64_t CpuDidMSD,CpuDidLSD;
CpuDidMSD = bitfield(prfsts, 8, 4) ;
CpuDidLSD = bitfield(prfsts, 3, 0) ;
uint64_t frequencyId = 0x10;
currcoef = (frequencyId + 0x10) /
(CpuDidMSD + (CpuDidLSD * 0.25) + 1);
currdiv = ((CpuDidMSD) + 1) << 2;
currdiv += bitfield(msr, 3, 0);
cpuMultN2 = (prfsts & (uint64_t)bit(0));
currdiv = cpuMultN2;
}
break;
case 0x02: //* K11 *//
// not implimented
case 0x15: /*** AMD Family 15h ***/
case 0x06: /*** AMD Family 06h ***/
{
uint64_t cofvid = 0;
uint64_t cpuMult;
uint64_t divisor = 0;
uint64_t did;
uint64_t fid;
cofvid = rdmsr64(K10_COFVID_STATUS);
did = bitfield(cofvid, 8, 6);
fid = bitfield(cofvid, 5, 0);
if (did == 0) divisor = 2;
else if (did == 1) divisor = 4;
else if (did == 2) divisor = 8;
else if (did == 3) divisor = 16;
else if (did == 4) divisor = 32;
cpuMult = (fid + 16) / divisor;
currcoef = cpuMult;
cpuMultN2 = (cofvid & (uint64_t)bit(0));
currdiv = cpuMultN2;
}
break;
}
if (maxcoef)
{
if (currdiv)
case 0x16: /*** AMD Family 16h kabini ***/
{
if (!currcoef)
{
currcoef = maxcoef;
}
uint64_t cofvid = 0;
uint64_t cpuMult;
uint64_t divisor = 0;
uint64_t did;
uint64_t fid;
if (!cpuFrequency)
{
fsbFrequency = ((tscFrequency * currdiv) / currcoef);
}
else
{
fsbFrequency = ((cpuFrequency * currdiv) / currcoef);
}
DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
cofvid = rdmsr64(K10_COFVID_STATUS);
did = bitfield(cofvid, 8, 6);
fid = bitfield(cofvid, 5, 0);
if (did == 0) divisor = 1;
else if (did == 1) divisor = 2;
else if (did == 2) divisor = 4;
else if (did == 3) divisor = 8;
else if (did == 4) divisor = 16;
cpuMult = (fid + 16) / divisor;
currcoef = cpuMult;
cpuMultN2 = (cofvid & (uint64_t)bit(0));
currdiv = cpuMultN2;
/****** Addon END ******/
}
else
break;
default:
{
if (!cpuFrequency)
{
fsbFrequency = (tscFrequency / maxcoef);
}
else
{
fsbFrequency = (cpuFrequency / maxcoef);
}
DBG("%d\n", currcoef);
typedef unsigned long long vlong;
uint64_t prfsts;
prfsts = rdmsr64(K10_COFVID_STATUS);
uint64_t r;
vlong hz;
r = (prfsts>>6) & 0x07;
hz = (((prfsts & 0x3f)+0x10)*100000000ll)/(1<<r);
currcoef = hz / (200 * Mega);
}
}
else if (currcoef)
if (currcoef)
{
if (currdiv)
{
fsbFrequency = ((tscFrequency * currdiv) / currcoef);
busFrequency = ((tscFreq * 2) / ((currcoef * 2) + 1));
busFCvtt2n = ((1 * Giga) << 32) / busFrequency;
tscFCvtt2n = busFCvtt2n * 2 / (1 + (2 * currcoef));
//cpuFrequency = (busFrequency * ((currcoef * 2) + 1) / 2);//((1 * Giga) << 32) / tscFCvtt2n;
cpuFrequency = ((1 * Giga) << 32) / tscFCvtt2n;
//cpuFrequency = (busFrequency * ((currcoef * 2) + 1) / 2);
DBG("%d.%d\n", currcoef / currdiv, ((currcoef % currdiv) * 100) / currdiv);
}
else
{
fsbFrequency = (tscFrequency / currcoef);
busFrequency = (tscFreq / currcoef);
busFCvtt2n = ((1 * Giga) << 32) / busFrequency;
tscFCvtt2n = busFCvtt2n / currcoef;
cpuFrequency = ((1 * Giga) << 32) / tscFCvtt2n;
DBG("%d\n", currcoef);
}
}
if (!cpuFrequency)
else if (!cpuFrequency)
{
cpuFrequency = tscFrequency;
cpuFrequency = tscFreq;
}
}
#if 0
if (!fsbFrequency)
if (!busFrequency)
{
fsbFrequency = (DEFAULT_FSB * 1000);
DBG("CPU: fsbFrequency = 0! using the default value for FSB!\n");
cpuFrequency = tscFrequency;
busFrequency = (DEFAULT_FSB * 1000);
DBG("CPU: busFrequency = 0! using the default value for FSB!\n");
cpuFrequency = tscFreq;
}
DBG("cpu freq = 0x%016llxn", timeRDTSC() * 20);
#endif
outb(0x21U, pic0_mask); // restore PIC0 interrupts
p->CPU.MaxCoef = maxcoef;
p->CPU.MaxDiv = maxdiv;
p->CPU.MaxCoef = maxcoef = currcoef;
p->CPU.MaxDiv = maxdiv = currdiv;
p->CPU.CurrCoef = currcoef;
p->CPU.CurrDiv = currdiv;
p->CPU.TSCFrequency = tscFrequency;
p->CPU.FSBFrequency = fsbFrequency;
p->CPU.TSCFrequency = tscFreq;
p->CPU.FSBFrequency = busFrequency;
p->CPU.CPUFrequency = cpuFrequency;
// keep formatted with spaces instead of tabs
trunk/i386/libsaio/platform.h
#define CPUID_6 6
#define CPUID_80 7
#define CPUID_81 8
#define CPUID_88 9
#define CPUID_MAX 10
#define CPUID_85 9
#define CPUID_86 10
#define CPUID_87 11
#define CPUID_88 12
#define CPUID_MAX 13
#define CPUID_MODEL_ANY 0x00
#define CPUID_MODEL_UNKNOWN 0x01
#define CPUID_VENDOR_INTEL 0x756E6547
#define CPUID_VENDOR_AMD 0x68747541
/* This spells out "GenuineIntel". */
//#define is_intel \
// ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69
/* This spells out "AuthenticAMD". */
//#define is_amd \
// ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65
/* Unknown CPU */
#define CPU_STRING_UNKNOWN"Unknown CPU Type"
trunk/i386/libsaio/cpu.h
extern void scan_cpu(PlatformInfo_t *);
struct clock_frequency_info_t
{
unsigned long bus_clock_rate_hz;
unsigned long cpu_clock_rate_hz;
unsigned long dec_clock_rate_hz;
unsigned long bus_clock_rate_num;
unsigned long bus_clock_rate_den;
unsigned long bus_to_cpu_rate_num;
unsigned long bus_to_cpu_rate_den;
unsigned long bus_to_dec_rate_num;
unsigned long bus_to_dec_rate_den;
unsigned long timebase_frequency_hz;
unsigned long timebase_frequency_num;
unsigned long timebase_frequency_den;
unsigned long long bus_frequency_hz;
unsigned long long bus_frequency_min_hz;
unsigned long long bus_frequency_max_hz;
unsigned long long cpu_frequency_hz;
unsigned long long cpu_frequency_min_hz;
unsigned long long cpu_frequency_max_hz;
unsigned long long prf_frequency_hz;
unsigned long long prf_frequency_min_hz;
unsigned long long prf_frequency_max_hz;
unsigned long long mem_frequency_hz;
unsigned long long mem_frequency_min_hz;
unsigned long long mem_frequency_max_hz;
unsigned long long fix_frequency_hz;
};
typedef struct clock_frequency_info_t clock_frequency_info_t;
extern clock_frequency_info_t gPEClockFrequencyInfo;
struct mach_timebase_info
{
uint32_t numer;
uint32_t denom;
};
struct hslock
{
int lock_data;
};
typedef struct hslock hw_lock_data_t, *hw_lock_t;
#define hw_lock_addr(hwl)(&((hwl).lock_data))
typedef struct uslock_debug
{
void *lock_pc; /* pc where lock operation began */
void *lock_thread; /* thread that acquired lock */
unsigned long duration[2];
unsigned short state;
unsigned char lock_cpu;
void *unlock_thread; /* last thread to release lock */
unsigned char unlock_cpu;
void *unlock_pc; /* pc where lock operation ended */
} uslock_debug;
typedef struct slock
{
hw_lock_data_t interlock; /* must be first... see lock.c */
unsigned short lock_type; /* must be second... see lock.c */
#define USLOCK_TAG 0x5353
uslock_debug debug;
} usimple_lock_data_t, *usimple_lock_t;
#if !defined(decl_simple_lock_data)
typedef usimple_lock_data_t *simple_lock_t;
typedef usimple_lock_data_t simple_lock_data_t;
#define decl_simple_lock_data(class,name) \
class simple_lock_data_t name;
#endif /* !defined(decl_simple_lock_data) */
typedef struct mach_timebase_info *mach_timebase_info_t;
typedef struct mach_timebase_info mach_timebase_info_data_t;
// DFE: These two constants come from Linux except CLOCK_TICK_RATE replaced with CLKNUM
#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
#define CALIBRATE_LATCH ((CLKNUM * CALIBRATE_TIME_MSEC + 1000/2)/1000)
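As a worked instance of the two constants above: CLKNUM is defined elsewhere in the tree and is assumed here to be the standard 8254 PIT input clock of 1193182 Hz, so CALIBRATE_LATCH = (1193182 * 30 + 500) / 1000 = 35795 ticks, i.e. the latch value that makes PIT counter 2 expire after roughly 30 ms.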
trunk/i386/libsaio/fake_efi.c
}
// ==========================================================================
// ErmaC
static inline uint64_t getCPUTick(void)
EFI_UINT32 getCPUTick(void)
{
uint32_t lowest;
uint32_t highest;
__asm__ volatile ("rdtsc" : "=a" (lowest), "=d" (highest));
return (uint64_t) highest << 32 | lowest;
uint32_t out;
__asm__ volatile (
"rdtsc\n"
"shl $32,%%edx\n"
"or %%edx,%%eax\n"
: "=a" (out)
:
: "%edx"
);
return out;
}
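For reference, the pre-change getCPUTick() combined EDX:EAX to keep the full 64-bit timestamp; a standalone sketch of that pattern (the read_tsc64 name and wrapper are illustrative, not part of the commit):

/* Editor's sketch, not part of the commit. */
#include <stdint.h>

static inline uint64_t read_tsc64(void)
{
	uint32_t lo, hi;
	__asm__ volatile ("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}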
/*==========================================================================
trunk/i386/modules/AcpiCodec/acpi_codec.c
#include "pci.h"
#include "pci_root.h"
boolean_t ForceAmdCpu = false;
/* For AMD CPU's */
boolean_t IsAmdCPU(void)
{
if (ForceAmdCpu)
{
return true;
}
uint32_t ourcpuid[4];
do_cpuid(0, ourcpuid);
if (
/* This spells out "AuthenticAMD". */
ourcpuid[ebx] == 0x68747541 && // Auth
ourcpuid[ecx] == 0x444D4163 && // cAMD
ourcpuid[edx] == 0x69746E65) // enti
{
return true;
}
return false;
};
/* For Intel CPU's */
boolean_t IsIntelCPU(void)
{
uint32_t ourcpuid[4];
do_cpuid(0, ourcpuid);
if (
/* This spells out "GenuineIntel". */
ourcpuid[ebx] == 0x756E6547 && // Genu
ourcpuid[ecx] == 0x6C65746E && // ntel
ourcpuid[edx] == 0x49656E69) // ineI
{
return true;
}
if (!IsAmdCPU())
{
return true;
}
return false;
}
U64 rsd_p;
ACPI_TABLES acpi_tables;
U32 uuid32;
boolean_t fine_grain_clock_mod = 0;
#if BUILD_ACPI_TSS || pstate_power_support
if (Platform.CPU.CPUID[CPUID_0][0] >= 0x5) {
/*
* Extract the Monitor/Mwait Leaf info:
*/
sub_Cstates = Platform.CPU.CPUID[CPUID_5][3];
extensions = Platform.CPU.CPUID[CPUID_5][2];
}
if (Platform.CPU.CPUID[CPUID_0][0] >= 6)
if(IsIntelCPU())
{
dynamic_acceleration = bitfield(Platform.CPU.CPUID[CPUID_6][0], 1, 1); // "Dynamic Acceleration Technology (Turbo Mode)"
invariant_APIC_timer = bitfield(Platform.CPU.CPUID[CPUID_6][0], 2, 2); // "Invariant APIC Timer"
fine_grain_clock_mod = bitfield(Platform.CPU.CPUID[CPUID_6][0], 4, 4);
}
cpu->turbo_available = (U32)dynamic_acceleration;
if (Platform.CPU.CPUID[CPUID_0][0] >= 0x5)
{
/*
* Extract the Monitor/Mwait Leaf info:
*/
sub_Cstates = Platform.CPU.CPUID[CPUID_5][3];
extensions = Platform.CPU.CPUID[CPUID_5][2];
}
{
U32 temp32 = 0;
U64 temp64 = 0;
int tdp;
if (getIntForKey("TDP", &tdp, &bootInfo->chameleonConfig))
if (Platform.CPU.CPUID[CPUID_0][0] >= 6)
{
temp32 = (U32) (tdp*8) ;
dynamic_acceleration = bitfield(Platform.CPU.CPUID[CPUID_6][0], 1, 1); // "Dynamic Acceleration Technology (Turbo Mode)"
invariant_APIC_timer = bitfield(Platform.CPU.CPUID[CPUID_6][0], 2, 2); // "Invariant APIC Timer"
fine_grain_clock_mod = bitfield(Platform.CPU.CPUID[CPUID_6][0], 4, 4);
}
cpu->turbo_available = (U32)dynamic_acceleration;
int tdc;
if (getIntForKey("TDC", &tdc, &bootInfo->chameleonConfig))
{
U32 temp32 = 0;
U64 temp64 = 0;
int tdp;
if (getIntForKey("TDP", &tdp, &bootInfo->chameleonConfig))
{
temp32 = (U32) (temp32) | tdc<<16 ;
temp32 = (U32) (tdp*8) ;
int tdc;
if (getIntForKey("TDC", &tdc, &bootInfo->chameleonConfig))
{
temp32 = (U32) (temp32) | tdc<<16 ;
}
else if (tdp)
{
temp32 = (U32) (temp32) | ((tdp)*8)<<16 ;
}
}
else if (tdp)
else if (!is_sandybridge() && !is_jaketown())
{
temp32 = (U32) (temp32) | ((tdp)*8)<<16 ;
if (turbo_enabled && cpu->turbo_available)
{
temp64 = rdmsr64(MSR_TURBO_POWER_CURRENT_LIMIT);
temp32 = (U32)temp64;
}
else
{
// Unfortunately, Intel don't provide a better method for non turbo processors
// and it will give a TDP of 95w (for ex. mine is 65w) , to fix this issue,
// you can set this value by simply adding the option TDP = XX (XX is an integer)
// in your boot.plist
temp32 = (U32)0x02a802f8;
}
}
}
else if (!is_sandybridge() && !is_jaketown())
{
if (turbo_enabled && cpu->turbo_available)
if (temp32)
{
temp64 = rdmsr64(MSR_TURBO_POWER_CURRENT_LIMIT);
temp32 = (U32)temp64;
}
else
{
// Unfortunately, Intel don't provide a better method for non turbo processors
// and it will give a TDP of 95w (for ex. mine is 65w) , to fix this issue,
// you can set this value by simply adding the option TDP = XX (XX is an integer)
// in your boot.plist
temp32 = (U32)0x02a802f8;
cpu->tdp_limit = ( temp32 & 0x7fff );
cpu->tdc_limit = ( (temp32 >> 16) & 0x7fff );
}
}
}
if (temp32)
{
cpu->tdp_limit = ( temp32 & 0x7fff );
cpu->tdc_limit = ( (temp32 >> 16) & 0x7fff );
}
}
#endif
switch (Platform.CPU.Family)
{
case 0x06:
switch (Platform.CPU.Family)
{
switch (Platform.CPU.Model)
case 0x06:
{
case CPUID_MODEL_DOTHAN:
case CPUID_MODEL_YONAH: // Yonah
case CPUID_MODEL_MEROM: // Merom
case CPUID_MODEL_PENRYN: // Penryn
case CPUID_MODEL_ATOM: // Intel Atom (45nm)
switch (Platform.CPU.Model)
{
case CPUID_MODEL_DOTHAN:
case CPUID_MODEL_YONAH: // Yonah
case CPUID_MODEL_MEROM: // Merom
case CPUID_MODEL_PENRYN: // Penryn
case CPUID_MODEL_ATOM: // Intel Atom (45nm)
{
cpu->core_c1_supported = ((sub_Cstates >> 4) & 0xf) ? 1 : 0;
cpu->core_c4_supported = ((sub_Cstates >> 16) & 0xf) ? 1 : 0;
cpu->core_c1_supported = ((sub_Cstates >> 4) & 0xf) ? 1 : 0;
cpu->core_c4_supported = ((sub_Cstates >> 16) & 0xf) ? 1 : 0;
if (Platform.CPU.Model == CPUID_MODEL_ATOM)
{
cpu->core_c2_supported = cpu->core_c3_supported = ((sub_Cstates >> 8) & 0xf) ? 1 : 0;
cpu->core_c6_supported = ((sub_Cstates >> 12) & 0xf) ? 1 : 0;
if (Platform.CPU.Model == CPUID_MODEL_ATOM)
{
cpu->core_c2_supported = cpu->core_c3_supported = ((sub_Cstates >> 8) & 0xf) ? 1 : 0;
cpu->core_c6_supported = ((sub_Cstates >> 12) & 0xf) ? 1 : 0;
}
else
{
cpu->core_c3_supported = ((sub_Cstates >> 12) & 0xf) ? 1 : 0;
cpu->core_c2_supported = ((sub_Cstates >> 8) & 0xf) ? 1 : 0;
cpu->core_c6_supported = 0;
}
else
{
cpu->core_c3_supported = ((sub_Cstates >> 12) & 0xf) ? 1 : 0;
cpu->core_c2_supported = ((sub_Cstates >> 8) & 0xf) ? 1 : 0;
cpu->core_c6_supported = 0;
}
}
cpu->core_c7_supported = 0;
cpu->core_c7_supported = 0;
#if BETA
GetMaxRatio(&cpu->max_ratio_as_mfg);
U64 msr = rdmsr64(MSR_IA32_PERF_STATUS);
U16 idlo = (msr >> 48) & 0xffff;
U16 idhi = (msr >> 32) & 0xffff;
cpu->min_ratio = (U32) (idlo >> 8) & 0xff;
cpu->max_ratio_as_cfg = (U32) (idhi >> 8) & 0xff;
GetMaxRatio(&cpu->max_ratio_as_mfg);
U64 msr = rdmsr64(MSR_IA32_PERF_STATUS);
U16 idlo = (msr >> 48) & 0xffff;
U16 idhi = (msr >> 32) & 0xffff;
cpu->min_ratio = (U32) (idlo >> 8) & 0xff;
cpu->max_ratio_as_cfg = (U32) (idhi >> 8) & 0xff;
#else
if (Platform.CPU.MaxCoef)
{
if (Platform.CPU.MaxDiv)
if (Platform.CPU.MaxCoef)
{
cpu->max_ratio_as_cfg = cpu->max_ratio_as_mfg = (U32) (Platform.CPU.MaxCoef * 10) + 5;
if (Platform.CPU.MaxDiv)
{
cpu->max_ratio_as_cfg = cpu->max_ratio_as_mfg = (U32) (Platform.CPU.MaxCoef * 10) + 5;
}
else
{
cpu->max_ratio_as_cfg = cpu->max_ratio_as_mfg = (U32) Platform.CPU.MaxCoef * 10;
}
}
else
{
cpu->max_ratio_as_cfg = cpu->max_ratio_as_mfg = (U32) Platform.CPU.MaxCoef * 10;
}
}
#endif
break;
}
case CPUID_MODEL_FIELDS:
case CPUID_MODEL_DALES:
case CPUID_MODEL_DALES_32NM:
case CPUID_MODEL_NEHALEM:
case CPUID_MODEL_NEHALEM_EX:
case CPUID_MODEL_WESTMERE:
case CPUID_MODEL_WESTMERE_EX:
case CPUID_MODEL_SANDYBRIDGE:
case CPUID_MODEL_JAKETOWN:
{
cpu->core_c1_supported = ((sub_Cstates >> 4) & 0xf) ? 1 : 0;
cpu->core_c3_supported = ((sub_Cstates >> 8) & 0xf) ? 1 : 0;
cpu->core_c6_supported = ((sub_Cstates >> 12) & 0xf) ? 1 : 0;
cpu->core_c7_supported = ((sub_Cstates >> 16) & 0xf) ? 1 : 0;
cpu->core_c2_supported = 0;
cpu->core_c4_supported = 0;
break;
}
case CPUID_MODEL_FIELDS:
case CPUID_MODEL_DALES:
case CPUID_MODEL_DALES_32NM:
case CPUID_MODEL_NEHALEM:
case CPUID_MODEL_NEHALEM_EX:
case CPUID_MODEL_WESTMERE:
case CPUID_MODEL_WESTMERE_EX:
case CPUID_MODEL_SANDYBRIDGE:
case CPUID_MODEL_JAKETOWN:
{
GetMaxRatio(&cpu->max_ratio_as_mfg);
U64 platform_info = rdmsr64(MSR_PLATFORM_INFO);
cpu->max_ratio_as_cfg = (U32) ((U32)platform_info >> 8) & 0xff;
cpu->min_ratio = (U32) ((platform_info >> 40) & 0xff);
cpu->core_c1_supported = ((sub_Cstates >> 4) & 0xf) ? 1 : 0;
cpu->core_c3_supported = ((sub_Cstates >> 8) & 0xf) ? 1 : 0;
cpu->core_c6_supported = ((sub_Cstates >> 12) & 0xf) ? 1 : 0;
cpu->core_c7_supported = ((sub_Cstates >> 16) & 0xf) ? 1 : 0;
cpu->core_c2_supported = 0;
cpu->core_c4_supported = 0;
cpu->tdc_tdp_limits_for_turbo_flag = (platform_info & (1ULL << 29)) ? 1 : 0;
cpu->ratio_limits_for_turbo_flag = (platform_info & (1ULL << 28)) ? 1 : 0;
cpu->xe_available = cpu->tdc_tdp_limits_for_turbo_flag | cpu->ratio_limits_for_turbo_flag;
GetMaxRatio(&cpu->max_ratio_as_mfg);
U64 platform_info = rdmsr64(MSR_PLATFORM_INFO);
cpu->max_ratio_as_cfg = (U32) ((U32)platform_info >> 8) & 0xff;
cpu->min_ratio = (U32) ((platform_info >> 40) & 0xff);
cpu->tdc_tdp_limits_for_turbo_flag = (platform_info & (1ULL << 29)) ? 1 : 0;
cpu->ratio_limits_for_turbo_flag = (platform_info & (1ULL << 28)) ? 1 : 0;
cpu->xe_available = cpu->tdc_tdp_limits_for_turbo_flag | cpu->ratio_limits_for_turbo_flag;
if (is_sandybridge() || is_jaketown())
{
cpu->package_power_limit = rdmsr64(MSR_PKG_RAPL_POWER_LIMIT);
cpu->package_power_sku_unit = rdmsr64(MSR_RAPL_POWER_UNIT);
if (is_sandybridge() || is_jaketown())
{
cpu->package_power_limit = rdmsr64(MSR_PKG_RAPL_POWER_LIMIT);
cpu->package_power_sku_unit = rdmsr64(MSR_RAPL_POWER_UNIT);
}
break;
}
break;
default:
verbose ("Unsupported CPU\n");
return /*(0)*/;
break;
}
default:
verbose ("Unsupported CPU\n");
return /*(0)*/;
break;
}
default:
break;
}
default:
break;
}
else
{
extensions = Platform.CPU.CPUID[CPUID_5][2];
}
cpu->mwait_supported = (extensions & (1UL << 0)) ? 1 : 0;
#if BUILD_ACPI_TSS || pstate_power_support
if (is_sandybridge() || is_jaketown())
{
printf("package_power_limit : %d\n",cpu->package_power_limit);
printf("package_power_sku_unit : %d\n",cpu->package_power_sku_unit);
}
#endif
DBG("invariant_apic_timer_flag : %d\n",cpu->invariant_apic_timer_flag);
#endif
return (0);
}
if(IsIntelCPU())
{
#if UNUSED
struct p_state initial;


Revision: 2647