#include "kmp_wrapper_getpid.h"

static const char *unknown = "unknown";

#if KMP_ARCH_X86 || KMP_ARCH_X86_64

/* Verbosity level for the KA_TRACE output produced by the CPUID query below. */
static int trace_level = 5;
/* Return the id of the physical package the given initial APIC id belongs to:
   the logical-processor bits (log_per_phy rounded up to a power of 2) are
   shifted out of apic_id. */
static int
__kmp_get_physical_id( int log_per_phy, int apic_id )
{
    int index_lsb, index_msb, temp;

    if (log_per_phy > 1) {
        /* Find the positions of the lowest and highest set bits of log_per_phy. */
        index_lsb = 0;
        index_msb = 31;
        temp = log_per_phy;
        while ( (temp & 1) == 0 ) {
            temp >>= 1;
            index_lsb++;
        }
        temp = log_per_phy;
        while ( (temp & 0x80000000) == 0 ) {
            temp <<= 1;
            index_msb--;
        }
        /* If more than one bit is set, round up to the next power of 2. */
        if (index_lsb != index_msb) index_msb++;
        return ( (int) (apic_id >> index_msb) );
    }

    return apic_id;
}
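/* Worked example (illustrative): with log_per_phy = 4 the logical-processor field
   is 2 bits wide, so an initial APIC id of 0x0B (binary 1011) maps to physical
   package id 0x0B >> 2 == 2. */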
/* Return the id of the logical processor within its physical package:
   apic_id masked with (log_per_phy rounded up to a power of 2) - 1. */
static int
__kmp_get_logical_id( int log_per_phy, int apic_id )
{
    unsigned current_bit;
    int bits_seen;

    if (log_per_phy <= 1)
        return ( 0 );

    /* Count the set bits of log_per_phy; afterwards current_bit is the next
       power of 2 above its highest set bit. */
    bits_seen = 0;
    for (current_bit = 1; log_per_phy != 0; current_bit <<= 1) {
        if ( log_per_phy & current_bit ) {
            log_per_phy &= ~current_bit;
            bits_seen++;
        }
    }

    /* If log_per_phy was an exact power of 2, step back down to it. */
    if (bits_seen == 1) {
        current_bit >>= 1;
    }

    return ( (int) ((current_bit - 1) & apic_id) );
}
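/* Worked example (illustrative): with log_per_phy = 4 the mask is
   current_bit - 1 == 3, so APIC id 0x0B (binary 1011) yields logical id
   0x0B & 3 == 3 within its package. */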
/* Parse a frequency given as a number followed by a unit ("MHz", "GHz", or
   "THz"), e.g. the tail of the CPU brand string, and return it in Hz.
   Returns ~0 if the string is missing, malformed, or has an unknown unit. */
static kmp_uint64
__kmp_parse_frequency(
    char const * frequency   // I: Float number followed by unit.
) {

    double       value  = 0.0;
    char const * unit   = NULL;
    kmp_uint64   result = ~ 0;

    if ( frequency == NULL ) {
        return result;
    }
    value = strtod( frequency, (char * *) & unit );   // strtod() wants "char **", not "char const **".
    if ( 0 < value && value <= DBL_MAX ) {            // Reasonable value (no overflow/underflow).
        if ( strcmp( unit, "MHz" ) == 0 ) {
            value = value * 1.0E+6;
        } else if ( strcmp( unit, "GHz" ) == 0 ) {
            value = value * 1.0E+9;
        } else if ( strcmp( unit, "THz" ) == 0 ) {
            value = value * 1.0E+12;
        } else {                                      // Unknown unit.
            return result;
        }
        result = (kmp_uint64) value;
    }
    return result;

} // __kmp_parse_frequency
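/* Example (illustrative): for the string "2.80GHz", strtod() returns 2.80 and
   leaves unit pointing at "GHz", so the function returns 2800000000 (Hz); an
   unrecognized unit such as "KHz" yields the ~0 sentinel instead. */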
/* Query CPUID and fill in *p: signature, family/model/stepping, feature bits,
   hyper-threading layout, and the frequency parsed from the brand string. */
void
__kmp_query_cpuid( kmp_cpuinfo_t *p )
{
    struct kmp_cpuid buf;
    int max_arg;
    int log_per_phy;
    int cflush_size;

    /* CPUID leaf 0: EAX holds the highest supported standard leaf. */
    __kmp_x86_cpuid( 0, 0, &buf );
    KA_TRACE( trace_level, ( "INFO: CPUID %d: EAX=0x%08X EBX=0x%08X ECX=0x%08X EDX=0x%08X\n",
                             0, buf.eax, buf.ebx, buf.ecx, buf.edx ) );
    max_arg = buf.eax;

    if (max_arg >= 1) {
        int i;
        kmp_uint32 t, data[ 4 ];

        /* CPUID leaf 1: version information (EAX) and feature flags (EDX/ECX). */
        __kmp_x86_cpuid( 1, 0, &buf );
        KA_TRACE( trace_level, ( "INFO: CPUID %d: EAX=0x%08X EBX=0x%08X ECX=0x%08X EDX=0x%08X\n",
                                 1, buf.eax, buf.ebx, buf.ecx, buf.edx ) );

#define get_value(reg,lo,mask) ( ( ( reg ) >> ( lo ) ) & ( mask ) )

        /* EAX layout: stepping [3:0], model [7:4], family [11:8],
           extended model [19:16], extended family [27:20]. */
        p->signature = buf.eax;
        p->family    = get_value( buf.eax, 20, 0xff ) + get_value( buf.eax, 8, 0x0f );
        p->model     = ( get_value( buf.eax, 16, 0x0f ) << 4 ) + get_value( buf.eax, 4, 0x0f );
        p->stepping  = get_value( buf.eax, 0, 0x0f );

#undef get_value

        KA_TRACE( trace_level, ( " family = %d, model = %d, stepping = %d\n",
                                 p->family, p->model, p->stepping ) );
        /* Split EBX into bytes: [0] brand index, [1] CLFLUSH line size / 8,
           [2] logical processors per package, [3] initial APIC id. */
        for ( t = buf.ebx, i = 0; i < 4; t >>= 8, ++i ) {
            data[ i ] = (t & 0xff);
        }

        p->sse2 = ( buf.edx >> 26 ) & 1;
        /* Trace the interesting EDX feature flags from leaf 1. */
        if ( (buf.edx >> 4) & 1 ) {
            /* TSC - timestamp counter available */
            KA_TRACE( trace_level, ( " TSC" ) );
        }
        if ( (buf.edx >> 8) & 1 ) {
            /* CX8 - CMPXCHG8B instruction available */
            KA_TRACE( trace_level, ( " CX8" ) );
        }
        if ( (buf.edx >> 9) & 1 ) {
            /* APIC - local APIC present */
            KA_TRACE( trace_level, ( " APIC" ) );
        }
        if ( (buf.edx >> 15) & 1 ) {
            /* CMOV - conditional move instructions available */
            KA_TRACE( trace_level, ( " CMOV" ) );
        }
        if ( (buf.edx >> 18) & 1 ) {
            /* PSN - processor serial number available */
            KA_TRACE( trace_level, ( " PSN" ) );
        }
        if ( (buf.edx >> 19) & 1 ) {
            /* CLFLUSH - cache flush instruction available; data[1] is the
               line size in units of 8 bytes. */
            cflush_size = data[ 1 ] * 8;
            KA_TRACE( trace_level, ( " CLFLUSH(%db)", cflush_size ) );
        }
        if ( (buf.edx >> 21) & 1 ) {
            /* DTES - debug trace and EMON store */
            KA_TRACE( trace_level, ( " DTES" ) );
        }
        if ( (buf.edx >> 22) & 1 ) {
            /* ACPI - thermal monitor and software controlled clock */
            KA_TRACE( trace_level, ( " ACPI" ) );
        }
        if ( (buf.edx >> 23) & 1 ) {
            /* MMX - multimedia extensions */
            KA_TRACE( trace_level, ( " MMX" ) );
        }
        if ( (buf.edx >> 25) & 1 ) {
            /* SSE - SSE instructions */
            KA_TRACE( trace_level, ( " SSE" ) );
        }
        if ( (buf.edx >> 26) & 1 ) {
            /* SSE2 - SSE2 instructions */
            KA_TRACE( trace_level, ( " SSE2" ) );
        }
        if ( (buf.edx >> 27) & 1 ) {
            /* SLFSNP - self-snooping cache */
            KA_TRACE( trace_level, ( " SLFSNP" ) );
        }
        if ( (buf.edx >> 28) & 1 ) {
            /* HTT - the package can run more than one logical processor. */
            log_per_phy = data[ 2 ];    /* EBX[23:16]: logical processors per package. */
            p->apic_id  = data[ 3 ];    /* EBX[31:24]: initial APIC id. */
            KA_TRACE( trace_level, ( " HT(%d TPUs)", log_per_phy ) );

            if ( log_per_phy > 1 ) {
                /* default to 1k for HT-enabled processors (4k on OS X*) */
#if KMP_OS_DARWIN
                p->cpu_stackoffset = 4 * 1024;
#else
                p->cpu_stackoffset = 1 * 1024;
#endif
            }

            p->physical_id = __kmp_get_physical_id( log_per_phy, p->apic_id );
            p->logical_id  = __kmp_get_logical_id( log_per_phy, p->apic_id );
        }
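        /* Worked example (illustrative): with log_per_phy = 2, an initial APIC id
           of 3 splits into physical_id = 1 (3 >> 1) and logical_id = 1 (3 & 1),
           i.e. the second logical processor of the second package. */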
        if ( (buf.edx >> 29) & 1 ) {
            /* TM - automatic thermal throttling */
            KA_TRACE( trace_level, ( " ATHROTL" ) );
        }
        KA_TRACE( trace_level, ( " ]\n" ) );   /* end of feature list */

        /* Dump the remaining standard CPUID leaves. */
        for (i = 2; i <= max_arg; ++i) {
            __kmp_x86_cpuid( i, 0, &buf );
            KA_TRACE( trace_level,
                      ( "INFO: CPUID %d: EAX=0x%08X EBX=0x%08X ECX=0x%08X EDX=0x%08X\n",
                        i, buf.eax, buf.ebx, buf.ecx, buf.edx ) );
        }
#if KMP_USE_ADAPTIVE_LOCKS
        /* RTM (restricted transactional memory) is reported in CPUID.07H:EBX bit 11. */
        p->rtm = 0;
        if (max_arg > 7) {
            __kmp_x86_cpuid( 7, 0, &buf );
            p->rtm = (buf.ebx >> 11) & 1;
            KA_TRACE( trace_level, ( " RTM" ) );
        }
#endif
    } // if (max_arg >= 1)
    { /* Read the CPU brand string (CPUID leaves 0x80000002..0x80000004) and
         extract the nominal frequency from it. */

        union kmp_cpu_brand_string {
            struct kmp_cpuid buf[ 3 ];
            char             string[ sizeof( struct kmp_cpuid ) * 3 + 1 ];
        };
        union kmp_cpu_brand_string brand;
        int i;

        /* Each extended leaf returns 16 bytes of the 48-byte brand string. */
        for ( i = 0; i < 3; ++ i ) {
            __kmp_x86_cpuid( 0x80000002 + i, 0, &brand.buf[ i ] );
        }
        brand.string[ sizeof( brand.string ) - 1 ] = 0;   /* Just in case. */
        KA_TRACE( trace_level, ( "cpu brand string: \"%s\"\n", brand.string ) );

        /* The frequency is the last blank-separated token, e.g. "3.20GHz". */
        p->frequency = __kmp_parse_frequency( strrchr( brand.string, ' ' ) );
        KA_TRACE( trace_level, ( "cpu frequency from brand string: %" KMP_UINT64_SPEC "\n",
                                 p->frequency ) );
    }

} // __kmp_query_cpuid
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

/* Copy the host name into buffer, or the string "unknown" on failure. */
void
__kmp_expand_host_name( char *buffer, size_t size )
{
    KMP_DEBUG_ASSERT(size >= sizeof(unknown));

#if KMP_OS_WINDOWS
    {
        DWORD s = size;

        if (! GetComputerNameA( buffer, & s ))
            KMP_STRCPY_S( buffer, size, unknown );
    }
#else
    /* A name long enough to overwrite the sentinel at size - 2 indicates
       possible truncation; fall back to "unknown" in that case too. */
    buffer[size - 2] = 0;
    if (gethostname( buffer, size ) || buffer[size - 2] != 0)
        KMP_STRCPY_S( buffer, size, unknown );
#endif
}
/* Expand %-escapes in pattern into result (at most rlen - 1 characters):
   %h -> host name, %p -> default number of threads, %i -> process id,
   %% -> '%'. An optional decimal field width may follow the '%'. */
void
__kmp_expand_file_name( char *result, size_t rlen, char *pattern )
{
    char *pos = result, *end = result + rlen - 1;
    char buffer[ 256 ];
    int default_cpu_width = 1;
    int snp_result;
    int i;

    KMP_DEBUG_ASSERT(rlen > 0);
    *end = 0;

    /* Number of decimal digits needed to print __kmp_xproc. */
    for (i = __kmp_xproc; i >= 10; i /= 10, ++default_cpu_width);

    if (pattern != NULL) {
        while (*pattern != '\0' && pos < end) {
            if (*pattern != '%') {
                *pos++ = *pattern++;   /* Copy ordinary characters verbatim. */
            } else {
                char *old_pattern = pattern;
                int width = 1;
                int cpu_width = default_cpu_width;

                ++pattern;

                /* Optional decimal field width immediately after the '%'. */
                if (*pattern >= '0' && *pattern <= '9') {
                    width = 0;
                    do {
                        width = (width * 10) + *pattern++ - '0';
                    } while (*pattern >= '0' && *pattern <= '9');
                    if (width < 0 || width > 1024)
                        width = 1;
                    cpu_width = width;
                }
                switch (*pattern) {
                case 'H':
                case 'h':
                    /* %h: host name. */
                    __kmp_expand_host_name( buffer, sizeof( buffer ) );
                    KMP_STRNCPY( pos, buffer, end - pos + 1 );
                    if (*pos == '\0')
                        break;
                    pos += strlen( pos );
                    ++pattern;
                    break;
                case 'P':
                case 'p':
                    /* %p: default number of threads. */
                    snp_result = KMP_SNPRINTF( pos, end - pos + 1, "%0*d",
                                               cpu_width, __kmp_dflt_team_nth );
                    if (snp_result >= 0 && snp_result <= end - pos) {
                        while (*pos) ++pos;
                        ++pattern;
                    } else
                        pos = end;
                    break;
                case 'I':
                case 'i': {
                    /* %i: process id (getpid() comes from kmp_wrapper_getpid.h). */
                    pid_t id = getpid();
                    snp_result = KMP_SNPRINTF( pos, end - pos + 1, "%0*d", width, id );
                    if (snp_result >= 0 && snp_result <= end - pos) {
                        while (*pos) ++pos;
                        ++pattern;
                    } else
                        pos = end;
                    break;
                }
                case '%':
                    *pos++ = '%';
                    ++pattern;
                    break;
                default:
                    /* Unknown escape: emit the '%' literally and rescan from the
                       character after it. */
                    *pos++ = '%';
                    pattern = old_pattern + 1;
                    break;
                }
            }
        }
        /* The pattern did not fit into the result buffer. */
        if (*pattern != '\0')
            KMP_FATAL( FileNameTooLong );
    }

    *pos = '\0';
}
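/* Usage example (illustrative): with __kmp_dflt_team_nth == 8 and process id 4321,
   expanding the pattern "gmon.out.%h.%p.%i" could produce "gmon.out.myhost.8.4321"
   (the host name here is made up). */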