#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
#include "kmp_error.h"
#define MIN_STACK       100

static char const * cons_text_fort[] = {
    "ORDERED work-sharing",
static char const * cons_text_c[] = {
    "\"ordered\" work-sharing",
#define get_src( ident )   ( (ident) == NULL ? NULL : (ident)->psource )
#define PUSH_MSG( ct, ident ) \
    "\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) )
#define POP_MSG( p )                                  \
    "\tpopping off stack: %s (%s)\n",                 \
    cons_text_c[ (p)->stack_data[ tos ].type ],       \
    get_src( (p)->stack_data[ tos ].ident )
static int const cons_text_fort_num = sizeof( cons_text_fort ) / sizeof( char const * );
static int const cons_text_c_num    = sizeof( cons_text_c )    / sizeof( char const * );
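/*
 * Each thread keeps a construct-consistency stack (struct cons_header) in
 * th.th_cons.  stack_data[] records every construct the thread has entered;
 * p_top, w_top and s_top index the topmost "parallel", worksharing and
 * synchronization entries, and each entry's "prev" field links back to the
 * enclosing entry of the same class, so popping restores the previous top
 * directly.
 */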
static void
__kmp_check_null_func( void ) {
    /* nothing to do */
}
static void
__kmp_expand_cons_stack( int gtid, struct cons_header *p )
{
    int    i;
    struct cons_data *d;

    if ( gtid < 0 )
        __kmp_check_null_func();

    KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );

    d = p->stack_data;

    p->stack_size = (p->stack_size * 2) + 100;

    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size + 1) );

    for ( i = p->stack_top; i >= 0; --i )
        p->stack_data[i] = d[i];

    /* NOTE: we do not free the old stack_data */
}
/* NOTE: Returns allocated memory; callers free it with KMP_INTERNAL_FREE. */
static char const *
__kmp_pragma( int ct, ident_t const * ident )
{
    char const *  cons = NULL;   // Construct name.
    char *        file = NULL;   // File name.
    char *        func = NULL;   // Function (routine) name.
    char *        line = NULL;   // Line number.
    kmp_str_buf_t buffer;
    kmp_msg_t     prgm;
    __kmp_str_buf_init( & buffer );
    if ( 0 < ct && ct < cons_text_c_num ) {
        cons = cons_text_c[ ct ];
    } else {
        KMP_DEBUG_ASSERT( 0 );
    }
    if ( ident != NULL && ident->psource != NULL ) {
        char * tail = NULL;
        __kmp_str_buf_print( & buffer, "%s", ident->psource );   // Copy source location string to buffer.
        // Split the ";file;func;line;..." string into its fields.
        tail = buffer.str;
        __kmp_str_split( tail, ';', NULL,   & tail );
        __kmp_str_split( tail, ';', & file, & tail );
        __kmp_str_split( tail, ';', & func, & tail );
        __kmp_str_split( tail, ';', & line, & tail );
    }
    prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line );
    __kmp_str_buf_free( & buffer );
    return prgm.str;
} // __kmp_pragma
void
__kmp_error_construct(
    kmp_i18n_id_t   id,     // Message identifier.
    enum cons_type  ct,     // Construct type.
    ident_t const * ident   // Construct ident.
)
{
    char const * construct = __kmp_pragma( ct, ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct );
}
void
__kmp_error_construct2(
    kmp_i18n_id_t            id,     // Message identifier.
    enum cons_type           ct,     // First construct type.
    ident_t const *          ident,  // First construct ident.
    struct cons_data const * cons    // Second construct.
)
{
    char const * construct1 = __kmp_pragma( ct, ident );
    char const * construct2 = __kmp_pragma( cons->type, cons->ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct1 );
    KMP_INTERNAL_FREE( (void *) construct2 );
}
struct cons_header *
__kmp_allocate_cons_stack( int gtid )
{
    struct cons_header *p;

    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }
    KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) );
    p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) );
    p->p_top = p->w_top = p->s_top = 0;
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) );
    p->stack_size = MIN_STACK;
    p->stack_top  = 0;
    p->stack_data[ 0 ].type  = ct_none;
    p->stack_data[ 0 ].prev  = 0;
    p->stack_data[ 0 ].ident = NULL;
    return p;
}
void
__kmp_free_cons_stack( void * ptr ) {
    struct cons_header * p = (struct cons_header *) ptr;
    if ( p != NULL ) {
        if ( p->stack_data != NULL ) {
            __kmp_free( p->stack_data );
            p->stack_data = NULL;
        }
        __kmp_free( p );
    }
}
static void
dump_cons_stack( int gtid, struct cons_header * p ) {
    int i;
    int tos = p->stack_top;
    kmp_str_buf_t buffer;
    __kmp_str_buf_init( & buffer );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid );
    __kmp_str_buf_print( & buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top );
    for ( i = tos; i > 0; i-- ) {
        struct cons_data * c = & ( p->stack_data[ i ] );
        __kmp_str_buf_print( & buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name );
    }
    __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_debug_printf( "%s", buffer.str );
    __kmp_str_buf_free( & buffer );
}
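/*
 * The remaining routines implement a push/check/pop protocol: the runtime
 * calls __kmp_push_* when a thread enters a construct, the __kmp_check_*
 * helpers validate the requested nesting against the current p_top/w_top/
 * s_top entries (issuing a fatal diagnostic through __kmp_error_construct
 * or __kmp_error_construct2 on a violation), and __kmp_pop_* verifies that
 * the construct being closed is the one on top of the stack before
 * unlinking it.
 */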
void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct_parallel;
    p->stack_data[ tos ].prev  = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->p_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }
    if ( p->w_top > p->p_top &&
         !( IS_CONS_TYPE_TASKQ( p->stack_data[ p->w_top ].type ) && IS_CONS_TYPE_TASKQ( ct ) ) ) {
        /* already in a WORKSHARING construct for this PARALLEL region */
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    }
    if ( p->s_top > p->p_top ) {
        /* already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    }
}
void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->w_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
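/*
 * __kmp_check_sync handles three groups of constructs: the "ordered"
 * variants (which must be bound to an enclosing worksharing construct that
 * carries the ordered clause), "critical" (which must not be re-entered
 * under a lock the thread already owns, i.e. a nested critical of the same
 * name), and master/reduce (which may not appear inside worksharing or
 * other sync constructs of the same parallel region).
 */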
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size )
        __kmp_expand_cons_stack( gtid, p );

    if ( ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if ( p->w_top <= p->p_top ) {
            /* we are not inside a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
            KMP_ASSERT( ct == ct_ordered_in_parallel );
#else
            __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
#endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a WORKSHARING construct for this PARALLEL region */
            if ( !IS_CONS_TYPE_ORDERED( p->stack_data[ p->w_top ].type ) ) {
                if ( p->stack_data[ p->w_top ].type == ct_taskq ) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        if ( p->s_top > p->p_top && p->s_top > p->w_top ) {
            /* inside a sync construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if ( stack_type == ct_critical ||
                 ( ( stack_type == ct_ordered_in_parallel ||
                     stack_type == ct_ordered_in_pdo      ||
                     stack_type == ct_ordered_in_taskq  ) &&
                   p->stack_data[ index ].ident != NULL &&
                   ( p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ) ) ) {
                /* ORDERED nested inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
#if KMP_USE_DYNAMIC_LOCK
        if ( lck != NULL && __kmp_get_user_lock_owner( lck, seq ) == gtid ) {  /* thread already owns this lock */
#else
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) {  /* thread already owns this lock */
#endif
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up the stack looking for a critical entry that uses the same lock */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                cons = p->stack_data[ index ];
            }
            /* nested CRITICAL of the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if ( p->w_top > p->p_top ) {
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        if ( ct == ct_reduce && p->s_top > p->p_top ) {
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        }
    }
}
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
#if KMP_USE_DYNAMIC_LOCK
    __kmp_check_sync( gtid, ct, ident, lck, seq );
#else
    __kmp_check_sync( gtid, ct, ident, lck );
#endif
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
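/*
 * A minimal sketch of how the rest of the runtime is expected to drive the
 * sync stack around a named critical section (the call site and the "loc" /
 * "lck" variables below are illustrative, not taken from this file; with
 * KMP_USE_DYNAMIC_LOCK an extra "seq" argument is passed):
 *
 *     __kmp_push_sync( gtid, ct_critical, loc, lck );   // entering the critical region
 *     ... user code protected by lck ...
 *     __kmp_pop_sync( gtid, ct_critical, loc );         // leaving the critical region
 */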
void
__kmp_pop_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->p_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident );
    }
    if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) {
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct_parallel, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->p_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }
    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
           /* below are two exceptions to the rule that construct types must match */
           ! ( p->stack_data[ tos ].type == ct_pdo_ordered && ct == ct_pdo ) &&
           ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}
void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
void
__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
    if ( ident != NULL ) {
        __kmp_check_null_func();
    }
    if ( p->w_top > p->p_top ) {
        /* already in a WORKSHARING construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->w_top ]
        );
    }
    if ( p->s_top > p->p_top ) {
        /* already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->s_top ]