47 #include "kmp_error.h"
48 #include "kmp_stats.h"
52 #include "ompt-specific.h"
// i_maxmin: compile-time extreme values (mx = maximum, mn = minimum) for the
// loop-index types used by the static-scheduling routines below.  The
// scheduling code uses these to clamp an upper bound that wrapped around
// during chunk arithmetic.  The bit patterns assume two's complement for the
// signed types (as upstream does).
template< typename T >
struct i_maxmin {
    static const T mx;
    static const T mn;
};
template<>
struct i_maxmin< int > {
    static const int mx = 0x7fffffff;
    static const int mn = 0x80000000;
};
template<>
struct i_maxmin< unsigned int > {
    static const unsigned int mx = 0xffffffff;
    static const unsigned int mn = 0x00000000;
};
template<>
struct i_maxmin< long long > {
    static const long long mx = 0x7fffffffffffffffLL;
    static const long long mn = 0x8000000000000000LL;
};
template<>
struct i_maxmin< unsigned long long > {
    static const unsigned long long mx = 0xffffffffffffffffLL;
    static const unsigned long long mn = 0x0000000000000000LL;
};
85 char const * traits_t< int >::spec =
"d";
86 char const * traits_t< unsigned int >::spec =
"u";
87 char const * traits_t< long long >::spec =
"lld";
88 char const * traits_t< unsigned long long >::spec =
"llu";
// ----------------------------------------------------------------------------
// __kmp_for_static_init: templated worker behind the __kmpc_for_static_init_*
// compiler entry points below.  Computes the iteration slice [*plower,
// *pupper] and *pstride owned by thread 'global_tid' for a statically
// scheduled worksharing loop, and sets *plastiter when this thread runs the
// last iteration.  T is the loop-index type; incr/chunk use the matching
// signed type.
// NOTE(review): this region lost lines in extraction (parts of the signature,
// braces, #endif markers, case labels).  Restore from upstream kmp_sched.cpp
// before building; only comments were added here, code is unchanged.
92 template<
typename T >
94 __kmp_for_static_init(
101 typename traits_t< T >::signed_t *pstride,
102 typename traits_t< T >::signed_t incr,
103 typename traits_t< T >::signed_t chunk
// UT/ST: unsigned and signed flavors of the index type T.
106 typedef typename traits_t< T >::unsigned_t UT;
107 typedef typename traits_t< T >::signed_t ST;
109 register kmp_int32 gtid = global_tid;
110 register kmp_uint32 tid;
111 register kmp_uint32 nth;
112 register UT trip_count;
113 register kmp_team_t *team;
114 register kmp_info_t *th = __kmp_threads[ gtid ];
// OMPT tool support: capture team/task info for the loop-begin callbacks below.
116 #if OMPT_SUPPORT && OMPT_TRACE
117 ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
118 ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
// All outgoing pointers must be valid.
121 KMP_DEBUG_ASSERT( plastiter && plower && pupper && pstride );
122 KE_TRACE( 10, (
"__kmpc_for_static_init called (%d)\n", global_tid));
// Debug trace of the incoming loop descriptor (KD_TRACE builds).
127 buff = __kmp_str_format(
128 "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s," \
129 " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
130 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
131 traits_t< ST >::spec, traits_t< ST >::spec, traits_t< T >::spec );
132 KD_TRACE(100, ( buff, global_tid, schedtype, *plastiter,
133 *plower, *pupper, *pstride, incr, chunk ) );
134 __kmp_str_free( &buff );
// Optional runtime consistency checking of the worksharing construct:
// push the construct and reject a zero increment.
138 if ( __kmp_env_consistency_check ) {
139 __kmp_push_workshare( global_tid, ct_pdo, loc );
141 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
// Zero-trip loop: the range is empty for this sign of incr.
145 if ( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
146 if( plastiter != NULL )
156 buff = __kmp_str_format(
157 "__kmpc_for_static_init:(ZERO TRIP) liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>, loc = %%s\n",
158 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
159 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride, loc->
psource ) );
160 __kmp_str_free( &buff );
163 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
// Notify an attached OMPT tool that the loop begins (even when zero-trip).
165 #if OMPT_SUPPORT && OMPT_TRACE
166 if ((ompt_status == ompt_status_track_callback) &&
167 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
168 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
169 team_info->parallel_id, task_info->task_id,
170 team_info->microtask);
// NOTE(review): the branch structure below was lost in extraction.  This arm
// appears to take tid/team from the parent team (teams construct), the next
// arm from the current team -- confirm against upstream.
180 tid = th->th.th_team->t.t_master_tid;
181 team = th->th.th_team->t.t_parent;
185 tid = __kmp_tid_from_gtid( global_tid );
186 team = th->th.th_team;
// A serialized team executes the whole range on this single thread.
190 if ( team -> t.t_serialized ) {
192 if( plastiter != NULL )
195 *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
201 buff = __kmp_str_format(
202 "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
203 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
204 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
205 __kmp_str_free( &buff );
208 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
210 #if OMPT_SUPPORT && OMPT_TRACE
211 if ((ompt_status == ompt_status_track_callback) &&
212 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
213 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
214 team_info->parallel_id, task_info->task_id,
215 team_info->microtask);
// Number of threads in the (non-serialized) team.
220 nth = team->t.t_nproc;
// NOTE(review): this looks like the nth == 1 fast path (whole range to the
// one thread); its guarding 'if' was lost in extraction -- confirm upstream.
222 if( plastiter != NULL )
224 *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
229 buff = __kmp_str_format(
230 "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
231 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
232 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
233 __kmp_str_free( &buff );
236 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
238 #if OMPT_SUPPORT && OMPT_TRACE
239 if ((ompt_status == ompt_status_track_callback) &&
240 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
241 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
242 team_info->parallel_id, task_info->task_id,
243 team_info->microtask);
// Trip count: incr of +/-1 is special-cased to avoid the division.
251 trip_count = *pupper - *plower + 1;
252 }
else if (incr == -1) {
253 trip_count = *plower - *pupper + 1;
256 trip_count = (*pupper - *plower) / incr + 1;
258 trip_count = (*plower - *pupper) / ( -incr ) + 1;
// With checks on, a zero trip count over a non-degenerate range means the
// range overflowed the index type.
262 if ( __kmp_env_consistency_check ) {
264 if ( trip_count == 0 && *pupper != *plower ) {
265 __kmp_error_construct( kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo, loc );
// Dispatch on schedule kind (case labels partially lost in extraction).
270 switch ( schedtype ) {
// Fewer iterations than threads: each low-numbered thread gets at most one.
273 if ( trip_count < nth ) {
275 __kmp_static == kmp_sch_static_greedy || \
276 __kmp_static == kmp_sch_static_balanced
278 if ( tid < trip_count ) {
279 *pupper = *plower = *plower + tid * incr;
281 *plower = *pupper + incr;
283 if( plastiter != NULL )
284 *plastiter = ( tid == trip_count - 1 );
// Balanced: small_chunk each, with one extra iteration for the first
// 'extras' threads.
286 if ( __kmp_static == kmp_sch_static_balanced ) {
287 register UT small_chunk = trip_count / nth;
288 register UT extras = trip_count % nth;
289 *plower += incr * ( tid * small_chunk + ( tid < extras ? tid : extras ) );
290 *pupper = *plower + small_chunk * incr - ( tid < extras ? 0 : incr );
291 if( plastiter != NULL )
292 *plastiter = ( tid == nth - 1 );
// Greedy: equal blocks of ceil(trip_count / nth) iterations.
294 register T big_chunk_inc_count = ( trip_count/nth +
295 ( ( trip_count % nth ) ? 1 : 0) ) * incr;
296 register T old_upper = *pupper;
298 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
301 *plower += tid * big_chunk_inc_count;
302 *pupper = *plower + big_chunk_inc_count - incr;
// Clamp *pupper when the block arithmetic wrapped past the type's extreme.
304 if( *pupper < *plower )
305 *pupper = i_maxmin< T >::mx;
306 if( plastiter != NULL )
307 *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
308 if ( *pupper > old_upper ) *pupper = old_upper;
310 if( *pupper > *plower )
311 *pupper = i_maxmin< T >::mn;
312 if( plastiter != NULL )
313 *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
314 if ( *pupper < old_upper ) *pupper = old_upper;
// Chunked static schedule: the stride advances one full round-robin sweep
// of all nth threads.
320 case kmp_sch_static_chunked:
327 *pstride = span * nth;
328 *plower = *plower + (span * tid);
329 *pupper = *plower + span - incr;
330 if( plastiter != NULL )
331 *plastiter = (tid == ((trip_count - 1)/( UT )chunk) % nth);
// Unknown schedule kind: hard error.
335 KMP_ASSERT2( 0,
"__kmpc_for_static_init: unknown scheduling type" );
// ITT tooling: the master thread reports loop metadata in frames mode 3.
341 if ( KMP_MASTER_TID(tid) && __itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
343 th->th.th_teams_microtask == NULL &&
345 team->t.t_active_level == 1 )
347 kmp_uint64 cur_chunk = chunk;
350 cur_chunk = trip_count / nth + ( ( trip_count % nth ) ? 1 : 0);
353 __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
// Exit trace of the computed slice, then the OMPT loop-begin notification.
360 buff = __kmp_str_format(
361 "__kmpc_for_static_init: liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>\n",
362 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
363 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
364 __kmp_str_free( &buff );
367 KE_TRACE( 10, (
"__kmpc_for_static_init: T#%d return\n", global_tid ) );
369 #if OMPT_SUPPORT && OMPT_TRACE
370 if ((ompt_status == ompt_status_track_callback) &&
371 ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
372 ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
373 team_info->parallel_id, task_info->task_id, team_info->microtask);
// ----------------------------------------------------------------------------
// __kmp_dist_for_static_init: templated worker behind the
// __kmpc_dist_for_static_init_* entry points below.  For a combined
// 'distribute parallel for' it computes, in two passes, the chunk owned by
// this team (*pupperDist is the team's upper bound) and then this thread's
// slice [*plower, *pupper] inside that chunk.
// NOTE(review): lines were lost in extraction (signature parts, braces,
// case labels, some 'else' arms).  Restore from upstream kmp_sched.cpp
// before building; only comments were added here, code is unchanged.
380 template<
typename T >
382 __kmp_dist_for_static_init(
386 kmp_int32 *plastiter,
390 typename traits_t< T >::signed_t *pstride,
391 typename traits_t< T >::signed_t incr,
392 typename traits_t< T >::signed_t chunk
// UT/ST: unsigned and signed flavors of the index type T.
395 typedef typename traits_t< T >::unsigned_t UT;
396 typedef typename traits_t< T >::signed_t ST;
397 register kmp_uint32 tid;
398 register kmp_uint32 nth;
399 register kmp_uint32 team_id;
400 register kmp_uint32 nteams;
401 register UT trip_count;
402 register kmp_team_t *team;
405 KMP_DEBUG_ASSERT( plastiter && plower && pupper && pupperDist && pstride );
406 KE_TRACE( 10, (
"__kmpc_dist_for_static_init called (%d)\n", gtid));
// Debug trace of the incoming loop descriptor (KD_TRACE builds).
411 buff = __kmp_str_format(
412 "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "\
413 "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
414 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
415 traits_t< ST >::spec, traits_t< T >::spec );
416 KD_TRACE(100, ( buff, gtid, schedule, *plastiter,
417 *plower, *pupper, incr, chunk ) );
418 __kmp_str_free( &buff );
// Optional consistency checks: zero increment, empty range, illegal incr.
422 if( __kmp_env_consistency_check ) {
423 __kmp_push_workshare( gtid, ct_pdo, loc );
425 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
427 if( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
437 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
// Identify this thread and its team inside the enclosing teams construct.
440 tid = __kmp_tid_from_gtid( gtid );
441 th = __kmp_threads[gtid];
442 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
443 nth = th->th.th_team_nproc;
444 team = th->th.th_team;
446 nteams = th->th.th_teams_size.nteams;
448 team_id = team->t.t_master_tid;
449 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
// Full trip count; incr of +/-1 special-cased to avoid the division.
453 trip_count = *pupper - *plower + 1;
454 }
else if(incr == -1) {
455 trip_count = *plower - *pupper + 1;
457 trip_count = (ST)(*pupper - *plower) / incr + 1;
459 *pstride = *pupper - *plower;
// First pass: split the whole range across teams (the distribute part).
460 if( trip_count <= nteams ) {
462 __kmp_static == kmp_sch_static_greedy || \
463 __kmp_static == kmp_sch_static_balanced
// Fewer iterations than teams: one iteration for tid 0 of low-id teams.
466 if( team_id < trip_count && tid == 0 ) {
467 *pupper = *pupperDist = *plower = *plower + team_id * incr;
469 *pupperDist = *pupper;
470 *plower = *pupper + incr;
472 if( plastiter != NULL )
473 *plastiter = ( tid == 0 && team_id == trip_count - 1 );
// Balanced split across teams, with 'extras' teams taking one more.
476 if( __kmp_static == kmp_sch_static_balanced ) {
477 register UT chunkD = trip_count / nteams;
478 register UT extras = trip_count % nteams;
479 *plower += incr * ( team_id * chunkD + ( team_id < extras ? team_id : extras ) );
480 *pupperDist = *plower + chunkD * incr - ( team_id < extras ? 0 : incr );
481 if( plastiter != NULL )
482 *plastiter = ( team_id == nteams - 1 );
// Greedy split: equal blocks of ceil(trip_count / nteams).
484 register T chunk_inc_count =
485 ( trip_count / nteams + ( ( trip_count % nteams ) ? 1 : 0) ) * incr;
486 register T upper = *pupper;
487 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
489 *plower += team_id * chunk_inc_count;
490 *pupperDist = *plower + chunk_inc_count - incr;
// Clamp *pupperDist when the arithmetic wrapped past the type's extreme.
493 if( *pupperDist < *plower )
494 *pupperDist = i_maxmin< T >::mx;
495 if( plastiter != NULL )
496 *plastiter = *plower <= upper && *pupperDist > upper - incr;
497 if( *pupperDist > upper )
499 if( *plower > *pupperDist ) {
500 *pupper = *pupperDist;
504 if( *pupperDist > *plower )
505 *pupperDist = i_maxmin< T >::mn;
506 if( plastiter != NULL )
507 *plastiter = *plower >= upper && *pupperDist < upper - incr;
508 if( *pupperDist < upper )
510 if( *plower < *pupperDist ) {
511 *pupper = *pupperDist;
// Second pass: split this team's chunk [*plower, *pupperDist] across the
// nth threads of the team.
519 trip_count = *pupperDist - *plower + 1;
520 }
else if(incr == -1) {
521 trip_count = *plower - *pupperDist + 1;
523 trip_count = (ST)(*pupperDist - *plower) / incr + 1;
525 KMP_DEBUG_ASSERT( trip_count );
529 if( trip_count <= nth ) {
531 __kmp_static == kmp_sch_static_greedy || \
532 __kmp_static == kmp_sch_static_balanced
534 if( tid < trip_count )
535 *pupper = *plower = *plower + tid * incr;
537 *plower = *pupper + incr;
538 if( plastiter != NULL )
539 if( *plastiter != 0 && !( tid == trip_count - 1 ) )
// Balanced split across threads within the team.
542 if( __kmp_static == kmp_sch_static_balanced ) {
543 register UT chunkL = trip_count / nth;
544 register UT extras = trip_count % nth;
545 *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
546 *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
547 if( plastiter != NULL )
548 if( *plastiter != 0 && !( tid == nth - 1 ) )
// Greedy split across threads within the team.
551 register T chunk_inc_count =
552 ( trip_count / nth + ( ( trip_count % nth ) ? 1 : 0) ) * incr;
553 register T upper = *pupperDist;
554 KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
556 *plower += tid * chunk_inc_count;
557 *pupper = *plower + chunk_inc_count - incr;
// Clamp *pupper when the arithmetic wrapped past the type's extreme.
559 if( *pupper < *plower )
560 *pupper = i_maxmin< T >::mx;
561 if( plastiter != NULL )
562 if( *plastiter != 0 && !(*plower <= upper && *pupper > upper - incr) )
564 if( *pupper > upper )
567 if( *pupper > *plower )
568 *pupper = i_maxmin< T >::mn;
569 if( plastiter != NULL )
570 if( *plastiter != 0 && !(*plower >= upper && *pupper < upper - incr) )
572 if( *pupper < upper )
// Chunked schedule inside the team: stride is one round-robin sweep.
579 case kmp_sch_static_chunked:
585 *pstride = span * nth;
586 *plower = *plower + (span * tid);
587 *pupper = *plower + span - incr;
588 if( plastiter != NULL )
589 if( *plastiter != 0 && !(tid == ((trip_count - 1) / ( UT )chunk) % nth) )
// Unknown schedule kind: hard error.
594 KMP_ASSERT2( 0,
"__kmpc_dist_for_static_init: unknown loop scheduling type" );
// Exit trace of the computed bounds.
603 buff = __kmp_str_format(
604 "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "\
605 "stride=%%%s signed?<%s>\n",
606 traits_t< T >::spec, traits_t< T >::spec, traits_t< T >::spec,
607 traits_t< ST >::spec, traits_t< T >::spec );
608 KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pupperDist, *pstride ) );
609 __kmp_str_free( &buff );
612 KE_TRACE( 10, (
"__kmpc_dist_for_static_init: T#%d return\n", gtid ) );
// ----------------------------------------------------------------------------
// __kmp_team_static_init: templated worker behind the __kmpc_team_static_init_*
// entry points below.  Computes this TEAM's chunk (*p_lb, *p_ub, *p_st) for a
// chunked round-robin distribution of [lower, upper] across the nteams teams
// of an enclosing teams construct; *p_last is set for the team running the
// last iteration.
// NOTE(review): lines were lost in extraction (signature parts, local
// declarations, the computation of 'span', several guards).  Restore from
// upstream kmp_sched.cpp before building; only comments were added here.
616 template<
typename T >
618 __kmp_team_static_init(
624 typename traits_t< T >::signed_t *p_st,
625 typename traits_t< T >::signed_t incr,
626 typename traits_t< T >::signed_t chunk
// UT/ST: unsigned and signed flavors of the index type T.
633 typedef typename traits_t< T >::unsigned_t UT;
634 typedef typename traits_t< T >::signed_t ST;
644 KMP_DEBUG_ASSERT( p_last && p_lb && p_ub && p_st );
645 KE_TRACE( 10, (
"__kmp_team_static_init called (%d)\n", gtid));
// Debug trace of the incoming loop descriptor (KD_TRACE builds).
650 buff = __kmp_str_format(
"__kmp_team_static_init enter: T#%%d liter=%%d "\
651 "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
652 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
653 traits_t< ST >::spec, traits_t< T >::spec );
654 KD_TRACE(100, ( buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
655 __kmp_str_free( &buff );
// Optional consistency checks: zero increment, empty range, illegal incr.
661 if( __kmp_env_consistency_check ) {
663 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
665 if( incr > 0 ? (upper < lower) : (lower < upper) ) {
675 __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
// Identify this team inside the enclosing teams construct.
678 th = __kmp_threads[gtid];
679 KMP_DEBUG_ASSERT(th->th.th_teams_microtask);
680 team = th->th.th_team;
682 nteams = th->th.th_teams_size.nteams;
684 team_id = team->t.t_master_tid;
685 KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
// Trip count; incr of +/-1 special-cased to avoid the division.
689 trip_count = upper - lower + 1;
690 }
else if(incr == -1) {
691 trip_count = lower - upper + 1;
693 trip_count = (ST)(upper - lower) / incr + 1;
// Round-robin chunks across teams: the stride jumps one full sweep of all
// nteams teams.  NOTE(review): 'span' is computed on a line lost in
// extraction (presumably chunk * incr -- confirm upstream).
698 *p_st = span * nteams;
699 *p_lb = lower + (span * team_id);
700 *p_ub = *p_lb + span - incr;
701 if ( p_last != NULL )
702 *p_last = (team_id == ((trip_count - 1)/(UT)chunk) % nteams);
// Clamp *p_ub when chunk arithmetic wrapped past the type's extreme
// (the guarding comparisons were lost in extraction).
706 *p_ub = i_maxmin< T >::mx;
711 *p_ub = i_maxmin< T >::mn;
// Exit trace of the computed team chunk.
719 buff = __kmp_str_format(
"__kmp_team_static_init exit: T#%%d team%%u liter=%%d "\
720 "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
721 traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
722 traits_t< ST >::spec );
723 KD_TRACE(100, ( buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
724 __kmp_str_free( &buff );
754 kmp_int32 *plower, kmp_int32 *pupper,
755 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
757 __kmp_for_static_init< kmp_int32 >(
758 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
766 kmp_uint32 *plower, kmp_uint32 *pupper,
767 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
769 __kmp_for_static_init< kmp_uint32 >(
770 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
778 kmp_int64 *plower, kmp_int64 *pupper,
779 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
781 __kmp_for_static_init< kmp_int64 >(
782 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
790 kmp_uint64 *plower, kmp_uint64 *pupper,
791 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
793 __kmp_for_static_init< kmp_uint64 >(
794 loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
824 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
825 kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD,
826 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
828 __kmp_dist_for_static_init< kmp_int32 >(
829 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
837 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
838 kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD,
839 kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
841 __kmp_dist_for_static_init< kmp_uint32 >(
842 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
850 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
851 kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD,
852 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
854 __kmp_dist_for_static_init< kmp_int64 >(
855 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
863 ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
864 kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD,
865 kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
867 __kmp_dist_for_static_init< kmp_uint64 >(
868 loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
901 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
902 kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
904 KMP_DEBUG_ASSERT( __kmp_init_serial );
905 __kmp_team_static_init< kmp_int32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
913 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
914 kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
916 KMP_DEBUG_ASSERT( __kmp_init_serial );
917 __kmp_team_static_init< kmp_uint32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
925 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
926 kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
928 KMP_DEBUG_ASSERT( __kmp_init_serial );
929 __kmp_team_static_init< kmp_int64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
937 ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
938 kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
940 KMP_DEBUG_ASSERT( __kmp_init_serial );
941 __kmp_team_static_init< kmp_uint64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
#define KMP_COUNT_BLOCK(name)
Increments the specified counter (name).
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint32 *plower, kmp_uint32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last, kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk)
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter, kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter, kmp_uint64 *plower, kmp_uint64 *pupper, kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk)