/*
 * kmp_gsupport.c
 */

/* <copyright>
    Copyright (c) 1997-2015 Intel Corporation.  All Rights Reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

      * Redistributions of source code must retain the above copyright
        notice, this list of conditions and the following disclaimer.
      * Redistributions in binary form must reproduce the above copyright
        notice, this list of conditions and the following disclaimer in the
        documentation and/or other materials provided with the distribution.
      * Neither the name of Intel Corporation nor the names of its
        contributors may be used to endorse or promote products derived
        from this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

</copyright> */

#if defined(__x86_64) || defined (__powerpc64__) || defined(__aarch64__)
# define KMP_I8
#endif
#include "kmp.h"
#include "kmp_atomic.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#ifdef __cplusplus
    extern "C" {
#endif // __cplusplus

#define MKLOC(loc,routine) \
    static ident_t (loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;" };
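
// A minimal illustration (not part of the library) of how MKLOC is used
// throughout this file: it declares a file-static ident_t with an unknown
// source location, whose address is then passed to the __kmpc_* entry points.
//
//     void example_entry(void)            /* hypothetical entry point */
//     {
//         int gtid = __kmp_entry_gtid();
//         MKLOC(loc, "example_entry");    /* expands to: static ident_t loc = {...}; */
//         __kmpc_barrier(&loc, gtid);
//     }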

#include "kmp_ftn_os.h"

void
xexpand(KMP_API_NAME_GOMP_BARRIER)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_barrier");
    KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
    __kmpc_barrier(&loc, gtid);
}
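
// For reference (a sketch of gcc's lowering, not code in this library):
// a bare barrier directive compiles to a single call to this entry point.
//
//     #pragma omp barrier     /* gcc emits: GOMP_barrier(); */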


//
// Mutual exclusion
//

//
// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any object that
// references it.  We can't reference it directly here in C code, as the
// symbol contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
//
extern kmp_critical_name *__kmp_unnamed_critical_addr;


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_start");
    KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_end");
    KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_name_start");
    KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_name_end");
    KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
}
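
// A sketch of the corresponding gcc lowering (for orientation only; the
// named-lock symbol spelling follows the .gomp_critical_user_ convention
// described above and is an assumption here):
//
//     #pragma omp critical          /* GOMP_critical_start(); ...body...
//                                      GOMP_critical_end();              */
//
//     #pragma omp critical (tag)    /* GOMP_critical_name_start(&.gomp_critical_user_tag);
//                                      ...body...
//                                      GOMP_critical_name_end(&.gomp_critical_user_tag); */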


//
// The Gnu codegen tries to use locked operations to perform atomic updates
// inline.  If it can't, then it calls GOMP_atomic_start() before performing
// the update and GOMP_atomic_end() afterward, regardless of the data type.
//

void
xexpand(KMP_API_NAME_GOMP_ATOMIC_START)(void)
{
    int gtid = __kmp_entry_gtid();
    KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));

#if OMPT_SUPPORT
    __ompt_thread_assign_wait_id(0);
#endif

    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}


void
xexpand(KMP_API_NAME_GOMP_ATOMIC_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid));
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}
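
// Illustrative only: an atomic update the compiler cannot map to a locked
// instruction (e.g. a long double divide) falls back to the global lock
// via the pair above.
//
//     #pragma omp atomic
//     x /= 3.0L;          /* gcc emits: GOMP_atomic_start();
//                                       x /= 3.0L;
//                                       GOMP_atomic_end();   */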


int
xexpand(KMP_API_NAME_GOMP_SINGLE_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_start");
    KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
    // workshare when USE_CHECKS is defined.  We need to avoid the push,
    // as there is no corresponding GOMP_single_end() call.
    //
    return __kmp_enter_single(gtid, &loc, FALSE);
}
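
// For orientation (a sketch, not library code): gcc guards the single
// body with the boolean result of this call and emits no end call, which
// is why __kmp_enter_single must not push a workshare above.
//
//     if (GOMP_single_start()) {
//         ...   /* body of #pragma omp single */
//     }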


void *
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void)
{
    void *retval;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_copy_start");
    KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // If this is the first thread to enter, return NULL.  The generated
    // code will then call GOMP_single_copy_end() for this thread only,
    // with the copyprivate data pointer as an argument.
    //
    if (__kmp_enter_single(gtid, &loc, FALSE))
        return NULL;

    //
    // Wait for the first thread to set the copyprivate data pointer,
    // and for all other threads to reach this point.
    //
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    //
    // Retrieve the value of the copyprivate data pointer, and wait for all
    // threads to do likewise, then return.
    //
    retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    return retval;
}


void
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_single_copy_end");
    KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

    //
    // Set the copyprivate data pointer for the team, then hit the barrier
    // so that the other threads will continue on and read it.  Hit another
    // barrier before continuing, so that we know that the copyprivate
    // data pointer has been propagated to all threads before trying to
    // reuse the t_copypriv_data field.
    //
    __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
}
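
// A sketch of the matching gcc codegen for copyprivate (illustrative; the
// helper names are placeholders):
//
//     void *p = GOMP_single_copy_start();
//     if (p == NULL) {
//         ...                                /* single body fills local_buf */
//         GOMP_single_copy_end(&local_buf);  /* executing thread only       */
//     } else {
//         copy_in_from(p);                   /* other threads read the data */
//     }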


void
xexpand(KMP_API_NAME_GOMP_ORDERED_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_ordered_start");
    KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
    __kmpc_ordered(&loc, gtid);
}


void
xexpand(KMP_API_NAME_GOMP_ORDERED_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_ordered_end");
    KA_TRACE(20, ("GOMP_ordered_end: T#%d\n", gtid));
    __kmpc_end_ordered(&loc, gtid);
}


//
// Dispatch macro defs
//
// They come in two flavors: 64-bit unsigned, and either 32-bit signed
// (IA-32 architecture) or 64-bit signed (Intel(R) 64).
//

#if KMP_ARCH_X86 || KMP_ARCH_ARM
# define KMP_DISPATCH_INIT          __kmp_aux_dispatch_init_4
# define KMP_DISPATCH_FINI_CHUNK    __kmp_aux_dispatch_fini_chunk_4
# define KMP_DISPATCH_NEXT          __kmpc_dispatch_next_4
#else
# define KMP_DISPATCH_INIT          __kmp_aux_dispatch_init_8
# define KMP_DISPATCH_FINI_CHUNK    __kmp_aux_dispatch_fini_chunk_8
# define KMP_DISPATCH_NEXT          __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 || KMP_ARCH_ARM */

# define KMP_DISPATCH_INIT_ULL          __kmp_aux_dispatch_init_8u
# define KMP_DISPATCH_FINI_CHUNK_ULL    __kmp_aux_dispatch_fini_chunk_8u
# define KMP_DISPATCH_NEXT_ULL          __kmpc_dispatch_next_8u


//
// The parallel construct
//

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
  void *data)
{
#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        // get pointer to thread data structure
        thr = __kmp_threads[*gtid];

        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // restore enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}


#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
  void (*task)(void *), void *data, unsigned num_threads, ident_t *loc,
  enum sched_type schedule, long start, long end, long incr, long chunk_size)
{
    //
    // Initialize the loop worksharing construct.
    //
    KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
      schedule != kmp_sch_static);

#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        thr = __kmp_threads[*gtid];
        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    //
    // Now invoke the microtask.
    //
    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // reset enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}


#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), microtask_t wrapper, int argc,...)
{
    int rc;
    kmp_info_t *thr = __kmp_threads[gtid];
    kmp_team_t *team = thr->th.th_team;
    int tid = __kmp_tid_from_gtid(gtid);

    va_list ap;
    va_start(ap, argc);

#if OMPT_SUPPORT
    team->t.t_implicit_task_taskdata[tid].
        ompt_task_info.frame.reenter_runtime_frame = NULL;
#endif

    rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc,
#if OMPT_SUPPORT
      VOLATILE_CAST(void *) unwrapped_task,
#endif
      wrapper, __kmp_invoke_task_func,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
      &ap
#else
      ap
#endif
      );

    va_end(ap);

    if (rc) {
        __kmp_run_before_invoked_task(gtid, tid, thr, team);
    }

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_status & ompt_status_track) {
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        ompt_task_info_t *task_info = __ompt_get_taskinfo(0);

        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                team_info->parallel_id, task_info->task_id);
        }
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
    }
#endif
}

static void
__kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *))
{
    __kmp_serialized_parallel(loc, gtid);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        ompt_task_id_t ompt_task_id = __ompt_get_task_id_internal(0);
        ompt_frame_t *ompt_frame = __ompt_get_task_frame_internal(0);
        kmp_info_t *thr = __kmp_threads[gtid];

        ompt_parallel_id_t ompt_parallel_id = __ompt_parallel_id_new(gtid);
        ompt_task_id_t my_ompt_task_id = __ompt_task_id_new(gtid);

        ompt_frame->exit_runtime_frame = NULL;

        // parallel region callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
            int team_size = 1;
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)(
                ompt_task_id, ompt_frame, ompt_parallel_id,
                team_size, (void *) task);
        }

        // set up lightweight task
        ompt_lw_taskteam_t *lwt = (ompt_lw_taskteam_t *)
            __kmp_allocate(sizeof(ompt_lw_taskteam_t));
        __ompt_lw_taskteam_init(lwt, thr, gtid, (void *) task, ompt_parallel_id);
        lwt->ompt_task_info.task_id = my_ompt_task_id;
        lwt->ompt_task_info.frame.exit_runtime_frame = 0;
        __ompt_lw_taskteam_link(lwt, thr);

#if OMPT_TRACE
        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                ompt_parallel_id, my_ompt_task_id);
        }
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
#endif
    }
#endif
}


void
xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsigned num_threads)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_start");
    KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif
}


void
xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
{
    int gtid = __kmp_get_gtid();
    kmp_info_t *thr = __kmp_threads[gtid];

    MKLOC(loc, "GOMP_parallel_end");
    KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));

#if OMPT_SUPPORT
    ompt_parallel_id_t parallel_id;
    ompt_frame_t *ompt_frame = NULL;

    if (ompt_status & ompt_status_track) {
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        parallel_id = team_info->parallel_id;

        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);

#if OMPT_TRACE
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
            ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
                parallel_id, task_info->task_id);
        }
#endif

        // unlink if necessary. no-op if there is not a lightweight task.
        ompt_lw_taskteam_t *lwt = __ompt_lw_taskteam_unlink(thr);
        // GOMP allocates/frees lwt since it can't be kept on the stack
        if (lwt) __kmp_free(lwt);
    }
#endif

    if (! __kmp_threads[gtid]->th.th_team->t.t_serialized) {
        kmp_info_t *thr = __kmp_threads[gtid];
        __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
          thr->th.th_team);
        __kmp_join_call(&loc, gtid);
    }
    else {
        __kmpc_end_serialized_parallel(&loc, gtid);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            if ((ompt_status == ompt_status_track_callback) &&
                ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
                ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
                ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
                    parallel_id, task_info->task_id);
            }

            thr->th.ompt_thread_info.state =
                (((thr->th.th_team)->t.t_serialized) ?
                ompt_state_work_serial : ompt_state_work_parallel);
        }
#endif

    }
}


//
// Loop worksharing constructs
//

//
// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the "ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so an adjustment
// to *p_ub compensates for the discrepancy.
//
// Note: the gnu codegen always adjusts the upper bound by +-1, not the
// stride value.  We adjust the dispatch parameters accordingly (by +-1),
// and likewise adjust *p_ub by +-1 on the way out, which is what the
// generated code expects.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration.  Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result.  It doesn't do this
// with ordered static loops, so they can be checked.
//
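
// A worked example of the bounds conversion (illustrative): for a loop
// with lb = 0, exclusive ub = 10, and str = 3, the GOMP entry point passes
// the inclusive bound ub - 1 = 9 to KMP_DISPATCH_INIT.  If KMP_DISPATCH_NEXT
// then hands back the chunk [0, 9] inclusive, the "+1" adjustment below
// returns [0, 10) to the gcc-generated loop, which iterates i = 0, 3, 6, 9
// while i < *p_ub.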

#define LOOP_START(func,schedule) \
    int func (long lb, long ub, long str, long chunk_sz, long *p_lb, \
      long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }
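
// For reference, a sketch of what one instantiation below expands to
// (signature only; GOMP_loop_static_start is the GOMP_1.0 name behind
// the xexpand() macro):
//
//     int GOMP_loop_static_start(long lb, long ub, long str, long chunk_sz,
//                                long *p_lb, long *p_ub);
//
// It returns nonzero and fills in [*p_lb, *p_ub) when the calling thread
// has a chunk to execute, and zero otherwise.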


#define LOOP_RUNTIME_START(func,schedule) \
    int func (long lb, long ub, long str, long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %ld\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_NEXT(func,fini_code) \
    int func(long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
          (kmp_int *)p_ub, (kmp_int *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})

LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
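

// A sketch of how gcc drives these entry points for a dynamically
// scheduled loop (illustrative; names per the GOMP_1.0 ABI):
//
//     long lb, ub;
//     if (GOMP_loop_dynamic_start(0, n, 1, chunk, &lb, &ub)) {
//         do {
//             for (long i = lb; i < ub; i++)
//                 body(i);
//         } while (GOMP_loop_dynamic_next(&lb, &ub));
//     }
//     GOMP_loop_end();   /* or GOMP_loop_end_nowait() */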


void
xexpand(KMP_API_NAME_GOMP_LOOP_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}


void
xexpand(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}


//
// Unsigned long long loop worksharing constructs
//
// These are new with gcc 4.4.
//

#define LOOP_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
      unsigned long long str, unsigned long long chunk_sz, \
      unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        long long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_RUNTIME_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
      unsigned long long str, unsigned long long *p_lb, \
      unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        unsigned long long stride; \
        unsigned long long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT((long long)stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_NEXT_ULL(func,fini_code) \
    int func(unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
          (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), kmp_sch_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})

LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })


//
// Combined parallel / loop worksharing constructs
//
// There are no ull versions (yet).
//

#define PARALLEL_LOOP_START(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
      long lb, long ub, long str, long chunk_sz) \
    { \
        int gtid = __kmp_entry_gtid(); \
        int last = FALSE; \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
              task, data, num_threads, &loc, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
          (schedule) != kmp_sch_static); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }


PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START), kmp_sch_static)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START), kmp_sch_guided_chunked)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START), kmp_sch_runtime)
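
// For orientation (a sketch of the gcc 4.x split codegen; the subfn name
// is a placeholder): a combined parallel for with dynamic scheduling
// becomes
//
//     GOMP_parallel_loop_dynamic_start(subfn, data, 0, lb, ub, str, chunk);
//     subfn(data);               /* the master joins the team's work */
//     GOMP_parallel_end();
//
// where subfn() iterates via GOMP_loop_dynamic_next() and finishes with
// GOMP_loop_end_nowait().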


//
// Tasking constructs
//

void
xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_func)(void *, void *),
  long arg_size, long arg_align, int if_cond, unsigned gomp_flags)
{
    MKLOC(loc, "GOMP_task");
    int gtid = __kmp_entry_gtid();
    kmp_int32 flags = 0;
    kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags;

    KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

    // The low-order bit is the "tied" flag
    if (gomp_flags & 1) {
        input_flags->tiedness = 1;
    }
    input_flags->native = 1;
    // __kmp_task_alloc() sets up all other flags

    if (! if_cond) {
        arg_size = 0;
    }

    kmp_task_t *task = __kmp_task_alloc(&loc, gtid, input_flags,
      sizeof(kmp_task_t), arg_size ? arg_size + arg_align - 1 : 0,
      (kmp_routine_entry_t)func);

    if (arg_size > 0) {
        if (arg_align > 0) {
            task->shareds = (void *)((((size_t)task->shareds)
              + arg_align - 1) / arg_align * arg_align);
        }
        //else error??

        if (copy_func) {
            (*copy_func)(task->shareds, data);
        }
        else {
            KMP_MEMCPY(task->shareds, data, arg_size);
        }
    }

    if (if_cond) {
        __kmpc_omp_task(&loc, gtid, task);
    }
    else {
#if OMPT_SUPPORT
        ompt_thread_info_t oldInfo;
        kmp_info_t *thread;
        kmp_taskdata_t *taskdata;
        if (ompt_status & ompt_status_track) {
            // Store the thread's state and restore it after the task
            thread = __kmp_threads[ gtid ];
            taskdata = KMP_TASK_TO_TASKDATA(task);
            oldInfo = thread->th.ompt_thread_info;
            thread->th.ompt_thread_info.wait_id = 0;
            thread->th.ompt_thread_info.state = ompt_state_work_parallel;
            taskdata->ompt_task_info.frame.exit_runtime_frame =
                __builtin_frame_address(0);
        }
#endif

        __kmpc_omp_task_begin_if0(&loc, gtid, task);
        func(data);
        __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            thread->th.ompt_thread_info = oldInfo;
            taskdata->ompt_task_info.frame.exit_runtime_frame = 0;
        }
#endif
    }

    KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}
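

// Illustrative only: for #pragma omp task, gcc passes the outlined body,
// its captured data block, size/alignment, the if() clause value, and a
// flag word whose low-order bit marks the task as tied, e.g.
//
//     GOMP_task(task_fn, &captures, NULL /* no copy ctor */,
//               sizeof(captures), __alignof__(captures),
//               1 /* if_cond */, 1 /* tied */);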


void
xexpand(KMP_API_NAME_GOMP_TASKWAIT)(void)
{
    MKLOC(loc, "GOMP_taskwait");
    int gtid = __kmp_entry_gtid();

    KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));

    __kmpc_omp_taskwait(&loc, gtid);

    KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
}


//
// Sections worksharing constructs
//

//
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
//
// There are no special entry points for ordered sections, so we always use
// the dynamically scheduled workshare, even if the sections aren't ordered.
//
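
// A sketch of the matching gcc codegen (illustrative): each thread fetches
// section ids until 0 is returned, then dispatches on the id.
//
//     for (unsigned id = GOMP_sections_start(3); id != 0;
//          id = GOMP_sections_next()) {
//         switch (id) {
//         case 1: section_one();   break;
//         case 2: section_two();   break;
//         case 3: section_three(); break;
//         }
//     }
//     GOMP_sections_end();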

unsigned
xexpand(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_sections_start");
    KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
      (unsigned)lb));
    return (unsigned)lb;
}


unsigned
xexpand(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_sections_next");
    KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid,
      (unsigned)lb));
    return (unsigned)lb;
}


void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count)
{
    int gtid = __kmp_entry_gtid();
    int last = FALSE;

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_sections_start");
    KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}


void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}


void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// libgomp has an empty function for GOMP_taskyield as of 2013-10-10
void
xexpand(KMP_API_NAME_GOMP_TASKYIELD)(void)
{
    KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
    return;
}

#if OMP_40_ENABLED // these are new GOMP_4.0 entry points

void
xexpand(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), void *data, unsigned num_threads, unsigned int flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel");
    KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }
    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
}

void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count, unsigned flags)
{
    int gtid = __kmp_entry_gtid();
    int last = FALSE;
    MKLOC(loc, "GOMP_parallel_sections");
    KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
    KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}

#define PARALLEL_LOOP(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
      long lb, long ub, long str, long chunk_sz, unsigned flags) \
    { \
        int gtid = __kmp_entry_gtid(); \
        int last = FALSE; \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            if (flags != 0) { \
                __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
              task, data, num_threads, &loc, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
          (schedule) != kmp_sch_static); \
        task(data); \
        xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }

PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC), kmp_sch_static)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC), kmp_sch_dynamic_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED), kmp_sch_guided_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME), kmp_sch_runtime)


void
xexpand(KMP_API_NAME_GOMP_TASKGROUP_START)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_taskgroup_start");
    KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));

    __kmpc_taskgroup(&loc, gtid);

    return;
}

void
xexpand(KMP_API_NAME_GOMP_TASKGROUP_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_taskgroup_end");
    KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));

    __kmpc_end_taskgroup(&loc, gtid);

    return;
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
kmp_int32 __kmp_gomp_to_iomp_cancellation_kind(int gomp_kind) {
    kmp_int32 cncl_kind = 0;
    switch(gomp_kind) {
    case 1:
        cncl_kind = cancel_parallel;
        break;
    case 2:
        cncl_kind = cancel_loop;
        break;
    case 4:
        cncl_kind = cancel_sections;
        break;
    case 8:
        cncl_kind = cancel_taskgroup;
        break;
    }
    return cncl_kind;
}
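
// The gomp_kind values are the construct bits libgomp passes in (assumed
// here from the switch above): 1 = parallel, 2 = loop, 4 = sections,
// 8 = taskgroup.  For example:
//
//     kmp_int32 k = __kmp_gomp_to_iomp_cancellation_kind(4);
//     KMP_DEBUG_ASSERT(k == cancel_sections);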

bool
xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancellation_point");
    KA_TRACE(20, ("GOMP_cancellation_point: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_iomp_cancellation_kind(which);

    return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
}

bool
xexpand(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_barrier_cancel");
    KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

bool
xexpand(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    } else {
        return FALSE;
    }

    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancel");
    KA_TRACE(20, ("GOMP_cancel: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_iomp_cancellation_kind(which);

    if(do_cancel == FALSE) {
        return xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(which);
    } else {
        return __kmpc_cancel(&loc, gtid, cncl_kind);
    }
}

bool
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_sections_end_cancel");
    KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

bool
xexpand(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_loop_end_cancel");
    KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

// All target functions are empty as of 2014-05-29
void
xexpand(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn) (void *), const void *openmp_target,
  size_t mapnum, void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_DATA)(int device, const void *openmp_target, size_t mapnum,
  void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_END_DATA)(void)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_UPDATE)(int device, const void *openmp_target, size_t mapnum,
  void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams, unsigned int thread_limit)
{
    return;
}
#endif // OMP_40_ENABLED


/*
    The following sections of code create aliases for the GOMP_* functions,
    then create versioned symbols using the assembler directive .symver.
    This is only pertinent for ELF .so libraries.
    xaliasify and xversionify are defined in kmp_ftn_os.h.
*/
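
// For orientation (a sketch of what these macros produce; the exact alias
// spelling is an assumption here, see kmp_ftn_os.h for the real names):
// xversionify emits a GNU .symver directive such as
//
//     __asm__(".symver __kmp_api_GOMP_barrier_10_alias, GOMP_barrier@GOMP_1.0");
//
// so a gcc-compiled binary that references GOMP_barrier@GOMP_1.0 binds to
// this library's implementation.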

#ifdef KMP_USE_VERSION_SYMBOLS

// GOMP_1.0 aliases
xaliasify(KMP_API_NAME_GOMP_ATOMIC_END, 10);
xaliasify(KMP_API_NAME_GOMP_ATOMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_BARRIER, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_END, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_END, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_START, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_START, 10);

// GOMP_2.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASK, 20);
xaliasify(KMP_API_NAME_GOMP_TASKWAIT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20);

// GOMP_3.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASKYIELD, 30);

// GOMP_4.0 aliases
// The GOMP_parallel* entry points below aren't OpenMP 4.0 related.
#if OMP_40_ENABLED
xaliasify(KMP_API_NAME_GOMP_PARALLEL, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_START, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_END, 40);
xaliasify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40);
xaliasify(KMP_API_NAME_GOMP_TEAMS, 40);
#endif

// GOMP_1.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");

// GOMP_2.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");

// GOMP_3.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");

// GOMP_4.0 versioned symbols
#if OMP_40_ENABLED
xversionify(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
#endif

#endif // KMP_USE_VERSION_SYMBOLS

#ifdef __cplusplus
    } //extern "C"
#endif // __cplusplus