Intel® OpenMP* Runtime Library
kmp_threadprivate.c
/*
 * kmp_threadprivate.c -- OpenMP threadprivate support library
 */

/* <copyright>
    Copyright (c) 1997-2015 Intel Corporation. All Rights Reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

      * Redistributions of source code must retain the above copyright
        notice, this list of conditions and the following disclaimer.
      * Redistributions in binary form must reproduce the above copyright
        notice, this list of conditions and the following disclaimer in the
        documentation and/or other materials provided with the distribution.
      * Neither the name of Intel Corporation nor the names of its
        contributors may be used to endorse or promote products derived
        from this software without specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

</copyright> */

#include "kmp.h"
#include "kmp_itt.h"
#include "kmp_i18n.h"

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size );

struct shared_table __kmp_threadprivate_d_table;

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct private_common *
__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr )
{
    struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n",
                    gtid, pc_addr ) );
    dump_list();
#endif

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}

static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct shared_common *
__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr )
{
    struct shared_common *tn;

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}


/*
 * Create a template for the data initialized storage.
 * Either the template is NULL indicating zero fill,
 * or the template is a copy of the original data.
 */

static struct private_data *
__kmp_init_common_data( void *pc_addr, size_t pc_size )
{
    struct private_data *d;
    size_t i;
    char *p;

    d = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) );
/*
    d->data = 0;  // AC: commented out because __kmp_allocate zeroes the memory
    d->next = 0;
*/
    d->size = pc_size;
    d->more = 1;

    p = (char*)pc_addr;

    for (i = pc_size; i > 0; --i) {
        if (*p++ != '\0') {
            d->data = __kmp_allocate( pc_size );
            KMP_MEMCPY( d->data, pc_addr, pc_size );
            break;
        }
    }

    return d;
}
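
/*
 * Illustration of the two template shapes this routine can produce
 * (variable names hypothetical):
 *
 *     static double buf[4];                     // all bytes zero
 *     struct private_data *t1 = __kmp_init_common_data( buf, sizeof(buf) );
 *     // t1->data == NULL: copies made from t1 are zero-filled (memset)
 *
 *     static int x = 7;                         // non-zero initializer
 *     struct private_data *t2 = __kmp_init_common_data( &x, sizeof(x) );
 *     // t2->data is a heap copy of x's bytes (KMP_MEMCPY)
 */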

/*
 * Initialize the data area from the template.
 */

static void
__kmp_copy_common_data( void *pc_addr, struct private_data *d )
{
    char *addr = (char *) pc_addr;
    int i, offset;

    for (offset = 0; d != 0; d = d->next) {
        for (i = d->more; i > 0; --i) {
            if (d->data == 0)
                memset( & addr[ offset ], '\0', d->size );
            else
                KMP_MEMCPY( & addr[ offset ], d->data, d->size );
            offset += d->size;
        }
    }
}
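
/*
 * Worked example of the replay loop above, for a hypothetical two-node
 * template chain
 *     d1: { size = 8, more = 2, data = <bytes> } -> d2: { size = 4, more = 1, data = NULL }
 * the destination is filled as
 *     offset  0.. 7   KMP_MEMCPY from d1->data
 *     offset  8..15   KMP_MEMCPY from d1->data   (second repetition, more == 2)
 *     offset 16..19   memset to '\0'             (d2->data == NULL)
 */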

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/* we are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void
__kmp_common_initialize( void )
{
    if( ! TCR_4(__kmp_init_common) ) {
        int q;
#ifdef KMP_DEBUG
        int gtid;
#endif

        __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
        /* verify the uber masters were initialized */
        for(gtid = 0 ; gtid < __kmp_threads_capacity; gtid++ )
            if( __kmp_root[gtid] ) {
                KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread );
                for ( q = 0; q< KMP_HASH_TABLE_SIZE; ++q)
                    KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] );
/*                  __kmp_root[ gtid ]->r.r_uber_thread->th.th_pri_common->data[ q ] = 0; */
            }
#endif /* KMP_DEBUG */

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
            __kmp_threadprivate_d_table.data[ q ] = 0;

        TCW_4(__kmp_init_common, TRUE);
    }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void
__kmp_common_destroy( void )
{
    if( TCR_4(__kmp_init_common) ) {
        int q;

        TCW_4(__kmp_init_common, FALSE);

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            int gtid;
            struct private_common *tn;
            struct shared_common *d_tn;

            /* C++ destructors need to be called once per thread before exiting */
            /* don't call destructors for master thread though unless we used copy constructor */

            for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) {
                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                    (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                        }
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                    (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtor) (tn->par_addr);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtor) (d_tn->obj_init);
                        }
                    }
                }
            }
            __kmp_threadprivate_d_table.data[ q ] = 0;
        }
    }
}

/* Call all destructors for threadprivate data belonging to this thread */
void
__kmp_common_destroy_gtid( int gtid )
{
    struct private_common *tn;
    struct shared_common *d_tn;

    KC_TRACE( 10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid ) );
    if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
        (! KMP_UBER_GTID (gtid)) ) {

        if( TCR_4(__kmp_init_common) ) {

            /* Cannot do this here since not all threads have destroyed their data */
            /* TCW_4(__kmp_init_common, FALSE); */

            for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) {

                d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                                      gtid, tn->gbl_addr );

                KMP_DEBUG_ASSERT( d_tn );

                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        (void) (*d_tn->dt.dtor) (tn->par_addr);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
            KC_TRACE( 30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n",
                           gtid ) );
        }
    }
}
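
/*
 * Why the initial/uber thread is excluded above: kmp_threadprivate_insert()
 * gives that thread tn->par_addr == tn->gbl_addr (its "private" copy aliases
 * the original global), so running a destructor here would destroy the
 * global itself. The effective predicate, as a sketch (helper name
 * hypothetical):
 *
 *     int __kmp_runs_tp_dtors( int gtid ) {
 *         return __kmp_foreign_tp ? ! KMP_INITIAL_GTID( gtid )
 *                                 : ! KMP_UBER_GTID( gtid );
 *     }
 */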

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#ifdef KMP_TASK_COMMON_DEBUG
static void
dump_list( void )
{
    int p, q;

    for (p = 0; p < __kmp_all_nth; ++p) {
        if( !__kmp_threads[p] ) continue;
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            if (__kmp_threads[ p ]->th.th_pri_common->data[ q ]) {
                struct private_common *tn;

                KC_TRACE( 10, ( "\tdump_list: gtid:%d addresses\n", p ) );

                for (tn = __kmp_threads[ p ]->th.th_pri_common->data[ q ]; tn; tn = tn->next) {
                    KC_TRACE( 10, ( "\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                                    tn->gbl_addr, tn->par_addr ) );
                }
            }
        }
    }
}
#endif /* KMP_TASK_COMMON_DEBUG */


/*
 * NOTE: this routine is to be called only from the serial part of the program.
 */

void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct shared_common **lnk_tn, *d_tn;
    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] &&
                      __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 );

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );

        d_tn->gbl_addr = pc_addr;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        d_tn->cmn_size = pc_size;

        __kmp_acquire_lock( &__kmp_global_lock, gtid );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;

        __kmp_release_lock( &__kmp_global_lock, gtid );
    }
}
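
/*
 * Serial-phase sketch (names and values hypothetical): for
 *     static int x = 42;       // marked threadprivate
 * a call made while still serial,
 *     __kmpc_threadprivate( &loc, 0, &x, sizeof(x) );   // returns &x
 * lands here and only records a template of x's bytes in
 * __kmp_threadprivate_d_table, so worker copies created later in the
 * parallel phase start out equal to 42.
 */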

struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common *d_tn;

    /* +++++++++ START OF CRITICAL SECTION +++++++++ */

    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );   /* Only the MASTER data table exists. */

    if (d_tn != 0) {
        /* This threadprivate variable has already been seen. */

        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }

    tn->cmn_size = d_tn->cmn_size;

    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );

    /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "kmp_threadprivate_insert: THREADPRIVATE: %p (%"
                        KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                        pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */

    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
                        gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
                    gtid, pc_addr ) );
    dump_list( );
#endif

    /* Link the node into a simple list */

    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

#ifdef BUILD_TV
    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );
#endif

    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;

    /*
     * if C++ object with copy constructor, use it;
     * else if C++ object with constructor, use it for the non-master copies only;
     * else use pod_init and memcpy
     *
     * C++ constructors need to be called once for each non-master thread on allocate
     * C++ copy constructors need to be called once for each thread on allocate
     */

    /*
     * C++ object with constructors/destructors;
     * don't call constructors for master thread though
     */
    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        } else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        } else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }
/* !BUILD_OPENMP_C
    if (tn->par_addr != tn->gbl_addr)
        __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

    return tn;
}
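
/*
 * Initialization paths above, illustrated for a hypothetical C++ case:
 *
 *     class T { public: T(); T( T& ); ~T(); };
 *     static T t;              // #pragma omp threadprivate(t)
 *
 *   - default ctor registered (d_tn->ct.ctor):  each worker copy is
 *     constructed in place at tn->par_addr;
 *   - copy ctor registered (d_tn->cct.cctor):   the copy is built from
 *     d_tn->obj_init, the prototype captured at registration;
 *   - plain data (neither registered):          bytes are replayed from
 *     d_tn->pod_init by __kmp_copy_common_data().
 */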

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id.                    */
/* ------------------------------------------------------------------------ */

void
__kmpc_threadprivate_register( ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;
/*
        d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->vec_len = 0L;
        d_tn->obj_init = 0;
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
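
/*
 * A compiler targeting this entry point would emit one registration call
 * per C++ threadprivate variable, roughly like this (the thunks v_ctor and
 * v_dtor are hypothetical, written against the kmpc_ctor/kmpc_dtor
 * typedefs from kmp.h):
 *
 *     static T v;                                // threadprivate
 *     void *v_ctor( void *p ) { return new (p) T(); }
 *     void  v_dtor( void *p ) { ((T *) p)->~T(); }
 *
 *     __kmpc_threadprivate_register( &loc, &v, v_ctor, 0, v_dtor );
 *
 * Passing cctor == 0 matches the USE_CHECKS_COMMON assertion above.
 */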

void *
__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data, size_t size)
{
    void *ret;
    struct private_common *tn;

    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d called\n", global_tid ) );

#ifdef USE_CHECKS_COMMON
    if (! __kmp_init_serial)
        KMP_FATAL( RTLNotInitialized );
#endif /* USE_CHECKS_COMMON */

    if ( ! __kmp_threads[global_tid] -> th.th_root -> r.r_active && ! __kmp_foreign_tp ) {
        /* The parallel address will NEVER overlap with the data_address */
        /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the data_address; use data_address = data */

        KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting private data\n", global_tid ) );
        kmp_threadprivate_insert_private_data( global_tid, data, data, size );

        ret = data;
    }
    else {
        KC_TRACE( 50, ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
                       global_tid, data ) );
        tn = __kmp_threadprivate_find_task_common( __kmp_threads[ global_tid ]->th.th_pri_common, global_tid, data );

        if ( tn ) {
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d found data\n", global_tid ) );
#ifdef USE_CHECKS_COMMON
            if ((size_t) size > tn->cmn_size) {
                KC_TRACE( 10, ( "THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                                data, size, tn->cmn_size ) );
                KMP_FATAL( TPCommonBlocksInconsist );
            }
#endif /* USE_CHECKS_COMMON */
        }
        else {
            /* The parallel address will NEVER overlap with the data_address */
            /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use data_address = data */
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid ) );
            tn = kmp_threadprivate_insert( global_tid, data, data, size );
        }

        ret = tn->par_addr;
    }
    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}
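
/*
 * Usage sketch (variable and gtid hypothetical): for
 *     static int counter;      // #pragma omp threadprivate(counter)
 * each access inside a parallel region lowers to something like
 *     int *p = (int *) __kmpc_threadprivate( &loc, gtid, &counter,
 *                                            sizeof(counter) );
 * The initial/uber thread gets &counter itself back; every other thread
 * gets its lazily created, template-initialized private copy.
 */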

void *
__kmpc_threadprivate_cached(
    ident_t *  loc,
    kmp_int32  global_tid,   // gtid.
    void *     data,         // Pointer to original global variable.
    size_t     size,         // Size of original global variable.
    void ***   cache
) {
    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %"
                   KMP_SIZE_T_SPEC "\n",
                   global_tid, *cache, data, size ) );

    if ( TCR_PTR(*cache) == 0) {
        __kmp_acquire_lock( & __kmp_global_lock, global_tid );

        if ( TCR_PTR(*cache) == 0) {
            __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
            __kmp_tp_cached = 1;
            __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
            void ** my_cache;
            KMP_ITT_IGNORE(
                my_cache = (void**)
                    __kmp_allocate(sizeof( void * ) * __kmp_tp_capacity + sizeof ( kmp_cached_addr_t ));
            );
            // No need to zero the allocated memory; __kmp_allocate does that.
            KC_TRACE( 50, ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
                           global_tid, my_cache ) );

            /* TODO: free all this memory in __kmp_common_destroy using __kmp_threadpriv_cache_list */
            /* Add address of my_cache to linked list for cleanup later */
            kmp_cached_addr_t *tp_cache_addr;

            tp_cache_addr = (kmp_cached_addr_t *) & my_cache[__kmp_tp_capacity];
            tp_cache_addr -> addr = my_cache;
            tp_cache_addr -> next = __kmp_threadpriv_cache_list;
            __kmp_threadpriv_cache_list = tp_cache_addr;

            KMP_MB();

            TCW_PTR( *cache, my_cache);

            KMP_MB();
        }

        __kmp_release_lock( & __kmp_global_lock, global_tid );
    }

    void *ret;
    if ((ret = TCR_PTR((*cache)[ global_tid ])) == 0) {
        ret = __kmpc_threadprivate( loc, global_tid, data, (size_t) size);

        TCW_PTR( (*cache)[ global_tid ], ret);
    }
    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}
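
/*
 * The cache is the fast path. A compiler would emit one cache pointer per
 * threadprivate variable (name hypothetical) and route accesses through it:
 *
 *     static void **counter_cache;               // zero-initialized
 *     int *p = (int *) __kmpc_threadprivate_cached( &loc, gtid, &counter,
 *                                                   sizeof(counter),
 *                                                   &counter_cache );
 *
 * After the first call, (*counter_cache)[gtid] answers without locking;
 * the double-checked TCR_PTR/TCW_PTR pattern above makes the one-time
 * cache allocation safe against racing first callers.
 */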

void
__kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor,
                                   kmpc_cctor_vec cctor, kmpc_dtor_vec dtor,
                                   size_t vector_length )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register_vec: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          -1, data );   /* Only the global data table exists. */

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctorv = ctor;
        d_tn->cct.cctorv = cctor;
        d_tn->dt.dtorv = dtor;
        d_tn->is_vec = TRUE;
        d_tn->vec_len = (size_t) vector_length;
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
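
/*
 * Vector registration covers threadprivate arrays of C++ objects. Sketch
 * (the per-element thunks are hypothetical, matching the kmpc_ctor_vec /
 * kmpc_dtor_vec typedefs from kmp.h):
 *
 *     static T arr[8];                           // threadprivate array
 *     void *arr_ctor( void *p, size_t n ) { ... construct n T's at p ... }
 *     void  arr_dtor( void *p, size_t n ) { ... destroy n T's at p ... }
 *
 *     __kmpc_threadprivate_register_vec( &loc, arr, arr_ctor, 0,
 *                                        arr_dtor, 8 );
 *
 * The stored vec_len (8 here) is later passed back to ctorv/dtorv so each
 * private copy constructs and destroys every element.
 */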