/*
 * z_Linux_util.cpp -- platform specific routines.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_stats.h"
#include "kmp_str.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <alloca.h>
#endif
#include <math.h> // HUGE_VAL.
#if KMP_OS_LINUX
#include <semaphore.h>
#endif // KMP_OS_LINUX
#include <sys/resource.h>
#if !KMP_OS_AIX
#include <sys/syscall.h>
#endif
#include <sys/time.h>
#include <sys/times.h>
#include <unistd.h>

#if KMP_OS_LINUX
#include <sys/sysinfo.h>
#if KMP_USE_FUTEX
// We should really include <futex.h>, but that causes compatibility problems
// on different Linux* OS distributions: some require you to include
// <pci/types.h>, and others break when you try to include it. Since all we
// need is the two macros below (which are part of the kernel ABI, so they
// can't change), we just define the constants here and don't include
// <futex.h>.
#ifndef FUTEX_WAIT
#define FUTEX_WAIT 0
#endif
#ifndef FUTEX_WAKE
#define FUTEX_WAKE 1
#endif
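
// A minimal sketch (illustrative comment, not part of the build) of how these
// two constants drive the raw futex syscall used later in this file; `word`
// is a hypothetical name:
//   int word = 0;
//   // Sleep while *(&word) still equals 0, until another thread wakes us:
//   syscall(__NR_futex, &word, FUTEX_WAIT, 0, NULL, NULL, 0);
//   // From another thread: wake at most one waiter blocked on &word:
//   syscall(__NR_futex, &word, FUTEX_WAKE, 1, NULL, NULL, 0);
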
#endif
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#elif KMP_OS_SOLARIS
#include <sys/loadavg.h>
#endif

#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>

struct kmp_sys_timer {
  struct timespec start;
};

#ifndef TIMEVAL_TO_TIMESPEC
// Convert timeval to timespec.
#define TIMEVAL_TO_TIMESPEC(tv, ts)                                            \
  do {                                                                         \
    (ts)->tv_sec = (tv)->tv_sec;                                               \
    (ts)->tv_nsec = (tv)->tv_usec * 1000;                                      \
  } while (0)
#endif

// Convert timespec to nanoseconds.
#define TS2NS(timespec)                                                        \
  (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
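
// A minimal sketch (illustrative comment, not part of the build) of both
// conversion macros at work:
//   struct timeval tv = {2, 500000}; // 2 s + 500000 us = 2.5 s
//   struct timespec ts;
//   TIMEVAL_TO_TIMESPEC(&tv, &ts);   // ts = {2, 500000000}
//   // TS2NS(ts) == 2 * 1000000000 + 500000000 == 2500000000 ns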

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
kmp_uint64 __kmp_ticks_per_usec = 1000;

#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif

#if ((KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_AIX) && KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
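
// Usage sketch (illustrative comment, not part of the build): restricting the
// calling thread to a single logical CPU just means building a one-bit mask,
// e.g. __kmp_affinity_bind_thread(3) pins the caller to logical CPU 3.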

#if KMP_OS_AIX
void __kmp_affinity_determine_capable(const char *env_var) {
  // All versions of AIX support bindprocessor().

  size_t mask_size = __kmp_xproc / CHAR_BIT;
  // Round up to byte boundary.
  if (__kmp_xproc % CHAR_BIT)
    ++mask_size;

  // Round up to the mask_size_type boundary.
  if (mask_size % sizeof(__kmp_affin_mask_size))
    mask_size += sizeof(__kmp_affin_mask_size) -
                 mask_size % sizeof(__kmp_affin_mask_size);
  KMP_AFFINITY_ENABLE(mask_size);
  KA_TRACE(10,
           ("__kmp_affinity_determine_capable: "
            "AIX OS affinity interface bindprocessor functional (mask size = "
            "%" KMP_SIZE_T_SPEC ").\n",
            __kmp_affin_mask_size));
}

#else // !KMP_OS_AIX

/* Determine if we can access affinity functionality on this version of
 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
 * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

  int verbose = __kmp_affinity.flags.verbose;
  int warnings = __kmp_affinity.flags.warnings;
  enum affinity_type type = __kmp_affinity.type;

#if KMP_OS_LINUX
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If the syscall returns a suggestion for the size,
  // then we don't have to search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));

  if (gCode < 0 && errno != EINVAL) {
    // System call not supported
    if (verbose ||
        (warnings && (type != affinity_none) && (type != affinity_default) &&
         (type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  } else if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %d returned %ld errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (verbose ||
            (warnings && (type != affinity_none) &&
             (type != affinity_default) && (type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    KMP_AFFINITY_ENABLE(gCode);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#elif KMP_OS_FREEBSD
  long gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %ld errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  KMP_INTERNAL_FREE(buf);

  // Affinity is not supported
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (verbose || (warnings && (type != affinity_none) &&
                  (type != affinity_default) && (type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
#endif // KMP_OS_AIX
#endif // (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_AIX) &&
       // KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  // Probe with a harmless FUTEX_WAKE on a dummy word: if futexes are
  // supported, the call returns the number of waiters woken (0 here);
  // otherwise it fails with ENOSYS.
  long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %ld errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}

#endif // KMP_USE_FUTEX

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) && (!KMP_ASM_INTRINS)
/* Only a 32-bit "add-exchange" instruction exists on the IA-32 architecture,
   which forces us to use compare_and_store retry loops for these routines. */

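// The retry pattern shared by every routine below, as a sketch (illustrative
// comment, not part of the build), where op is |, & or +:
//   old_value = load(p);
//   new_value = old_value op d;
//   while (!compare_and_store(p, old_value, new_value)) {
//     pause();             // another thread raced us; back off briefly
//     old_value = load(p); // re-read and retry
//     new_value = old_value op d;
//   }
//   return old_value;      // fetch-and-op semantics: return the old value
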
kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#if KMP_ARCH_X86 || KMP_ARCH_WASM
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 || KMP_ARCH_WASM */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_WASM) &&                \
          (!KMP_ASM_INTRINS) */

void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  KMP_YIELD(TRUE);
} // __kmp_terminate_thread

/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume call failed and use incremental stack
   refinement method instead. Returns TRUE if the stack parameters could be
   determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD \
          || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX */
  /* Use incremental refinement starting from initial conservative estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
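
// Note (illustrative, not part of the build): pthread_attr_getstack() reports
// the lowest address of the stack, while ds_stackbase above stores addr + size
// (the high end), matching the downward-growing stacks on the platforms this
// file targets. The fallback path instead seeds ds_stackbase with the address
// of a local variable and sets ds_stackgrow so the range can be refined later.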

static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_bind_init_mask(gtid);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_AIX
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
    (void)padding;
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}

#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex */

static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix which allows applications with a real-time
  // scheduling policy to work. However, a decision about the fix has not been
  // made yet, so it is disabled by default.
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are part of a real-time application. Try to increase the
      // priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor thread, so the application
        // could potentially work...
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free the thread that waits for the monitor to start
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads */
    /* the value of t_abort is the signal we caught */

    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    /* TODO this is not valid for sibling threads!!
     * the uber master might not be 0 anymore.. */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR
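
// Arithmetic sketch (illustrative, not part of the build): with, say,
// __kmp_monitor_wakeups == 4, the monitor loop above computes
// interval = {0, KMP_NSEC_PER_SEC / 4}, i.e. a 250 ms timed wait on
// __kmp_wait_cv, and increments the global time stamp dt.t_value once per
// wakeup for blocktime bookkeeping.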

void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer thread-specific stats-pointer to
  // __kmp_launch_worker. So when thread is created (goes into
  // __kmp_launch_worker) it will set its thread local pointer to
  // th->th.th_stats
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
    // so set the th->th.th_stats field to it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now.
     The multiple of 2 is there because on some machines, requesting an unusual
     stacksize causes the thread to have an offset before the dummy alloca()
     takes place to create the offset. Since we want the user to have a
     sufficient stacksize AND support a stack offset, we alloca() twice the
     offset so that the upcoming alloca() does not eliminate any premade offset,
     and also gives the user the stack space they requested for all threads */
  stack_size += gtid * __kmp_stkoffset * 2;
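  // Arithmetic sketch (illustrative comment, not part of the build), assuming
  // __kmp_stkoffset == 8192: the worker with gtid 3 asks for an extra
  // 3 * 8192 * 2 = 48 KiB here, so the KMP_ALLOCA(gtid * __kmp_stkoffset) in
  // __kmp_launch_worker (24 KiB for that thread) still leaves the user the
  // full requested stack.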

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker

#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need monitor thread in case of MAX_BLOCKTIME
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal? BB
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its *priority*.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void __kmp_exit_thread(int exit_status) {
#if KMP_OS_WASI
// TODO: the wasm32-wasi-threads target does not yet support pthread_exit.
#else
  pthread_exit((void *)(intptr_t)exit_status);
#endif
} // __kmp_exit_thread

#if KMP_USE_MONITOR
void __kmp_resume_monitor();

extern "C" void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check to see whether the monitor thread exists to wake it up. This
     is to avoid performance problems when the monitor sleeps during
     blocktime-size interval */

  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#else
// Empty symbol to export (see exports_so.txt) when
// monitor thread feature is disabled
extern "C" void __kmp_reap_monitor(kmp_info_t *th) { (void)th; }
#endif // KMP_USE_MONITOR

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#else
  (void)status; // unused variable
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}

#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      __kmp_unregister_library(); // cleanup shared memory
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep the user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers installed.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    // If !parallel_init, we do not install handlers, just save the original
    // handlers. Let us do it even if __kmp_handle_signals is 0.
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS

void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
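
// Usage sketch (illustrative comment, not part of the build): the two calls
// are paired around regions that must not be cancelled:
//   int old_state;
//   __kmp_disable(&old_state); // defer pthread cancellation
//   /* ... touch state that must stay consistent ... */
//   __kmp_enable(old_state);   // restore the previous cancelability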

static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  /* TODO make sure this is done right for nested/sibling */
  // ATT: Memory leaks are here? TODO: Check it and fix.
  /* KMP_ASSERT( 0 ); */

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_AIX
  // reset the affinity in the child to the initial thread
  // affinity in the parent
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
  for (kmp_affinity_t *affinity : __kmp_affinities)
    *affinity = KMP_AFFINITY_INIT(affinity->env_var);
  __kmp_affin_fullMask = nullptr;
  __kmp_affin_origMask = nullptr;
  __kmp_topology = nullptr;
#endif // KMP_AFFINITY_SUPPORTED

#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

#if USE_ITT_BUILD
  __kmp_itt_reset(); // reset ITT's global state
#endif /* USE_ITT_BUILD */

  {
    // Child processes often get terminated without any use of OpenMP. That
    // might cause the mapped shared memory file to be left unattended. Thus we
    // postpone library registration till middle initialization in the child
    // process.
    __kmp_need_register_serial = FALSE;
    __kmp_serial_initialize();
  }

  /* This is necessary to make sure no stale data is left around */
  /* AC: customers complain that we use unsafe routines in the atfork
     handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
     in dynamic_link when checking for the presence of the shared tbbmalloc
     library. The suggestion is to make library initialization lazier, similar
     to what is done for __kmpc_begin(). */
  // TODO: synchronize all static initializations with regular library
  // startup; look at kmp_global.cpp and etc.
  //__kmp_internal_begin ();
}

void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
#if !KMP_OS_WASI
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
#endif
    __kmp_need_register_atfork = FALSE;
  }
}
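
// Note (illustrative, not part of the build): pthread_atfork() runs
// __kmp_atfork_prepare in the parent before fork(), then __kmp_atfork_parent
// in the parent and __kmp_atfork_child in the child after it; that ordering is
// why prepare acquires the two bootstrap locks and both other handlers release
// them.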

void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}

void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized
  if (old_value == -1 || !__kmp_atomic_compare_store(
                             &th->th.th_suspend_init_count, old_value, -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do initializations
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
  }
}
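
// Note (illustrative, not part of the build): th_suspend_init_count acts as a
// small state machine keyed to the fork generation: __kmp_fork_count + 1 means
// "initialized in this process", -1 means "initialization in progress", and
// any stale value loses the compare-and-store race above, spinning until the
// release-store publishes new_value.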

void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}

// return true if lock obtained, false otherwise
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

/* This routine puts the calling thread to sleep after setting the
   sleep bit for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  __kmp_lock_suspend_mx(th);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* TODO: shouldn't this use release semantics to ensure that
     __kmp_suspend_initialize_thread gets called first? */
  old_spin = flag->set_sleeping();
  TCW_PTR(th->th.th_sleep_loc, (void *)flag);
  th->th.th_sleep_loc_type = flag->get_type();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    __kmp_unlock_suspend_mx(th);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin) || flag->done_check()) {
    flag->unset_sleeping();
    TCW_PTR(th->th.th_sleep_loc, NULL);
    th->th.th_sleep_loc_type = flag_unset;
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may
       "with low probability" return when the condition variable has
       not been signaled or broadcast */
    int deactivated = FALSE;

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of the
      // loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

      KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
      KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }

      KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());

      if (!flag->is_sleeping() &&
          ((status == EINTR) || (status == ETIMEDOUT))) {
        // if interrupt or timeout, and thread is no longer sleeping, we need to
        // make sure sleep_loc gets reset; however, this shouldn't be needed if
        // we woke up with resume
        flag->unset_sleeping();
        TCW_PTR(th->th.th_sleep_loc, NULL);
        th->th.th_sleep_loc_type = flag_unset;
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
          TCW_PTR(th->th.th_sleep_loc, NULL);
          th->th.th_sleep_loc_type = flag_unset;
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked as
    // inactive)
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
  // We may have had the loop variable set before entering the loop body,
  // so we need to reset sleep_loc.
  TCW_PTR(th->th.th_sleep_loc, NULL);
  th->th.th_sleep_loc_type = flag_unset;

  KMP_DEBUG_ASSERT(!flag->is_sleeping());
  KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  __kmp_unlock_suspend_mx(th);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}

template <bool C, bool S>
void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
template <bool C, bool S>
void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}

template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
template void
__kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
template void
__kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
1624 
1625 /* This routine signals the thread specified by target_gtid to wake up
1626  after setting the sleep bit indicated by the flag argument to FALSE.
1627  The target thread must already have called __kmp_suspend_template() */
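/* Schematically (an illustrative condensation of the code below and of
   __kmp_suspend_template, not a literal trace):
     waiter:  lock th_suspend_mx -> set sleep bit -> pthread_cond_wait(cv, mx)
     resumer: lock th_suspend_mx -> clear sleep bit -> pthread_cond_signal(cv)
   Holding th_suspend_mx on both sides closes the lost-wakeup window between
   the waiter's last flag check and its cond_wait. */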
1628 template <class C>
1629 static inline void __kmp_resume_template(int target_gtid, C *flag) {
1630  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1631  kmp_info_t *th = __kmp_threads[target_gtid];
1632  int status;
1633 
1634 #ifdef KMP_DEBUG
1635  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1636 #endif
1637 
1638  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
1639  gtid, target_gtid));
1640  KMP_DEBUG_ASSERT(gtid != target_gtid);
1641 
1642  __kmp_suspend_initialize_thread(th);
1643 
1644  __kmp_lock_suspend_mx(th);
1645 
1646  if (!flag || flag != th->th.th_sleep_loc) {
1647  // coming from __kmp_null_resume_wrapper, or thread is now sleeping on a
1648  // different location; wake up at new location
1649  flag = (C *)CCAST(void *, th->th.th_sleep_loc);
1650  }
1651 
1652  // First, check if the flag is null or its type has changed. If so, someone
1653  // else woke it up.
1654  if (!flag) { // Thread doesn't appear to be sleeping on anything
1655  KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1656  "awake: flag(%p)\n",
1657  gtid, target_gtid, (void *)NULL));
1658  __kmp_unlock_suspend_mx(th);
1659  return;
1660  } else if (flag->get_type() != th->th.th_sleep_loc_type) {
1661  // Flag type does not appear to match this function template; possibly the
1662  // thread is sleeping on something else. Try null resume again.
1663  KF_TRACE(
1664  5,
1665  ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
1666  "spin(%p) type=%d ptr_type=%d\n",
1667  gtid, target_gtid, flag, flag->get(), flag->get_type(),
1668  th->th.th_sleep_loc_type));
1669  __kmp_unlock_suspend_mx(th);
1670  __kmp_null_resume_wrapper(th);
1671  return;
1672  } else { // if multiple threads are sleeping, flag should be internally
1673  // referring to a specific thread here
1674  if (!flag->is_sleeping()) {
1675  KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1676  "awake: flag(%p): %u\n",
1677  gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1678  __kmp_unlock_suspend_mx(th);
1679  return;
1680  }
1681  }
1682  KMP_DEBUG_ASSERT(flag);
1683  flag->unset_sleeping();
1684  TCW_PTR(th->th.th_sleep_loc, NULL);
1685  th->th.th_sleep_loc_type = flag_unset;
1686 
1687  KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
1688  "sleep bit for flag's loc(%p): %u\n",
1689  gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1690 
1691 #ifdef DEBUG_SUSPEND
1692  {
1693  char buffer[128];
1694  __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1695  __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
1696  target_gtid, buffer);
1697  }
1698 #endif
1699  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
1700  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1701  __kmp_unlock_suspend_mx(th);
1702  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
1703  " for T#%d\n",
1704  gtid, target_gtid));
1705 }
1706 
1707 template <bool C, bool S>
1708 void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
1709  __kmp_resume_template(target_gtid, flag);
1710 }
1711 template <bool C, bool S>
1712 void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
1713  __kmp_resume_template(target_gtid, flag);
1714 }
1715 template <bool C, bool S>
1716 void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
1717  __kmp_resume_template(target_gtid, flag);
1718 }
1719 void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1720  __kmp_resume_template(target_gtid, flag);
1721 }
1722 
1723 template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
1724 template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
1725 template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
1726 template void
1727 __kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1728 
1729 #if KMP_USE_MONITOR
1730 void __kmp_resume_monitor() {
1731  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1732  int status;
1733 #ifdef KMP_DEBUG
1734  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1735  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
1736  KMP_GTID_MONITOR));
1737  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
1738 #endif
1739  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
1740  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1741 #ifdef DEBUG_SUSPEND
1742  {
1743  char buffer[128];
1744  __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
1745  __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
1746  KMP_GTID_MONITOR, buffer);
1747  }
1748 #endif
1749  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
1750  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1751  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
1752  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1753  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
1754  " for T#%d\n",
1755  gtid, KMP_GTID_MONITOR));
1756 }
1757 #endif // KMP_USE_MONITOR
1758 
1759 void __kmp_yield() { sched_yield(); }
1760 
1761 void __kmp_gtid_set_specific(int gtid) {
1762  if (__kmp_init_gtid) {
1763  int status;
1764  status = pthread_setspecific(__kmp_gtid_threadprivate_key,
1765  (void *)(intptr_t)(gtid + 1));
1766  KMP_CHECK_SYSFAIL("pthread_setspecific", status);
1767  } else {
1768  KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
1769  }
1770 }
1771 
1772 int __kmp_gtid_get_specific() {
1773  int gtid;
1774  if (!__kmp_init_gtid) {
1775  KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
1776  "KMP_GTID_SHUTDOWN\n"));
1777  return KMP_GTID_SHUTDOWN;
1778  }
1779  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
1780  if (gtid == 0) {
1781  gtid = KMP_GTID_DNE;
1782  } else {
1783  gtid--;
1784  }
1785  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1786  __kmp_gtid_threadprivate_key, gtid));
1787  return gtid;
1788 }
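// Illustrative round trip: the value stored under the key is gtid + 1, so a
// pthread_getspecific() result of NULL (0) for a thread that never called
// __kmp_gtid_set_specific() maps to KMP_GTID_DNE instead of to a valid gtid
// of 0:
//   __kmp_gtid_set_specific(0);        // stores (void *)1
//   int g = __kmp_gtid_get_specific(); // reads 1, returns 1 - 1 == 0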
1789 
1790 double __kmp_read_cpu_time(void) {
1791  struct tms buffer;
1792 
1793  times(&buffer); // Return value (elapsed real time) is not needed.
1794  // times() reports CPU time in clock ticks; sysconf(_SC_CLK_TCK) gives
1795  // the tick rate (CLOCKS_PER_SEC describes clock(), not times()).
1796  return (double)(buffer.tms_utime + buffer.tms_cutime) /
1797  (double)sysconf(_SC_CLK_TCK);
1798 }
1799 
1800 int __kmp_read_system_info(struct kmp_sys_info *info) {
1801  int status;
1802  struct rusage r_usage;
1803 
1804  memset(info, 0, sizeof(*info));
1805 
1806  status = getrusage(RUSAGE_SELF, &r_usage);
1807  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);
1808 
1809 #if !KMP_OS_WASI
1810  // The maximum resident set size utilized (in kilobytes)
1811  info->maxrss = r_usage.ru_maxrss;
1812  // The number of page faults serviced without any I/O
1813  info->minflt = r_usage.ru_minflt;
1814  // The number of page faults serviced that required I/O
1815  info->majflt = r_usage.ru_majflt;
1816  // The number of times a process was "swapped" out of memory
1817  info->nswap = r_usage.ru_nswap;
1818  // The number of times the file system had to perform input
1819  info->inblock = r_usage.ru_inblock;
1820  // The number of times the file system had to perform output
1821  info->oublock = r_usage.ru_oublock;
1822  // The number of voluntary context switches
1823  info->nvcsw = r_usage.ru_nvcsw;
1824  // The number of involuntary (forced) context switches
1825  info->nivcsw = r_usage.ru_nivcsw;
1826 #endif
1827 
1828  return (status != 0);
1829 }
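// Illustrative use (hypothetical caller):
//   struct kmp_sys_info si;
//   if (__kmp_read_system_info(&si) == 0) {
//     // si.maxrss is in KB; si.nvcsw / si.nivcsw count context switches.
//   }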
1830 
1831 void __kmp_read_system_time(double *delta) {
1832  double t_ns;
1833  struct timeval tval;
1834  struct timespec stop;
1835  int status;
1836 
1837  status = gettimeofday(&tval, NULL);
1838  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1839  TIMEVAL_TO_TIMESPEC(&tval, &stop);
1840  t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
1841  *delta = (t_ns * 1e-9);
1842 }
1843 
1844 void __kmp_clear_system_time(void) {
1845  struct timeval tval;
1846  int status;
1847  status = gettimeofday(&tval, NULL);
1848  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1849  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
1850 }
1851 
1852 static int __kmp_get_xproc(void) {
1853 
1854  int r = 0;
1855 
1856 #if KMP_OS_LINUX
1857 
1858  __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
1859 
1860 #elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
1861  KMP_OS_HURD || KMP_OS_SOLARIS || KMP_OS_WASI || KMP_OS_AIX
1862 
1863  __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
1864 
1865 #elif KMP_OS_DARWIN
1866 
1867  // Bug C77011 High "OpenMP Threads and number of active cores".
1868 
1869  // Find the number of available CPUs.
1870  kern_return_t rc;
1871  host_basic_info_data_t info;
1872  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
1873  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
1874  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
1875  // Cannot use KA_TRACE() here because this code works before trace support
1876  // is initialized.
1877  r = info.avail_cpus;
1878  } else {
1879  KMP_WARNING(CantGetNumAvailCPU);
1880  KMP_INFORM(AssumedNumCPU);
1881  }
1882 
1883 #else
1884 
1885 #error "Unknown or unsupported OS."
1886 
1887 #endif
1888 
1889  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
1890 
1891 } // __kmp_get_xproc
1892 
1893 int __kmp_read_from_file(char const *path, char const *format, ...) {
1894  int result;
1895  va_list args;
1896 
1897  va_start(args, format);
1898  FILE *f = fopen(path, "rb");
1899  if (f == NULL) {
1900  va_end(args);
1901  return 0;
1902  }
1903  result = vfscanf(f, format, args);
1904  fclose(f);
1905  va_end(args);
1906 
1907  return result;
1908 }
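// Illustrative use (the path below is an example, not one this file reads):
//   int threads_max = 0;
//   if (__kmp_read_from_file("/proc/sys/kernel/threads-max", "%d",
//                            &threads_max) == 1) {
//     // Parsed one value. The function returns 0 if the file could not be
//     // opened, else the vfscanf() result.
//   }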
1909 
1910 void __kmp_runtime_initialize(void) {
1911  int status;
1912  pthread_mutexattr_t mutex_attr;
1913  pthread_condattr_t cond_attr;
1914 
1915  if (__kmp_init_runtime) {
1916  return;
1917  }
1918 
1919 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1920  if (!__kmp_cpuinfo.initialized) {
1921  __kmp_query_cpuid(&__kmp_cpuinfo);
1922  }
1923 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1924 
1925  __kmp_xproc = __kmp_get_xproc();
1926 
1927 #if !KMP_32_BIT_ARCH
1928  struct rlimit rlim;
1929  // read stack size of calling thread, save it as default for worker threads;
1930  // this should be done before reading environment variables
1931  status = getrlimit(RLIMIT_STACK, &rlim);
1932  if (status == 0) { // success?
1933  __kmp_stksize = rlim.rlim_cur;
1934  __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
1935  }
1936 #endif /* !KMP_32_BIT_ARCH */
1937 
1938  if (sysconf(_SC_THREADS)) {
1939 
1940  /* Query the maximum number of threads */
1941  __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
1942 #ifdef __ve__
1943  if (__kmp_sys_max_nth == -1) {
1944  // VE's pthreads support only up to 64 threads per VE process,
1945  // so use KMP_MAX_NTH (predefined as 64) here.
1946  __kmp_sys_max_nth = KMP_MAX_NTH;
1947  }
1948 #else
1949  if (__kmp_sys_max_nth == -1) {
1950  /* Unlimited threads for NPTL */
1951  __kmp_sys_max_nth = INT_MAX;
1952  } else if (__kmp_sys_max_nth <= 1) {
1953  /* Can't tell, just use PTHREAD_THREADS_MAX */
1954  __kmp_sys_max_nth = KMP_MAX_NTH;
1955  }
1956 #endif
1957 
1958  /* Query the minimum stack size */
1959  __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
1960  if (__kmp_sys_min_stksize <= 1) {
1961  __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
1962  }
1963  }
1964 
1965  /* Set up minimum number of threads to switch to TLS gtid */
1966  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
1967 
1968  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
1969  __kmp_internal_end_dest);
1970  KMP_CHECK_SYSFAIL("pthread_key_create", status);
1971  status = pthread_mutexattr_init(&mutex_attr);
1972  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1973  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
1974  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1975  status = pthread_mutexattr_destroy(&mutex_attr);
1976  KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
1977  status = pthread_condattr_init(&cond_attr);
1978  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1979  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
1980  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1981  status = pthread_condattr_destroy(&cond_attr);
1982  KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
1983 #if USE_ITT_BUILD
1984  __kmp_itt_initialize();
1985 #endif /* USE_ITT_BUILD */
1986 
1987  __kmp_init_runtime = TRUE;
1988 }
1989 
1990 void __kmp_runtime_destroy(void) {
1991  int status;
1992 
1993  if (!__kmp_init_runtime) {
1994  return; // Nothing to do.
1995  }
1996 
1997 #if USE_ITT_BUILD
1998  __kmp_itt_destroy();
1999 #endif /* USE_ITT_BUILD */
2000 
2001  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
2002  KMP_CHECK_SYSFAIL("pthread_key_delete", status);
2003 
2004  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
2005  if (status != 0 && status != EBUSY) {
2006  KMP_SYSFAIL("pthread_mutex_destroy", status);
2007  }
2008  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
2009  if (status != 0 && status != EBUSY) {
2010  KMP_SYSFAIL("pthread_cond_destroy", status);
2011  }
2012 #if KMP_AFFINITY_SUPPORTED
2013  __kmp_affinity_uninitialize();
2014 #endif
2015 
2016  __kmp_init_runtime = FALSE;
2017 }
2018 
2019 /* Put the thread to sleep for a time period */
2020 /* NOTE: not currently used anywhere */
2021 void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }
2022 
2023 /* Calculate the elapsed wall clock time for the user */
2024 void __kmp_elapsed(double *t) {
2025  int status;
2026 #ifdef FIX_SGI_CLOCK
2027  struct timespec ts;
2028 
2029  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
2030  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
2031  *t =
2032  (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
2033 #else
2034  struct timeval tv;
2035 
2036  status = gettimeofday(&tv, NULL);
2037  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
2038  *t =
2039  (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
2040 #endif
2041 }
2042 
2043 /* Calculate the elapsed wall clock tick for the user */
2044 void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
2045 
2046 /* Return the current time stamp in nsec */
2047 kmp_uint64 __kmp_now_nsec() {
2048  struct timeval t;
2049  gettimeofday(&t, NULL);
2050  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
2051  (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
2052  return nsec;
2053 }
2054 
2055 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2056 /* Measure clock ticks per millisecond */
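// Worked example with illustrative numbers: if the spin below covers
// delay + (now - goal) = 1,000,123 ticks while diff = 450,000 ns of wall
// clock elapse, then tpus = 1000.0 * 1000123 / 450000 ~= 2222.5 ticks per
// usec, i.e. a ~2.22 GHz timestamp clock.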
2057 void __kmp_initialize_system_tick() {
2058  kmp_uint64 now, nsec2, diff;
2059  kmp_uint64 delay = 1000000; // ~450 usec on most machines.
2060  kmp_uint64 nsec = __kmp_now_nsec();
2061  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
2062  while ((now = __kmp_hardware_timestamp()) < goal)
2063  ;
2064  nsec2 = __kmp_now_nsec();
2065  diff = nsec2 - nsec;
2066  if (diff > 0) {
2067  double tpus = 1000.0 * (double)(delay + (now - goal)) / (double)diff;
2068  if (tpus > 0.0) {
2069  __kmp_ticks_per_msec = (kmp_uint64)(tpus * 1000.0);
2070  __kmp_ticks_per_usec = (kmp_uint64)tpus;
2071  }
2072  }
2073 }
2074 #endif
2075 
2076 /* Determine whether the given address is mapped into the current address
2077  space. */
2078 
2079 int __kmp_is_address_mapped(void *addr) {
2080 
2081  int found = 0;
2082  int rc;
2083 
2084 #if KMP_OS_LINUX || KMP_OS_HURD
2085 
2086  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
2087  address ranges mapped into the address space. */
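  /* A maps line looks like (illustrative values):
       7f2a4c000000-7f2a4c021000 rw-p 00000000 00:00 0
     The fscanf() below reads the two addresses and the 4-character
     permission field and discards the rest of the line. */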
2088 
2089  char *name = __kmp_str_format("/proc/%d/maps", getpid());
2090  FILE *file = NULL;
2091 
2092  file = fopen(name, "r");
2093  KMP_ASSERT(file != NULL);
2094 
2095  for (;;) {
2096 
2097  void *beginning = NULL;
2098  void *ending = NULL;
2099  char perms[5];
2100 
2101  rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
2102  if (rc == EOF) {
2103  break;
2104  }
2105  KMP_ASSERT(rc == 3 &&
2106  KMP_STRLEN(perms) == 4); // Make sure all fields are read.
2107 
2108  // Ending address is not included in the region, but beginning is.
2109  if ((addr >= beginning) && (addr < ending)) {
2110  perms[2] = 0; // 3rd and 4th characters do not matter.
2111  if (strcmp(perms, "rw") == 0) {
2112  // Memory we are looking for should be readable and writable.
2113  found = 1;
2114  }
2115  break;
2116  }
2117  }
2118 
2119  // Free resources.
2120  fclose(file);
2121  KMP_INTERNAL_FREE(name);
2122 #elif KMP_OS_FREEBSD
2123  char *buf;
2124  size_t lstsz;
2125  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
2126  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
2127  if (rc < 0)
2128  return 0;
2129  // The first sysctl() call reported the size of the whole entry map
2130  // list; pad it by 4/3 in case the map grows before the second call.
2131  lstsz = lstsz * 4 / 3;
2132  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
2133  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
2134  if (rc < 0) {
2135  kmpc_free(buf);
2136  return 0;
2137  }
2138 
2139  char *lw = buf;
2140  char *up = buf + lstsz;
2141 
2142  while (lw < up) {
2143  struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
2144  size_t cursz = cur->kve_structsize;
2145  if (cursz == 0)
2146  break;
2147  void *start = reinterpret_cast<void *>(cur->kve_start);
2148  void *end = reinterpret_cast<void *>(cur->kve_end);
2149  // Readable/Writable addresses within current map entry
2150  if ((addr >= start) && (addr < end)) {
2151  if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
2152  (cur->kve_protection & KVME_PROT_WRITE) != 0) {
2153  found = 1;
2154  break;
2155  }
2156  }
2157  lw += cursz;
2158  }
2159  kmpc_free(buf);
2160 
2161 #elif KMP_OS_DARWIN
2162 
2163  /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
2164  using vm interface. */
2165 
2166  int buffer;
2167  vm_size_t count;
2168  rc = vm_read_overwrite(
2169  mach_task_self(), // Task to read memory of.
2170  (vm_address_t)(addr), // Address to read from.
2171  1, // Number of bytes to be read.
2172  (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
2173  &count // Address of var to save number of read bytes in.
2174  );
2175  if (rc == 0) {
2176  // Memory successfully read.
2177  found = 1;
2178  }
2179 
2180 #elif KMP_OS_NETBSD
2181 
2182  int mib[5];
2183  mib[0] = CTL_VM;
2184  mib[1] = VM_PROC;
2185  mib[2] = VM_PROC_MAP;
2186  mib[3] = getpid();
2187  mib[4] = sizeof(struct kinfo_vmentry);
2188 
2189  size_t size;
2190  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
2191  KMP_ASSERT(!rc);
2192  KMP_ASSERT(size);
2193 
2194  size = size * 4 / 3;
2195  struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
2196  KMP_ASSERT(kiv);
2197 
2198  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
2199  KMP_ASSERT(!rc);
2200  KMP_ASSERT(size);
2201 
2202  for (size_t i = 0; i < size / sizeof(struct kinfo_vmentry); i++) {
2203  if ((uint64_t)addr >= kiv[i].kve_start &&
2204  (uint64_t)addr < kiv[i].kve_end) {
2205  found = 1;
2206  break;
2207  }
2208  }
2209  KMP_INTERNAL_FREE(kiv);
2210 #elif KMP_OS_OPENBSD
2211 
2212  int mib[3];
2213  mib[0] = CTL_KERN;
2214  mib[1] = KERN_PROC_VMMAP;
2215  mib[2] = getpid();
2216 
2217  size_t size;
2218  uint64_t end;
2219  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
2220  KMP_ASSERT(!rc);
2221  KMP_ASSERT(size);
2222  end = size;
2223 
2224  struct kinfo_vmentry kiv = {.kve_start = 0};
2225 
2226  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
2227  KMP_ASSERT(size);
2228  if (kiv.kve_end == end)
2229  break;
2230 
2231  if ((uint64_t)addr >= kiv.kve_start && (uint64_t)addr < kiv.kve_end) {
2232  found = 1;
2233  break;
2234  }
2235  kiv.kve_start += 1;
2236  }
2237 #elif KMP_OS_WASI
2238  found = (uintptr_t)addr < ((uintptr_t)__builtin_wasm_memory_size(0) * PAGESIZE);
2239 #elif KMP_OS_DRAGONFLY || KMP_OS_SOLARIS || KMP_OS_AIX
2240 
2241  (void)rc;
2242  // FIXME(DragonFly, Solaris, AIX): Implement this
2243  found = 1;
2244 
2245 #else
2246 
2247 #error "Unknown or unsupported OS"
2248 
2249 #endif
2250 
2251  return found;
2252 
2253 } // __kmp_is_address_mapped
2254 
2255 #ifdef USE_LOAD_BALANCE
2256 
2257 #if KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
2258  KMP_OS_OPENBSD || KMP_OS_SOLARIS
2259 
2260 // The function returns the rounded value of the system load average over
2261 // the time interval selected by the __kmp_load_balance_interval variable
2262 // (default is 60 sec; values of 300 sec and 900 sec select the 5- and
2263 // 15-minute averages).
2264 // It returns -1 in case of error.
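// For example, with the default interval of 60 sec the 1-minute average is
// used; a steady load average of 2.7 would make this function return 2.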
2265 int __kmp_get_load_balance(int max) {
2266  double averages[3];
2267  int ret_avg = 0;
2268 
2269  int res = getloadavg(averages, 3);
2270 
2271  // Check __kmp_load_balance_interval to determine which average to use.
2272  // getloadavg() may return fewer samples than requested, i.e. fewer
2273  // than 3.
2274  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
2275  ret_avg = (int)averages[0]; // 1 min
2276  } else if ((__kmp_load_balance_interval >= 180 &&
2277  __kmp_load_balance_interval < 600) &&
2278  (res >= 2)) {
2279  ret_avg = (int)averages[1]; // 5 min
2280  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
2281  ret_avg = (int)averages[2]; // 15 min
2282  } else { // Error occurred
2283  return -1;
2284  }
2285 
2286  return ret_avg;
2287 }
2288 
2289 #else // Linux* OS
2290 
2291 // The function returns the number of running (not sleeping) threads, or -1
2292 // in case of error. An error can be reported if the Linux* OS kernel is too
2293 // old (no "/proc" support). Counting stops once max running threads have
2294 // been encountered.
2295 int __kmp_get_load_balance(int max) {
2296  static int permanent_error = 0;
2297  static int glb_running_threads = 0; // Saved count of the running threads for
2298  // the thread balance algorithm
2299  static double glb_call_time = 0; /* Thread balance algorithm call time */
2300 
2301  int running_threads = 0; // Number of running threads in the system.
2302 
2303  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
2304  struct dirent *proc_entry = NULL;
2305 
2306  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2307  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2308  struct dirent *task_entry = NULL;
2309  int task_path_fixed_len;
2310 
2311  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2312  int stat_file = -1;
2313  int stat_path_fixed_len;
2314 
2315 #ifdef KMP_DEBUG
2316  int total_processes = 0; // Total number of processes in system.
2317 #endif
2318 
2319  double call_time = 0.0;
2320 
2321  __kmp_str_buf_init(&task_path);
2322  __kmp_str_buf_init(&stat_path);
2323 
2324  __kmp_elapsed(&call_time);
2325 
2326  if (glb_call_time &&
2327  (call_time - glb_call_time < __kmp_load_balance_interval)) {
2328  running_threads = glb_running_threads;
2329  goto finish;
2330  }
2331 
2332  glb_call_time = call_time;
2333 
2334  // Do not spend time on scanning "/proc/" if we have a permanent error.
2335  if (permanent_error) {
2336  running_threads = -1;
2337  goto finish;
2338  }
2339 
2340  if (max <= 0) {
2341  max = INT_MAX;
2342  }
2343 
2344  // Open "/proc/" directory.
2345  proc_dir = opendir("/proc");
2346  if (proc_dir == NULL) {
2347  // Cannot open "/proc/". Probably the kernel does not support it. Return an
2348  // error now and in subsequent calls.
2349  running_threads = -1;
2350  permanent_error = 1;
2351  goto finish;
2352  }
2353 
2354  // Initialize fixed part of task_path. This part will not change.
2355  __kmp_str_buf_cat(&task_path, "/proc/", 6);
2356  task_path_fixed_len = task_path.used; // Remember number of used characters.
2357 
2358  proc_entry = readdir(proc_dir);
2359  while (proc_entry != NULL) {
2360 #if KMP_OS_AIX
2361  // Proc entry name starts with a digit. Assume it is a process' directory.
2362  if (isdigit(proc_entry->d_name[0])) {
2363 #else
2364  // Proc entry is a directory and name starts with a digit. Assume it is a
2365  // process' directory.
2366  if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
2367 #endif
2368 
2369 #ifdef KMP_DEBUG
2370  ++total_processes;
2371 #endif
2372  // Make sure init process is the very first in "/proc", so we can replace
2373  // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes ==
2374  // 1. We are going to check that total_processes == 1 => d_name == "1" is
2375  // true (where "=>" is implication). Since C++ does not have => operator,
2376  // let us replace it with its equivalent: a => b == ! a || b.
2377  KMP_DEBUG_ASSERT(total_processes != 1 ||
2378  strcmp(proc_entry->d_name, "1") == 0);
2379 
2380  // Construct task_path.
2381  task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
2382  __kmp_str_buf_cat(&task_path, proc_entry->d_name,
2383  KMP_STRLEN(proc_entry->d_name));
2384  __kmp_str_buf_cat(&task_path, "/task", 5);
2385 
2386  task_dir = opendir(task_path.str);
2387  if (task_dir == NULL) {
2388  // A process can finish between reading the "/proc/" directory entry and
2389  // opening the process' "task/" directory. So, in the general case we
2390  // should not complain, but just skip this process and read the next one.
2391  // But on systems with no "task/" support we would spend a lot of time
2392  // rescanning the "/proc/" tree again and again without any benefit. The
2393  // "init" process (pid 1) should always exist, so if we cannot open
2394  // "/proc/1/task/", it means "task/" is not supported by the kernel.
2395  // Report an error now and in the future.
2396  if (strcmp(proc_entry->d_name, "1") == 0) {
2397  running_threads = -1;
2398  permanent_error = 1;
2399  goto finish;
2400  }
2401  } else {
2402  // Construct fixed part of stat file path.
2403  __kmp_str_buf_clear(&stat_path);
2404  __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
2405  __kmp_str_buf_cat(&stat_path, "/", 1);
2406  stat_path_fixed_len = stat_path.used;
2407 
2408  task_entry = readdir(task_dir);
2409  while (task_entry != NULL) {
2410  // It is a directory and name starts with a digit.
2411 #if KMP_OS_AIX
2412  if (isdigit(task_entry->d_name[0])) {
2413 #else
2414  if (task_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
2415 #endif
2416 
2417  // Construct complete stat file path. Easiest way would be:
2418  // __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
2419  // task_entry->d_name );
2420  // but a series of __kmp_str_buf_cat() calls works a bit faster.
2421  stat_path.used =
2422  stat_path_fixed_len; // Reset stat path to its fixed part.
2423  __kmp_str_buf_cat(&stat_path, task_entry->d_name,
2424  KMP_STRLEN(task_entry->d_name));
2425  __kmp_str_buf_cat(&stat_path, "/stat", 5);
2426 
2427  // Note: Low-level API (open/read/close) is used. High-level API
2428  // (fopen/fclose) works ~30% slower.
2429  stat_file = open(stat_path.str, O_RDONLY);
2430  if (stat_file == -1) {
2431  // We cannot report an error because task (thread) can terminate
2432  // just before reading this file.
2433  } else {
2434  /* Content of the "stat" file looks like:
2435  24285 (program) S ...
2436 
2437  It is a single line (if the program name does not include funny
2438  symbols). The first number is the thread id, then the name of the
2439  executable file in parentheses, then the state of the thread. We
2440  need just the thread state.
2441 
2442  Good news: The length of the program name is 15 characters max.
2443  Longer names are truncated.
2444 
2445  Thus, we need a rather short buffer: 15 chars for the program name
2446  + 2 parentheses + 3 spaces + ~7 digits of pid = 37.
2447 
2448  Bad news: The program name may contain special symbols like space,
2449  closing parenthesis, or even newline. This makes parsing the
2450  "stat" file not 100% reliable. With such funny program names,
2451  parsing may fail (and report an incorrect thread state).
2452 
2453  Parsing the "status" file looks more promising (due to its
2454  different structure and escaped special symbols), but reading and
2455  parsing "status" works slower.
2456  -- ln
2457  */
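          /* Illustrative buffer (hypothetical values):
               "24285 (prog) R 24280 ..."
             strstr() below finds the first ") ", and the state is the
             character two positions past the ')', 'R' (running) here. */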
2458  char buffer[65];
2459  ssize_t len;
2460  len = read(stat_file, buffer, sizeof(buffer) - 1);
2461  if (len >= 0) {
2462  buffer[len] = 0;
2463  // Using scanf:
2464  // sscanf( buffer, "%*d (%*s) %c ", & state );
2465  // looks very nice, but searching for a closing parenthesis
2466  // works a bit faster.
2467  char *close_parent = strstr(buffer, ") ");
2468  if (close_parent != NULL) {
2469  char state = *(close_parent + 2);
2470  if (state == 'R') {
2471  ++running_threads;
2472  if (running_threads >= max) {
2473  goto finish;
2474  }
2475  }
2476  }
2477  }
2478  close(stat_file);
2479  stat_file = -1;
2480  }
2481  }
2482  task_entry = readdir(task_dir);
2483  }
2484  closedir(task_dir);
2485  task_dir = NULL;
2486  }
2487  }
2488  proc_entry = readdir(proc_dir);
2489  }
2490 
2491  // There _might_ be a timing hole where the thread executing this
2492  // code gets skipped in the load balance, and running_threads is 0.
2493  // Assert in debug builds only!
2494  KMP_DEBUG_ASSERT(running_threads > 0);
2495  if (running_threads <= 0) {
2496  running_threads = 1;
2497  }
2498 
2499 finish: // Clean up and exit.
2500  if (proc_dir != NULL) {
2501  closedir(proc_dir);
2502  }
2503  __kmp_str_buf_free(&task_path);
2504  if (task_dir != NULL) {
2505  closedir(task_dir);
2506  }
2507  __kmp_str_buf_free(&stat_path);
2508  if (stat_file != -1) {
2509  close(stat_file);
2510  }
2511 
2512  glb_running_threads = running_threads;
2513 
2514  return running_threads;
2515 
2516 } // __kmp_get_load_balance
2517 
2518 #endif // KMP_OS_DARWIN || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || KMP_OS_SOLARIS
2519 
2520 #endif // USE_LOAD_BALANCE
2521 
2522 #if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC || \
2523  ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) || \
2524  KMP_ARCH_PPC64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || \
2525  KMP_ARCH_ARM || KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC_XCOFF)
2526 
2527 // We really only need the case with 1 argument, because clang always builds
2528 // a struct of pointers to shared variables referenced in the outlined function.
2529 int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
2530  void *p_argv[]
2531 #if OMPT_SUPPORT
2532  ,
2533  void **exit_frame_ptr
2534 #endif
2535 ) {
2536 #if OMPT_SUPPORT
2537  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
2538 #endif
2539 
2540  switch (argc) {
2541  default:
2542  fprintf(stderr, "Too many args to microtask: %d!\n", argc);
2543  fflush(stderr);
2544  exit(-1);
2545  case 0:
2546  (*pkfn)(&gtid, &tid);
2547  break;
2548  case 1:
2549  (*pkfn)(&gtid, &tid, p_argv[0]);
2550  break;
2551  case 2:
2552  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
2553  break;
2554  case 3:
2555  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
2556  break;
2557  case 4:
2558  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
2559  break;
2560  case 5:
2561  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
2562  break;
2563  case 6:
2564  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2565  p_argv[5]);
2566  break;
2567  case 7:
2568  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2569  p_argv[5], p_argv[6]);
2570  break;
2571  case 8:
2572  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2573  p_argv[5], p_argv[6], p_argv[7]);
2574  break;
2575  case 9:
2576  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2577  p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
2578  break;
2579  case 10:
2580  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2581  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
2582  break;
2583  case 11:
2584  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2585  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
2586  break;
2587  case 12:
2588  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2589  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2590  p_argv[11]);
2591  break;
2592  case 13:
2593  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2594  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2595  p_argv[11], p_argv[12]);
2596  break;
2597  case 14:
2598  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2599  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2600  p_argv[11], p_argv[12], p_argv[13]);
2601  break;
2602  case 15:
2603  (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2604  p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2605  p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
2606  break;
2607  }
2608 
2609  return 1;
2610 }
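#if 0
// Illustrative only (never compiled): a microtask matching the
// (*pkfn)(&gtid, &tid, args...) convention dispatched above. The name and
// argument are hypothetical, not part of this file.
static void example_microtask(int *gtid, int *tid, void *shared) {
  // Each worker receives its global and team-local ids plus the pointer(s)
  // to shared variables packed by the compiler.
  fprintf(stderr, "gtid=%d tid=%d shared=%p\n", *gtid, *tid, shared);
}
#endif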
2611 
2612 #endif
2613 
2614 #if KMP_OS_LINUX
2615 // Functions for hidden helper task
2616 namespace {
2617 // Condition variable for initializing hidden helper team
2618 pthread_cond_t hidden_helper_threads_initz_cond_var;
2619 pthread_mutex_t hidden_helper_threads_initz_lock;
2620 volatile int hidden_helper_initz_signaled = FALSE;
2621 
2622 // Condition variable for deinitializing hidden helper team
2623 pthread_cond_t hidden_helper_threads_deinitz_cond_var;
2624 pthread_mutex_t hidden_helper_threads_deinitz_lock;
2625 volatile int hidden_helper_deinitz_signaled = FALSE;
2626 
2627 // Condition variable for the wrapper function of main thread
2628 pthread_cond_t hidden_helper_main_thread_cond_var;
2629 pthread_mutex_t hidden_helper_main_thread_lock;
2630 volatile int hidden_helper_main_thread_signaled = FALSE;
2631 
2632 // Semaphore for worker threads. We don't use a condition variable here
2633 // because, when multiple signals are sent at the same time, only one of the
2634 // waiting threads might be woken.
2635 sem_t hidden_helper_task_sem;
2636 } // namespace
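// The wait/release pairs below share one pattern: the waiter takes the mutex,
// re-checks the signaled flag, and only then blocks on the condition
// variable; the releaser signals and sets the flag under the same mutex. The
// flag guards against a release that arrives before the wait, whose signal
// would otherwise be lost.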
2637 
2638 void __kmp_hidden_helper_worker_thread_wait() {
2639  int status = sem_wait(&hidden_helper_task_sem);
2640  KMP_CHECK_SYSFAIL("sem_wait", status);
2641 }
2642 
2643 void __kmp_do_initialize_hidden_helper_threads() {
2644  // Initialize condition variable
2645  int status =
2646  pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
2647  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2648 
2649  status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
2650  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2651 
2652  status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
2653  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2654 
2655  status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
2656  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2657 
2658  status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
2659  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2660 
2661  status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
2662  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2663 
2664  // Initialize the semaphore
2665  status = sem_init(&hidden_helper_task_sem, 0, 0);
2666  KMP_CHECK_SYSFAIL("sem_init", status);
2667 
2668  // Create a new thread to finish initialization
2669  pthread_t handle;
2670  status = pthread_create(
2671  &handle, nullptr,
2672  [](void *) -> void * {
2673  __kmp_hidden_helper_threads_initz_routine();
2674  return nullptr;
2675  },
2676  nullptr);
2677  KMP_CHECK_SYSFAIL("pthread_create", status);
2678 }
2679 
2680 void __kmp_hidden_helper_threads_initz_wait() {
2681  // The initial thread waits here for the initialization to complete. The
2682  // condition variable is signaled by the main thread of the hidden helper team.
2683  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2684  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2685 
2686  if (!TCR_4(hidden_helper_initz_signaled)) {
2687  status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
2688  &hidden_helper_threads_initz_lock);
2689  KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2690  }
2691 
2692  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2693  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2694 }
2695 
2696 void __kmp_hidden_helper_initz_release() {
2697  // Signal the initial thread that hidden helper initialization is complete.
2698  int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2699  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2700 
2701  status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
2702  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2703 
2704  TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);
2705 
2706  status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2707  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2708 }
2709 
2710 void __kmp_hidden_helper_main_thread_wait() {
2711  // The main thread of the hidden helper team blocks here. The condition
2712  // variable can only be signaled in the destructor of the RTL.
2713  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2714  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2715 
2716  if (!TCR_4(hidden_helper_main_thread_signaled)) {
2717  status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
2718  &hidden_helper_main_thread_lock);
2719  KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2720  }
2721 
2722  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2723  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2724 }
2725 
2726 void __kmp_hidden_helper_main_thread_release() {
2727  // The initial thread of OpenMP RTL should call this function to wake up the
2728  // main thread of the hidden helper team.
2729  int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2730  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2731 
2732  status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
2733  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2734 
2735  // The hidden helper team is done here
2736  TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);
2737 
2738  status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2739  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2740 }
2741 
2742 void __kmp_hidden_helper_worker_thread_signal() {
2743  int status = sem_post(&hidden_helper_task_sem);
2744  KMP_CHECK_SYSFAIL("sem_post", status);
2745 }
2746 
2747 void __kmp_hidden_helper_threads_deinitz_wait() {
2748  // The initial thread waits here for the deinitialization to complete. The
2749  // condition variable is signaled by the main thread of the hidden helper team.
2750  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2751  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2752 
2753  if (!TCR_4(hidden_helper_deinitz_signaled)) {
2754  status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
2755  &hidden_helper_threads_deinitz_lock);
2756  KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2757  }
2758 
2759  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2760  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2761 }
2762 
2763 void __kmp_hidden_helper_threads_deinitz_release() {
2764  int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2765  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2766 
2767  status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
2768  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2769 
2770  TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);
2771 
2772  status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2773  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2774 }
2775 #else // KMP_OS_LINUX
2776 void __kmp_hidden_helper_worker_thread_wait() {
2777  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2778 }
2779 
2780 void __kmp_do_initialize_hidden_helper_threads() {
2781  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2782 }
2783 
2784 void __kmp_hidden_helper_threads_initz_wait() {
2785  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2786 }
2787 
2788 void __kmp_hidden_helper_initz_release() {
2789  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2790 }
2791 
2792 void __kmp_hidden_helper_main_thread_wait() {
2793  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2794 }
2795 
2796 void __kmp_hidden_helper_main_thread_release() {
2797  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2798 }
2799 
2800 void __kmp_hidden_helper_worker_thread_signal() {
2801  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2802 }
2803 
2804 void __kmp_hidden_helper_threads_deinitz_wait() {
2805  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2806 }
2807 
2808 void __kmp_hidden_helper_threads_deinitz_release() {
2809  KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2810 }
2811 #endif // KMP_OS_LINUX
2812 
2813 bool __kmp_detect_shm() {
2814  DIR *dir = opendir("/dev/shm");
2815  if (dir) { // /dev/shm exists
2816  closedir(dir);
2817  return true;
2818  } else if (ENOENT == errno) { // /dev/shm does not exist
2819  return false;
2820  } else { // opendir() failed
2821  return false;
2822  }
2823 }
2824 
2825 bool __kmp_detect_tmp() {
2826  DIR *dir = opendir("/tmp");
2827  if (dir) { // /tmp exists
2828  closedir(dir);
2829  return true;
2830  } else if (ENOENT == errno) { // /tmp does not exist
2831  return false;
2832  } else { // opendir() failed
2833  return false;
2834  }
2835 }
2836 
2837 // end of file //