diff --git a/.gitmodules b/.gitmodules
index d74200d9cb..385b65847d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,10 +2,6 @@
 	path = lwip
 	url = https://github.com/hermitcore/LwIP.git
 	branch = hermit
-[submodule "usr/libomp"]
-	path = usr/libomp
-	url = https://github.com/hermitcore/libomp_oss.git
-	branch = hermit
 [submodule "caves"]
 	path = caves
 	url = https://github.com/hermitcore/hermit-caves.git
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 090812927f..e7c28f8d4a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -159,16 +159,6 @@ if("${TARGET_ARCH}" STREQUAL "x86_64-hermit")
 build_external(arch_x86_loader ${HERMIT_ROOT}/arch/x86_64/loader "")
 
-## Intel's OpenMP runtime for x86 (libomp)
-build_external(libiomp ${HERMIT_ROOT}/usr/libomp ""
-	-DHERMIT=1
-	-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}
-	-DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}/${TARGET_ARCH})
-
-# libomp is part of HermitCore's runtime and should be available before any
-# application will link
-add_dependencies(hermit libiomp)
-
 ## iRCCE
 build_external(ircce ${HERMIT_ROOT}/usr/ircce "")
 add_dependencies(hermit ircce)
 
@@ -181,11 +171,9 @@ add_dependencies(hermit xray)
 endif()
 
 ## Tests and benchmarks
-if("${TARGET_ARCH}" STREQUAL "x86_64-hermit")
 build_external(tests ${HERMIT_ROOT}/usr/tests hermit)
 build_external(benchmarks ${HERMIT_ROOT}/usr/benchmarks hermit)
 build_external(openmpbench ${HERMIT_ROOT}/usr/openmpbench hermit)
-endif()
 
 ## relocate the local prefix to our install destination
 install(DIRECTORY ${LOCAL_PREFIX_DIR}/
diff --git a/tests.sh b/tests.sh
index eef38bea68..d7a69d00f7 100755
--- a/tests.sh
+++ b/tests.sh
@@ -36,7 +36,7 @@
 apt-get install -y qemu-system-x86 cmake wget curl gnupg checkinstall gawk dialo
 echo "deb [trusted=yes] https://dl.bintray.com/hermitcore/ubuntu bionic main" | tee -a /etc/apt/sources.list
 apt-get -qq update
-apt-get install -y --allow-unauthenticated binutils-hermit newlib-hermit pte-hermit gcc-hermit #gcc-hermit-bootstrap
+apt-get install -y --allow-unauthenticated -o Dpkg::Options::="--force-overwrite" binutils-hermit newlib-hermit pte-hermit gcc-hermit libomp-hermit #gcc-hermit-bootstrap
 export PATH=/opt/hermit/bin:$PATH
 
 mkdir -p build
@@ -46,7 +46,6 @@
 make hermit-bootstrap
 make hermit-bootstrap-install
 rm -rf *
 cmake -DTOOLCHAIN_BIN_DIR=/opt/hermit/bin -DCMAKE_INSTALL_PREFIX=/opt/hermit ..
-install -m 644 ../usr/libomp/libgomp.spec /opt/hermit/x86_64-hermit/lib
 make -j1 package
 cd ..
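Note on the changes above: the tree no longer builds an OpenMP runtime itself (neither the removed libomp submodule nor the bundled libgomp sources deleted below); OpenMP support is instead expected to come from the prebuilt libomp-hermit package installed in tests.sh. The following is only a sketch of a smoke test under that assumption; it presumes the packaged toolchain still ships omp.h and an x86_64-hermit-gcc driver accepting -fopenmp, and the file name omp_smoke.c is illustrative, not taken from this patch.

/* omp_smoke.c -- hypothetical smoke test: whichever runtime replaces the
 * removed usr/libgomp port should still satisfy this standard OpenMP API. */
#include <omp.h>
#include <stdio.h>

int main(void)
{
    int sum = 0;

    /* The reduction exercises the thread management and barrier code that
     * the deleted usr/libgomp sources used to provide. */
    #pragma omp parallel reduction(+:sum)
    {
        sum += omp_get_thread_num();
    }

    printf("max threads: %d, sum of thread ids: %d\n",
           omp_get_max_threads(), sum);
    return 0;
}

Presumably this would be compiled as x86_64-hermit-gcc -fopenmp omp_smoke.c -o omp_smoke and run through the qemu/proxy setup that tests.sh drives; both the command line and that setup are assumptions, not part of this patch.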
diff --git a/usr/libgomp/Makefile b/usr/libgomp/Makefile deleted file mode 100644 index d72b66cf3a..0000000000 --- a/usr/libgomp/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -NEWLIB = ../x86/x86_64-hermit -MAKE = make -ARFLAGS_FOR_TARGET = rsv -CP = cp -C_source = $(wildcard *.c) -NAME = libgomp.a -OBJS = $(C_source:.c=.o) - -# -# Prettify output -V = 0 -ifeq ($V,0) - Q = @ - P = > /dev/null -endif - -# other implicit rules -%.o : %.c - @echo [CC] $@ - $Q$(CC_FOR_TARGET) -c $(CFLAGS_FOR_TARGET) -o $@ $< - -default: all - -all: $(NAME) - -$(NAME): $(OBJS) - $Q$(AR_FOR_TARGET) $(ARFLAGS_FOR_TARGET) $@ $(OBJS) - $Q$(CP) $@ $(NEWLIB)/lib - $Q$(CP) libgomp.spec $(NEWLIB)/lib - $Q$(CP) omp.h $(NEWLIB)/include - -clean: - @echo Cleaning examples - $Q$(RM) $(NAME) *.o *~ - -veryclean: - @echo Propper cleaning examples - $Q$(RM) $(NAME) *.o *~ - -depend: - $Q$(CC_FOR_TARGET) -MM $(CFLAGS_FOR_TARGET) *.c > Makefile.dep - --include Makefile.dep -# DO NOT DELETE diff --git a/usr/libgomp/affinity.c b/usr/libgomp/affinity.c deleted file mode 100644 index 6840d3a727..0000000000 --- a/usr/libgomp/affinity.c +++ /dev/null @@ -1,116 +0,0 @@ -/* Copyright (C) 2006-2015 Free Software Foundation, Inc. - Contributed by Jakub Jelinek . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This is a generic stub implementation of a CPU affinity setting. 
*/ - -#include "libgomp.h" - -void -gomp_init_affinity (void) -{ -} - -void -gomp_init_thread_affinity (pthread_attr_t *attr, unsigned int place) -{ - (void) attr; - (void) place; -} - -void ** -gomp_affinity_alloc (unsigned long count, bool quiet) -{ - (void) count; - if (!quiet) - gomp_error ("Affinity not supported on this configuration"); - return NULL; -} - -void -gomp_affinity_init_place (void *p) -{ - (void) p; -} - -bool -gomp_affinity_add_cpus (void *p, unsigned long num, - unsigned long len, long stride, bool quiet) -{ - (void) p; - (void) num; - (void) len; - (void) stride; - (void) quiet; - return false; -} - -bool -gomp_affinity_remove_cpu (void *p, unsigned long num) -{ - (void) p; - (void) num; - return false; -} - -bool -gomp_affinity_copy_place (void *p, void *q, long stride) -{ - (void) p; - (void) q; - (void) stride; - return false; -} - -bool -gomp_affinity_same_place (void *p, void *q) -{ - (void) p; - (void) q; - return false; -} - -bool -gomp_affinity_finalize_place_list (bool quiet) -{ - (void) quiet; - return false; -} - -bool -gomp_affinity_init_level (int level, unsigned long count, bool quiet) -{ - (void) level; - (void) count; - (void) quiet; - if (!quiet) - gomp_error ("Affinity not supported on this configuration"); - return NULL; -} - -void -gomp_affinity_print_place (void *p) -{ - (void) p; -} diff --git a/usr/libgomp/alloc.c b/usr/libgomp/alloc.c deleted file mode 100644 index f738a663d2..0000000000 --- a/usr/libgomp/alloc.c +++ /dev/null @@ -1,59 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file contains wrappers for the system allocation routines. Most - places in the OpenMP API do not make any provision for failure, so in - general we cannot allow memory allocation to fail. 
*/ - -#include "libgomp.h" -#include - - -void * -gomp_malloc (size_t size) -{ - void *ret = malloc (size); - if (ret == NULL) - gomp_fatal ("Out of memory allocating %lu bytes", (unsigned long) size); - return ret; -} - -void * -gomp_malloc_cleared (size_t size) -{ - void *ret = calloc (1, size); - if (ret == NULL) - gomp_fatal ("Out of memory allocating %lu bytes", (unsigned long) size); - return ret; -} - -void * -gomp_realloc (void *old, size_t size) -{ - void *ret = realloc (old, size); - if (ret == NULL) - gomp_fatal ("Out of memory allocating %lu bytes", (unsigned long) size); - return ret; -} diff --git a/usr/libgomp/bar.c b/usr/libgomp/bar.c deleted file mode 100644 index de66d6c14f..0000000000 --- a/usr/libgomp/bar.c +++ /dev/null @@ -1,299 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This is the default implementation of a barrier synchronization mechanism - for libgomp. This type is private to the library. Note that we rely on - being able to adjust the barrier count while threads are blocked, so the - POSIX pthread_barrier_t won't work. */ - -#include "libgomp.h" - - -void -gomp_barrier_init (gomp_barrier_t *bar, unsigned count) -{ - gomp_mutex_init (&bar->mutex1); -#ifndef HAVE_SYNC_BUILTINS - gomp_mutex_init (&bar->mutex2); -#endif - gomp_sem_init (&bar->sem1, 0); - gomp_sem_init (&bar->sem2, 0); - bar->total = count; - bar->arrived = 0; - bar->generation = 0; - bar->cancellable = false; -} - -void -gomp_barrier_destroy (gomp_barrier_t *bar) -{ - /* Before destroying, make sure all threads have left the barrier. 
*/ - gomp_mutex_lock (&bar->mutex1); - gomp_mutex_unlock (&bar->mutex1); - - gomp_mutex_destroy (&bar->mutex1); -#ifndef HAVE_SYNC_BUILTINS - gomp_mutex_destroy (&bar->mutex2); -#endif - gomp_sem_destroy (&bar->sem1); - gomp_sem_destroy (&bar->sem2); -} - -void -gomp_barrier_reinit (gomp_barrier_t *bar, unsigned count) -{ - gomp_mutex_lock (&bar->mutex1); - bar->total = count; - gomp_mutex_unlock (&bar->mutex1); -} - -void -gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state) -{ - unsigned int n; - - if (state & BAR_WAS_LAST) - { - n = --bar->arrived; - if (n > 0) - { - do - gomp_sem_post (&bar->sem1); - while (--n != 0); - gomp_sem_wait (&bar->sem2); - } - gomp_mutex_unlock (&bar->mutex1); - } - else - { - gomp_mutex_unlock (&bar->mutex1); - gomp_sem_wait (&bar->sem1); - -#ifdef HAVE_SYNC_BUILTINS - n = __sync_add_and_fetch (&bar->arrived, -1); -#else - gomp_mutex_lock (&bar->mutex2); - n = --bar->arrived; - gomp_mutex_unlock (&bar->mutex2); -#endif - - if (n == 0) - gomp_sem_post (&bar->sem2); - } -} - -void -gomp_barrier_wait (gomp_barrier_t *barrier) -{ - gomp_barrier_wait_end (barrier, gomp_barrier_wait_start (barrier)); -} - -void -gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state) -{ - unsigned int n; - - state &= ~BAR_CANCELLED; - if (state & BAR_WAS_LAST) - { - n = --bar->arrived; - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - - team->work_share_cancelled = 0; - if (team->task_count) - { - gomp_barrier_handle_tasks (state); - if (n > 0) - gomp_sem_wait (&bar->sem2); - gomp_mutex_unlock (&bar->mutex1); - return; - } - - bar->generation = state + BAR_INCR - BAR_WAS_LAST; - if (n > 0) - { - do - gomp_sem_post (&bar->sem1); - while (--n != 0); - gomp_sem_wait (&bar->sem2); - } - gomp_mutex_unlock (&bar->mutex1); - } - else - { - gomp_mutex_unlock (&bar->mutex1); - int gen; - do - { - gomp_sem_wait (&bar->sem1); - gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); - if (gen & BAR_TASK_PENDING) - { - gomp_barrier_handle_tasks (state); - gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); - } - } - while (gen != state + BAR_INCR); - -#ifdef HAVE_SYNC_BUILTINS - n = __sync_add_and_fetch (&bar->arrived, -1); -#else - gomp_mutex_lock (&bar->mutex2); - n = --bar->arrived; - gomp_mutex_unlock (&bar->mutex2); -#endif - - if (n == 0) - gomp_sem_post (&bar->sem2); - } -} - -bool -gomp_team_barrier_wait_cancel_end (gomp_barrier_t *bar, - gomp_barrier_state_t state) -{ - unsigned int n; - - if (state & BAR_WAS_LAST) - { - bar->cancellable = false; - n = --bar->arrived; - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - - team->work_share_cancelled = 0; - if (team->task_count) - { - gomp_barrier_handle_tasks (state); - if (n > 0) - gomp_sem_wait (&bar->sem2); - gomp_mutex_unlock (&bar->mutex1); - return false; - } - - bar->generation = state + BAR_INCR - BAR_WAS_LAST; - if (n > 0) - { - do - gomp_sem_post (&bar->sem1); - while (--n != 0); - gomp_sem_wait (&bar->sem2); - } - gomp_mutex_unlock (&bar->mutex1); - } - else - { - if (state & BAR_CANCELLED) - { - gomp_mutex_unlock (&bar->mutex1); - return true; - } - bar->cancellable = true; - gomp_mutex_unlock (&bar->mutex1); - int gen; - do - { - gomp_sem_wait (&bar->sem1); - gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); - if (gen & BAR_CANCELLED) - break; - if (gen & BAR_TASK_PENDING) - { - gomp_barrier_handle_tasks (state); - gen = __atomic_load_n (&bar->generation, MEMMODEL_ACQUIRE); - if (gen & 
BAR_CANCELLED) - break; - } - } - while (gen != state + BAR_INCR); - -#ifdef HAVE_SYNC_BUILTINS - n = __sync_add_and_fetch (&bar->arrived, -1); -#else - gomp_mutex_lock (&bar->mutex2); - n = --bar->arrived; - gomp_mutex_unlock (&bar->mutex2); -#endif - - if (n == 0) - gomp_sem_post (&bar->sem2); - if (gen & BAR_CANCELLED) - return true; - } - return false; -} - -void -gomp_team_barrier_wait (gomp_barrier_t *barrier) -{ - gomp_team_barrier_wait_end (barrier, gomp_barrier_wait_start (barrier)); -} - -void -gomp_team_barrier_wake (gomp_barrier_t *bar, int count) -{ - if (count == 0) - count = bar->total - 1; - while (count-- > 0) - gomp_sem_post (&bar->sem1); -} - -bool -gomp_team_barrier_wait_cancel (gomp_barrier_t *bar) -{ - gomp_barrier_state_t state = gomp_barrier_wait_cancel_start (bar); - return gomp_team_barrier_wait_cancel_end (bar, state); -} - -void -gomp_team_barrier_cancel (struct gomp_team *team) -{ - if (team->barrier.generation & BAR_CANCELLED) - return; - gomp_mutex_lock (&team->barrier.mutex1); - gomp_mutex_lock (&team->task_lock); - if (team->barrier.generation & BAR_CANCELLED) - { - gomp_mutex_unlock (&team->task_lock); - gomp_mutex_unlock (&team->barrier.mutex1); - return; - } - team->barrier.generation |= BAR_CANCELLED; - gomp_mutex_unlock (&team->task_lock); - if (team->barrier.cancellable) - { - int n = team->barrier.arrived; - if (n > 0) - { - do - gomp_sem_post (&team->barrier.sem1); - while (--n != 0); - gomp_sem_wait (&team->barrier.sem2); - } - team->barrier.cancellable = false; - } - gomp_mutex_unlock (&team->barrier.mutex1); -} diff --git a/usr/libgomp/bar.h b/usr/libgomp/bar.h deleted file mode 100644 index 3b29c31990..0000000000 --- a/usr/libgomp/bar.h +++ /dev/null @@ -1,158 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This is the default implementation of a barrier synchronization mechanism - for libgomp. This type is private to the library. Note that we rely on - being able to adjust the barrier count while threads are blocked, so the - POSIX pthread_barrier_t won't work. */ - -#ifndef GOMP_BARRIER_H -#define GOMP_BARRIER_H 1 - -#include - -typedef struct -{ - gomp_mutex_t mutex1; -#ifndef HAVE_SYNC_BUILTINS - gomp_mutex_t mutex2; -#endif - gomp_sem_t sem1; - gomp_sem_t sem2; - unsigned total; - unsigned arrived; - unsigned generation; - bool cancellable; -} gomp_barrier_t; - -typedef unsigned int gomp_barrier_state_t; - -/* The generation field contains a counter in the high bits, with a few - low bits dedicated to flags. 
Note that TASK_PENDING and WAS_LAST can - share space because WAS_LAST is never stored back to generation. */ -#define BAR_TASK_PENDING 1 -#define BAR_WAS_LAST 1 -#define BAR_WAITING_FOR_TASK 2 -#define BAR_CANCELLED 4 -#define BAR_INCR 8 - -extern void gomp_barrier_init (gomp_barrier_t *, unsigned); -extern void gomp_barrier_reinit (gomp_barrier_t *, unsigned); -extern void gomp_barrier_destroy (gomp_barrier_t *); - -extern void gomp_barrier_wait (gomp_barrier_t *); -extern void gomp_barrier_wait_end (gomp_barrier_t *, gomp_barrier_state_t); -extern void gomp_team_barrier_wait (gomp_barrier_t *); -extern void gomp_team_barrier_wait_end (gomp_barrier_t *, - gomp_barrier_state_t); -extern bool gomp_team_barrier_wait_cancel (gomp_barrier_t *); -extern bool gomp_team_barrier_wait_cancel_end (gomp_barrier_t *, - gomp_barrier_state_t); -extern void gomp_team_barrier_wake (gomp_barrier_t *, int); -struct gomp_team; -extern void gomp_team_barrier_cancel (struct gomp_team *); - -static inline gomp_barrier_state_t -gomp_barrier_wait_start (gomp_barrier_t *bar) -{ - unsigned int ret; - gomp_mutex_lock (&bar->mutex1); - ret = bar->generation & (-BAR_INCR | BAR_CANCELLED); - if (++bar->arrived == bar->total) - ret |= BAR_WAS_LAST; - return ret; -} - -static inline gomp_barrier_state_t -gomp_barrier_wait_cancel_start (gomp_barrier_t *bar) -{ - unsigned int ret; - gomp_mutex_lock (&bar->mutex1); - ret = bar->generation & (-BAR_INCR | BAR_CANCELLED); - if (ret & BAR_CANCELLED) - return ret; - if (++bar->arrived == bar->total) - ret |= BAR_WAS_LAST; - return ret; -} - -static inline void -gomp_team_barrier_wait_final (gomp_barrier_t *bar) -{ - gomp_team_barrier_wait (bar); -} - -static inline bool -gomp_barrier_last_thread (gomp_barrier_state_t state) -{ - return state & BAR_WAS_LAST; -} - -static inline void -gomp_barrier_wait_last (gomp_barrier_t *bar) -{ - gomp_barrier_wait (bar); -} - -/* All the inlines below must be called with team->task_lock - held. */ - -static inline void -gomp_team_barrier_set_task_pending (gomp_barrier_t *bar) -{ - bar->generation |= BAR_TASK_PENDING; -} - -static inline void -gomp_team_barrier_clear_task_pending (gomp_barrier_t *bar) -{ - bar->generation &= ~BAR_TASK_PENDING; -} - -static inline void -gomp_team_barrier_set_waiting_for_tasks (gomp_barrier_t *bar) -{ - bar->generation |= BAR_WAITING_FOR_TASK; -} - -static inline bool -gomp_team_barrier_waiting_for_tasks (gomp_barrier_t *bar) -{ - return (bar->generation & BAR_WAITING_FOR_TASK) != 0; -} - -static inline bool -gomp_team_barrier_cancelled (gomp_barrier_t *bar) -{ - return __builtin_expect ((bar->generation & BAR_CANCELLED) != 0, 0); -} - -static inline void -gomp_team_barrier_done (gomp_barrier_t *bar, gomp_barrier_state_t state) -{ - bar->generation = (state & -BAR_INCR) + BAR_INCR; -} - -#endif /* GOMP_BARRIER_H */ diff --git a/usr/libgomp/barrier.c b/usr/libgomp/barrier.c deleted file mode 100644 index c17660ca38..0000000000 --- a/usr/libgomp/barrier.c +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. 
- - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the BARRIER construct. */ - -#include "libgomp.h" - - -void -GOMP_barrier (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - - /* It is legal to have orphaned barriers. */ - if (team == NULL) - return; - - gomp_team_barrier_wait (&team->barrier); -} - -bool -GOMP_barrier_cancel (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - - /* The compiler transforms to barrier_cancel when it sees that the - barrier is within a construct that can cancel. Thus we should - never have an orphaned cancellable barrier. */ - return gomp_team_barrier_wait_cancel (&team->barrier); -} diff --git a/usr/libgomp/config.h b/usr/libgomp/config.h deleted file mode 100644 index a15def0d81..0000000000 --- a/usr/libgomp/config.h +++ /dev/null @@ -1,154 +0,0 @@ -/* config.h. Generated from config.h.in by configure. */ -/* config.h.in. Generated from configure.ac by autoheader. */ - -/* Define to 1 if the target assembler supports .symver directive. */ -#define HAVE_AS_SYMVER_DIRECTIVE 1 - -/* Define to 1 if the target supports __attribute__((alias(...))). */ -#define HAVE_ATTRIBUTE_ALIAS 1 - -/* Define to 1 if the target supports __attribute__((dllexport)). */ -/* #undef HAVE_ATTRIBUTE_DLLEXPORT */ - -/* Define to 1 if the target supports __attribute__((visibility(...))). */ -#define HAVE_ATTRIBUTE_VISIBILITY 1 - -/* Define if the POSIX Semaphores do not work on your system. */ -/* #undef HAVE_BROKEN_POSIX_SEMAPHORES */ - -/* Define to 1 if the target assembler supports thread-local storage. */ -#define def HAVE_CC_TLS 1 - -/* Define to 1 if you have the `clock_gettime' function. */ -#define HAVE_CLOCK_GETTIME 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_DLFCN_H 1 - -/* Define to 1 if you have the `getloadavg' function. */ -#undef HAVE_GETLOADAVG - -/* Define to 1 if you have the header file. */ -#define HAVE_INTTYPES_H 1 - -/* Define to 1 if you have the `dl' library (-ldl). */ -#undef HAVE_LIBDL - -/* Define to 1 if you have the header file. */ -#define HAVE_MEMORY_H 1 - -/* Define if pthread_{,attr_}{g,s}etaffinity_np is supported. */ -#define HAVE_PTHREAD_AFFINITY_NP 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_PTHREAD_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SEMAPHORE_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDINT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STDLIB_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRINGS_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_STRING_H 1 - -/* Define to 1 if you have the `strtoull' function. */ -#define HAVE_STRTOULL 1 - -/* Define to 1 if the target runtime linker supports binding the same symbol - to different versions. 
*/ -/* #undef HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT */ - -/* Define to 1 if the target supports __sync_*_compare_and_swap */ -#define HAVE_SYNC_BUILTINS 1 - -/* Define to 1 if you have the header file. */ -/* #undef HAVE_SYS_LOADAVG_H */ - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_STAT_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TIME_H 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_SYS_TYPES_H 1 - -/* Define to 1 if the target supports thread-local storage. */ -#define HAVE_TLS 1 - -/* Define to 1 if you have the header file. */ -#define HAVE_UNISTD_H 1 - -/* Define to 1 if GNU symbol versioning is used for libgomp. */ -/* #undef LIBGOMP_GNU_SYMBOL_VERSIONING */ - -/* Define to the sub-directory in which libtool stores uninstalled libraries. - */ -#define LT_OBJDIR ".libs/" - -/* Define to hold the list of target names suitable for offloading. */ -#define OFFLOAD_TARGETS "host_nonshm" - -/* Name of package */ -#define PACKAGE "libgomp" - -/* Define to the address where bug reports for this package should be sent. */ -#define PACKAGE_BUGREPORT "" - -/* Define to the full name of this package. */ -#define PACKAGE_NAME "GNU Offloading and Multi Processing Runtime Library" - -/* Define to the full name and version of this package. */ -#define PACKAGE_STRING "GNU Offloading and Multi Processing Runtime Library 1.0" - -/* Define to the one symbol short name of this package. */ -#define PACKAGE_TARNAME "libgomp" - -/* Define to the home page for this package. */ -#define PACKAGE_URL "http://www.gnu.org/software/libgomp/" - -/* Define to the version of this package. */ -#define PACKAGE_VERSION "1.0" - -/* Define to 1 if the NVIDIA plugin is built, 0 if not. */ -#define PLUGIN_NVPTX 0 - -/* Define if all infrastructure, needed for plugins, is supported. */ -#define PLUGIN_SUPPORT 1 - -/* The size of `char', as computed by sizeof. */ -/* #undef SIZEOF_CHAR */ - -/* The size of `int', as computed by sizeof. */ -/* #undef SIZEOF_INT */ - -/* The size of `long', as computed by sizeof. */ -/* #undef SIZEOF_LONG */ - -/* The size of `short', as computed by sizeof. */ -/* #undef SIZEOF_SHORT */ - -/* The size of `void *', as computed by sizeof. */ -/* #undef SIZEOF_VOID_P */ - -/* Define to 1 if you have the ANSI C header files. */ -#define STDC_HEADERS 1 - -/* Define if you can safely include both and . */ -#define STRING_WITH_STRINGS 1 - -/* Define to 1 if you can safely include both and . */ -#define TIME_WITH_SYS_TIME 1 - -/* Define to 1 if the target use emutls for thread-local storage. */ -/* #undef USE_EMUTLS */ - -/* Version number of package */ -#define VERSION "1.0" diff --git a/usr/libgomp/critical.c b/usr/libgomp/critical.c deleted file mode 100644 index 12b23d5662..0000000000 --- a/usr/libgomp/critical.c +++ /dev/null @@ -1,149 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the CRITICAL construct. */ - -#include "libgomp.h" -#include - - -static gomp_mutex_t default_lock; - -void -GOMP_critical_start (void) -{ - /* There is an implicit flush on entry to a critical region. */ - __atomic_thread_fence (MEMMODEL_RELEASE); - gomp_mutex_lock (&default_lock); -} - -void -GOMP_critical_end (void) -{ - gomp_mutex_unlock (&default_lock); -} - -#ifndef HAVE_SYNC_BUILTINS -static gomp_mutex_t create_lock_lock; -#endif - -void -GOMP_critical_name_start (void **pptr) -{ - gomp_mutex_t *plock; - - /* If a mutex fits within the space for a pointer, and is zero initialized, - then use the pointer space directly. */ - if (GOMP_MUTEX_INIT_0 - && sizeof (gomp_mutex_t) <= sizeof (void *) - && __alignof (gomp_mutex_t) <= sizeof (void *)) - plock = (gomp_mutex_t *)pptr; - - /* Otherwise we have to be prepared to malloc storage. */ - else - { - plock = *pptr; - - if (plock == NULL) - { -#ifdef HAVE_SYNC_BUILTINS - gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t)); - gomp_mutex_init (nlock); - - plock = __sync_val_compare_and_swap (pptr, NULL, nlock); - if (plock != NULL) - { - gomp_mutex_destroy (nlock); - free (nlock); - } - else - plock = nlock; -#else - gomp_mutex_lock (&create_lock_lock); - plock = *pptr; - if (plock == NULL) - { - plock = gomp_malloc (sizeof (gomp_mutex_t)); - gomp_mutex_init (plock); - __sync_synchronize (); - *pptr = plock; - } - gomp_mutex_unlock (&create_lock_lock); -#endif - } - } - - gomp_mutex_lock (plock); -} - -void -GOMP_critical_name_end (void **pptr) -{ - gomp_mutex_t *plock; - - /* If a mutex fits within the space for a pointer, and is zero initialized, - then use the pointer space directly. */ - if (GOMP_MUTEX_INIT_0 - && sizeof (gomp_mutex_t) <= sizeof (void *) - && __alignof (gomp_mutex_t) <= sizeof (void *)) - plock = (gomp_mutex_t *)pptr; - else - plock = *pptr; - - gomp_mutex_unlock (plock); -} - -/* This mutex is used when atomic operations don't exist for the target - in the mode requested. The result is not globally atomic, but works so - long as all parallel references are within #pragma omp atomic directives. - According to responses received from omp@openmp.org, appears to be within - spec. Which makes sense, since that's how several other compilers - handle this situation as well. */ - -static gomp_mutex_t atomic_lock; - -void -GOMP_atomic_start (void) -{ - gomp_mutex_lock (&atomic_lock); -} - -void -GOMP_atomic_end (void) -{ - gomp_mutex_unlock (&atomic_lock); -} - -#if !GOMP_MUTEX_INIT_0 -static void __attribute__((constructor)) -initialize_critical (void) -{ - gomp_mutex_init (&default_lock); - gomp_mutex_init (&atomic_lock); -#ifndef HAVE_SYNC_BUILTINS - gomp_mutex_init (&create_lock_lock); -#endif -} -#endif diff --git a/usr/libgomp/env.c b/usr/libgomp/env.c deleted file mode 100644 index 4d6a6bc5e8..0000000000 --- a/usr/libgomp/env.c +++ /dev/null @@ -1,1479 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). 
- - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file defines the OpenMP internal control variables, and arranges - for them to be initialized from environment variables at startup. */ - -#include "libgomp.h" -#include "libgomp_f.h" -//#include "oacc-int.h" -#include -#include -#include -#ifdef HAVE_INTTYPES_H -# include /* For PRIu64. */ -#endif -#ifdef STRING_WITH_STRINGS -# include -# include -#else -# ifdef HAVE_STRING_H -# include -# else -# ifdef HAVE_STRINGS_H -# include -# endif -# endif -#endif -#include -#include - -#ifndef HAVE_STRTOULL -# define strtoull(ptr, eptr, base) strtoul (ptr, eptr, base) -#endif - -struct gomp_task_icv gomp_global_icv = { - .nthreads_var = 1, - .thread_limit_var = UINT_MAX, - .run_sched_var = GFS_DYNAMIC, - .run_sched_modifier = 1, - .default_device_var = 0, - .dyn_var = false, - .nest_var = false, - .bind_var = omp_proc_bind_false, - .target_data = NULL -}; - -unsigned long gomp_max_active_levels_var = INT_MAX; -bool gomp_cancel_var = false; -#ifndef HAVE_SYNC_BUILTINS -gomp_mutex_t gomp_managed_threads_lock; -#endif -unsigned long gomp_available_cpus = 1, gomp_managed_threads = 1; -unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; -unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; -char *gomp_bind_var_list; -unsigned long gomp_bind_var_list_len; -void **gomp_places_list; -unsigned long gomp_places_list_len; -int gomp_debug_var; -char *goacc_device_type; -int goacc_device_num; - -/* Parse the OMP_SCHEDULE environment variable. 
*/ - -static void -parse_schedule (void) -{ - char *env, *end; - unsigned long value; - - env = getenv ("OMP_SCHEDULE"); - if (env == NULL) - return; - - while (isspace ((unsigned char) *env)) - ++env; - if (strncasecmp (env, "static", 6) == 0) - { - gomp_global_icv.run_sched_var = GFS_STATIC; - env += 6; - } - else if (strncasecmp (env, "dynamic", 7) == 0) - { - gomp_global_icv.run_sched_var = GFS_DYNAMIC; - env += 7; - } - else if (strncasecmp (env, "guided", 6) == 0) - { - gomp_global_icv.run_sched_var = GFS_GUIDED; - env += 6; - } - else if (strncasecmp (env, "auto", 4) == 0) - { - gomp_global_icv.run_sched_var = GFS_AUTO; - env += 4; - } - else - goto unknown; - - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - { - gomp_global_icv.run_sched_modifier - = gomp_global_icv.run_sched_var != GFS_STATIC; - return; - } - if (*env++ != ',') - goto unknown; - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - goto invalid; - - errno = 0; - value = strtoul (env, &end, 10); - if (errno) - goto invalid; - - while (isspace ((unsigned char) *end)) - ++end; - if (*end != '\0') - goto invalid; - - if ((int)value != value) - goto invalid; - - if (value == 0 && gomp_global_icv.run_sched_var != GFS_STATIC) - value = 1; - gomp_global_icv.run_sched_modifier = value; - return; - - unknown: - gomp_error ("Unknown value for environment variable OMP_SCHEDULE"); - return; - - invalid: - gomp_error ("Invalid value for chunk size in " - "environment variable OMP_SCHEDULE"); - return; -} - -/* Parse an unsigned long environment variable. Return true if one was - present and it was successfully parsed. */ - -static bool -parse_unsigned_long (const char *name, unsigned long *pvalue, bool allow_zero) -{ - char *env, *end; - unsigned long value; - - env = getenv (name); - if (env == NULL) - return false; - - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - goto invalid; - - errno = 0; - value = strtoul (env, &end, 10); - if (errno || (long) value <= 0 - allow_zero) - goto invalid; - - while (isspace ((unsigned char) *end)) - ++end; - if (*end != '\0') - goto invalid; - - *pvalue = value; - return true; - - invalid: - gomp_error ("Invalid value for environment variable %s", name); - return false; -} - -/* Parse a positive int environment variable. Return true if one was - present and it was successfully parsed. */ - -static bool -parse_int (const char *name, int *pvalue, bool allow_zero) -{ - unsigned long value; - if (!parse_unsigned_long (name, &value, allow_zero)) - return false; - if (value > INT_MAX) - { - gomp_error ("Invalid value for environment variable %s", name); - return false; - } - *pvalue = (int) value; - return true; -} - -/* Parse an unsigned long list environment variable. Return true if one was - present and it was successfully parsed. */ - -static bool -parse_unsigned_long_list (const char *name, unsigned long *p1stvalue, - unsigned long **pvalues, - unsigned long *pnvalues) -{ - char *env, *end; - unsigned long value, *values = NULL; - - env = getenv (name); - if (env == NULL) - return false; - - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - goto invalid; - - errno = 0; - value = strtoul (env, &end, 10); - if (errno || (long) value <= 0) - goto invalid; - - while (isspace ((unsigned char) *end)) - ++end; - if (*end != '\0') - { - if (*end == ',') - { - unsigned long nvalues = 0, nalloced = 0; - - do - { - env = end + 1; - if (nvalues == nalloced) - { - unsigned long *n; - nalloced = nalloced ? 
nalloced * 2 : 16; - n = realloc (values, nalloced * sizeof (unsigned long)); - if (n == NULL) - { - free (values); - gomp_error ("Out of memory while trying to parse" - " environment variable %s", name); - return false; - } - values = n; - if (nvalues == 0) - values[nvalues++] = value; - } - - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - goto invalid; - - errno = 0; - value = strtoul (env, &end, 10); - if (errno || (long) value <= 0) - goto invalid; - - values[nvalues++] = value; - while (isspace ((unsigned char) *end)) - ++end; - if (*end == '\0') - break; - if (*end != ',') - goto invalid; - } - while (1); - *p1stvalue = values[0]; - *pvalues = values; - *pnvalues = nvalues; - return true; - } - goto invalid; - } - - *p1stvalue = value; - return true; - - invalid: - free (values); - gomp_error ("Invalid value for environment variable %s", name); - return false; -} - -/* Parse environment variable set to a boolean or list of omp_proc_bind_t - enum values. Return true if one was present and it was successfully - parsed. */ - -static bool -parse_bind_var (const char *name, char *p1stvalue, - char **pvalues, unsigned long *pnvalues) -{ - char *env; - char value = omp_proc_bind_false, *values = NULL; - int i; - static struct proc_bind_kinds - { - const char name[7]; - const char len; - omp_proc_bind_t kind; - } kinds[] = - { - { "false", 5, omp_proc_bind_false }, - { "true", 4, omp_proc_bind_true }, - { "master", 6, omp_proc_bind_master }, - { "close", 5, omp_proc_bind_close }, - { "spread", 6, omp_proc_bind_spread } - }; - - env = getenv (name); - if (env == NULL) - return false; - - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - goto invalid; - - for (i = 0; i < 5; i++) - if (strncasecmp (env, kinds[i].name, kinds[i].len) == 0) - { - value = kinds[i].kind; - env += kinds[i].len; - break; - } - if (i == 5) - goto invalid; - - while (isspace ((unsigned char) *env)) - ++env; - if (*env != '\0') - { - if (*env == ',') - { - unsigned long nvalues = 0, nalloced = 0; - - if (value == omp_proc_bind_false - || value == omp_proc_bind_true) - goto invalid; - - do - { - env++; - if (nvalues == nalloced) - { - char *n; - nalloced = nalloced ? nalloced * 2 : 16; - n = realloc (values, nalloced); - if (n == NULL) - { - free (values); - gomp_error ("Out of memory while trying to parse" - " environment variable %s", name); - return false; - } - values = n; - if (nvalues == 0) - values[nvalues++] = value; - } - - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - goto invalid; - - for (i = 2; i < 5; i++) - if (strncasecmp (env, kinds[i].name, kinds[i].len) == 0) - { - value = kinds[i].kind; - env += kinds[i].len; - break; - } - if (i == 5) - goto invalid; - - values[nvalues++] = value; - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - break; - if (*env != ',') - goto invalid; - } - while (1); - *p1stvalue = values[0]; - *pvalues = values; - *pnvalues = nvalues; - return true; - } - goto invalid; - } - - *p1stvalue = value; - return true; - - invalid: - free (values); - gomp_error ("Invalid value for environment variable %s", name); - return false; -} - -static bool -parse_one_place (char **envp, bool *negatep, unsigned long *lenp, - long *stridep) -{ - char *env = *envp, *start; - void *p = gomp_places_list ? 
gomp_places_list[gomp_places_list_len] : NULL; - unsigned long len = 1; - long stride = 1; - int pass; - bool any_negate = false; - *negatep = false; - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '!') - { - *negatep = true; - ++env; - while (isspace ((unsigned char) *env)) - ++env; - } - if (*env != '{') - return false; - ++env; - while (isspace ((unsigned char) *env)) - ++env; - start = env; - for (pass = 0; pass < (any_negate ? 2 : 1); pass++) - { - env = start; - do - { - unsigned long this_num, this_len = 1; - long this_stride = 1; - bool this_negate = (*env == '!'); - if (this_negate) - { - if (gomp_places_list) - any_negate = true; - ++env; - while (isspace ((unsigned char) *env)) - ++env; - } - - errno = 0; - this_num = strtoul (env, &env, 10); - if (errno) - return false; - while (isspace ((unsigned char) *env)) - ++env; - if (*env == ':') - { - ++env; - while (isspace ((unsigned char) *env)) - ++env; - errno = 0; - this_len = strtoul (env, &env, 10); - if (errno || this_len == 0) - return false; - while (isspace ((unsigned char) *env)) - ++env; - if (*env == ':') - { - ++env; - while (isspace ((unsigned char) *env)) - ++env; - errno = 0; - this_stride = strtol (env, &env, 10); - if (errno) - return false; - while (isspace ((unsigned char) *env)) - ++env; - } - } - if (this_negate && this_len != 1) - return false; - if (gomp_places_list && pass == this_negate) - { - if (this_negate) - { - if (!gomp_affinity_remove_cpu (p, this_num)) - return false; - } - else if (!gomp_affinity_add_cpus (p, this_num, this_len, - this_stride, false)) - return false; - } - if (*env == '}') - break; - if (*env != ',') - return false; - ++env; - } - while (1); - } - - ++env; - while (isspace ((unsigned char) *env)) - ++env; - if (*env == ':') - { - ++env; - while (isspace ((unsigned char) *env)) - ++env; - errno = 0; - len = strtoul (env, &env, 10); - if (errno || len == 0 || len >= 65536) - return false; - while (isspace ((unsigned char) *env)) - ++env; - if (*env == ':') - { - ++env; - while (isspace ((unsigned char) *env)) - ++env; - errno = 0; - stride = strtol (env, &env, 10); - if (errno) - return false; - while (isspace ((unsigned char) *env)) - ++env; - } - } - if (*negatep && len != 1) - return false; - *envp = env; - *lenp = len; - *stridep = stride; - return true; -} - -static bool -parse_places_var (const char *name, bool ignore) -{ - char *env = getenv (name), *end; - bool any_negate = false; - int level = 0; - unsigned long count = 0; - if (env == NULL) - return false; - - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - goto invalid; - - if (strncasecmp (env, "threads", 7) == 0) - { - env += 7; - level = 1; - } - else if (strncasecmp (env, "cores", 5) == 0) - { - env += 5; - level = 2; - } - else if (strncasecmp (env, "sockets", 7) == 0) - { - env += 7; - level = 3; - } - if (level) - { - count = ULONG_MAX; - while (isspace ((unsigned char) *env)) - ++env; - if (*env != '\0') - { - if (*env++ != '(') - goto invalid; - while (isspace ((unsigned char) *env)) - ++env; - - errno = 0; - count = strtoul (env, &end, 10); - if (errno) - goto invalid; - env = end; - while (isspace ((unsigned char) *env)) - ++env; - if (*env != ')') - goto invalid; - ++env; - while (isspace ((unsigned char) *env)) - ++env; - if (*env != '\0') - goto invalid; - } - - if (ignore) - return false; - - return gomp_affinity_init_level (level, count, false); - } - - count = 0; - end = env; - do - { - bool negate; - unsigned long len; - long stride; - if (!parse_one_place (&end, 
&negate, &len, &stride)) - goto invalid; - if (negate) - { - if (!any_negate) - count++; - any_negate = true; - } - else - count += len; - if (count > 65536) - goto invalid; - if (*end == '\0') - break; - if (*end != ',') - goto invalid; - end++; - } - while (1); - - if (ignore) - return false; - - gomp_places_list_len = 0; - gomp_places_list = gomp_affinity_alloc (count, false); - if (gomp_places_list == NULL) - return false; - - do - { - bool negate; - unsigned long len; - long stride; - gomp_affinity_init_place (gomp_places_list[gomp_places_list_len]); - if (!parse_one_place (&env, &negate, &len, &stride)) - goto invalid; - if (negate) - { - void *p; - for (count = 0; count < gomp_places_list_len; count++) - if (gomp_affinity_same_place - (gomp_places_list[count], - gomp_places_list[gomp_places_list_len])) - break; - if (count == gomp_places_list_len) - { - gomp_error ("Trying to remove a non-existing place from list " - "of places"); - goto invalid; - } - p = gomp_places_list[count]; - memmove (&gomp_places_list[count], - &gomp_places_list[count + 1], - (gomp_places_list_len - count - 1) * sizeof (void *)); - --gomp_places_list_len; - gomp_places_list[gomp_places_list_len] = p; - } - else if (len == 1) - ++gomp_places_list_len; - else - { - for (count = 0; count < len - 1; count++) - if (!gomp_affinity_copy_place - (gomp_places_list[gomp_places_list_len + count + 1], - gomp_places_list[gomp_places_list_len + count], - stride)) - goto invalid; - gomp_places_list_len += len; - } - if (*env == '\0') - break; - env++; - } - while (1); - - if (gomp_places_list_len == 0) - { - gomp_error ("All places have been removed"); - goto invalid; - } - if (!gomp_affinity_finalize_place_list (false)) - goto invalid; - return true; - - invalid: - free (gomp_places_list); - gomp_places_list = NULL; - gomp_places_list_len = 0; - gomp_error ("Invalid value for environment variable %s", name); - return false; -} - -/* Parse the OMP_STACKSIZE environment varible. Return true if one was - present and it was successfully parsed. */ - -static bool -parse_stacksize (const char *name, unsigned long *pvalue) -{ - char *env, *end; - unsigned long value, shift = 10; - - env = getenv (name); - if (env == NULL) - return false; - - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - goto invalid; - - errno = 0; - value = strtoul (env, &end, 10); - if (errno) - goto invalid; - - while (isspace ((unsigned char) *end)) - ++end; - if (*end != '\0') - { - switch (tolower ((unsigned char) *end)) - { - case 'b': - shift = 0; - break; - case 'k': - break; - case 'm': - shift = 20; - break; - case 'g': - shift = 30; - break; - default: - goto invalid; - } - ++end; - while (isspace ((unsigned char) *end)) - ++end; - if (*end != '\0') - goto invalid; - } - - if (((value << shift) >> shift) != value) - goto invalid; - - *pvalue = value << shift; - return true; - - invalid: - gomp_error ("Invalid value for environment variable %s", name); - return false; -} - -/* Parse the GOMP_SPINCOUNT environment varible. Return true if one was - present and it was successfully parsed. 
*/ - -static bool -parse_spincount (const char *name, unsigned long long *pvalue) -{ - char *env, *end; - unsigned long long value, mult = 1; - - env = getenv (name); - if (env == NULL) - return false; - - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - goto invalid; - - if (strncasecmp (env, "infinite", 8) == 0 - || strncasecmp (env, "infinity", 8) == 0) - { - value = ~0ULL; - end = env + 8; - goto check_tail; - } - - errno = 0; - value = strtoull (env, &end, 10); - if (errno) - goto invalid; - - while (isspace ((unsigned char) *end)) - ++end; - if (*end != '\0') - { - switch (tolower ((unsigned char) *end)) - { - case 'k': - mult = 1000LL; - break; - case 'm': - mult = 1000LL * 1000LL; - break; - case 'g': - mult = 1000LL * 1000LL * 1000LL; - break; - case 't': - mult = 1000LL * 1000LL * 1000LL * 1000LL; - break; - default: - goto invalid; - } - ++end; - check_tail: - while (isspace ((unsigned char) *end)) - ++end; - if (*end != '\0') - goto invalid; - } - - if (value > ~0ULL / mult) - value = ~0ULL; - else - value *= mult; - - *pvalue = value; - return true; - - invalid: - gomp_error ("Invalid value for environment variable %s", name); - return false; -} - -/* Parse a boolean value for environment variable NAME and store the - result in VALUE. */ - -static void -parse_boolean (const char *name, bool *value) -{ - const char *env; - - env = getenv (name); - if (env == NULL) - return; - - while (isspace ((unsigned char) *env)) - ++env; - if (strncasecmp (env, "true", 4) == 0) - { - *value = true; - env += 4; - } - else if (strncasecmp (env, "false", 5) == 0) - { - *value = false; - env += 5; - } - else - env = "X"; - while (isspace ((unsigned char) *env)) - ++env; - if (*env != '\0') - gomp_error ("Invalid value for environment variable %s", name); -} - -/* Parse the OMP_WAIT_POLICY environment variable and store the - result in gomp_active_wait_policy. */ - -static int -parse_wait_policy (void) -{ - const char *env; - int ret = -1; - - env = getenv ("OMP_WAIT_POLICY"); - if (env == NULL) - return -1; - - while (isspace ((unsigned char) *env)) - ++env; - if (strncasecmp (env, "active", 6) == 0) - { - ret = 1; - env += 6; - } - else if (strncasecmp (env, "passive", 7) == 0) - { - ret = 0; - env += 7; - } - else - env = "X"; - while (isspace ((unsigned char) *env)) - ++env; - if (*env == '\0') - return ret; - gomp_error ("Invalid value for environment variable OMP_WAIT_POLICY"); - return -1; -} - -/* Parse the GOMP_CPU_AFFINITY environment varible. Return true if one was - present and it was successfully parsed. 
*/ - -static bool -parse_affinity (bool ignore) -{ - char *env, *end, *start; - int pass; - unsigned long cpu_beg, cpu_end, cpu_stride; - size_t count = 0, needed; - - env = getenv ("GOMP_CPU_AFFINITY"); - if (env == NULL) - return false; - - start = env; - for (pass = 0; pass < 2; pass++) - { - env = start; - if (pass == 1) - { - if (ignore) - return false; - - gomp_places_list_len = 0; - gomp_places_list = gomp_affinity_alloc (count, true); - if (gomp_places_list == NULL) - return false; - } - do - { - while (isspace ((unsigned char) *env)) - ++env; - - errno = 0; - cpu_beg = strtoul (env, &end, 0); - if (errno || cpu_beg >= 65536) - goto invalid; - cpu_end = cpu_beg; - cpu_stride = 1; - - env = end; - if (*env == '-') - { - errno = 0; - cpu_end = strtoul (++env, &end, 0); - if (errno || cpu_end >= 65536 || cpu_end < cpu_beg) - goto invalid; - - env = end; - if (*env == ':') - { - errno = 0; - cpu_stride = strtoul (++env, &end, 0); - if (errno || cpu_stride == 0 || cpu_stride >= 65536) - goto invalid; - - env = end; - } - } - - needed = (cpu_end - cpu_beg) / cpu_stride + 1; - if (pass == 0) - count += needed; - else - { - while (needed--) - { - void *p = gomp_places_list[gomp_places_list_len]; - gomp_affinity_init_place (p); - if (gomp_affinity_add_cpus (p, cpu_beg, 1, 0, true)) - ++gomp_places_list_len; - cpu_beg += cpu_stride; - } - } - - while (isspace ((unsigned char) *env)) - ++env; - - if (*env == ',') - env++; - else if (*env == '\0') - break; - } - while (1); - } - - if (gomp_places_list_len == 0) - { - free (gomp_places_list); - gomp_places_list = NULL; - return false; - } - return true; - - invalid: - gomp_error ("Invalid value for enviroment variable GOMP_CPU_AFFINITY"); - return false; -} - -static void -parse_acc_device_type (void) -{ - const char *env = getenv ("ACC_DEVICE_TYPE"); - - if (env && *env != '\0') - goacc_device_type = strdup (env); - else - goacc_device_type = NULL; -} - -static void -handle_omp_display_env (unsigned long stacksize, int wait_policy) -{ - const char *env; - bool display = false; - bool verbose = false; - int i; - - env = getenv ("OMP_DISPLAY_ENV"); - if (env == NULL) - return; - - while (isspace ((unsigned char) *env)) - ++env; - if (strncasecmp (env, "true", 4) == 0) - { - display = true; - env += 4; - } - else if (strncasecmp (env, "false", 5) == 0) - { - display = false; - env += 5; - } - else if (strncasecmp (env, "verbose", 7) == 0) - { - display = true; - verbose = true; - env += 7; - } - else - env = "X"; - while (isspace ((unsigned char) *env)) - ++env; - if (*env != '\0') - gomp_error ("Invalid value for environment variable OMP_DISPLAY_ENV"); - - if (!display) - return; - - fputs ("\nOPENMP DISPLAY ENVIRONMENT BEGIN\n", stderr); - - fputs (" _OPENMP = '201307'\n", stderr); - fprintf (stderr, " OMP_DYNAMIC = '%s'\n", - gomp_global_icv.dyn_var ? "TRUE" : "FALSE"); - fprintf (stderr, " OMP_NESTED = '%s'\n", - gomp_global_icv.nest_var ? 
"TRUE" : "FALSE"); - - fprintf (stderr, " OMP_NUM_THREADS = '%lu", gomp_global_icv.nthreads_var); - for (i = 1; i < gomp_nthreads_var_list_len; i++) - fprintf (stderr, ",%lu", gomp_nthreads_var_list[i]); - fputs ("'\n", stderr); - - fprintf (stderr, " OMP_SCHEDULE = '"); - switch (gomp_global_icv.run_sched_var) - { - case GFS_RUNTIME: - fputs ("RUNTIME", stderr); - break; - case GFS_STATIC: - fputs ("STATIC", stderr); - break; - case GFS_DYNAMIC: - fputs ("DYNAMIC", stderr); - break; - case GFS_GUIDED: - fputs ("GUIDED", stderr); - break; - case GFS_AUTO: - fputs ("AUTO", stderr); - break; - } - fputs ("'\n", stderr); - - fputs (" OMP_PROC_BIND = '", stderr); - switch (gomp_global_icv.bind_var) - { - case omp_proc_bind_false: - fputs ("FALSE", stderr); - break; - case omp_proc_bind_true: - fputs ("TRUE", stderr); - break; - case omp_proc_bind_master: - fputs ("MASTER", stderr); - break; - case omp_proc_bind_close: - fputs ("CLOSE", stderr); - break; - case omp_proc_bind_spread: - fputs ("SPREAD", stderr); - break; - } - for (i = 1; i < gomp_bind_var_list_len; i++) - switch (gomp_bind_var_list[i]) - { - case omp_proc_bind_master: - fputs (",MASTER", stderr); - break; - case omp_proc_bind_close: - fputs (",CLOSE", stderr); - break; - case omp_proc_bind_spread: - fputs (",SPREAD", stderr); - break; - } - fputs ("'\n", stderr); - fputs (" OMP_PLACES = '", stderr); - for (i = 0; i < gomp_places_list_len; i++) - { - fputs ("{", stderr); - gomp_affinity_print_place (gomp_places_list[i]); - fputs (i + 1 == gomp_places_list_len ? "}" : "},", stderr); - } - fputs ("'\n", stderr); - - fprintf (stderr, " OMP_STACKSIZE = '%lu'\n", stacksize); - - /* GOMP's default value is actually neither active nor passive. */ - fprintf (stderr, " OMP_WAIT_POLICY = '%s'\n", - wait_policy > 0 ? "ACTIVE" : "PASSIVE"); - fprintf (stderr, " OMP_THREAD_LIMIT = '%u'\n", - gomp_global_icv.thread_limit_var); - fprintf (stderr, " OMP_MAX_ACTIVE_LEVELS = '%lu'\n", - gomp_max_active_levels_var); - - fprintf (stderr, " OMP_CANCELLATION = '%s'\n", - gomp_cancel_var ? "TRUE" : "FALSE"); - fprintf (stderr, " OMP_DEFAULT_DEVICE = '%d'\n", - gomp_global_icv.default_device_var); - - if (verbose) - { - fputs (" GOMP_CPU_AFFINITY = ''\n", stderr); - fprintf (stderr, " GOMP_STACKSIZE = '%lu'\n", stacksize); -#ifdef HAVE_INTTYPES_H - fprintf (stderr, " GOMP_SPINCOUNT = '%"PRIu64"'\n", - (uint64_t) gomp_spin_count_var); -#else - fprintf (stderr, " GOMP_SPINCOUNT = '%lu'\n", - (unsigned long) gomp_spin_count_var); -#endif - } - - fputs ("OPENMP DISPLAY ENVIRONMENT END\n", stderr); -} - - -static void __attribute__((constructor)) -initialize_env (void) -{ - unsigned long thread_limit_var, stacksize; - int wait_policy; - -#ifndef __hermit__ - /* Do a compile time check that mkomp_h.pl did good job. */ - omp_check_defines (); -#endif - - parse_schedule (); - parse_boolean ("OMP_DYNAMIC", &gomp_global_icv.dyn_var); - parse_boolean ("OMP_NESTED", &gomp_global_icv.nest_var); - parse_boolean ("OMP_CANCELLATION", &gomp_cancel_var); - parse_int ("OMP_DEFAULT_DEVICE", &gomp_global_icv.default_device_var, true); - parse_unsigned_long ("OMP_MAX_ACTIVE_LEVELS", &gomp_max_active_levels_var, - true); - if (parse_unsigned_long ("OMP_THREAD_LIMIT", &thread_limit_var, false)) - { - gomp_global_icv.thread_limit_var - = thread_limit_var > INT_MAX ? 
UINT_MAX : thread_limit_var; - } - parse_int ("GOMP_DEBUG", &gomp_debug_var, true); -#ifndef HAVE_SYNC_BUILTINS - gomp_mutex_init (&gomp_managed_threads_lock); -#endif - gomp_init_num_threads (); - gomp_available_cpus = gomp_global_icv.nthreads_var; - if (!parse_unsigned_long_list ("OMP_NUM_THREADS", - &gomp_global_icv.nthreads_var, - &gomp_nthreads_var_list, - &gomp_nthreads_var_list_len)) - gomp_global_icv.nthreads_var = gomp_available_cpus; - bool ignore = false; - if (parse_bind_var ("OMP_PROC_BIND", - &gomp_global_icv.bind_var, - &gomp_bind_var_list, - &gomp_bind_var_list_len) - && gomp_global_icv.bind_var == omp_proc_bind_false) - ignore = true; - /* Make sure OMP_PLACES and GOMP_CPU_AFFINITY env vars are always - parsed if present in the environment. If OMP_PROC_BIND was set - explictly to false, don't populate places list though. If places - list was successfully set from OMP_PLACES, only parse but don't process - GOMP_CPU_AFFINITY. If OMP_PROC_BIND was not set in the environment, - default to OMP_PROC_BIND=true if OMP_PLACES or GOMP_CPU_AFFINITY - was successfully parsed into a places list, otherwise to - OMP_PROC_BIND=false. */ - if (parse_places_var ("OMP_PLACES", ignore)) - { - if (gomp_global_icv.bind_var == omp_proc_bind_false) - gomp_global_icv.bind_var = true; - ignore = true; - } - if (parse_affinity (ignore)) - { - if (gomp_global_icv.bind_var == omp_proc_bind_false) - gomp_global_icv.bind_var = true; - ignore = true; - } - if (gomp_global_icv.bind_var != omp_proc_bind_false) - gomp_init_affinity (); - wait_policy = parse_wait_policy (); - if (!parse_spincount ("GOMP_SPINCOUNT", &gomp_spin_count_var)) - { - /* Using a rough estimation of 100000 spins per msec, - use 5 min blocking for OMP_WAIT_POLICY=active, - 3 msec blocking when OMP_WAIT_POLICY is not specificed - and 0 when OMP_WAIT_POLICY=passive. - Depending on the CPU speed, this can be e.g. 5 times longer - or 5 times shorter. */ - if (wait_policy > 0) - gomp_spin_count_var = 30000000000LL; - else if (wait_policy < 0) - gomp_spin_count_var = 300000LL; - } - /* gomp_throttled_spin_count_var is used when there are more libgomp - managed threads than available CPUs. Use very short spinning. */ - if (wait_policy > 0) - gomp_throttled_spin_count_var = 1000LL; - else if (wait_policy < 0) - gomp_throttled_spin_count_var = 100LL; - if (gomp_throttled_spin_count_var > gomp_spin_count_var) - gomp_throttled_spin_count_var = gomp_spin_count_var; - - /* Not strictly environment related, but ordering constructors is tricky. */ - pthread_attr_init (&gomp_thread_attr); - pthread_attr_setdetachstate (&gomp_thread_attr, PTHREAD_CREATE_DETACHED); - - if (parse_stacksize ("OMP_STACKSIZE", &stacksize) - || parse_stacksize ("GOMP_STACKSIZE", &stacksize)) - { - int err; - - err = pthread_attr_setstacksize (&gomp_thread_attr, stacksize); - -#ifdef PTHREAD_STACK_MIN - if (err == EINVAL) - { - if (stacksize < PTHREAD_STACK_MIN) - gomp_error ("Stack size less than minimum of %luk", - PTHREAD_STACK_MIN / 1024ul - + (PTHREAD_STACK_MIN % 1024 != 0)); - else - gomp_error ("Stack size larger than system limit"); - } - else -#endif - if (err != 0) - gomp_error ("Stack size change failed: %s", strerror (err)); - } - - handle_omp_display_env (stacksize, wait_policy); - - /* OpenACC. */ - - if (!parse_int ("ACC_DEVICE_NUM", &goacc_device_num, true)) - goacc_device_num = 0; - - parse_acc_device_type (); - -#if 0 - goacc_runtime_initialize (); -#endif -} - - -/* The public OpenMP API routines that access these variables. 
*/ - -void -omp_set_num_threads (int n) -{ - struct gomp_task_icv *icv = gomp_icv (true); - icv->nthreads_var = (n > 0 ? n : 1); -} - -void -omp_set_dynamic (int val) -{ - struct gomp_task_icv *icv = gomp_icv (true); - icv->dyn_var = val; -} - -int -omp_get_dynamic (void) -{ - struct gomp_task_icv *icv = gomp_icv (false); - return icv->dyn_var; -} - -void -omp_set_nested (int val) -{ - struct gomp_task_icv *icv = gomp_icv (true); - icv->nest_var = val; -} - -int -omp_get_nested (void) -{ - struct gomp_task_icv *icv = gomp_icv (false); - return icv->nest_var; -} - -void -omp_set_schedule (omp_sched_t kind, int modifier) -{ - struct gomp_task_icv *icv = gomp_icv (true); - switch (kind) - { - case omp_sched_static: - if (modifier < 1) - modifier = 0; - icv->run_sched_modifier = modifier; - break; - case omp_sched_dynamic: - case omp_sched_guided: - if (modifier < 1) - modifier = 1; - icv->run_sched_modifier = modifier; - break; - case omp_sched_auto: - break; - default: - return; - } - icv->run_sched_var = kind; -} - -void -omp_get_schedule (omp_sched_t *kind, int *modifier) -{ - struct gomp_task_icv *icv = gomp_icv (false); - *kind = icv->run_sched_var; - *modifier = icv->run_sched_modifier; -} - -int -omp_get_max_threads (void) -{ - struct gomp_task_icv *icv = gomp_icv (false); - return icv->nthreads_var; -} - -int -omp_get_thread_limit (void) -{ - struct gomp_task_icv *icv = gomp_icv (false); - return icv->thread_limit_var > INT_MAX ? INT_MAX : icv->thread_limit_var; -} - -void -omp_set_max_active_levels (int max_levels) -{ - if (max_levels >= 0) - gomp_max_active_levels_var = max_levels; -} - -int -omp_get_max_active_levels (void) -{ - return gomp_max_active_levels_var; -} - -int -omp_get_cancellation (void) -{ - return gomp_cancel_var; -} - -omp_proc_bind_t -omp_get_proc_bind (void) -{ - struct gomp_task_icv *icv = gomp_icv (false); - return icv->bind_var; -} - -void -omp_set_default_device (int device_num) -{ - struct gomp_task_icv *icv = gomp_icv (true); - icv->default_device_var = device_num >= 0 ? device_num : 0; -} - -int -omp_get_default_device (void) -{ - struct gomp_task_icv *icv = gomp_icv (false); - return icv->default_device_var; -} - -int -omp_get_num_devices (void) -{ -#ifdef __hermit__ - return 0; -#else - return gomp_get_num_devices (); -#endif -} - -int -omp_get_num_teams (void) -{ - /* Hardcoded to 1 on host, MIC, HSAIL? Maybe variable on PTX. */ - return 1; -} - -int -omp_get_team_num (void) -{ - /* Hardcoded to 0 on host, MIC, HSAIL? Maybe variable on PTX. */ - return 0; -} - -int -omp_is_initial_device (void) -{ - /* Hardcoded to 1 on host, should be 0 on MIC, HSAIL, PTX. */ - return 1; -} - -ialias (omp_set_dynamic) -ialias (omp_set_nested) -ialias (omp_set_num_threads) -ialias (omp_get_dynamic) -ialias (omp_get_nested) -ialias (omp_set_schedule) -ialias (omp_get_schedule) -ialias (omp_get_max_threads) -ialias (omp_get_thread_limit) -ialias (omp_set_max_active_levels) -ialias (omp_get_max_active_levels) -ialias (omp_get_cancellation) -ialias (omp_get_proc_bind) -ialias (omp_set_default_device) -ialias (omp_get_default_device) -ialias (omp_get_num_devices) -ialias (omp_get_num_teams) -ialias (omp_get_team_num) -ialias (omp_is_initial_device) diff --git a/usr/libgomp/error.c b/usr/libgomp/error.c deleted file mode 100644 index 094c24a38c..0000000000 --- a/usr/libgomp/error.c +++ /dev/null @@ -1,91 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . 
- - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file contains routines used to signal errors. Most places in the - OpenMP API do not make any provision for failure, so we can't just - defer the decision on reporting the problem to the user; we must do it - ourselves or not at all. */ -/* ??? Is this about what other implementations do? Assume stderr hasn't - been pointed somewhere unsafe? */ - -#include "libgomp.h" -#include -#include -#include - - -#undef gomp_vdebug -void -gomp_vdebug (int kind __attribute__ ((unused)), const char *msg, va_list list) -{ - if (gomp_debug_var) - vfprintf (stderr, msg, list); -} - -#undef gomp_debug -void -gomp_debug (int kind, const char *msg, ...) -{ - va_list list; - - va_start (list, msg); - gomp_vdebug (kind, msg, list); - va_end (list); -} - -void -gomp_verror (const char *fmt, va_list list) -{ - fputs ("\nlibgomp: ", stderr); - vfprintf (stderr, fmt, list); - fputc ('\n', stderr); -} - -void -gomp_error (const char *fmt, ...) -{ - va_list list; - - va_start (list, fmt); - gomp_verror (fmt, list); - va_end (list); -} - -void -gomp_vfatal (const char *fmt, va_list list) -{ - gomp_verror (fmt, list); - exit (EXIT_FAILURE); -} - -void -gomp_fatal (const char *fmt, ...) -{ - va_list list; - - va_start (list, fmt); - gomp_vfatal (fmt, list); - va_end (list); -} diff --git a/usr/libgomp/fortran.c b/usr/libgomp/fortran.c deleted file mode 100644 index 993145f889..0000000000 --- a/usr/libgomp/fortran.c +++ /dev/null @@ -1,495 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Jakub Jelinek . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file contains Fortran wrapper routines. 
*/ - -#include "libgomp.h" -#include "libgomp_f.h" -#include -#include - -#ifdef HAVE_ATTRIBUTE_ALIAS -/* Use internal aliases if possible. */ -# ifndef LIBGOMP_GNU_SYMBOL_VERSIONING -ialias_redirect (omp_init_lock) -ialias_redirect (omp_init_nest_lock) -ialias_redirect (omp_destroy_lock) -ialias_redirect (omp_destroy_nest_lock) -ialias_redirect (omp_set_lock) -ialias_redirect (omp_set_nest_lock) -ialias_redirect (omp_unset_lock) -ialias_redirect (omp_unset_nest_lock) -ialias_redirect (omp_test_lock) -ialias_redirect (omp_test_nest_lock) -# endif -ialias_redirect (omp_set_dynamic) -ialias_redirect (omp_set_nested) -ialias_redirect (omp_set_num_threads) -ialias_redirect (omp_get_dynamic) -ialias_redirect (omp_get_nested) -ialias_redirect (omp_in_parallel) -ialias_redirect (omp_get_max_threads) -ialias_redirect (omp_get_num_procs) -ialias_redirect (omp_get_num_threads) -ialias_redirect (omp_get_thread_num) -ialias_redirect (omp_get_wtick) -ialias_redirect (omp_get_wtime) -ialias_redirect (omp_set_schedule) -ialias_redirect (omp_get_schedule) -ialias_redirect (omp_get_thread_limit) -ialias_redirect (omp_set_max_active_levels) -ialias_redirect (omp_get_max_active_levels) -ialias_redirect (omp_get_level) -ialias_redirect (omp_get_ancestor_thread_num) -ialias_redirect (omp_get_team_size) -ialias_redirect (omp_get_active_level) -ialias_redirect (omp_in_final) -ialias_redirect (omp_get_cancellation) -ialias_redirect (omp_get_proc_bind) -ialias_redirect (omp_set_default_device) -ialias_redirect (omp_get_default_device) -ialias_redirect (omp_get_num_devices) -ialias_redirect (omp_get_num_teams) -ialias_redirect (omp_get_team_num) -ialias_redirect (omp_is_initial_device) -#endif - -#ifndef LIBGOMP_GNU_SYMBOL_VERSIONING -# define gomp_init_lock__30 omp_init_lock_ -# define gomp_destroy_lock__30 omp_destroy_lock_ -# define gomp_set_lock__30 omp_set_lock_ -# define gomp_unset_lock__30 omp_unset_lock_ -# define gomp_test_lock__30 omp_test_lock_ -# define gomp_init_nest_lock__30 omp_init_nest_lock_ -# define gomp_destroy_nest_lock__30 omp_destroy_nest_lock_ -# define gomp_set_nest_lock__30 omp_set_nest_lock_ -# define gomp_unset_nest_lock__30 omp_unset_nest_lock_ -# define gomp_test_nest_lock__30 omp_test_nest_lock_ -#endif - -void -gomp_init_lock__30 (omp_lock_arg_t lock) -{ -#ifndef OMP_LOCK_DIRECT - omp_lock_arg (lock) = malloc (sizeof (omp_lock_t)); -#endif - gomp_init_lock_30 (omp_lock_arg (lock)); -} - -void -gomp_init_nest_lock__30 (omp_nest_lock_arg_t lock) -{ -#ifndef OMP_NEST_LOCK_DIRECT - omp_nest_lock_arg (lock) = malloc (sizeof (omp_nest_lock_t)); -#endif - gomp_init_nest_lock_30 (omp_nest_lock_arg (lock)); -} - -void -gomp_destroy_lock__30 (omp_lock_arg_t lock) -{ - gomp_destroy_lock_30 (omp_lock_arg (lock)); -#ifndef OMP_LOCK_DIRECT - free (omp_lock_arg (lock)); - omp_lock_arg (lock) = NULL; -#endif -} - -void -gomp_destroy_nest_lock__30 (omp_nest_lock_arg_t lock) -{ - gomp_destroy_nest_lock_30 (omp_nest_lock_arg (lock)); -#ifndef OMP_NEST_LOCK_DIRECT - free (omp_nest_lock_arg (lock)); - omp_nest_lock_arg (lock) = NULL; -#endif -} - -void -gomp_set_lock__30 (omp_lock_arg_t lock) -{ - gomp_set_lock_30 (omp_lock_arg (lock)); -} - -void -gomp_set_nest_lock__30 (omp_nest_lock_arg_t lock) -{ - gomp_set_nest_lock_30 (omp_nest_lock_arg (lock)); -} - -void -gomp_unset_lock__30 (omp_lock_arg_t lock) -{ - gomp_unset_lock_30 (omp_lock_arg (lock)); -} - -void -gomp_unset_nest_lock__30 (omp_nest_lock_arg_t lock) -{ - gomp_unset_nest_lock_30 (omp_nest_lock_arg (lock)); -} - -int32_t -gomp_test_lock__30 
(omp_lock_arg_t lock) -{ - return gomp_test_lock_30 (omp_lock_arg (lock)); -} - -int32_t -gomp_test_nest_lock__30 (omp_nest_lock_arg_t lock) -{ - return gomp_test_nest_lock_30 (omp_nest_lock_arg (lock)); -} - -#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING -void -gomp_init_lock__25 (omp_lock_25_arg_t lock) -{ -#ifndef OMP_LOCK_25_DIRECT - omp_lock_25_arg (lock) = malloc (sizeof (omp_lock_25_t)); -#endif - gomp_init_lock_25 (omp_lock_25_arg (lock)); -} - -void -gomp_init_nest_lock__25 (omp_nest_lock_25_arg_t lock) -{ -#ifndef OMP_NEST_LOCK_25_DIRECT - omp_nest_lock_25_arg (lock) = malloc (sizeof (omp_nest_lock_25_t)); -#endif - gomp_init_nest_lock_25 (omp_nest_lock_25_arg (lock)); -} - -void -gomp_destroy_lock__25 (omp_lock_25_arg_t lock) -{ - gomp_destroy_lock_25 (omp_lock_25_arg (lock)); -#ifndef OMP_LOCK_25_DIRECT - free (omp_lock_25_arg (lock)); - omp_lock_25_arg (lock) = NULL; -#endif -} - -void -gomp_destroy_nest_lock__25 (omp_nest_lock_25_arg_t lock) -{ - gomp_destroy_nest_lock_25 (omp_nest_lock_25_arg (lock)); -#ifndef OMP_NEST_LOCK_25_DIRECT - free (omp_nest_lock_25_arg (lock)); - omp_nest_lock_25_arg (lock) = NULL; -#endif -} - -void -gomp_set_lock__25 (omp_lock_25_arg_t lock) -{ - gomp_set_lock_25 (omp_lock_25_arg (lock)); -} - -void -gomp_set_nest_lock__25 (omp_nest_lock_25_arg_t lock) -{ - gomp_set_nest_lock_25 (omp_nest_lock_25_arg (lock)); -} - -void -gomp_unset_lock__25 (omp_lock_25_arg_t lock) -{ - gomp_unset_lock_25 (omp_lock_25_arg (lock)); -} - -void -gomp_unset_nest_lock__25 (omp_nest_lock_25_arg_t lock) -{ - gomp_unset_nest_lock_25 (omp_nest_lock_25_arg (lock)); -} - -int32_t -gomp_test_lock__25 (omp_lock_25_arg_t lock) -{ - return gomp_test_lock_25 (omp_lock_25_arg (lock)); -} - -int32_t -gomp_test_nest_lock__25 (omp_nest_lock_25_arg_t lock) -{ - return gomp_test_nest_lock_25 (omp_nest_lock_25_arg (lock)); -} - -omp_lock_symver (omp_init_lock_) -omp_lock_symver (omp_destroy_lock_) -omp_lock_symver (omp_set_lock_) -omp_lock_symver (omp_unset_lock_) -omp_lock_symver (omp_test_lock_) -omp_lock_symver (omp_init_nest_lock_) -omp_lock_symver (omp_destroy_nest_lock_) -omp_lock_symver (omp_set_nest_lock_) -omp_lock_symver (omp_unset_nest_lock_) -omp_lock_symver (omp_test_nest_lock_) -#endif - -#define TO_INT(x) ((x) > INT_MIN ? (x) < INT_MAX ? 
(x) : INT_MAX : INT_MIN) - -void -omp_set_dynamic_ (const int32_t *set) -{ - omp_set_dynamic (*set); -} - -void -omp_set_dynamic_8_ (const int64_t *set) -{ - omp_set_dynamic (!!*set); -} - -void -omp_set_nested_ (const int32_t *set) -{ - omp_set_nested (*set); -} - -void -omp_set_nested_8_ (const int64_t *set) -{ - omp_set_nested (!!*set); -} - -void -omp_set_num_threads_ (const int32_t *set) -{ - omp_set_num_threads (*set); -} - -void -omp_set_num_threads_8_ (const int64_t *set) -{ - omp_set_num_threads (TO_INT (*set)); -} - -int32_t -omp_get_dynamic_ (void) -{ - return omp_get_dynamic (); -} - -int32_t -omp_get_nested_ (void) -{ - return omp_get_nested (); -} - -int32_t -omp_in_parallel_ (void) -{ - return omp_in_parallel (); -} - -int32_t -omp_get_max_threads_ (void) -{ - return omp_get_max_threads (); -} - -int32_t -omp_get_num_procs_ (void) -{ - return omp_get_num_procs (); -} - -int32_t -omp_get_num_threads_ (void) -{ - return omp_get_num_threads (); -} - -int32_t -omp_get_thread_num_ (void) -{ - return omp_get_thread_num (); -} - -double -omp_get_wtick_ (void) -{ - return omp_get_wtick (); -} - -double -omp_get_wtime_ (void) -{ - return omp_get_wtime (); -} - -void -omp_set_schedule_ (const int32_t *kind, const int32_t *modifier) -{ - omp_set_schedule (*kind, *modifier); -} - -void -omp_set_schedule_8_ (const int32_t *kind, const int64_t *modifier) -{ - omp_set_schedule (*kind, TO_INT (*modifier)); -} - -void -omp_get_schedule_ (int32_t *kind, int32_t *modifier) -{ - omp_sched_t k; - int m; - omp_get_schedule (&k, &m); - *kind = k; - *modifier = m; -} - -void -omp_get_schedule_8_ (int32_t *kind, int64_t *modifier) -{ - omp_sched_t k; - int m; - omp_get_schedule (&k, &m); - *kind = k; - *modifier = m; -} - -int32_t -omp_get_thread_limit_ (void) -{ - return omp_get_thread_limit (); -} - -void -omp_set_max_active_levels_ (const int32_t *levels) -{ - omp_set_max_active_levels (*levels); -} - -void -omp_set_max_active_levels_8_ (const int64_t *levels) -{ - omp_set_max_active_levels (TO_INT (*levels)); -} - -int32_t -omp_get_max_active_levels_ (void) -{ - return omp_get_max_active_levels (); -} - -int32_t -omp_get_level_ (void) -{ - return omp_get_level (); -} - -int32_t -omp_get_ancestor_thread_num_ (const int32_t *level) -{ - return omp_get_ancestor_thread_num (*level); -} - -int32_t -omp_get_ancestor_thread_num_8_ (const int64_t *level) -{ - return omp_get_ancestor_thread_num (TO_INT (*level)); -} - -int32_t -omp_get_team_size_ (const int32_t *level) -{ - return omp_get_team_size (*level); -} - -int32_t -omp_get_team_size_8_ (const int64_t *level) -{ - return omp_get_team_size (TO_INT (*level)); -} - -int32_t -omp_get_active_level_ (void) -{ - return omp_get_active_level (); -} - -int32_t -omp_in_final_ (void) -{ - return omp_in_final (); -} - -int32_t -omp_get_cancellation_ (void) -{ - return omp_get_cancellation (); -} - -int32_t -omp_get_proc_bind_ (void) -{ - return omp_get_proc_bind (); -} - -void -omp_set_default_device_ (const int32_t *device_num) -{ - return omp_set_default_device (*device_num); -} - -void -omp_set_default_device_8_ (const int64_t *device_num) -{ - return omp_set_default_device (TO_INT (*device_num)); -} - -int32_t -omp_get_default_device_ (void) -{ - return omp_get_default_device (); -} - -int32_t -omp_get_num_devices_ (void) -{ - return omp_get_num_devices (); -} - -int32_t -omp_get_num_teams_ (void) -{ - return omp_get_num_teams (); -} - -int32_t -omp_get_team_num_ (void) -{ - return omp_get_team_num (); -} - -int32_t -omp_is_initial_device_ (void) -{ - 
return omp_is_initial_device (); -} diff --git a/usr/libgomp/hashtab.h b/usr/libgomp/hashtab.h deleted file mode 100644 index 0cc224ddb3..0000000000 --- a/usr/libgomp/hashtab.h +++ /dev/null @@ -1,442 +0,0 @@ -/* An expandable hash tables datatype. - Copyright (C) 1999-2015 Free Software Foundation, Inc. - Contributed by Vladimir Makarov . - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ - -/* The hash table code copied from include/hashtab.[hc] and adjusted, - so that the hash table entries are in the flexible array at the end - of the control structure, no callbacks are used and the elements in the - table are of the hash_entry_type type. - Before including this file, define hash_entry_type type and - htab_alloc and htab_free functions. After including it, define - htab_hash and htab_eq inline functions. */ - -/* This package implements basic hash table functionality. It is possible - to search for an entry, create an entry and destroy an entry. - - Elements in the table are generic pointers. - - The size of the table is not fixed; if the occupancy of the table - grows too high the hash table will be expanded. - - The abstract data implementation is based on generalized Algorithm D - from Knuth's book "The art of computer programming". Hash table is - expanded by creation of new hash table and transferring elements from - the old table to the new table. */ - -/* The type for a hash code. */ -typedef unsigned int hashval_t; - -static inline hashval_t htab_hash (hash_entry_type); -static inline bool htab_eq (hash_entry_type, hash_entry_type); - -/* This macro defines reserved value for empty table entry. */ - -#define HTAB_EMPTY_ENTRY ((hash_entry_type) 0) - -/* This macro defines reserved value for table entry which contained - a deleted element. */ - -#define HTAB_DELETED_ENTRY ((hash_entry_type) 1) - -/* Hash tables are of the following type. The structure - (implementation) of this type is not needed for using the hash - tables. All work with hash table should be executed only through - functions mentioned below. The size of this structure is subject to - change. */ - -struct htab { - /* Current size (in entries) of the hash table. */ - size_t size; - - /* Current number of elements including also deleted elements. */ - size_t n_elements; - - /* Current number of deleted elements in the table. */ - size_t n_deleted; - - /* Current size (in entries) of the hash table, as an index into the - table of primes. */ - unsigned int size_prime_index; - - /* Table itself. */ - hash_entry_type entries[]; -}; - -typedef struct htab *htab_t; - -/* An enum saying whether we insert into the hash table or not. */ -enum insert_option {NO_INSERT, INSERT}; - -/* Table of primes and multiplicative inverses. - - Note that these are not minimally reduced inverses. 
Unlike when generating - code to divide by a constant, we want to be able to use the same algorithm - all the time. All of these inverses (are implied to) have bit 32 set. - - For the record, the function that computed the table is in - libiberty/hashtab.c. */ - -struct prime_ent -{ - hashval_t prime; - hashval_t inv; - hashval_t inv_m2; /* inverse of prime-2 */ - hashval_t shift; -}; - -static struct prime_ent const prime_tab[] = { - { 7, 0x24924925, 0x9999999b, 2 }, - { 13, 0x3b13b13c, 0x745d1747, 3 }, - { 31, 0x08421085, 0x1a7b9612, 4 }, - { 61, 0x0c9714fc, 0x15b1e5f8, 5 }, - { 127, 0x02040811, 0x0624dd30, 6 }, - { 251, 0x05197f7e, 0x073260a5, 7 }, - { 509, 0x01824366, 0x02864fc8, 8 }, - { 1021, 0x00c0906d, 0x014191f7, 9 }, - { 2039, 0x0121456f, 0x0161e69e, 10 }, - { 4093, 0x00300902, 0x00501908, 11 }, - { 8191, 0x00080041, 0x00180241, 12 }, - { 16381, 0x000c0091, 0x00140191, 13 }, - { 32749, 0x002605a5, 0x002a06e6, 14 }, - { 65521, 0x000f00e2, 0x00110122, 15 }, - { 131071, 0x00008001, 0x00018003, 16 }, - { 262139, 0x00014002, 0x0001c004, 17 }, - { 524287, 0x00002001, 0x00006001, 18 }, - { 1048573, 0x00003001, 0x00005001, 19 }, - { 2097143, 0x00004801, 0x00005801, 20 }, - { 4194301, 0x00000c01, 0x00001401, 21 }, - { 8388593, 0x00001e01, 0x00002201, 22 }, - { 16777213, 0x00000301, 0x00000501, 23 }, - { 33554393, 0x00001381, 0x00001481, 24 }, - { 67108859, 0x00000141, 0x000001c1, 25 }, - { 134217689, 0x000004e1, 0x00000521, 26 }, - { 268435399, 0x00000391, 0x000003b1, 27 }, - { 536870909, 0x00000019, 0x00000029, 28 }, - { 1073741789, 0x0000008d, 0x00000095, 29 }, - { 2147483647, 0x00000003, 0x00000007, 30 }, - /* Avoid "decimal constant so large it is unsigned" for 4294967291. */ - { 0xfffffffb, 0x00000006, 0x00000008, 31 } -}; - -/* The following function returns an index into the above table of the - nearest prime number which is greater than N, and near a power of two. */ - -static unsigned int -higher_prime_index (unsigned long n) -{ - unsigned int low = 0; - unsigned int high = sizeof(prime_tab) / sizeof(prime_tab[0]); - - while (low != high) - { - unsigned int mid = low + (high - low) / 2; - if (n > prime_tab[mid].prime) - low = mid + 1; - else - high = mid; - } - - /* If we've run out of primes, abort. */ - if (n > prime_tab[low].prime) - abort (); - - return low; -} - -/* Return the current size of given hash table. */ - -static inline size_t -htab_size (htab_t htab) -{ - return htab->size; -} - -/* Return the current number of elements in given hash table. */ - -static inline size_t -htab_elements (htab_t htab) -{ - return htab->n_elements - htab->n_deleted; -} - -/* Return X % Y. */ - -static inline hashval_t -htab_mod_1 (hashval_t x, hashval_t y, hashval_t inv, int shift) -{ - /* The multiplicative inverses computed above are for 32-bit types, and - requires that we be able to compute a highpart multiply. */ - if (sizeof (hashval_t) * __CHAR_BIT__ <= 32) - { - hashval_t t1, t2, t3, t4, q, r; - - t1 = ((unsigned long long)x * inv) >> 32; - t2 = x - t1; - t3 = t2 >> 1; - t4 = t1 + t3; - q = t4 >> shift; - r = x - (q * y); - - return r; - } - - /* Otherwise just use the native division routines. */ - return x % y; -} - -/* Compute the primary hash for HASH given HTAB's current size. */ - -static inline hashval_t -htab_mod (hashval_t hash, htab_t htab) -{ - const struct prime_ent *p = &prime_tab[htab->size_prime_index]; - return htab_mod_1 (hash, p->prime, p->inv, p->shift); -} - -/* Compute the secondary hash for HASH given HTAB's current size. 
*/ - -static inline hashval_t -htab_mod_m2 (hashval_t hash, htab_t htab) -{ - const struct prime_ent *p = &prime_tab[htab->size_prime_index]; - return 1 + htab_mod_1 (hash, p->prime - 2, p->inv_m2, p->shift); -} - -/* Create hash table of size SIZE. */ - -static htab_t -htab_create (size_t size) -{ - htab_t result; - unsigned int size_prime_index; - - size_prime_index = higher_prime_index (size); - size = prime_tab[size_prime_index].prime; - - result = (htab_t) htab_alloc (sizeof (struct htab) - + size * sizeof (hash_entry_type)); - result->size = size; - result->n_elements = 0; - result->n_deleted = 0; - result->size_prime_index = size_prime_index; - memset (result->entries, 0, size * sizeof (hash_entry_type)); - return result; -} - -/* Similar to htab_find_slot, but without several unwanted side effects: - - Does not call htab_eq when it finds an existing entry. - - Does not change the count of elements in the hash table. - This function also assumes there are no deleted entries in the table. - HASH is the hash value for the element to be inserted. */ - -static hash_entry_type * -find_empty_slot_for_expand (htab_t htab, hashval_t hash) -{ - hashval_t index = htab_mod (hash, htab); - size_t size = htab_size (htab); - hash_entry_type *slot = htab->entries + index; - hashval_t hash2; - - if (*slot == HTAB_EMPTY_ENTRY) - return slot; - else if (*slot == HTAB_DELETED_ENTRY) - abort (); - - hash2 = htab_mod_m2 (hash, htab); - for (;;) - { - index += hash2; - if (index >= size) - index -= size; - - slot = htab->entries + index; - if (*slot == HTAB_EMPTY_ENTRY) - return slot; - else if (*slot == HTAB_DELETED_ENTRY) - abort (); - } -} - -/* The following function changes size of memory allocated for the - entries and repeatedly inserts the table elements. The occupancy - of the table after the call will be about 50%. Naturally the hash - table must already exist. Remember also that the place of the - table entries is changed. */ - -static htab_t -htab_expand (htab_t htab) -{ - htab_t nhtab; - hash_entry_type *olimit; - hash_entry_type *p; - size_t osize, elts; - - osize = htab->size; - olimit = htab->entries + osize; - elts = htab_elements (htab); - - /* Resize only when table after removal of unused elements is either - too full or too empty. */ - if (elts * 2 > osize || (elts * 8 < osize && osize > 32)) - nhtab = htab_create (elts * 2); - else - nhtab = htab_create (osize - 1); - nhtab->n_elements = htab->n_elements - htab->n_deleted; - - p = htab->entries; - do - { - hash_entry_type x = *p; - - if (x != HTAB_EMPTY_ENTRY && x != HTAB_DELETED_ENTRY) - *find_empty_slot_for_expand (nhtab, htab_hash (x)) = x; - - p++; - } - while (p < olimit); - - htab_free (htab); - return nhtab; -} - -/* This function searches for a hash table entry equal to the given - element. It cannot be used to insert or delete an element. 
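As a rough illustration of the probing scheme implemented by the helpers above (using a plain modulo in place of the multiplicative-inverse trick), lookups start at hash mod prime and then advance by 1 + hash mod (prime - 2); because the table size is prime, that step is co-prime to it and the probe sequence visits every slot. A self-contained sketch, with a hypothetical probe() helper:

#include <stdio.h>

/* Hypothetical stand-alone restatement of the probe sequence; SIZE is assumed
   to be one of the primes from prime_tab above. */
static unsigned int
probe (unsigned int hash, unsigned int size, unsigned int attempt)
{
  unsigned int index = hash % size;           /* primary hash (htab_mod) */
  unsigned int step = 1 + hash % (size - 2);  /* secondary hash (htab_mod_m2) */
  return (index + attempt * step) % size;
}

int
main (void)
{
  unsigned int k;
  for (k = 0; k < 4; k++)       /* first few probes for hash 42 in a table of 13 */
    printf ("probe %u -> slot %u\n", k, probe (42, 13, k));
  return 0;
}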
*/ - -static hash_entry_type -htab_find (htab_t htab, const hash_entry_type element) -{ - hashval_t index, hash2, hash = htab_hash (element); - size_t size; - hash_entry_type entry; - - size = htab_size (htab); - index = htab_mod (hash, htab); - - entry = htab->entries[index]; - if (entry == HTAB_EMPTY_ENTRY - || (entry != HTAB_DELETED_ENTRY && htab_eq (entry, element))) - return entry; - - hash2 = htab_mod_m2 (hash, htab); - for (;;) - { - index += hash2; - if (index >= size) - index -= size; - - entry = htab->entries[index]; - if (entry == HTAB_EMPTY_ENTRY - || (entry != HTAB_DELETED_ENTRY && htab_eq (entry, element))) - return entry; - } -} - -/* This function searches for a hash table slot containing an entry - equal to the given element. To delete an entry, call this with - insert=NO_INSERT, then call htab_clear_slot on the slot returned - (possibly after doing some checks). To insert an entry, call this - with insert=INSERT, then write the value you want into the returned - slot. */ - -static hash_entry_type * -htab_find_slot (htab_t *htabp, const hash_entry_type element, - enum insert_option insert) -{ - hash_entry_type *first_deleted_slot; - hashval_t index, hash2, hash = htab_hash (element); - size_t size; - hash_entry_type entry; - htab_t htab = *htabp; - - size = htab_size (htab); - if (insert == INSERT && size * 3 <= htab->n_elements * 4) - { - htab = *htabp = htab_expand (htab); - size = htab_size (htab); - } - - index = htab_mod (hash, htab); - - first_deleted_slot = NULL; - - entry = htab->entries[index]; - if (entry == HTAB_EMPTY_ENTRY) - goto empty_entry; - else if (entry == HTAB_DELETED_ENTRY) - first_deleted_slot = &htab->entries[index]; - else if (htab_eq (entry, element)) - return &htab->entries[index]; - - hash2 = htab_mod_m2 (hash, htab); - for (;;) - { - index += hash2; - if (index >= size) - index -= size; - - entry = htab->entries[index]; - if (entry == HTAB_EMPTY_ENTRY) - goto empty_entry; - else if (entry == HTAB_DELETED_ENTRY) - { - if (!first_deleted_slot) - first_deleted_slot = &htab->entries[index]; - } - else if (htab_eq (entry, element)) - return &htab->entries[index]; - } - - empty_entry: - if (insert == NO_INSERT) - return NULL; - - if (first_deleted_slot) - { - htab->n_deleted--; - *first_deleted_slot = HTAB_EMPTY_ENTRY; - return first_deleted_slot; - } - - htab->n_elements++; - return &htab->entries[index]; -} - -/* This function clears a specified slot in a hash table. It is - useful when you've already done the lookup and don't want to do it - again. */ - -static inline void -htab_clear_slot (htab_t htab, hash_entry_type *slot) -{ - if (slot < htab->entries || slot >= htab->entries + htab_size (htab) - || *slot == HTAB_EMPTY_ENTRY || *slot == HTAB_DELETED_ENTRY) - abort (); - - *slot = HTAB_DELETED_ENTRY; - htab->n_deleted++; -} - -/* Returns a hash code for pointer P. Simplified version of evahash */ - -static inline hashval_t -hash_pointer (const void *p) -{ - uintptr_t v = (uintptr_t) p; - if (sizeof (v) > sizeof (hashval_t)) - v ^= v >> (sizeof (uintptr_t) / 2 * __CHAR_BIT__); - return v; -} diff --git a/usr/libgomp/iter.c b/usr/libgomp/iter.c deleted file mode 100644 index 0ceb41d909..0000000000 --- a/usr/libgomp/iter.c +++ /dev/null @@ -1,338 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). 
- - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file contains routines for managing work-share iteration, both - for loops and sections. */ - -#include "libgomp.h" -#include - - -/* This function implements the STATIC scheduling method. The caller should - iterate *pstart <= x < *pend. Return zero if there are more iterations - to perform; nonzero if not. Return less than 0 if this thread had - received the absolutely last iteration. */ - -int -gomp_iter_static_next (long *pstart, long *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_work_share *ws = thr->ts.work_share; - unsigned long nthreads = team ? team->nthreads : 1; - - if (thr->ts.static_trip == -1) - return -1; - - /* Quick test for degenerate teams and orphaned constructs. */ - if (nthreads == 1) - { - *pstart = ws->next; - *pend = ws->end; - thr->ts.static_trip = -1; - return ws->next == ws->end; - } - - /* We interpret chunk_size zero as "unspecified", which means that we - should break up the iterations such that each thread makes only one - trip through the outer loop. */ - if (ws->chunk_size == 0) - { - unsigned long n, q, i, t; - unsigned long s0, e0; - long s, e; - - if (thr->ts.static_trip > 0) - return 1; - - /* Compute the total number of iterations. */ - s = ws->incr + (ws->incr > 0 ? -1 : 1); - n = (ws->end - ws->next + s) / ws->incr; - i = thr->ts.team_id; - - /* Compute the "zero-based" start and end points. That is, as - if the loop began at zero and incremented by one. */ - q = n / nthreads; - t = n % nthreads; - if (i < t) - { - t = 0; - q++; - } - s0 = q * i + t; - e0 = s0 + q; - - /* Notice when no iterations allocated for this thread. */ - if (s0 >= e0) - { - thr->ts.static_trip = 1; - return 1; - } - - /* Transform these to the actual start and end numbers. */ - s = (long)s0 * ws->incr + ws->next; - e = (long)e0 * ws->incr + ws->next; - - *pstart = s; - *pend = e; - thr->ts.static_trip = (e0 == n ? -1 : 1); - return 0; - } - else - { - unsigned long n, s0, e0, i, c; - long s, e; - - /* Otherwise, each thread gets exactly chunk_size iterations - (if available) each time through the loop. */ - - s = ws->incr + (ws->incr > 0 ? -1 : 1); - n = (ws->end - ws->next + s) / ws->incr; - i = thr->ts.team_id; - c = ws->chunk_size; - - /* Initial guess is a C sized chunk positioned nthreads iterations - in, offset by our thread number. */ - s0 = (thr->ts.static_trip * nthreads + i) * c; - e0 = s0 + c; - - /* Detect overflow. */ - if (s0 >= n) - return 1; - if (e0 > n) - e0 = n; - - /* Transform these to the actual start and end numbers. 
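A worked example of the unchunked STATIC split computed just above (made-up numbers, not from the sources): for n = 10 iterations over nthreads = 4, q = 2 and t = 2, so threads 0 and 1 receive three iterations and threads 2 and 3 receive two. A self-contained sketch of the same arithmetic, with a hypothetical static_range() helper:

#include <stdio.h>

/* Hypothetical helper repeating the zero-based split from the code above. */
static void
static_range (unsigned long n, unsigned long nthreads, unsigned long i,
              unsigned long *s0, unsigned long *e0)
{
  unsigned long q = n / nthreads;
  unsigned long t = n % nthreads;
  if (i < t)
    {
      t = 0;
      q++;
    }
  *s0 = q * i + t;
  *e0 = *s0 + q;
}

int
main (void)
{
  unsigned long i, s, e;
  for (i = 0; i < 4; i++)       /* prints [0,3) [3,6) [6,8) [8,10) */
    {
      static_range (10, 4, i, &s, &e);
      printf ("thread %lu: [%lu,%lu)\n", i, s, e);
    }
  return 0;
}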
*/ - s = (long)s0 * ws->incr + ws->next; - e = (long)e0 * ws->incr + ws->next; - - *pstart = s; - *pend = e; - - if (e0 == n) - thr->ts.static_trip = -1; - else - thr->ts.static_trip++; - return 0; - } -} - - -/* This function implements the DYNAMIC scheduling method. Arguments are - as for gomp_iter_static_next. This function must be called with ws->lock - held. */ - -bool -gomp_iter_dynamic_next_locked (long *pstart, long *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_work_share *ws = thr->ts.work_share; - long start, end, chunk, left; - - start = ws->next; - if (start == ws->end) - return false; - - chunk = ws->chunk_size; - left = ws->end - start; - if (ws->incr < 0) - { - if (chunk < left) - chunk = left; - } - else - { - if (chunk > left) - chunk = left; - } - end = start + chunk; - - ws->next = end; - *pstart = start; - *pend = end; - return true; -} - - -#ifdef HAVE_SYNC_BUILTINS -/* Similar, but doesn't require the lock held, and uses compare-and-swap - instead. Note that the only memory value that changes is ws->next. */ - -bool -gomp_iter_dynamic_next (long *pstart, long *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_work_share *ws = thr->ts.work_share; - long start, end, nend, chunk, incr; - - end = ws->end; - incr = ws->incr; - chunk = ws->chunk_size; - - if (__builtin_expect (ws->mode, 1)) - { - long tmp = __sync_fetch_and_add (&ws->next, chunk); - if (incr > 0) - { - if (tmp >= end) - return false; - nend = tmp + chunk; - if (nend > end) - nend = end; - *pstart = tmp; - *pend = nend; - return true; - } - else - { - if (tmp <= end) - return false; - nend = tmp + chunk; - if (nend < end) - nend = end; - *pstart = tmp; - *pend = nend; - return true; - } - } - - start = ws->next; - while (1) - { - long left = end - start; - long tmp; - - if (start == end) - return false; - - if (incr < 0) - { - if (chunk < left) - chunk = left; - } - else - { - if (chunk > left) - chunk = left; - } - nend = start + chunk; - - tmp = __sync_val_compare_and_swap (&ws->next, start, nend); - if (__builtin_expect (tmp == start, 1)) - break; - - start = tmp; - } - - *pstart = start; - *pend = nend; - return true; -} -#endif /* HAVE_SYNC_BUILTINS */ - - -/* This function implements the GUIDED scheduling method. Arguments are - as for gomp_iter_static_next. This function must be called with the - work share lock held. */ - -bool -gomp_iter_guided_next_locked (long *pstart, long *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_work_share *ws = thr->ts.work_share; - struct gomp_team *team = thr->ts.team; - unsigned long nthreads = team ? team->nthreads : 1; - unsigned long n, q; - long start, end; - - if (ws->next == ws->end) - return false; - - start = ws->next; - n = (ws->end - start) / ws->incr; - q = (n + nthreads - 1) / nthreads; - - if (q < ws->chunk_size) - q = ws->chunk_size; - if (q <= n) - end = start + q * ws->incr; - else - end = ws->end; - - ws->next = end; - *pstart = start; - *pend = end; - return true; -} - -#ifdef HAVE_SYNC_BUILTINS -/* Similar, but doesn't require the lock held, and uses compare-and-swap - instead. Note that the only memory value that changes is ws->next. */ - -bool -gomp_iter_guided_next (long *pstart, long *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_work_share *ws = thr->ts.work_share; - struct gomp_team *team = thr->ts.team; - unsigned long nthreads = team ? 
team->nthreads : 1; - long start, end, nend, incr; - unsigned long chunk_size; - - start = ws->next; - end = ws->end; - incr = ws->incr; - chunk_size = ws->chunk_size; - - while (1) - { - unsigned long n, q; - long tmp; - - if (start == end) - return false; - - n = (end - start) / incr; - q = (n + nthreads - 1) / nthreads; - - if (q < chunk_size) - q = chunk_size; - if (__builtin_expect (q <= n, 1)) - nend = start + q * incr; - else - nend = end; - - tmp = __sync_val_compare_and_swap (&ws->next, start, nend); - if (__builtin_expect (tmp == start, 1)) - break; - - start = tmp; - } - - *pstart = start; - *pend = nend; - return true; -} -#endif /* HAVE_SYNC_BUILTINS */ diff --git a/usr/libgomp/iter_ull.c b/usr/libgomp/iter_ull.c deleted file mode 100644 index b1cad84d4c..0000000000 --- a/usr/libgomp/iter_ull.c +++ /dev/null @@ -1,345 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file contains routines for managing work-share iteration, both - for loops and sections. */ - -#include "libgomp.h" -#include - -typedef unsigned long long gomp_ull; - -/* This function implements the STATIC scheduling method. The caller should - iterate *pstart <= x < *pend. Return zero if there are more iterations - to perform; nonzero if not. Return less than 0 if this thread had - received the absolutely last iteration. */ - -int -gomp_iter_ull_static_next (gomp_ull *pstart, gomp_ull *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_work_share *ws = thr->ts.work_share; - unsigned long nthreads = team ? team->nthreads : 1; - - if (thr->ts.static_trip == -1) - return -1; - - /* Quick test for degenerate teams and orphaned constructs. */ - if (nthreads == 1) - { - *pstart = ws->next_ull; - *pend = ws->end_ull; - thr->ts.static_trip = -1; - return ws->next_ull == ws->end_ull; - } - - /* We interpret chunk_size zero as "unspecified", which means that we - should break up the iterations such that each thread makes only one - trip through the outer loop. */ - if (ws->chunk_size_ull == 0) - { - gomp_ull n, q, i, t, s0, e0, s, e; - - if (thr->ts.static_trip > 0) - return 1; - - /* Compute the total number of iterations. */ - if (__builtin_expect (ws->mode, 0) == 0) - n = (ws->end_ull - ws->next_ull + ws->incr_ull - 1) / ws->incr_ull; - else - n = (ws->next_ull - ws->end_ull - ws->incr_ull - 1) / -ws->incr_ull; - i = thr->ts.team_id; - - /* Compute the "zero-based" start and end points. That is, as - if the loop began at zero and incremented by one. 
*/ - q = n / nthreads; - t = n % nthreads; - if (i < t) - { - t = 0; - q++; - } - s0 = q * i + t; - e0 = s0 + q; - - /* Notice when no iterations allocated for this thread. */ - if (s0 >= e0) - { - thr->ts.static_trip = 1; - return 1; - } - - /* Transform these to the actual start and end numbers. */ - s = s0 * ws->incr_ull + ws->next_ull; - e = e0 * ws->incr_ull + ws->next_ull; - - *pstart = s; - *pend = e; - thr->ts.static_trip = (e0 == n ? -1 : 1); - return 0; - } - else - { - gomp_ull n, s0, e0, i, c, s, e; - - /* Otherwise, each thread gets exactly chunk_size iterations - (if available) each time through the loop. */ - - if (__builtin_expect (ws->mode, 0) == 0) - n = (ws->end_ull - ws->next_ull + ws->incr_ull - 1) / ws->incr_ull; - else - n = (ws->next_ull - ws->end_ull - ws->incr_ull - 1) / -ws->incr_ull; - i = thr->ts.team_id; - c = ws->chunk_size_ull; - - /* Initial guess is a C sized chunk positioned nthreads iterations - in, offset by our thread number. */ - s0 = (thr->ts.static_trip * (gomp_ull) nthreads + i) * c; - e0 = s0 + c; - - /* Detect overflow. */ - if (s0 >= n) - return 1; - if (e0 > n) - e0 = n; - - /* Transform these to the actual start and end numbers. */ - s = s0 * ws->incr_ull + ws->next_ull; - e = e0 * ws->incr_ull + ws->next_ull; - - *pstart = s; - *pend = e; - - if (e0 == n) - thr->ts.static_trip = -1; - else - thr->ts.static_trip++; - return 0; - } -} - - -/* This function implements the DYNAMIC scheduling method. Arguments are - as for gomp_iter_ull_static_next. This function must be called with - ws->lock held. */ - -bool -gomp_iter_ull_dynamic_next_locked (gomp_ull *pstart, gomp_ull *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_work_share *ws = thr->ts.work_share; - gomp_ull start, end, chunk, left; - - start = ws->next_ull; - if (start == ws->end_ull) - return false; - - chunk = ws->chunk_size_ull; - left = ws->end_ull - start; - if (__builtin_expect (ws->mode & 2, 0)) - { - if (chunk < left) - chunk = left; - } - else - { - if (chunk > left) - chunk = left; - } - end = start + chunk; - - ws->next_ull = end; - *pstart = start; - *pend = end; - return true; -} - - -#if defined HAVE_SYNC_BUILTINS && defined __LP64__ -/* Similar, but doesn't require the lock held, and uses compare-and-swap - instead. Note that the only memory value that changes is ws->next_ull. 
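The lock-free variant that follows claims chunks by retrying a compare-and-swap on ws->next_ull; a reduced sketch of that claim loop (assumes an increasing iteration space and a fixed preferred chunk size, uses the same GCC __sync builtin, and the claim_chunk() name is hypothetical):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long gomp_ull;

/* Hypothetical reduced claim loop: NEXT is the shared cursor, END the end of
   the iteration space, CHUNK the preferred chunk size. */
static bool
claim_chunk (gomp_ull *next, gomp_ull end, gomp_ull chunk,
             gomp_ull *pstart, gomp_ull *pend)
{
  gomp_ull start = *next, nend;

  for (;;)
    {
      gomp_ull left, old;

      if (start == end)
        return false;
      left = end - start;
      if (chunk > left)
        chunk = left;
      nend = start + chunk;

      old = __sync_val_compare_and_swap (next, start, nend);
      if (old == start)         /* no other thread raced us: chunk is ours */
        break;
      start = old;              /* lost the race: retry from the new cursor */
    }

  *pstart = start;
  *pend = nend;
  return true;
}

int
main (void)
{
  gomp_ull next = 0, s, e;

  while (claim_chunk (&next, 10, 3, &s, &e))
    printf ("claimed [%llu,%llu)\n", s, e);
  return 0;
}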
*/ - -bool -gomp_iter_ull_dynamic_next (gomp_ull *pstart, gomp_ull *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_work_share *ws = thr->ts.work_share; - gomp_ull start, end, nend, chunk; - - end = ws->end_ull; - chunk = ws->chunk_size_ull; - - if (__builtin_expect (ws->mode & 1, 1)) - { - gomp_ull tmp = __sync_fetch_and_add (&ws->next_ull, chunk); - if (__builtin_expect (ws->mode & 2, 0) == 0) - { - if (tmp >= end) - return false; - nend = tmp + chunk; - if (nend > end) - nend = end; - *pstart = tmp; - *pend = nend; - return true; - } - else - { - if (tmp <= end) - return false; - nend = tmp + chunk; - if (nend < end) - nend = end; - *pstart = tmp; - *pend = nend; - return true; - } - } - - start = ws->next_ull; - while (1) - { - gomp_ull left = end - start; - gomp_ull tmp; - - if (start == end) - return false; - - if (__builtin_expect (ws->mode & 2, 0)) - { - if (chunk < left) - chunk = left; - } - else - { - if (chunk > left) - chunk = left; - } - nend = start + chunk; - - tmp = __sync_val_compare_and_swap (&ws->next_ull, start, nend); - if (__builtin_expect (tmp == start, 1)) - break; - - start = tmp; - } - - *pstart = start; - *pend = nend; - return true; -} -#endif /* HAVE_SYNC_BUILTINS */ - - -/* This function implements the GUIDED scheduling method. Arguments are - as for gomp_iter_ull_static_next. This function must be called with the - work share lock held. */ - -bool -gomp_iter_ull_guided_next_locked (gomp_ull *pstart, gomp_ull *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_work_share *ws = thr->ts.work_share; - struct gomp_team *team = thr->ts.team; - gomp_ull nthreads = team ? team->nthreads : 1; - gomp_ull n, q; - gomp_ull start, end; - - if (ws->next_ull == ws->end_ull) - return false; - - start = ws->next_ull; - if (__builtin_expect (ws->mode, 0) == 0) - n = (ws->end_ull - start) / ws->incr_ull; - else - n = (start - ws->end_ull) / -ws->incr_ull; - q = (n + nthreads - 1) / nthreads; - - if (q < ws->chunk_size_ull) - q = ws->chunk_size_ull; - if (q <= n) - end = start + q * ws->incr_ull; - else - end = ws->end_ull; - - ws->next_ull = end; - *pstart = start; - *pend = end; - return true; -} - -#if defined HAVE_SYNC_BUILTINS && defined __LP64__ -/* Similar, but doesn't require the lock held, and uses compare-and-swap - instead. Note that the only memory value that changes is ws->next_ull. */ - -bool -gomp_iter_ull_guided_next (gomp_ull *pstart, gomp_ull *pend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_work_share *ws = thr->ts.work_share; - struct gomp_team *team = thr->ts.team; - gomp_ull nthreads = team ? 
team->nthreads : 1; - gomp_ull start, end, nend, incr; - gomp_ull chunk_size; - - start = ws->next_ull; - end = ws->end_ull; - incr = ws->incr_ull; - chunk_size = ws->chunk_size_ull; - - while (1) - { - gomp_ull n, q; - gomp_ull tmp; - - if (start == end) - return false; - - if (__builtin_expect (ws->mode, 0) == 0) - n = (end - start) / incr; - else - n = (start - end) / -incr; - q = (n + nthreads - 1) / nthreads; - - if (q < chunk_size) - q = chunk_size; - if (__builtin_expect (q <= n, 1)) - nend = start + q * incr; - else - nend = end; - - tmp = __sync_val_compare_and_swap (&ws->next_ull, start, nend); - if (__builtin_expect (tmp == start, 1)) - break; - - start = tmp; - } - - *pstart = start; - *pend = nend; - return true; -} -#endif /* HAVE_SYNC_BUILTINS */ diff --git a/usr/libgomp/libgomp.h b/usr/libgomp/libgomp.h deleted file mode 100644 index a6213ec0b1..0000000000 --- a/usr/libgomp/libgomp.h +++ /dev/null @@ -1,887 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file contains data types and function declarations that are not - part of the official OpenACC or OpenMP user interfaces. There are - declarations in here that are part of the GNU Offloading and Multi - Processing ABI, in that the compiler is required to know about them - and use them. - - The convention is that the all caps prefix "GOMP" is used group items - that are part of the external ABI, and the lower case prefix "gomp" - is used group items that are completely private to the library. */ - -#ifndef LIBGOMP_H -#define LIBGOMP_H 1 - -#include "config.h" -//#include "gstdint.h" -//#include "libgomp-plugin.h" - -#include -#include -#include -#include -#include - -#ifdef HAVE_ATTRIBUTE_VISIBILITY -# pragma GCC visibility push(hidden) -#endif - -/* If we were a C++ library, we'd get this from . */ -enum memmodel -{ - MEMMODEL_RELAXED = 0, - MEMMODEL_CONSUME = 1, - MEMMODEL_ACQUIRE = 2, - MEMMODEL_RELEASE = 3, - MEMMODEL_ACQ_REL = 4, - MEMMODEL_SEQ_CST = 5 -}; - -#include "sem.h" -#include "mutex.h" -#include "bar.h" -#include "ptrlock.h" - - -/* This structure contains the data to control one work-sharing construct, - either a LOOP (FOR/DO) or a SECTIONS. */ - -enum gomp_schedule_type -{ - GFS_RUNTIME, - GFS_STATIC, - GFS_DYNAMIC, - GFS_GUIDED, - GFS_AUTO -}; - -struct gomp_work_share -{ - /* This member records the SCHEDULE clause to be used for this construct. - The user specification of "runtime" will already have been resolved. - If this is a SECTIONS construct, this value will always be DYNAMIC. 
*/ - enum gomp_schedule_type sched; - - int mode; - - union { - struct { - /* This is the chunk_size argument to the SCHEDULE clause. */ - long chunk_size; - - /* This is the iteration end point. If this is a SECTIONS construct, - this is the number of contained sections. */ - long end; - - /* This is the iteration step. If this is a SECTIONS construct, this - is always 1. */ - long incr; - }; - - struct { - /* The same as above, but for the unsigned long long loop variants. */ - unsigned long long chunk_size_ull; - unsigned long long end_ull; - unsigned long long incr_ull; - }; - }; - - /* This is a circular queue that details which threads will be allowed - into the ordered region and in which order. When a thread allocates - iterations on which it is going to work, it also registers itself at - the end of the array. When a thread reaches the ordered region, it - checks to see if it is the one at the head of the queue. If not, it - blocks on its RELEASE semaphore. */ - unsigned *ordered_team_ids; - - /* This is the number of threads that have registered themselves in - the circular queue ordered_team_ids. */ - unsigned ordered_num_used; - - /* This is the team_id of the currently acknowledged owner of the ordered - section, or -1u if the ordered section has not been acknowledged by - any thread. This is distinguished from the thread that is *allowed* - to take the section next. */ - unsigned ordered_owner; - - /* This is the index into the circular queue ordered_team_ids of the - current thread that's allowed into the ordered reason. */ - unsigned ordered_cur; - - /* This is a chain of allocated gomp_work_share blocks, valid only - in the first gomp_work_share struct in the block. */ - struct gomp_work_share *next_alloc; - - /* The above fields are written once during workshare initialization, - or related to ordered worksharing. Make sure the following fields - are in a different cache line. */ - - /* This lock protects the update of the following members. */ - gomp_mutex_t lock __attribute__((aligned (64))); - - /* This is the count of the number of threads that have exited the work - share construct. If the construct was marked nowait, they have moved on - to other work; otherwise they're blocked on a barrier. The last member - of the team to exit the work share construct must deallocate it. */ - unsigned threads_completed; - - union { - /* This is the next iteration value to be allocated. In the case of - GFS_STATIC loops, this the iteration start point and never changes. */ - long next; - - /* The same, but with unsigned long long type. */ - unsigned long long next_ull; - - /* This is the returned data structure for SINGLE COPYPRIVATE. */ - void *copyprivate; - }; - - union { - /* Link to gomp_work_share struct for next work sharing construct - encountered after this one. */ - gomp_ptrlock_t next_ws; - - /* gomp_work_share structs are chained in the free work share cache - through this. */ - struct gomp_work_share *next_free; - }; - - /* If only few threads are in the team, ordered_team_ids can point - to this array which fills the padding at the end of this struct. */ - unsigned inline_ordered_team_ids[0]; -}; - -/* This structure contains all of the thread-local data associated with - a thread team. This is the data that must be saved when a thread - encounters a nested PARALLEL construct. */ - -struct gomp_team_state -{ - /* This is the team of which the thread is currently a member. 
*/ - struct gomp_team *team; - - /* This is the work share construct which this thread is currently - processing. Recall that with NOWAIT, not all threads may be - processing the same construct. */ - struct gomp_work_share *work_share; - - /* This is the previous work share construct or NULL if there wasn't any. - When all threads are done with the current work sharing construct, - the previous one can be freed. The current one can't, as its - next_ws field is used. */ - struct gomp_work_share *last_work_share; - - /* This is the ID of this thread within the team. This value is - guaranteed to be between 0 and N-1, where N is the number of - threads in the team. */ - unsigned team_id; - - /* Nesting level. */ - unsigned level; - - /* Active nesting level. Only active parallel regions are counted. */ - unsigned active_level; - - /* Place-partition-var, offset and length into gomp_places_list array. */ - unsigned place_partition_off; - unsigned place_partition_len; - -#ifdef HAVE_SYNC_BUILTINS - /* Number of single stmts encountered. */ - unsigned long single_count; -#endif - - /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the - trip number through the loop. So first time a particular loop - is encountered this number is 0, the second time through the loop - is 1, etc. This is unused when the compiler knows in advance that - the loop is statically scheduled. */ - unsigned long static_trip; -}; - -struct target_mem_desc; - -/* These are the OpenMP 4.0 Internal Control Variables described in - section 2.3.1. Those described as having one copy per task are - stored within the structure; those described as having one copy - for the whole program are (naturally) global variables. */ - -struct gomp_task_icv -{ - unsigned long nthreads_var; - enum gomp_schedule_type run_sched_var; - int run_sched_modifier; - int default_device_var; - unsigned int thread_limit_var; - bool dyn_var; - bool nest_var; - char bind_var; - /* Internal ICV. */ - struct target_mem_desc *target_data; -}; - -extern struct gomp_task_icv gomp_global_icv; -#ifndef HAVE_SYNC_BUILTINS -extern gomp_mutex_t gomp_managed_threads_lock; -#endif -extern unsigned long gomp_max_active_levels_var; -extern bool gomp_cancel_var; -extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var; -extern unsigned long gomp_available_cpus, gomp_managed_threads; -extern unsigned long *gomp_nthreads_var_list, gomp_nthreads_var_list_len; -extern char *gomp_bind_var_list; -extern unsigned long gomp_bind_var_list_len; -extern void **gomp_places_list; -extern unsigned long gomp_places_list_len; -extern int gomp_debug_var; -extern int goacc_device_num; -extern char *goacc_device_type; - -enum gomp_task_kind -{ - GOMP_TASK_IMPLICIT, - GOMP_TASK_IFFALSE, - GOMP_TASK_WAITING, - GOMP_TASK_TIED -}; - -struct gomp_task; -struct gomp_taskgroup; -struct htab; - -struct gomp_task_depend_entry -{ - void *addr; - struct gomp_task_depend_entry *next; - struct gomp_task_depend_entry *prev; - struct gomp_task *task; - bool is_in; - bool redundant; - bool redundant_out; -}; - -struct gomp_dependers_vec -{ - size_t n_elem; - size_t allocated; - struct gomp_task *elem[]; -}; - -/* Used when in GOMP_taskwait or in gomp_task_maybe_wait_for_dependencies. */ - -struct gomp_taskwait -{ - bool in_taskwait; - bool in_depend_wait; - size_t n_depend; - struct gomp_task *last_parent_depends_on; - gomp_sem_t taskwait_sem; -}; - -/* This structure describes a "task" to be run by a thread. 
*/ - -struct gomp_task -{ - struct gomp_task *parent; - struct gomp_task *children; - struct gomp_task *next_child; - struct gomp_task *prev_child; - struct gomp_task *next_queue; - struct gomp_task *prev_queue; - struct gomp_task *next_taskgroup; - struct gomp_task *prev_taskgroup; - struct gomp_taskgroup *taskgroup; - struct gomp_dependers_vec *dependers; - struct htab *depend_hash; - struct gomp_taskwait *taskwait; - size_t depend_count; - size_t num_dependees; - struct gomp_task_icv icv; - void (*fn) (void *); - void *fn_data; - enum gomp_task_kind kind; - bool in_tied_task; - bool final_task; - bool copy_ctors_done; - bool parent_depends_on; - struct gomp_task_depend_entry depend[]; -}; - -struct gomp_taskgroup -{ - struct gomp_taskgroup *prev; - struct gomp_task *children; - bool in_taskgroup_wait; - bool cancelled; - gomp_sem_t taskgroup_sem; - size_t num_children; -}; - -/* This structure describes a "team" of threads. These are the threads - that are spawned by a PARALLEL constructs, as well as the work sharing - constructs that the team encounters. */ - -struct gomp_team -{ - /* This is the number of threads in the current team. */ - unsigned nthreads; - - /* This is number of gomp_work_share structs that have been allocated - as a block last time. */ - unsigned work_share_chunk; - - /* This is the saved team state that applied to a master thread before - the current thread was created. */ - struct gomp_team_state prev_ts; - - /* This semaphore should be used by the master thread instead of its - "native" semaphore in the thread structure. Required for nested - parallels, as the master is a member of two teams. */ - gomp_sem_t master_release; - - /* This points to an array with pointers to the release semaphore - of the threads in the team. */ - gomp_sem_t **ordered_release; - - /* List of work shares on which gomp_fini_work_share hasn't been - called yet. If the team hasn't been cancelled, this should be - equal to each thr->ts.work_share, but otherwise it can be a possibly - long list of workshares. */ - struct gomp_work_share *work_shares_to_free; - - /* List of gomp_work_share structs chained through next_free fields. - This is populated and taken off only by the first thread in the - team encountering a new work sharing construct, in a critical - section. */ - struct gomp_work_share *work_share_list_alloc; - - /* List of gomp_work_share structs freed by free_work_share. New - entries are atomically added to the start of the list, and - alloc_work_share can safely only move all but the first entry - to work_share_list alloc, as free_work_share can happen concurrently - with alloc_work_share. */ - struct gomp_work_share *work_share_list_free; - -#ifdef HAVE_SYNC_BUILTINS - /* Number of simple single regions encountered by threads in this - team. */ - unsigned long single_count; -#else - /* Mutex protecting addition of workshares to work_share_list_free. */ - gomp_mutex_t work_share_list_free_lock; -#endif - - /* This barrier is used for most synchronization of the team. */ - gomp_barrier_t barrier; - - /* Initial work shares, to avoid allocating any gomp_work_share - structs in the common case. */ - struct gomp_work_share work_shares[8]; - - gomp_mutex_t task_lock; - struct gomp_task *task_queue; - /* Number of all GOMP_TASK_{WAITING,TIED} tasks in the team. */ - unsigned int task_count; - /* Number of GOMP_TASK_WAITING tasks currently waiting to be scheduled. 
*/ - unsigned int task_queued_count; - /* Number of GOMP_TASK_{WAITING,TIED} tasks currently running - directly in gomp_barrier_handle_tasks; tasks spawned - from e.g. GOMP_taskwait or GOMP_taskgroup_end don't count, even when - that is called from a task run from gomp_barrier_handle_tasks. - task_running_count should be always <= team->nthreads, - and if current task isn't in_tied_task, then it will be - even < team->nthreads. */ - unsigned int task_running_count; - int work_share_cancelled; - int team_cancelled; - - /* This array contains structures for implicit tasks. */ - struct gomp_task implicit_task[]; -}; - -/* This structure contains all data that is private to libgomp and is - allocated per thread. */ - -struct gomp_thread -{ - /* This is the function that the thread should run upon launch. */ - void (*fn) (void *data); - void *data; - - /* This is the current team state for this thread. The ts.team member - is NULL only if the thread is idle. */ - struct gomp_team_state ts; - - /* This is the task that the thread is currently executing. */ - struct gomp_task *task; - - /* This semaphore is used for ordered loops. */ - gomp_sem_t release; - - /* Place this thread is bound to plus one, or zero if not bound - to any place. */ - unsigned int place; - - /* User pthread thread pool */ - struct gomp_thread_pool *thread_pool; -} __attribute__ ((aligned (64))); - - -struct gomp_thread_pool -{ - /* This array manages threads spawned from the top level, which will - return to the idle loop once the current PARALLEL construct ends. */ - struct gomp_thread **threads; - unsigned threads_size; - unsigned threads_used; - struct gomp_team *last_team; - /* Number of threads running in this contention group. */ - unsigned long threads_busy; - - /* This barrier holds and releases threads waiting in threads. */ - gomp_barrier_t threads_dock; -}; - -enum gomp_cancel_kind -{ - GOMP_CANCEL_PARALLEL = 1, - GOMP_CANCEL_LOOP = 2, - GOMP_CANCEL_FOR = GOMP_CANCEL_LOOP, - GOMP_CANCEL_DO = GOMP_CANCEL_LOOP, - GOMP_CANCEL_SECTIONS = 4, - GOMP_CANCEL_TASKGROUP = 8 -}; - -/* ... and here is that TLS data. */ - -#if defined HAVE_TLS || defined USE_EMUTLS -extern __thread struct gomp_thread* gomp_tls_data; -static inline struct gomp_thread *gomp_thread (void) -{ - return gomp_tls_data; -} -#else -extern pthread_key_t gomp_tls_key; -static inline struct gomp_thread *gomp_thread (void) -{ - return pthread_getspecific (gomp_tls_key); -} -#endif - -extern struct gomp_task_icv *gomp_new_icv (void); - -/* Here's how to access the current copy of the ICVs. */ - -static inline struct gomp_task_icv *gomp_icv (bool write) -{ - struct gomp_task *task = gomp_thread ()->task; - if (task) - return &task->icv; - else if (write) - return gomp_new_icv (); - else - return &gomp_global_icv; -} - -/* The attributes to be used during thread creation. */ -extern pthread_attr_t gomp_thread_attr; - -/* Function prototypes. 
*/ - -/* affinity.c */ - -extern void gomp_init_affinity (void); -extern void gomp_init_thread_affinity (pthread_attr_t *, unsigned int); -extern void **gomp_affinity_alloc (unsigned long, bool); -extern void gomp_affinity_init_place (void *); -extern bool gomp_affinity_add_cpus (void *, unsigned long, unsigned long, - long, bool); -extern bool gomp_affinity_remove_cpu (void *, unsigned long); -extern bool gomp_affinity_copy_place (void *, void *, long); -extern bool gomp_affinity_same_place (void *, void *); -extern bool gomp_affinity_finalize_place_list (bool); -extern bool gomp_affinity_init_level (int, unsigned long, bool); -extern void gomp_affinity_print_place (void *); - -/* alloc.c */ - -extern void *gomp_malloc (size_t) __attribute__((malloc)); -extern void *gomp_malloc_cleared (size_t) __attribute__((malloc)); -extern void *gomp_realloc (void *, size_t); - -/* Avoid conflicting prototypes of alloca() in system headers by using - GCC's builtin alloca(). */ -#define gomp_alloca(x) __builtin_alloca(x) - -/* error.c */ - -extern void gomp_vdebug (int, const char *, va_list); -extern void gomp_debug (int, const char *, ...) - __attribute__ ((format (printf, 2, 3))); -#define gomp_vdebug(KIND, FMT, VALIST) \ - do { \ - if (__builtin_expect (gomp_debug_var, 0)) \ - (gomp_vdebug) ((KIND), (FMT), (VALIST)); \ - } while (0) -#define gomp_debug(KIND, ...) \ - do { \ - if (__builtin_expect (gomp_debug_var, 0)) \ - (gomp_debug) ((KIND), __VA_ARGS__); \ - } while (0) -extern void gomp_verror (const char *, va_list); -extern void gomp_error (const char *, ...) - __attribute__ ((format (printf, 1, 2))); -extern void gomp_vfatal (const char *, va_list) - __attribute__ ((noreturn)); -extern void gomp_fatal (const char *, ...) - __attribute__ ((noreturn, format (printf, 1, 2))); - -/* iter.c */ - -extern int gomp_iter_static_next (long *, long *); -extern bool gomp_iter_dynamic_next_locked (long *, long *); -extern bool gomp_iter_guided_next_locked (long *, long *); - -#ifdef HAVE_SYNC_BUILTINS -extern bool gomp_iter_dynamic_next (long *, long *); -extern bool gomp_iter_guided_next (long *, long *); -#endif - -/* iter_ull.c */ - -extern int gomp_iter_ull_static_next (unsigned long long *, - unsigned long long *); -extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *, - unsigned long long *); -extern bool gomp_iter_ull_guided_next_locked (unsigned long long *, - unsigned long long *); - -#if defined HAVE_SYNC_BUILTINS && defined __LP64__ -extern bool gomp_iter_ull_dynamic_next (unsigned long long *, - unsigned long long *); -extern bool gomp_iter_ull_guided_next (unsigned long long *, - unsigned long long *); -#endif - -/* ordered.c */ - -extern void gomp_ordered_first (void); -extern void gomp_ordered_last (void); -extern void gomp_ordered_next (void); -extern void gomp_ordered_static_init (void); -extern void gomp_ordered_static_next (void); -extern void gomp_ordered_sync (void); - -/* parallel.c */ - -extern unsigned gomp_resolve_num_threads (unsigned, unsigned); - -/* proc.c (in config/) */ - -extern void gomp_init_num_threads (void); -extern unsigned gomp_dynamic_max_threads (void); - -/* task.c */ - -extern void gomp_init_task (struct gomp_task *, struct gomp_task *, - struct gomp_task_icv *); -extern void gomp_end_task (void); -extern void gomp_barrier_handle_tasks (gomp_barrier_state_t); - -static void inline -gomp_finish_task (struct gomp_task *task) -{ - if (__builtin_expect (task->depend_hash != NULL, 0)) - free (task->depend_hash); -} - -/* team.c */ - -extern struct 
gomp_team *gomp_new_team (unsigned); -extern void gomp_team_start (void (*) (void *), void *, unsigned, - unsigned, struct gomp_team *); -extern void gomp_team_end (void); -extern void gomp_free_thread (void *); - -/* target.c */ - -extern void gomp_init_targets_once (void); -extern int gomp_get_num_devices (void); - -typedef struct splay_tree_node_s *splay_tree_node; -typedef struct splay_tree_s *splay_tree; -typedef struct splay_tree_key_s *splay_tree_key; - -struct target_mem_desc { - /* Reference count. */ - uintptr_t refcount; - /* All the splay nodes allocated together. */ - splay_tree_node array; - /* Start of the target region. */ - uintptr_t tgt_start; - /* End of the targer region. */ - uintptr_t tgt_end; - /* Handle to free. */ - void *to_free; - /* Previous target_mem_desc. */ - struct target_mem_desc *prev; - /* Number of items in following list. */ - size_t list_count; - - /* Corresponding target device descriptor. */ - struct gomp_device_descr *device_descr; - - /* List of splay keys to remove (or decrease refcount) - at the end of region. */ - splay_tree_key list[]; -}; - -struct splay_tree_key_s { - /* Address of the host object. */ - uintptr_t host_start; - /* Address immediately after the host object. */ - uintptr_t host_end; - /* Descriptor of the target memory. */ - struct target_mem_desc *tgt; - /* Offset from tgt->tgt_start to the start of the target object. */ - uintptr_t tgt_offset; - /* Reference count. */ - uintptr_t refcount; - /* Asynchronous reference count. */ - uintptr_t async_refcount; - /* True if data should be copied from device to host at the end. */ - bool copy_from; -}; - -#if 0 -#include "splay-tree.h" - -typedef struct acc_dispatch_t -{ - /* This is a linked list of data mapped using the - acc_map_data/acc_unmap_data or "acc enter data"/"acc exit data" pragmas. - Unlike mapped_data in the goacc_thread struct, unmapping can - happen out-of-order with respect to mapping. */ - /* This is guarded by the lock in the "outer" struct gomp_device_descr. */ - struct target_mem_desc *data_environ; - - /* Execute. */ - void (*exec_func) (void (*) (void *), size_t, void **, void **, size_t *, - unsigned short *, int, int, int, int, void *); - - /* Async cleanup callback registration. */ - void (*register_async_cleanup_func) (void *); - - /* Asynchronous routines. */ - int (*async_test_func) (int); - int (*async_test_all_func) (void); - void (*async_wait_func) (int); - void (*async_wait_async_func) (int, int); - void (*async_wait_all_func) (void); - void (*async_wait_all_async_func) (int); - void (*async_set_async_func) (int); - - /* Create/destroy TLS data. */ - void *(*create_thread_data_func) (int); - void (*destroy_thread_data_func) (void *); - - /* NVIDIA target specific routines. */ - struct { - void *(*get_current_device_func) (void); - void *(*get_current_context_func) (void); - void *(*get_stream_func) (int); - int (*set_stream_func) (int, void *); - } cuda; -} acc_dispatch_t; - -/* This structure describes accelerator device. - It contains name of the corresponding libgomp plugin, function handlers for - interaction with the device, ID-number of the device, and information about - mapped memory. */ -struct gomp_device_descr -{ - /* Immutable data, which is only set during initialization, and which is not - guarded by the lock. */ - - /* The name of the device. */ - const char *name; - - /* Capabilities of device (supports OpenACC, OpenMP). */ - unsigned int capabilities; - - /* This is the ID number of device among devices of the same type. 
*/ - int target_id; - - /* This is the TYPE of device. */ - enum offload_target_type type; - - /* Function handlers. */ - const char *(*get_name_func) (void); - unsigned int (*get_caps_func) (void); - int (*get_type_func) (void); - int (*get_num_devices_func) (void); - void (*init_device_func) (int); - void (*fini_device_func) (int); - int (*load_image_func) (int, void *, struct addr_pair **); - void (*unload_image_func) (int, void *); - void *(*alloc_func) (int, size_t); - void (*free_func) (int, void *); - void *(*dev2host_func) (int, void *, const void *, size_t); - void *(*host2dev_func) (int, void *, const void *, size_t); - void (*run_func) (int, void *, void *); - - /* Splay tree containing information about mapped memory regions. */ - struct splay_tree_s mem_map; - - /* Mutex for the mutable data. */ - gomp_mutex_t lock; - - /* Set to true when device is initialized. */ - bool is_initialized; - - /* OpenACC-specific data and functions. */ - /* This is mutable because of its mutable data_environ and target_data - members. */ - acc_dispatch_t openacc; -}; -#endif - -extern void gomp_acc_insert_pointer (size_t, void **, size_t *, void *); -extern void gomp_acc_remove_pointer (void *, bool, int, int); - -extern struct target_mem_desc *gomp_map_vars (struct gomp_device_descr *, - size_t, void **, void **, - size_t *, void *, bool, bool); -extern void gomp_copy_from_async (struct target_mem_desc *); -extern void gomp_unmap_vars (struct target_mem_desc *, bool); -extern void gomp_init_device (struct gomp_device_descr *); -extern void gomp_free_memmap (struct splay_tree_s *); -extern void gomp_fini_device (struct gomp_device_descr *); - -/* work.c */ - -extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned); -extern void gomp_fini_work_share (struct gomp_work_share *); -extern bool gomp_work_share_start (bool); -extern void gomp_work_share_end (void); -extern bool gomp_work_share_end_cancel (void); -extern void gomp_work_share_end_nowait (void); - -static inline void -gomp_work_share_init_done (void) -{ - struct gomp_thread *thr = gomp_thread (); - if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) - gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share); -} - -#ifdef HAVE_ATTRIBUTE_VISIBILITY -# pragma GCC visibility pop -#endif - -/* Now that we're back to default visibility, include the globals. */ -#include "libgomp_g.h" - -/* Include omp.h by parts. 
*/ -#include "omp-lock.h" -#define _LIBGOMP_OMP_LOCK_DEFINED 1 -//#include "omp.h.in" -#include "omp.h" - -#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \ - || !defined (HAVE_ATTRIBUTE_ALIAS) \ - || !defined (HAVE_AS_SYMVER_DIRECTIVE) \ - || !defined (PIC) \ - || !defined (HAVE_SYMVER_SYMBOL_RENAMING_RUNTIME_SUPPORT) -# undef LIBGOMP_GNU_SYMBOL_VERSIONING -#endif - -#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING -extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW; -extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW; -extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW; -extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW; -extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW; -extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; -extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; -extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; -extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; -extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW; - -extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; -extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; -extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; -extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; -extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW; -extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; -extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; -extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; -extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; -extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW; - -# define strong_alias(fn, al) \ - extern __typeof (fn) al __attribute__ ((alias (#fn))); -# define omp_lock_symver(fn) \ - __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \ - __asm (".symver g" #fn "_25, " #fn "@OMP_1.0"); -#else -# define gomp_init_lock_30 omp_init_lock -# define gomp_destroy_lock_30 omp_destroy_lock -# define gomp_set_lock_30 omp_set_lock -# define gomp_unset_lock_30 omp_unset_lock -# define gomp_test_lock_30 omp_test_lock -# define gomp_init_nest_lock_30 omp_init_nest_lock -# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock -# define gomp_set_nest_lock_30 omp_set_nest_lock -# define gomp_unset_nest_lock_30 omp_unset_nest_lock -# define gomp_test_nest_lock_30 omp_test_nest_lock -#endif - -#ifdef HAVE_ATTRIBUTE_VISIBILITY -# define attribute_hidden __attribute__ ((visibility ("hidden"))) -#else -# define attribute_hidden -#endif - -#ifdef HAVE_ATTRIBUTE_ALIAS -# define ialias_ulp ialias_str1(__USER_LABEL_PREFIX__) -# define ialias_str1(x) ialias_str2(x) -# define ialias_str2(x) #x -# define ialias(fn) \ - extern __typeof (fn) gomp_ialias_##fn \ - __attribute__ ((alias (#fn))) attribute_hidden; -# define ialias_redirect(fn) \ - extern __typeof (fn) fn __asm__ (ialias_ulp "gomp_ialias_" #fn) attribute_hidden; -# define ialias_call(fn) gomp_ialias_ ## fn -#else -# define ialias(fn) -# define ialias_redirect(fn) -# define ialias_call(fn) fn -#endif - -#endif /* LIBGOMP_H */ diff --git a/usr/libgomp/libgomp.spec b/usr/libgomp/libgomp.spec deleted file mode 100644 index 5b3a383aeb..0000000000 --- a/usr/libgomp/libgomp.spec +++ /dev/null @@ -1,3 +0,0 @@ -# This spec file is read by gcc when linking. It is used to specify the -# standard libraries we need in order to link with libgomp. 
-*link_gomp: -lgomp diff --git a/usr/libgomp/libgomp_f.h b/usr/libgomp/libgomp_f.h deleted file mode 100644 index 2c495965f9..0000000000 --- a/usr/libgomp/libgomp_f.h +++ /dev/null @@ -1,96 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Jakub Jelinek . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file contains prototypes of functions in the external ABI. - This file is included by files in the testsuite. */ - -#ifndef LIBGOMP_F_H -#define LIBGOMP_F_H 1 - -#include "libgomp.h" - -#if (32 == 8) \ - && (8 <= 32) -# define OMP_LOCK_DIRECT -typedef omp_lock_t *omp_lock_arg_t; -# define omp_lock_arg(arg) (arg) -#else -typedef union { omp_lock_t *lock; uint64_t u; } *omp_lock_arg_t; -# define omp_lock_arg(arg) ((arg)->lock) -# endif - -#if (48 == 8) \ - && (8 <= 48) -# define OMP_NEST_LOCK_DIRECT -typedef omp_nest_lock_t *omp_nest_lock_arg_t; -# define omp_nest_lock_arg(arg) (arg) -#else -typedef union { omp_nest_lock_t *lock; uint64_t u; } *omp_nest_lock_arg_t; -# define omp_nest_lock_arg(arg) ((arg)->lock) -# endif - -#if (40 == 8) \ - && (8 <= 40) -# define OMP_LOCK_25_DIRECT -typedef omp_lock_25_t *omp_lock_25_arg_t; -# define omp_lock_25_arg(arg) (arg) -#else -typedef union { omp_lock_25_t *lock; uint64_t u; } *omp_lock_25_arg_t; -# define omp_lock_25_arg(arg) ((arg)->lock) -# endif - -#if (48 == 8) \ - && (8 <= 48) -# define OMP_NEST_LOCK_25_DIRECT -typedef omp_nest_lock_25_t *omp_nest_lock_25_arg_t; -# define omp_nest_lock_25_arg(arg) (arg) -#else -typedef union { omp_nest_lock_25_t *lock; uint64_t u; } *omp_nest_lock_25_arg_t; -# define omp_nest_lock_25_arg(arg) ((arg)->lock) -# endif - -#ifndef __hermit__ -static inline void -omp_check_defines (void) -{ - char test[(32 != sizeof (omp_lock_t) - || 8 != __alignof (omp_lock_t) - || 48 != sizeof (omp_nest_lock_t) - || 8 != __alignof (omp_nest_lock_t) - || 8 != sizeof (*(omp_lock_arg_t) 0) - || 8 != sizeof (*(omp_nest_lock_arg_t) 0)) - ? -1 : 1] __attribute__ ((__unused__)); - char test2[(40 != sizeof (omp_lock_25_t) - || 8 != __alignof (omp_lock_25_t) - || 48 != sizeof (omp_nest_lock_25_t) - || 8 != __alignof (omp_nest_lock_25_t) - || 8 != sizeof (*(omp_lock_25_arg_t) 0) - || 8 - != sizeof (*(omp_nest_lock_25_arg_t) 0)) - ? -1 : 1] __attribute__ ((__unused__)); -} -#endif - -#endif /* LIBGOMP_F_H */ diff --git a/usr/libgomp/libgomp_g.h b/usr/libgomp/libgomp_g.h deleted file mode 100644 index 5e88d451b9..0000000000 --- a/usr/libgomp/libgomp_g.h +++ /dev/null @@ -1,234 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . 
- - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file contains prototypes of functions in the external ABI. - This file is included by files in the testsuite. */ - -#ifndef LIBGOMP_G_H -#define LIBGOMP_G_H 1 - -#include -#include - -/* barrier.c */ - -extern void GOMP_barrier (void); -extern bool GOMP_barrier_cancel (void); - -/* critical.c */ - -extern void GOMP_critical_start (void); -extern void GOMP_critical_end (void); -extern void GOMP_critical_name_start (void **); -extern void GOMP_critical_name_end (void **); -extern void GOMP_atomic_start (void); -extern void GOMP_atomic_end (void); - -/* loop.c */ - -extern bool GOMP_loop_static_start (long, long, long, long, long *, long *); -extern bool GOMP_loop_dynamic_start (long, long, long, long, long *, long *); -extern bool GOMP_loop_guided_start (long, long, long, long, long *, long *); -extern bool GOMP_loop_runtime_start (long, long, long, long *, long *); - -extern bool GOMP_loop_ordered_static_start (long, long, long, long, - long *, long *); -extern bool GOMP_loop_ordered_dynamic_start (long, long, long, long, - long *, long *); -extern bool GOMP_loop_ordered_guided_start (long, long, long, long, - long *, long *); -extern bool GOMP_loop_ordered_runtime_start (long, long, long, long *, long *); - -extern bool GOMP_loop_static_next (long *, long *); -extern bool GOMP_loop_dynamic_next (long *, long *); -extern bool GOMP_loop_guided_next (long *, long *); -extern bool GOMP_loop_runtime_next (long *, long *); - -extern bool GOMP_loop_ordered_static_next (long *, long *); -extern bool GOMP_loop_ordered_dynamic_next (long *, long *); -extern bool GOMP_loop_ordered_guided_next (long *, long *); -extern bool GOMP_loop_ordered_runtime_next (long *, long *); - -extern void GOMP_parallel_loop_static_start (void (*)(void *), void *, - unsigned, long, long, long, long); -extern void GOMP_parallel_loop_dynamic_start (void (*)(void *), void *, - unsigned, long, long, long, long); -extern void GOMP_parallel_loop_guided_start (void (*)(void *), void *, - unsigned, long, long, long, long); -extern void GOMP_parallel_loop_runtime_start (void (*)(void *), void *, - unsigned, long, long, long); -extern void GOMP_parallel_loop_static (void (*)(void *), void *, - unsigned, long, long, long, long, - unsigned); -extern void GOMP_parallel_loop_dynamic (void (*)(void *), void *, - unsigned, long, long, long, long, - unsigned); -extern void GOMP_parallel_loop_guided (void (*)(void *), void *, - unsigned, long, long, long, long, - unsigned); -extern void GOMP_parallel_loop_runtime (void (*)(void *), void *, - unsigned, long, long, long, - unsigned); 
- -extern void GOMP_loop_end (void); -extern void GOMP_loop_end_nowait (void); -extern bool GOMP_loop_end_cancel (void); - -/* loop_ull.c */ - -extern bool GOMP_loop_ull_static_start (bool, unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_dynamic_start (bool, unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_guided_start (bool, unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_runtime_start (bool, unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long *, - unsigned long long *); - -extern bool GOMP_loop_ull_ordered_static_start (bool, unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_ordered_dynamic_start (bool, unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_ordered_guided_start (bool, unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_ordered_runtime_start (bool, unsigned long long, - unsigned long long, - unsigned long long, - unsigned long long *, - unsigned long long *); - -extern bool GOMP_loop_ull_static_next (unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_dynamic_next (unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_guided_next (unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_runtime_next (unsigned long long *, - unsigned long long *); - -extern bool GOMP_loop_ull_ordered_static_next (unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_ordered_dynamic_next (unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_ordered_guided_next (unsigned long long *, - unsigned long long *); -extern bool GOMP_loop_ull_ordered_runtime_next (unsigned long long *, - unsigned long long *); - -/* ordered.c */ - -extern void GOMP_ordered_start (void); -extern void GOMP_ordered_end (void); - -/* parallel.c */ - -extern void GOMP_parallel_start (void (*) (void *), void *, unsigned); -extern void GOMP_parallel_end (void); -extern void GOMP_parallel (void (*) (void *), void *, unsigned, unsigned); -extern bool GOMP_cancel (int, bool); -extern bool GOMP_cancellation_point (int); - -/* task.c */ - -extern void GOMP_task (void (*) (void *), void *, void (*) (void *, void *), - long, long, bool, unsigned, void **); -extern void GOMP_taskwait (void); -extern void GOMP_taskyield (void); -extern void GOMP_taskgroup_start (void); -extern void GOMP_taskgroup_end (void); - -/* sections.c */ - -extern unsigned GOMP_sections_start (unsigned); -extern unsigned GOMP_sections_next (void); -extern void GOMP_parallel_sections_start (void (*) (void *), void *, - unsigned, unsigned); -extern void GOMP_parallel_sections (void (*) (void *), void *, - unsigned, unsigned, unsigned); -extern void GOMP_sections_end (void); -extern void GOMP_sections_end_nowait (void); -extern bool GOMP_sections_end_cancel (void); - -/* single.c */ - -extern bool GOMP_single_start (void); -extern void *GOMP_single_copy_start (void); 
-extern void GOMP_single_copy_end (void *); - -/* target.c */ - -extern void GOMP_target (int, void (*) (void *), const void *, - size_t, void **, size_t *, unsigned char *); -extern void GOMP_target_data (int, const void *, - size_t, void **, size_t *, unsigned char *); -extern void GOMP_target_end_data (void); -extern void GOMP_target_update (int, const void *, - size_t, void **, size_t *, unsigned char *); -extern void GOMP_teams (unsigned int, unsigned int); - -/* oacc-parallel.c */ - -extern void GOACC_data_start (int, size_t, void **, size_t *, - unsigned short *); -extern void GOACC_data_end (void); -extern void GOACC_enter_exit_data (int, size_t, void **, - size_t *, unsigned short *, int, int, ...); -extern void GOACC_parallel (int, void (*) (void *), size_t, - void **, size_t *, unsigned short *, int, int, int, - int, int, ...); -extern void GOACC_update (int, size_t, void **, size_t *, - unsigned short *, int, int, ...); -extern void GOACC_wait (int, int, ...); -extern int GOACC_get_num_threads (void); -extern int GOACC_get_thread_num (void); - -#endif /* LIBGOMP_G_H */ diff --git a/usr/libgomp/lock.c b/usr/libgomp/lock.c deleted file mode 100644 index 6cbc1c3ccc..0000000000 --- a/usr/libgomp/lock.c +++ /dev/null @@ -1,305 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This is the default PTHREADS implementation of the public OpenMP - locking primitives. - - Because OpenMP uses different entry points for normal and recursive - locks, and pthreads uses only one entry point, a system may be able - to do better and streamline the locking as well as reduce the size - of the types exported. */ - -/* We need UNIX98/XPG5 extensions to get recursive locks. Request XPG6 since - Solaris requires this for C99 and later. 
*/ -#define _XOPEN_SOURCE 600 - -#include "libgomp.h" - -#ifdef HAVE_BROKEN_POSIX_SEMAPHORES -void -gomp_init_lock_30 (omp_lock_t *lock) -{ - pthread_mutex_init (lock, NULL); -} - -void -gomp_destroy_lock_30 (omp_lock_t *lock) -{ - pthread_mutex_destroy (lock); -} - -void -gomp_set_lock_30 (omp_lock_t *lock) -{ - pthread_mutex_lock (lock); -} - -void -gomp_unset_lock_30 (omp_lock_t *lock) -{ - pthread_mutex_unlock (lock); -} - -int -gomp_test_lock_30 (omp_lock_t *lock) -{ - return pthread_mutex_trylock (lock) == 0; -} - -void -gomp_init_nest_lock_30 (omp_nest_lock_t *lock) -{ - pthread_mutex_init (&lock->lock, NULL); - lock->count = 0; - lock->owner = NULL; -} - -void -gomp_destroy_nest_lock_30 (omp_nest_lock_t *lock) -{ - pthread_mutex_destroy (&lock->lock); -} - -void -gomp_set_nest_lock_30 (omp_nest_lock_t *lock) -{ - void *me = gomp_icv (true); - - if (lock->owner != me) - { - pthread_mutex_lock (&lock->lock); - lock->owner = me; - } - lock->count++; -} - -void -gomp_unset_nest_lock_30 (omp_nest_lock_t *lock) -{ - if (--lock->count == 0) - { - lock->owner = NULL; - pthread_mutex_unlock (&lock->lock); - } -} - -int -gomp_test_nest_lock_30 (omp_nest_lock_t *lock) -{ - void *me = gomp_icv (true); - - if (lock->owner != me) - { - if (pthread_mutex_trylock (&lock->lock) != 0) - return 0; - lock->owner = me; - } - - return ++lock->count; -} - -#else - -void -gomp_init_lock_30 (omp_lock_t *lock) -{ - sem_init (lock, 0, 1); -} - -void -gomp_destroy_lock_30 (omp_lock_t *lock) -{ - sem_destroy (lock); -} - -void -gomp_set_lock_30 (omp_lock_t *lock) -{ - while (sem_wait (lock) != 0) - ; -} - -void -gomp_unset_lock_30 (omp_lock_t *lock) -{ - sem_post (lock); -} - -int -gomp_test_lock_30 (omp_lock_t *lock) -{ - return sem_trywait (lock) == 0; -} - -void -gomp_init_nest_lock_30 (omp_nest_lock_t *lock) -{ - sem_init (&lock->lock, 0, 1); - lock->count = 0; - lock->owner = NULL; -} - -void -gomp_destroy_nest_lock_30 (omp_nest_lock_t *lock) -{ - sem_destroy (&lock->lock); -} - -void -gomp_set_nest_lock_30 (omp_nest_lock_t *lock) -{ - void *me = gomp_icv (true); - - if (lock->owner != me) - { - while (sem_wait (&lock->lock) != 0) - ; - lock->owner = me; - } - lock->count++; -} - -void -gomp_unset_nest_lock_30 (omp_nest_lock_t *lock) -{ - if (--lock->count == 0) - { - lock->owner = NULL; - sem_post (&lock->lock); - } -} - -int -gomp_test_nest_lock_30 (omp_nest_lock_t *lock) -{ - void *me = gomp_icv (true); - - if (lock->owner != me) - { - if (sem_trywait (&lock->lock) != 0) - return 0; - lock->owner = me; - } - - return ++lock->count; -} -#endif - -#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING -void -gomp_init_lock_25 (omp_lock_25_t *lock) -{ - pthread_mutex_init (lock, NULL); -} - -void -gomp_destroy_lock_25 (omp_lock_25_t *lock) -{ - pthread_mutex_destroy (lock); -} - -void -gomp_set_lock_25 (omp_lock_25_t *lock) -{ - pthread_mutex_lock (lock); -} - -void -gomp_unset_lock_25 (omp_lock_25_t *lock) -{ - pthread_mutex_unlock (lock); -} - -int -gomp_test_lock_25 (omp_lock_25_t *lock) -{ - return pthread_mutex_trylock (lock) == 0; -} - -void -gomp_init_nest_lock_25 (omp_nest_lock_25_t *lock) -{ - pthread_mutexattr_t attr; - - pthread_mutexattr_init (&attr); - pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init (&lock->lock, &attr); - lock->count = 0; - pthread_mutexattr_destroy (&attr); -} - -void -gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *lock) -{ - pthread_mutex_destroy (&lock->lock); -} - -void -gomp_set_nest_lock_25 (omp_nest_lock_25_t *lock) -{ - pthread_mutex_lock 
(&lock->lock); - lock->count++; -} - -void -gomp_unset_nest_lock_25 (omp_nest_lock_25_t *lock) -{ - lock->count--; - pthread_mutex_unlock (&lock->lock); -} - -int -gomp_test_nest_lock_25 (omp_nest_lock_25_t *lock) -{ - if (pthread_mutex_trylock (&lock->lock) == 0) - return ++lock->count; - return 0; -} - -omp_lock_symver (omp_init_lock) -omp_lock_symver (omp_destroy_lock) -omp_lock_symver (omp_set_lock) -omp_lock_symver (omp_unset_lock) -omp_lock_symver (omp_test_lock) -omp_lock_symver (omp_init_nest_lock) -omp_lock_symver (omp_destroy_nest_lock) -omp_lock_symver (omp_set_nest_lock) -omp_lock_symver (omp_unset_nest_lock) -omp_lock_symver (omp_test_nest_lock) - -#else - -ialias (omp_init_lock) -ialias (omp_init_nest_lock) -ialias (omp_destroy_lock) -ialias (omp_destroy_nest_lock) -ialias (omp_set_lock) -ialias (omp_set_nest_lock) -ialias (omp_unset_lock) -ialias (omp_unset_nest_lock) -ialias (omp_test_lock) -ialias (omp_test_nest_lock) - -#endif diff --git a/usr/libgomp/loop.c b/usr/libgomp/loop.c deleted file mode 100644 index 27d78db7a5..0000000000 --- a/usr/libgomp/loop.c +++ /dev/null @@ -1,675 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the LOOP (FOR/DO) construct. */ - -#include -#include -#include "libgomp.h" - - -/* Initialize the given work share construct from the given arguments. */ - -static inline void -gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr, - enum gomp_schedule_type sched, long chunk_size) -{ - ws->sched = sched; - ws->chunk_size = chunk_size; - /* Canonicalize loops that have zero iterations to ->next == ->end. */ - ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end)) - ? start : end; - ws->incr = incr; - ws->next = start; - if (sched == GFS_DYNAMIC) - { - ws->chunk_size *= incr; - -#ifdef HAVE_SYNC_BUILTINS - { - /* For dynamic scheduling prepare things to make each iteration - faster. */ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - long nthreads = team ? team->nthreads : 1; - - if (__builtin_expect (incr > 0, 1)) - { - /* Cheap overflow protection. */ - if (__builtin_expect ((nthreads | ws->chunk_size) - >= 1UL << (sizeof (long) - * __CHAR_BIT__ / 2 - 1), 0)) - ws->mode = 0; - else - ws->mode = ws->end < (LONG_MAX - - (nthreads + 1) * ws->chunk_size); - } - /* Cheap overflow protection. 
*/ - else if (__builtin_expect ((nthreads | -ws->chunk_size) - >= 1UL << (sizeof (long) - * __CHAR_BIT__ / 2 - 1), 0)) - ws->mode = 0; - else - ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX; - } -#endif - } -} - -/* The *_start routines are called when first encountering a loop construct - that is not bound directly to a parallel construct. The first thread - that arrives will create the work-share construct; subsequent threads - will see the construct exists and allocate work from it. - - START, END, INCR are the bounds of the loop; due to the restrictions of - OpenMP, these values must be the same in every thread. This is not - verified (nor is it entirely verifiable, since START is not necessarily - retained intact in the work-share data structure). CHUNK_SIZE is the - scheduling parameter; again this must be identical in all threads. - - Returns true if there's any work for this thread to perform. If so, - *ISTART and *IEND are filled with the bounds of the iteration block - allocated to this thread. Returns false if all work was assigned to - other threads prior to this thread's arrival. */ - -static bool -gomp_loop_static_start (long start, long end, long incr, long chunk_size, - long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - - thr->ts.static_trip = 0; - if (gomp_work_share_start (false)) - { - gomp_loop_init (thr->ts.work_share, start, end, incr, - GFS_STATIC, chunk_size); - gomp_work_share_init_done (); - } - - return !gomp_iter_static_next (istart, iend); -} - -static bool -gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size, - long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - if (gomp_work_share_start (false)) - { - gomp_loop_init (thr->ts.work_share, start, end, incr, - GFS_DYNAMIC, chunk_size); - gomp_work_share_init_done (); - } - -#ifdef HAVE_SYNC_BUILTINS - ret = gomp_iter_dynamic_next (istart, iend); -#else - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_dynamic_next_locked (istart, iend); - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -static bool -gomp_loop_guided_start (long start, long end, long incr, long chunk_size, - long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - if (gomp_work_share_start (false)) - { - gomp_loop_init (thr->ts.work_share, start, end, incr, - GFS_GUIDED, chunk_size); - gomp_work_share_init_done (); - } - -#ifdef HAVE_SYNC_BUILTINS - ret = gomp_iter_guided_next (istart, iend); -#else - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_guided_next_locked (istart, iend); - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -bool -GOMP_loop_runtime_start (long start, long end, long incr, - long *istart, long *iend) -{ - struct gomp_task_icv *icv = gomp_icv (false); - switch (icv->run_sched_var) - { - case GFS_STATIC: - return gomp_loop_static_start (start, end, incr, icv->run_sched_modifier, - istart, iend); - case GFS_DYNAMIC: - return gomp_loop_dynamic_start (start, end, incr, icv->run_sched_modifier, - istart, iend); - case GFS_GUIDED: - return gomp_loop_guided_start (start, end, incr, icv->run_sched_modifier, - istart, iend); - case GFS_AUTO: - /* For now map to schedule(static), later on we could play with feedback - driven choice. */ - return gomp_loop_static_start (start, end, incr, 0, istart, iend); - default: - abort (); - } -} - -/* The *_ordered_*_start routines are similar. 
The only difference is that - this work-share construct is initialized to expect an ORDERED section. */ - -static bool -gomp_loop_ordered_static_start (long start, long end, long incr, - long chunk_size, long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - - thr->ts.static_trip = 0; - if (gomp_work_share_start (true)) - { - gomp_loop_init (thr->ts.work_share, start, end, incr, - GFS_STATIC, chunk_size); - gomp_ordered_static_init (); - gomp_work_share_init_done (); - } - - return !gomp_iter_static_next (istart, iend); -} - -static bool -gomp_loop_ordered_dynamic_start (long start, long end, long incr, - long chunk_size, long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - if (gomp_work_share_start (true)) - { - gomp_loop_init (thr->ts.work_share, start, end, incr, - GFS_DYNAMIC, chunk_size); - gomp_mutex_lock (&thr->ts.work_share->lock); - gomp_work_share_init_done (); - } - else - gomp_mutex_lock (&thr->ts.work_share->lock); - - ret = gomp_iter_dynamic_next_locked (istart, iend); - if (ret) - gomp_ordered_first (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return ret; -} - -static bool -gomp_loop_ordered_guided_start (long start, long end, long incr, - long chunk_size, long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - if (gomp_work_share_start (true)) - { - gomp_loop_init (thr->ts.work_share, start, end, incr, - GFS_GUIDED, chunk_size); - gomp_mutex_lock (&thr->ts.work_share->lock); - gomp_work_share_init_done (); - } - else - gomp_mutex_lock (&thr->ts.work_share->lock); - - ret = gomp_iter_guided_next_locked (istart, iend); - if (ret) - gomp_ordered_first (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return ret; -} - -bool -GOMP_loop_ordered_runtime_start (long start, long end, long incr, - long *istart, long *iend) -{ - struct gomp_task_icv *icv = gomp_icv (false); - switch (icv->run_sched_var) - { - case GFS_STATIC: - return gomp_loop_ordered_static_start (start, end, incr, - icv->run_sched_modifier, - istart, iend); - case GFS_DYNAMIC: - return gomp_loop_ordered_dynamic_start (start, end, incr, - icv->run_sched_modifier, - istart, iend); - case GFS_GUIDED: - return gomp_loop_ordered_guided_start (start, end, incr, - icv->run_sched_modifier, - istart, iend); - case GFS_AUTO: - /* For now map to schedule(static), later on we could play with feedback - driven choice. */ - return gomp_loop_ordered_static_start (start, end, incr, - 0, istart, iend); - default: - abort (); - } -} - -/* The *_next routines are called when the thread completes processing of - the iteration block currently assigned to it. If the work-share - construct is bound directly to a parallel construct, then the iteration - bounds may have been set up before the parallel. In which case, this - may be the first iteration for the thread. - - Returns true if there is work remaining to be performed; *ISTART and - *IEND are filled with a new iteration block. Returns false if all work - has been assigned. 
*/ - -static bool -gomp_loop_static_next (long *istart, long *iend) -{ - return !gomp_iter_static_next (istart, iend); -} - -static bool -gomp_loop_dynamic_next (long *istart, long *iend) -{ - bool ret; - -#ifdef HAVE_SYNC_BUILTINS - ret = gomp_iter_dynamic_next (istart, iend); -#else - struct gomp_thread *thr = gomp_thread (); - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_dynamic_next_locked (istart, iend); - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -static bool -gomp_loop_guided_next (long *istart, long *iend) -{ - bool ret; - -#ifdef HAVE_SYNC_BUILTINS - ret = gomp_iter_guided_next (istart, iend); -#else - struct gomp_thread *thr = gomp_thread (); - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_guided_next_locked (istart, iend); - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -bool -GOMP_loop_runtime_next (long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - - switch (thr->ts.work_share->sched) - { - case GFS_STATIC: - case GFS_AUTO: - return gomp_loop_static_next (istart, iend); - case GFS_DYNAMIC: - return gomp_loop_dynamic_next (istart, iend); - case GFS_GUIDED: - return gomp_loop_guided_next (istart, iend); - default: - abort (); - } -} - -/* The *_ordered_*_next routines are called when the thread completes - processing of the iteration block currently assigned to it. - - Returns true if there is work remaining to be performed; *ISTART and - *IEND are filled with a new iteration block. Returns false if all work - has been assigned. */ - -static bool -gomp_loop_ordered_static_next (long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - int test; - - gomp_ordered_sync (); - gomp_mutex_lock (&thr->ts.work_share->lock); - test = gomp_iter_static_next (istart, iend); - if (test >= 0) - gomp_ordered_static_next (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return test == 0; -} - -static bool -gomp_loop_ordered_dynamic_next (long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - gomp_ordered_sync (); - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_dynamic_next_locked (istart, iend); - if (ret) - gomp_ordered_next (); - else - gomp_ordered_last (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return ret; -} - -static bool -gomp_loop_ordered_guided_next (long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - gomp_ordered_sync (); - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_guided_next_locked (istart, iend); - if (ret) - gomp_ordered_next (); - else - gomp_ordered_last (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return ret; -} - -bool -GOMP_loop_ordered_runtime_next (long *istart, long *iend) -{ - struct gomp_thread *thr = gomp_thread (); - - switch (thr->ts.work_share->sched) - { - case GFS_STATIC: - case GFS_AUTO: - return gomp_loop_ordered_static_next (istart, iend); - case GFS_DYNAMIC: - return gomp_loop_ordered_dynamic_next (istart, iend); - case GFS_GUIDED: - return gomp_loop_ordered_guided_next (istart, iend); - default: - abort (); - } -} - -/* The GOMP_parallel_loop_* routines pre-initialize a work-share construct - to avoid one synchronization once we get into the loop. 
*/ - -static void -gomp_parallel_loop_start (void (*fn) (void *), void *data, - unsigned num_threads, long start, long end, - long incr, enum gomp_schedule_type sched, - long chunk_size, unsigned int flags) -{ - struct gomp_team *team; - - num_threads = gomp_resolve_num_threads (num_threads, 0); - team = gomp_new_team (num_threads); - gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size); - gomp_team_start (fn, data, num_threads, flags, team); -} - -void -GOMP_parallel_loop_static_start (void (*fn) (void *), void *data, - unsigned num_threads, long start, long end, - long incr, long chunk_size) -{ - gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, - GFS_STATIC, chunk_size, 0); -} - -void -GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data, - unsigned num_threads, long start, long end, - long incr, long chunk_size) -{ - gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, - GFS_DYNAMIC, chunk_size, 0); -} - -void -GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data, - unsigned num_threads, long start, long end, - long incr, long chunk_size) -{ - gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, - GFS_GUIDED, chunk_size, 0); -} - -void -GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data, - unsigned num_threads, long start, long end, - long incr) -{ - struct gomp_task_icv *icv = gomp_icv (false); - gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, - icv->run_sched_var, icv->run_sched_modifier, 0); -} - -ialias_redirect (GOMP_parallel_end) - -void -GOMP_parallel_loop_static (void (*fn) (void *), void *data, - unsigned num_threads, long start, long end, - long incr, long chunk_size, unsigned flags) -{ - gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, - GFS_STATIC, chunk_size, flags); - fn (data); - GOMP_parallel_end (); -} - -void -GOMP_parallel_loop_dynamic (void (*fn) (void *), void *data, - unsigned num_threads, long start, long end, - long incr, long chunk_size, unsigned flags) -{ - gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, - GFS_DYNAMIC, chunk_size, flags); - fn (data); - GOMP_parallel_end (); -} - -void -GOMP_parallel_loop_guided (void (*fn) (void *), void *data, - unsigned num_threads, long start, long end, - long incr, long chunk_size, unsigned flags) -{ - gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, - GFS_GUIDED, chunk_size, flags); - fn (data); - GOMP_parallel_end (); -} - -void -GOMP_parallel_loop_runtime (void (*fn) (void *), void *data, - unsigned num_threads, long start, long end, - long incr, unsigned flags) -{ - struct gomp_task_icv *icv = gomp_icv (false); - gomp_parallel_loop_start (fn, data, num_threads, start, end, incr, - icv->run_sched_var, icv->run_sched_modifier, - flags); - fn (data); - GOMP_parallel_end (); -} - -/* The GOMP_loop_end* routines are called after the thread is told that - all loop iterations are complete. The first two versions synchronize - all threads; the nowait version does not. */ - -void -GOMP_loop_end (void) -{ - gomp_work_share_end (); -} - -bool -GOMP_loop_end_cancel (void) -{ - return gomp_work_share_end_cancel (); -} - -void -GOMP_loop_end_nowait (void) -{ - gomp_work_share_end_nowait (); -} - - -/* We use static functions above so that we're sure that the "runtime" - function can defer to the proper routine without interposition. We - export the static function with a strong alias when possible, or with - a wrapper function otherwise. 
*/ - -#ifdef HAVE_ATTRIBUTE_ALIAS -extern __typeof(gomp_loop_static_start) GOMP_loop_static_start - __attribute__((alias ("gomp_loop_static_start"))); -extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start - __attribute__((alias ("gomp_loop_dynamic_start"))); -extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start - __attribute__((alias ("gomp_loop_guided_start"))); - -extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start - __attribute__((alias ("gomp_loop_ordered_static_start"))); -extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start - __attribute__((alias ("gomp_loop_ordered_dynamic_start"))); -extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start - __attribute__((alias ("gomp_loop_ordered_guided_start"))); - -extern __typeof(gomp_loop_static_next) GOMP_loop_static_next - __attribute__((alias ("gomp_loop_static_next"))); -extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next - __attribute__((alias ("gomp_loop_dynamic_next"))); -extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next - __attribute__((alias ("gomp_loop_guided_next"))); - -extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next - __attribute__((alias ("gomp_loop_ordered_static_next"))); -extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next - __attribute__((alias ("gomp_loop_ordered_dynamic_next"))); -extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next - __attribute__((alias ("gomp_loop_ordered_guided_next"))); -#else -bool -GOMP_loop_static_start (long start, long end, long incr, long chunk_size, - long *istart, long *iend) -{ - return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend); -} - -bool -GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size, - long *istart, long *iend) -{ - return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend); -} - -bool -GOMP_loop_guided_start (long start, long end, long incr, long chunk_size, - long *istart, long *iend) -{ - return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend); -} - -bool -GOMP_loop_ordered_static_start (long start, long end, long incr, - long chunk_size, long *istart, long *iend) -{ - return gomp_loop_ordered_static_start (start, end, incr, chunk_size, - istart, iend); -} - -bool -GOMP_loop_ordered_dynamic_start (long start, long end, long incr, - long chunk_size, long *istart, long *iend) -{ - return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size, - istart, iend); -} - -bool -GOMP_loop_ordered_guided_start (long start, long end, long incr, - long chunk_size, long *istart, long *iend) -{ - return gomp_loop_ordered_guided_start (start, end, incr, chunk_size, - istart, iend); -} - -bool -GOMP_loop_static_next (long *istart, long *iend) -{ - return gomp_loop_static_next (istart, iend); -} - -bool -GOMP_loop_dynamic_next (long *istart, long *iend) -{ - return gomp_loop_dynamic_next (istart, iend); -} - -bool -GOMP_loop_guided_next (long *istart, long *iend) -{ - return gomp_loop_guided_next (istart, iend); -} - -bool -GOMP_loop_ordered_static_next (long *istart, long *iend) -{ - return gomp_loop_ordered_static_next (istart, iend); -} - -bool -GOMP_loop_ordered_dynamic_next (long *istart, long *iend) -{ - return gomp_loop_ordered_dynamic_next (istart, iend); -} - -bool -GOMP_loop_ordered_guided_next (long *istart, long *iend) -{ - return gomp_loop_ordered_guided_next (istart, iend); -} -#endif diff --git 
a/usr/libgomp/loop_ull.c b/usr/libgomp/loop_ull.c deleted file mode 100644 index de56ae0b7c..0000000000 --- a/usr/libgomp/loop_ull.c +++ /dev/null @@ -1,572 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the LOOP (FOR/DO) construct. */ - -#include -#include -#include "libgomp.h" - -typedef unsigned long long gomp_ull; - -/* Initialize the given work share construct from the given arguments. */ - -static inline void -gomp_loop_ull_init (struct gomp_work_share *ws, bool up, gomp_ull start, - gomp_ull end, gomp_ull incr, enum gomp_schedule_type sched, - gomp_ull chunk_size) -{ - ws->sched = sched; - ws->chunk_size_ull = chunk_size; - /* Canonicalize loops that have zero iterations to ->next == ->end. */ - ws->end_ull = ((up && start > end) || (!up && start < end)) - ? start : end; - ws->incr_ull = incr; - ws->next_ull = start; - ws->mode = 0; - if (sched == GFS_DYNAMIC) - { - ws->chunk_size_ull *= incr; - -#if defined HAVE_SYNC_BUILTINS && defined __LP64__ - { - /* For dynamic scheduling prepare things to make each iteration - faster. */ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - long nthreads = team ? team->nthreads : 1; - - if (__builtin_expect (up, 1)) - { - /* Cheap overflow protection. */ - if (__builtin_expect ((nthreads | ws->chunk_size_ull) - < 1ULL << (sizeof (gomp_ull) - * __CHAR_BIT__ / 2 - 1), 1)) - ws->mode = ws->end_ull < (__LONG_LONG_MAX__ * 2ULL + 1 - - (nthreads + 1) * ws->chunk_size_ull); - } - /* Cheap overflow protection. */ - else if (__builtin_expect ((nthreads | -ws->chunk_size_ull) - < 1ULL << (sizeof (gomp_ull) - * __CHAR_BIT__ / 2 - 1), 1)) - ws->mode = ws->end_ull > ((nthreads + 1) * -ws->chunk_size_ull - - (__LONG_LONG_MAX__ * 2ULL + 1)); - } -#endif - } - if (!up) - ws->mode |= 2; -} - -/* The *_start routines are called when first encountering a loop construct - that is not bound directly to a parallel construct. The first thread - that arrives will create the work-share construct; subsequent threads - will see the construct exists and allocate work from it. - - START, END, INCR are the bounds of the loop; due to the restrictions of - OpenMP, these values must be the same in every thread. This is not - verified (nor is it entirely verifiable, since START is not necessarily - retained intact in the work-share data structure). CHUNK_SIZE is the - scheduling parameter; again this must be identical in all threads. - - Returns true if there's any work for this thread to perform. 
If so, - *ISTART and *IEND are filled with the bounds of the iteration block - allocated to this thread. Returns false if all work was assigned to - other threads prior to this thread's arrival. */ - -static bool -gomp_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - - thr->ts.static_trip = 0; - if (gomp_work_share_start (false)) - { - gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, - GFS_STATIC, chunk_size); - gomp_work_share_init_done (); - } - - return !gomp_iter_ull_static_next (istart, iend); -} - -static bool -gomp_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - if (gomp_work_share_start (false)) - { - gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, - GFS_DYNAMIC, chunk_size); - gomp_work_share_init_done (); - } - -#if defined HAVE_SYNC_BUILTINS && defined __LP64__ - ret = gomp_iter_ull_dynamic_next (istart, iend); -#else - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_ull_dynamic_next_locked (istart, iend); - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -static bool -gomp_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - if (gomp_work_share_start (false)) - { - gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, - GFS_GUIDED, chunk_size); - gomp_work_share_init_done (); - } - -#if defined HAVE_SYNC_BUILTINS && defined __LP64__ - ret = gomp_iter_ull_guided_next (istart, iend); -#else - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_ull_guided_next_locked (istart, iend); - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -bool -GOMP_loop_ull_runtime_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_task_icv *icv = gomp_icv (false); - switch (icv->run_sched_var) - { - case GFS_STATIC: - return gomp_loop_ull_static_start (up, start, end, incr, - icv->run_sched_modifier, - istart, iend); - case GFS_DYNAMIC: - return gomp_loop_ull_dynamic_start (up, start, end, incr, - icv->run_sched_modifier, - istart, iend); - case GFS_GUIDED: - return gomp_loop_ull_guided_start (up, start, end, incr, - icv->run_sched_modifier, - istart, iend); - case GFS_AUTO: - /* For now map to schedule(static), later on we could play with feedback - driven choice. */ - return gomp_loop_ull_static_start (up, start, end, incr, - 0, istart, iend); - default: - abort (); - } -} - -/* The *_ordered_*_start routines are similar. The only difference is that - this work-share construct is initialized to expect an ORDERED section. 
*/ - -static bool -gomp_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - - thr->ts.static_trip = 0; - if (gomp_work_share_start (true)) - { - gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, - GFS_STATIC, chunk_size); - gomp_ordered_static_init (); - gomp_work_share_init_done (); - } - - return !gomp_iter_ull_static_next (istart, iend); -} - -static bool -gomp_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - if (gomp_work_share_start (true)) - { - gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, - GFS_DYNAMIC, chunk_size); - gomp_mutex_lock (&thr->ts.work_share->lock); - gomp_work_share_init_done (); - } - else - gomp_mutex_lock (&thr->ts.work_share->lock); - - ret = gomp_iter_ull_dynamic_next_locked (istart, iend); - if (ret) - gomp_ordered_first (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return ret; -} - -static bool -gomp_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - if (gomp_work_share_start (true)) - { - gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr, - GFS_GUIDED, chunk_size); - gomp_mutex_lock (&thr->ts.work_share->lock); - gomp_work_share_init_done (); - } - else - gomp_mutex_lock (&thr->ts.work_share->lock); - - ret = gomp_iter_ull_guided_next_locked (istart, iend); - if (ret) - gomp_ordered_first (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return ret; -} - -bool -GOMP_loop_ull_ordered_runtime_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull *istart, - gomp_ull *iend) -{ - struct gomp_task_icv *icv = gomp_icv (false); - switch (icv->run_sched_var) - { - case GFS_STATIC: - return gomp_loop_ull_ordered_static_start (up, start, end, incr, - icv->run_sched_modifier, - istart, iend); - case GFS_DYNAMIC: - return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr, - icv->run_sched_modifier, - istart, iend); - case GFS_GUIDED: - return gomp_loop_ull_ordered_guided_start (up, start, end, incr, - icv->run_sched_modifier, - istart, iend); - case GFS_AUTO: - /* For now map to schedule(static), later on we could play with feedback - driven choice. */ - return gomp_loop_ull_ordered_static_start (up, start, end, incr, - 0, istart, iend); - default: - abort (); - } -} - -/* The *_next routines are called when the thread completes processing of - the iteration block currently assigned to it. If the work-share - construct is bound directly to a parallel construct, then the iteration - bounds may have been set up before the parallel. In which case, this - may be the first iteration for the thread. - - Returns true if there is work remaining to be performed; *ISTART and - *IEND are filled with a new iteration block. Returns false if all work - has been assigned. 
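/* Illustrative sketch (not part of this patch): the ordered variants above
   are used when the loop carries an ordered clause.  Even with a dynamic
   schedule, the ordered region still executes in iteration order: */

#include <omp.h>
#include <stdio.h>

int main (void)
{
  unsigned long long i;

#pragma omp parallel for ordered schedule(dynamic, 1)
  for (i = 0; i < 8; i++)
    {
#pragma omp ordered
      printf ("iteration %llu ran on thread %d\n", i, omp_get_thread_num ());
    }
  return 0;
}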
*/ - -static bool -gomp_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend) -{ - return !gomp_iter_ull_static_next (istart, iend); -} - -static bool -gomp_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend) -{ - bool ret; - -#if defined HAVE_SYNC_BUILTINS && defined __LP64__ - ret = gomp_iter_ull_dynamic_next (istart, iend); -#else - struct gomp_thread *thr = gomp_thread (); - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_ull_dynamic_next_locked (istart, iend); - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -static bool -gomp_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend) -{ - bool ret; - -#if defined HAVE_SYNC_BUILTINS && defined __LP64__ - ret = gomp_iter_ull_guided_next (istart, iend); -#else - struct gomp_thread *thr = gomp_thread (); - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_ull_guided_next_locked (istart, iend); - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -bool -GOMP_loop_ull_runtime_next (gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - - switch (thr->ts.work_share->sched) - { - case GFS_STATIC: - case GFS_AUTO: - return gomp_loop_ull_static_next (istart, iend); - case GFS_DYNAMIC: - return gomp_loop_ull_dynamic_next (istart, iend); - case GFS_GUIDED: - return gomp_loop_ull_guided_next (istart, iend); - default: - abort (); - } -} - -/* The *_ordered_*_next routines are called when the thread completes - processing of the iteration block currently assigned to it. - - Returns true if there is work remaining to be performed; *ISTART and - *IEND are filled with a new iteration block. Returns false if all work - has been assigned. */ - -static bool -gomp_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - int test; - - gomp_ordered_sync (); - gomp_mutex_lock (&thr->ts.work_share->lock); - test = gomp_iter_ull_static_next (istart, iend); - if (test >= 0) - gomp_ordered_static_next (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return test == 0; -} - -static bool -gomp_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - gomp_ordered_sync (); - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_ull_dynamic_next_locked (istart, iend); - if (ret) - gomp_ordered_next (); - else - gomp_ordered_last (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return ret; -} - -static bool -gomp_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - bool ret; - - gomp_ordered_sync (); - gomp_mutex_lock (&thr->ts.work_share->lock); - ret = gomp_iter_ull_guided_next_locked (istart, iend); - if (ret) - gomp_ordered_next (); - else - gomp_ordered_last (); - gomp_mutex_unlock (&thr->ts.work_share->lock); - - return ret; -} - -bool -GOMP_loop_ull_ordered_runtime_next (gomp_ull *istart, gomp_ull *iend) -{ - struct gomp_thread *thr = gomp_thread (); - - switch (thr->ts.work_share->sched) - { - case GFS_STATIC: - case GFS_AUTO: - return gomp_loop_ull_ordered_static_next (istart, iend); - case GFS_DYNAMIC: - return gomp_loop_ull_ordered_dynamic_next (istart, iend); - case GFS_GUIDED: - return gomp_loop_ull_ordered_guided_next (istart, iend); - default: - abort (); - } -} - -/* We use static functions above so that we're sure that the "runtime" - function can defer to the proper routine without interposition. 
We - export the static function with a strong alias when possible, or with - a wrapper function otherwise. */ - -#ifdef HAVE_ATTRIBUTE_ALIAS -extern __typeof(gomp_loop_ull_static_start) GOMP_loop_ull_static_start - __attribute__((alias ("gomp_loop_ull_static_start"))); -extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_dynamic_start - __attribute__((alias ("gomp_loop_ull_dynamic_start"))); -extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_guided_start - __attribute__((alias ("gomp_loop_ull_guided_start"))); - -extern __typeof(gomp_loop_ull_ordered_static_start) GOMP_loop_ull_ordered_static_start - __attribute__((alias ("gomp_loop_ull_ordered_static_start"))); -extern __typeof(gomp_loop_ull_ordered_dynamic_start) GOMP_loop_ull_ordered_dynamic_start - __attribute__((alias ("gomp_loop_ull_ordered_dynamic_start"))); -extern __typeof(gomp_loop_ull_ordered_guided_start) GOMP_loop_ull_ordered_guided_start - __attribute__((alias ("gomp_loop_ull_ordered_guided_start"))); - -extern __typeof(gomp_loop_ull_static_next) GOMP_loop_ull_static_next - __attribute__((alias ("gomp_loop_ull_static_next"))); -extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_dynamic_next - __attribute__((alias ("gomp_loop_ull_dynamic_next"))); -extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_guided_next - __attribute__((alias ("gomp_loop_ull_guided_next"))); - -extern __typeof(gomp_loop_ull_ordered_static_next) GOMP_loop_ull_ordered_static_next - __attribute__((alias ("gomp_loop_ull_ordered_static_next"))); -extern __typeof(gomp_loop_ull_ordered_dynamic_next) GOMP_loop_ull_ordered_dynamic_next - __attribute__((alias ("gomp_loop_ull_ordered_dynamic_next"))); -extern __typeof(gomp_loop_ull_ordered_guided_next) GOMP_loop_ull_ordered_guided_next - __attribute__((alias ("gomp_loop_ull_ordered_guided_next"))); -#else -bool -GOMP_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_static_start (up, start, end, incr, chunk_size, istart, - iend); -} - -bool -GOMP_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart, - iend); -} - -bool -GOMP_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart, - iend); -} - -bool -GOMP_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_ordered_static_start (up, start, end, incr, chunk_size, - istart, iend); -} - -bool -GOMP_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr, chunk_size, - istart, iend); -} - -bool -GOMP_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end, - gomp_ull incr, gomp_ull chunk_size, - gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_ordered_guided_start (up, start, end, incr, chunk_size, - istart, iend); -} - -bool -GOMP_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_static_next (istart, iend); -} - -bool -GOMP_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend) -{ 
- return gomp_loop_ull_dynamic_next (istart, iend); -} - -bool -GOMP_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_guided_next (istart, iend); -} - -bool -GOMP_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_ordered_static_next (istart, iend); -} - -bool -GOMP_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_ordered_dynamic_next (istart, iend); -} - -bool -GOMP_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend) -{ - return gomp_loop_ull_ordered_guided_next (istart, iend); -} -#endif diff --git a/usr/libgomp/mutex.c b/usr/libgomp/mutex.c deleted file mode 100644 index 39bb64da0f..0000000000 --- a/usr/libgomp/mutex.c +++ /dev/null @@ -1 +0,0 @@ -/* Everything is in the header. */ diff --git a/usr/libgomp/mutex.h b/usr/libgomp/mutex.h deleted file mode 100644 index 5b46026e44..0000000000 --- a/usr/libgomp/mutex.h +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This is the default PTHREADS implementation of a mutex synchronization - mechanism for libgomp. This type is private to the library. */ - -#ifndef GOMP_MUTEX_H -#define GOMP_MUTEX_H 1 - -#include - -typedef pthread_mutex_t gomp_mutex_t; - -#define GOMP_MUTEX_INIT_0 0 - -static inline void gomp_mutex_init (gomp_mutex_t *mutex) -{ - pthread_mutex_init (mutex, NULL); -} - -static inline void gomp_mutex_lock (gomp_mutex_t *mutex) -{ - pthread_mutex_lock (mutex); -} - -static inline void gomp_mutex_unlock (gomp_mutex_t *mutex) -{ - pthread_mutex_unlock (mutex); -} - -static inline void gomp_mutex_destroy (gomp_mutex_t *mutex) -{ - pthread_mutex_destroy (mutex); -} - -#endif /* GOMP_MUTEX_H */ diff --git a/usr/libgomp/omp-lock.h b/usr/libgomp/omp-lock.h deleted file mode 100644 index e51dc271f8..0000000000 --- a/usr/libgomp/omp-lock.h +++ /dev/null @@ -1,23 +0,0 @@ -/* This header is used during the build process to find the size and - alignment of the public OpenMP locks, so that we can export data - structures without polluting the namespace. - - In this default POSIX implementation, we used to map the two locks to the - same PTHREADS primitive, but for OpenMP 3.0 sem_t needs to be used - instead, as pthread_mutex_unlock should not be called by different - thread than the one that called pthread_mutex_lock. 
*/ - -#include -#include - -typedef pthread_mutex_t omp_lock_25_t; -typedef struct { pthread_mutex_t lock; int count; } omp_nest_lock_25_t; -#ifdef HAVE_BROKEN_POSIX_SEMAPHORES -/* If we don't have working semaphores, we'll make all explicit tasks - tied to the creating thread. */ -typedef pthread_mutex_t omp_lock_t; -typedef struct { pthread_mutex_t lock; int count; void *owner; } omp_nest_lock_t; -#else -typedef sem_t omp_lock_t; -typedef struct { sem_t lock; int count; void *owner; } omp_nest_lock_t; -#endif diff --git a/usr/libgomp/omp.h b/usr/libgomp/omp.h deleted file mode 100644 index c15a22b26a..0000000000 --- a/usr/libgomp/omp.h +++ /dev/null @@ -1,128 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -#ifndef _OMP_H -#define _OMP_H 1 - -#ifndef _LIBGOMP_OMP_LOCK_DEFINED -#define _LIBGOMP_OMP_LOCK_DEFINED 1 -/* These two structures get edited by the libgomp build process to - reflect the shape of the two types. Their internals are private - to the library. 
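/* Illustrative sketch (not part of this patch): the nestable lock type above
   carries a count and an owner so the holding thread may re-acquire it, for
   example from a recursive helper (the helper and constants here are made up
   for the demonstration): */

#include <omp.h>
#include <stdio.h>

static omp_nest_lock_t lock;
static long total;

static void
add_range (long lo, long hi)
{
  omp_set_nest_lock (&lock);    /* safely re-entered by the owning thread */
  if (hi - lo > 1)
    {
      add_range (lo, lo + (hi - lo) / 2);
      add_range (lo + (hi - lo) / 2, hi);
    }
  else
    total += lo;
  omp_unset_nest_lock (&lock);
}

int main (void)
{
  omp_init_nest_lock (&lock);
#pragma omp parallel num_threads(2)
  add_range (0, 8);             /* each thread adds 0..7, so total is 56 */
  omp_destroy_nest_lock (&lock);
  printf ("total = %ld\n", total);
  return 0;
}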
*/ - -typedef struct -{ - unsigned char _x[4] - __attribute__((__aligned__(4))); -} omp_lock_t; - -typedef struct -{ - unsigned char _x[8 + sizeof (void *)] - __attribute__((__aligned__(sizeof(void*)))); -} omp_nest_lock_t; -#endif - -typedef enum omp_sched_t -{ - omp_sched_static = 1, - omp_sched_dynamic = 2, - omp_sched_guided = 3, - omp_sched_auto = 4 -} omp_sched_t; - -typedef enum omp_proc_bind_t -{ - omp_proc_bind_false = 0, - omp_proc_bind_true = 1, - omp_proc_bind_master = 2, - omp_proc_bind_close = 3, - omp_proc_bind_spread = 4 -} omp_proc_bind_t; - -#ifdef __cplusplus -extern "C" { -# define __GOMP_NOTHROW throw () -#else -# define __GOMP_NOTHROW __attribute__((__nothrow__)) -#endif - -extern void omp_set_num_threads (int) __GOMP_NOTHROW; -extern int omp_get_num_threads (void) __GOMP_NOTHROW; -extern int omp_get_max_threads (void) __GOMP_NOTHROW; -extern int omp_get_thread_num (void) __GOMP_NOTHROW; -extern int omp_get_num_procs (void) __GOMP_NOTHROW; - -extern int omp_in_parallel (void) __GOMP_NOTHROW; - -extern void omp_set_dynamic (int) __GOMP_NOTHROW; -extern int omp_get_dynamic (void) __GOMP_NOTHROW; - -extern void omp_set_nested (int) __GOMP_NOTHROW; -extern int omp_get_nested (void) __GOMP_NOTHROW; - -extern void omp_init_lock (omp_lock_t *) __GOMP_NOTHROW; -extern void omp_destroy_lock (omp_lock_t *) __GOMP_NOTHROW; -extern void omp_set_lock (omp_lock_t *) __GOMP_NOTHROW; -extern void omp_unset_lock (omp_lock_t *) __GOMP_NOTHROW; -extern int omp_test_lock (omp_lock_t *) __GOMP_NOTHROW; - -extern void omp_init_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; -extern void omp_destroy_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; -extern void omp_set_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; -extern void omp_unset_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; -extern int omp_test_nest_lock (omp_nest_lock_t *) __GOMP_NOTHROW; - -extern double omp_get_wtime (void) __GOMP_NOTHROW; -extern double omp_get_wtick (void) __GOMP_NOTHROW; - -extern void omp_set_schedule (omp_sched_t, int) __GOMP_NOTHROW; -extern void omp_get_schedule (omp_sched_t *, int *) __GOMP_NOTHROW; -extern int omp_get_thread_limit (void) __GOMP_NOTHROW; -extern void omp_set_max_active_levels (int) __GOMP_NOTHROW; -extern int omp_get_max_active_levels (void) __GOMP_NOTHROW; -extern int omp_get_level (void) __GOMP_NOTHROW; -extern int omp_get_ancestor_thread_num (int) __GOMP_NOTHROW; -extern int omp_get_team_size (int) __GOMP_NOTHROW; -extern int omp_get_active_level (void) __GOMP_NOTHROW; - -extern int omp_in_final (void) __GOMP_NOTHROW; - -extern int omp_get_cancellation (void) __GOMP_NOTHROW; -extern omp_proc_bind_t omp_get_proc_bind (void) __GOMP_NOTHROW; - -extern void omp_set_default_device (int) __GOMP_NOTHROW; -extern int omp_get_default_device (void) __GOMP_NOTHROW; -extern int omp_get_num_devices (void) __GOMP_NOTHROW; -extern int omp_get_num_teams (void) __GOMP_NOTHROW; -extern int omp_get_team_num (void) __GOMP_NOTHROW; - -extern int omp_is_initial_device (void) __GOMP_NOTHROW; - -#ifdef __cplusplus -} -#endif - -#endif /* _OMP_H */ diff --git a/usr/libgomp/omp_lib.h b/usr/libgomp/omp_lib.h deleted file mode 100644 index 1de6eb70e9..0000000000 --- a/usr/libgomp/omp_lib.h +++ /dev/null @@ -1,98 +0,0 @@ -! Copyright (C) 2005-2015 Free Software Foundation, Inc. -! Contributed by Jakub Jelinek . - -! This file is part of the GNU Offloading and Multi Processing Library -! (libgomp). - -! Libgomp is free software; you can redistribute it and/or modify it -! 
under the terms of the GNU General Public License as published by -! the Free Software Foundation; either version 3, or (at your option) -! any later version. - -! Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY -! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -! FOR A PARTICULAR PURPOSE. See the GNU General Public License for -! more details. - -! Under Section 7 of GPL version 3, you are granted additional -! permissions described in the GCC Runtime Library Exception, version -! 3.1, as published by the Free Software Foundation. - -! You should have received a copy of the GNU General Public License and -! a copy of the GCC Runtime Library Exception along with this program; -! see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -! . - - integer omp_lock_kind, omp_nest_lock_kind, openmp_version - parameter (omp_lock_kind = 8) - parameter (omp_nest_lock_kind = 8) - integer omp_sched_kind - parameter (omp_sched_kind = 4) - integer (omp_sched_kind) omp_sched_static, omp_sched_dynamic - integer (omp_sched_kind) omp_sched_guided, omp_sched_auto - parameter (omp_sched_static = 1) - parameter (omp_sched_dynamic = 2) - parameter (omp_sched_guided = 3) - parameter (omp_sched_auto = 4) - integer omp_proc_bind_kind - parameter (omp_proc_bind_kind = 4) - integer (omp_proc_bind_kind) omp_proc_bind_false - integer (omp_proc_bind_kind) omp_proc_bind_true - integer (omp_proc_bind_kind) omp_proc_bind_master - integer (omp_proc_bind_kind) omp_proc_bind_close - integer (omp_proc_bind_kind) omp_proc_bind_spread - parameter (omp_proc_bind_false = 0) - parameter (omp_proc_bind_true = 1) - parameter (omp_proc_bind_master = 2) - parameter (omp_proc_bind_close = 3) - parameter (omp_proc_bind_spread = 4) - parameter (openmp_version = 201307) - - external omp_init_lock, omp_init_nest_lock - external omp_destroy_lock, omp_destroy_nest_lock - external omp_set_lock, omp_set_nest_lock - external omp_unset_lock, omp_unset_nest_lock - external omp_set_dynamic, omp_set_nested - external omp_set_num_threads - - external omp_get_dynamic, omp_get_nested - logical(4) omp_get_dynamic, omp_get_nested - external omp_test_lock, omp_in_parallel - logical(4) omp_test_lock, omp_in_parallel - - external omp_get_max_threads, omp_get_num_procs - integer(4) omp_get_max_threads, omp_get_num_procs - external omp_get_num_threads, omp_get_thread_num - integer(4) omp_get_num_threads, omp_get_thread_num - external omp_test_nest_lock - integer(4) omp_test_nest_lock - - external omp_get_wtick, omp_get_wtime - double precision omp_get_wtick, omp_get_wtime - - external omp_set_schedule, omp_get_schedule - external omp_get_thread_limit, omp_set_max_active_levels - external omp_get_max_active_levels, omp_get_level - external omp_get_ancestor_thread_num, omp_get_team_size - external omp_get_active_level - integer(4) omp_get_thread_limit, omp_get_max_active_levels - integer(4) omp_get_level, omp_get_ancestor_thread_num - integer(4) omp_get_team_size, omp_get_active_level - - external omp_in_final - logical(4) omp_in_final - - external omp_get_cancelllation - logical(4) omp_get_cancelllation - - external omp_get_proc_bind - integer(omp_proc_bind_kind) omp_get_proc_bind - - external omp_set_default_device, omp_get_default_device - external omp_get_num_devices, omp_get_num_teams - external omp_get_team_num - integer(4) omp_get_default_device, omp_get_num_devices - integer(4) omp_get_num_teams, omp_get_team_num - - external omp_is_initial_device - logical(4) omp_is_initial_device diff 
--git a/usr/libgomp/omp_lib.h.in b/usr/libgomp/omp_lib.h.in deleted file mode 100644 index d590bc1513..0000000000 --- a/usr/libgomp/omp_lib.h.in +++ /dev/null @@ -1,98 +0,0 @@ -! Copyright (C) 2005-2015 Free Software Foundation, Inc. -! Contributed by Jakub Jelinek . - -! This file is part of the GNU Offloading and Multi Processing Library -! (libgomp). - -! Libgomp is free software; you can redistribute it and/or modify it -! under the terms of the GNU General Public License as published by -! the Free Software Foundation; either version 3, or (at your option) -! any later version. - -! Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY -! WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -! FOR A PARTICULAR PURPOSE. See the GNU General Public License for -! more details. - -! Under Section 7 of GPL version 3, you are granted additional -! permissions described in the GCC Runtime Library Exception, version -! 3.1, as published by the Free Software Foundation. - -! You should have received a copy of the GNU General Public License and -! a copy of the GCC Runtime Library Exception along with this program; -! see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -! . - - integer omp_lock_kind, omp_nest_lock_kind, openmp_version - parameter (omp_lock_kind = @OMP_LOCK_KIND@) - parameter (omp_nest_lock_kind = @OMP_NEST_LOCK_KIND@) - integer omp_sched_kind - parameter (omp_sched_kind = 4) - integer (omp_sched_kind) omp_sched_static, omp_sched_dynamic - integer (omp_sched_kind) omp_sched_guided, omp_sched_auto - parameter (omp_sched_static = 1) - parameter (omp_sched_dynamic = 2) - parameter (omp_sched_guided = 3) - parameter (omp_sched_auto = 4) - integer omp_proc_bind_kind - parameter (omp_proc_bind_kind = 4) - integer (omp_proc_bind_kind) omp_proc_bind_false - integer (omp_proc_bind_kind) omp_proc_bind_true - integer (omp_proc_bind_kind) omp_proc_bind_master - integer (omp_proc_bind_kind) omp_proc_bind_close - integer (omp_proc_bind_kind) omp_proc_bind_spread - parameter (omp_proc_bind_false = 0) - parameter (omp_proc_bind_true = 1) - parameter (omp_proc_bind_master = 2) - parameter (omp_proc_bind_close = 3) - parameter (omp_proc_bind_spread = 4) - parameter (openmp_version = 201307) - - external omp_init_lock, omp_init_nest_lock - external omp_destroy_lock, omp_destroy_nest_lock - external omp_set_lock, omp_set_nest_lock - external omp_unset_lock, omp_unset_nest_lock - external omp_set_dynamic, omp_set_nested - external omp_set_num_threads - - external omp_get_dynamic, omp_get_nested - logical(4) omp_get_dynamic, omp_get_nested - external omp_test_lock, omp_in_parallel - logical(4) omp_test_lock, omp_in_parallel - - external omp_get_max_threads, omp_get_num_procs - integer(4) omp_get_max_threads, omp_get_num_procs - external omp_get_num_threads, omp_get_thread_num - integer(4) omp_get_num_threads, omp_get_thread_num - external omp_test_nest_lock - integer(4) omp_test_nest_lock - - external omp_get_wtick, omp_get_wtime - double precision omp_get_wtick, omp_get_wtime - - external omp_set_schedule, omp_get_schedule - external omp_get_thread_limit, omp_set_max_active_levels - external omp_get_max_active_levels, omp_get_level - external omp_get_ancestor_thread_num, omp_get_team_size - external omp_get_active_level - integer(4) omp_get_thread_limit, omp_get_max_active_levels - integer(4) omp_get_level, omp_get_ancestor_thread_num - integer(4) omp_get_team_size, omp_get_active_level - - external omp_in_final - logical(4) omp_in_final - - 
external omp_get_cancelllation - logical(4) omp_get_cancelllation - - external omp_get_proc_bind - integer(omp_proc_bind_kind) omp_get_proc_bind - - external omp_set_default_device, omp_get_default_device - external omp_get_num_devices, omp_get_num_teams - external omp_get_team_num - integer(4) omp_get_default_device, omp_get_num_devices - integer(4) omp_get_num_teams, omp_get_team_num - - external omp_is_initial_device - logical(4) omp_is_initial_device diff --git a/usr/libgomp/ordered.c b/usr/libgomp/ordered.c deleted file mode 100644 index 69ca217b4d..0000000000 --- a/usr/libgomp/ordered.c +++ /dev/null @@ -1,252 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the ORDERED construct. */ - -#include "libgomp.h" - - -/* This function is called when first allocating an iteration block. That - is, the thread is not currently on the queue. The work-share lock must - be held on entry. */ - -void -gomp_ordered_first (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_work_share *ws = thr->ts.work_share; - unsigned index; - - /* Work share constructs can be orphaned. */ - if (team == NULL || team->nthreads == 1) - return; - - index = ws->ordered_cur + ws->ordered_num_used; - if (index >= team->nthreads) - index -= team->nthreads; - ws->ordered_team_ids[index] = thr->ts.team_id; - - /* If this is the first and only thread in the queue, then there is - no one to release us when we get to our ordered section. Post to - our own release queue now so that we won't block later. */ - if (ws->ordered_num_used++ == 0) - gomp_sem_post (team->ordered_release[thr->ts.team_id]); -} - -/* This function is called when completing the last iteration block. That - is, there are no more iterations to perform and so the thread should be - removed from the queue entirely. Because of the way ORDERED blocks are - managed, it follows that we currently own access to the ORDERED block, - and should now pass it on to the next thread. The work-share lock must - be held on entry. */ - -void -gomp_ordered_last (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_work_share *ws = thr->ts.work_share; - unsigned next_id; - - /* Work share constructs can be orphaned. */ - if (team == NULL || team->nthreads == 1) - return; - - /* We're no longer the owner. */ - ws->ordered_owner = -1; - - /* If we're not the last thread in the queue, then wake the next. 
*/ - if (--ws->ordered_num_used > 0) - { - unsigned next = ws->ordered_cur + 1; - if (next == team->nthreads) - next = 0; - ws->ordered_cur = next; - - next_id = ws->ordered_team_ids[next]; - gomp_sem_post (team->ordered_release[next_id]); - } -} - - -/* This function is called when allocating a subsequent allocation block. - That is, we're done with the current iteration block and we're allocating - another. This is the logical combination of a call to gomp_ordered_last - followed by a call to gomp_ordered_first. The work-share lock must be - held on entry. */ - -void -gomp_ordered_next (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_work_share *ws = thr->ts.work_share; - unsigned index, next_id; - - /* Work share constructs can be orphaned. */ - if (team == NULL || team->nthreads == 1) - return; - - /* We're no longer the owner. */ - ws->ordered_owner = -1; - - /* If there's only one thread in the queue, that must be us. */ - if (ws->ordered_num_used == 1) - { - /* We have a similar situation as in gomp_ordered_first - where we need to post to our own release semaphore. */ - gomp_sem_post (team->ordered_release[thr->ts.team_id]); - return; - } - - /* If the queue is entirely full, then we move ourself to the end of - the queue merely by incrementing ordered_cur. Only if it's not - full do we have to write our id. */ - if (ws->ordered_num_used < team->nthreads) - { - index = ws->ordered_cur + ws->ordered_num_used; - if (index >= team->nthreads) - index -= team->nthreads; - ws->ordered_team_ids[index] = thr->ts.team_id; - } - - index = ws->ordered_cur + 1; - if (index == team->nthreads) - index = 0; - ws->ordered_cur = index; - - next_id = ws->ordered_team_ids[index]; - gomp_sem_post (team->ordered_release[next_id]); -} - - -/* This function is called when a statically scheduled loop is first - being created. */ - -void -gomp_ordered_static_init (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - - if (team == NULL || team->nthreads == 1) - return; - - gomp_sem_post (team->ordered_release[0]); -} - -/* This function is called when a statically scheduled loop is moving to - the next allocation block. Static schedules are not first come first - served like the others, so we're to move to the numerically next thread, - not the next thread on a list. The work-share lock should *not* be held - on entry. */ - -void -gomp_ordered_static_next (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_work_share *ws = thr->ts.work_share; - unsigned id = thr->ts.team_id; - - if (team == NULL || team->nthreads == 1) - return; - - ws->ordered_owner = -1; - - /* This thread currently owns the lock. Increment the owner. */ - if (++id == team->nthreads) - id = 0; - ws->ordered_team_ids[0] = id; - gomp_sem_post (team->ordered_release[id]); -} - -/* This function is called when we need to assert that the thread owns the - ordered section. Due to the problem of posted-but-not-waited semaphores, - this needs to happen before completing a loop iteration. */ - -void -gomp_ordered_sync (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_work_share *ws = thr->ts.work_share; - - /* Work share constructs can be orphaned. But this clearly means that - we are the only thread, and so we automatically own the section. */ - if (team == NULL || team->nthreads == 1) - return; - - /* ??? 
I believe it to be safe to access this data without taking the - ws->lock. The only presumed race condition is with the previous - thread on the queue incrementing ordered_cur such that it points - to us, concurrently with our check below. But our team_id is - already present in the queue, and the other thread will always - post to our release semaphore. So the two cases are that we will - either win the race an momentarily block on the semaphore, or lose - the race and find the semaphore already unlocked and so not block. - Either way we get correct results. - However, there is an implicit flush on entry to an ordered region, - so we do need to have a barrier here. If we were taking a lock - this could be MEMMODEL_RELEASE since the acquire would be coverd - by the lock. */ - - __atomic_thread_fence (MEMMODEL_ACQ_REL); - if (ws->ordered_owner != thr->ts.team_id) - { - gomp_sem_wait (team->ordered_release[thr->ts.team_id]); - ws->ordered_owner = thr->ts.team_id; - } -} - -/* This function is called by user code when encountering the start of an - ORDERED block. We must check to see if the current thread is at the - head of the queue, and if not, block. */ - -#ifdef HAVE_ATTRIBUTE_ALIAS -extern void GOMP_ordered_start (void) - __attribute__((alias ("gomp_ordered_sync"))); -#else -void -GOMP_ordered_start (void) -{ - gomp_ordered_sync (); -} -#endif - -/* This function is called by user code when encountering the end of an - ORDERED block. With the current ORDERED implementation there's nothing - for us to do. - - However, the current implementation has a flaw in that it does not allow - the next thread into the ORDERED section immediately after the current - thread exits the ORDERED section in its last iteration. The existance - of this function allows the implementation to change. */ - -void -GOMP_ordered_end (void) -{ -} diff --git a/usr/libgomp/parallel.c b/usr/libgomp/parallel.c deleted file mode 100644 index 6d5ef050f9..0000000000 --- a/usr/libgomp/parallel.c +++ /dev/null @@ -1,302 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the (bare) PARALLEL construct. */ - -#include "libgomp.h" -#include - - -/* Determine the number of threads to be launched for a PARALLEL construct. - This algorithm is explicitly described in OpenMP 3.0 section 2.4.1. - SPECIFIED is a combination of the NUM_THREADS clause and the IF clause. - If the IF clause is false, SPECIFIED is forced to 1. When NUM_THREADS - is not present, SPECIFIED is 0. 
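/* Illustrative sketch (not part of this patch): the SPECIFIED argument
   described above is fed by the num_threads and if clauses.  With a false
   if clause the region is serialized no matter what num_threads asks for;
   otherwise num_threads is an upper bound that dyn-var and thread-limit-var
   may still reduce: */

#include <omp.h>
#include <stdio.h>

int main (void)
{
  int worth_it = 0;             /* pretend the problem is too small */

#pragma omp parallel num_threads(4) if(worth_it)
#pragma omp single
  printf ("team of %d\n", omp_get_num_threads ());   /* prints 1 */

  worth_it = 1;
#pragma omp parallel num_threads(4) if(worth_it)
#pragma omp single
  printf ("team of %d\n", omp_get_num_threads ());   /* at most 4 */

  return 0;
}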
*/ - -unsigned -gomp_resolve_num_threads (unsigned specified, unsigned count) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_task_icv *icv; - unsigned threads_requested, max_num_threads, num_threads; - unsigned long busy; - struct gomp_thread_pool *pool; - - icv = gomp_icv (false); - - if (specified == 1) - return 1; - else if (thr->ts.active_level >= 1 && !icv->nest_var) - return 1; - else if (thr->ts.active_level >= gomp_max_active_levels_var) - return 1; - - /* If NUM_THREADS not specified, use nthreads_var. */ - if (specified == 0) - threads_requested = icv->nthreads_var; - else - threads_requested = specified; - - max_num_threads = threads_requested; - - /* If dynamic threads are enabled, bound the number of threads - that we launch. */ - if (icv->dyn_var) - { - unsigned dyn = gomp_dynamic_max_threads (); - if (dyn < max_num_threads) - max_num_threads = dyn; - - /* Optimization for parallel sections. */ - if (count && count < max_num_threads) - max_num_threads = count; - } - - /* UINT_MAX stands for infinity. */ - if (__builtin_expect (icv->thread_limit_var == UINT_MAX, 1) - || max_num_threads == 1) - return max_num_threads; - - /* The threads_busy counter lives in thread_pool, if there - isn't a thread_pool yet, there must be just one thread - in the contention group. If thr->team is NULL, this isn't - nested parallel, so there is just one thread in the - contention group as well, no need to handle it atomically. */ - pool = thr->thread_pool; - if (thr->ts.team == NULL) - { - num_threads = max_num_threads; - if (num_threads > icv->thread_limit_var) - num_threads = icv->thread_limit_var; - if (pool) - pool->threads_busy = num_threads; - return num_threads; - } - -#ifdef HAVE_SYNC_BUILTINS - do - { - busy = pool->threads_busy; - num_threads = max_num_threads; - if (icv->thread_limit_var - busy + 1 < num_threads) - num_threads = icv->thread_limit_var - busy + 1; - } - while (__sync_val_compare_and_swap (&pool->threads_busy, - busy, busy + num_threads - 1) - != busy); -#else - gomp_mutex_lock (&gomp_managed_threads_lock); - num_threads = max_num_threads; - busy = pool->threads_busy; - if (icv->thread_limit_var - busy + 1 < num_threads) - num_threads = icv->thread_limit_var - busy + 1; - pool->threads_busy += num_threads - 1; - gomp_mutex_unlock (&gomp_managed_threads_lock); -#endif - - return num_threads; -} - -void -GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads) -{ - num_threads = gomp_resolve_num_threads (num_threads, 0); - gomp_team_start (fn, data, num_threads, 0, gomp_new_team (num_threads)); -} - -void -GOMP_parallel_end (void) -{ - struct gomp_task_icv *icv = gomp_icv (false); - if (__builtin_expect (icv->thread_limit_var != UINT_MAX, 0)) - { - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - unsigned int nthreads = team ? team->nthreads : 1; - gomp_team_end (); - if (nthreads > 1) - { - /* If not nested, there is just one thread in the - contention group left, no need for atomicity. 
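/* Illustrative sketch (not part of this patch): the active-level and
   nest-var checks in gomp_resolve_num_threads above are why an inner
   parallel region gets only one thread unless nesting is enabled (and even
   then it remains subject to the other limits discussed above): */

#include <omp.h>
#include <stdio.h>

int main (void)
{
  omp_set_nested (1);           /* without this, the inner team has 1 thread */

#pragma omp parallel num_threads(2)
#pragma omp parallel num_threads(2)
  {
#pragma omp single
    printf ("level %d, team of %d\n",
            omp_get_level (), omp_get_num_threads ());
  }
  return 0;
}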
*/ - if (thr->ts.team == NULL) - thr->thread_pool->threads_busy = 1; - else - { -#ifdef HAVE_SYNC_BUILTINS - __sync_fetch_and_add (&thr->thread_pool->threads_busy, - 1UL - nthreads); -#else - gomp_mutex_lock (&gomp_managed_threads_lock); - thr->thread_pool->threads_busy -= nthreads - 1; - gomp_mutex_unlock (&gomp_managed_threads_lock); -#endif - } - } - } - else - gomp_team_end (); -} -ialias (GOMP_parallel_end) - -void -GOMP_parallel (void (*fn) (void *), void *data, unsigned num_threads, unsigned int flags) -{ - num_threads = gomp_resolve_num_threads (num_threads, 0); - gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads)); - fn (data); - ialias_call (GOMP_parallel_end) (); -} - -bool -GOMP_cancellation_point (int which) -{ - if (!gomp_cancel_var) - return false; - - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS)) - { - if (team == NULL) - return false; - return team->work_share_cancelled != 0; - } - else if (which & GOMP_CANCEL_TASKGROUP) - { - if (thr->task->taskgroup && thr->task->taskgroup->cancelled) - return true; - /* FALLTHRU into the GOMP_CANCEL_PARALLEL case, - as #pragma omp cancel parallel also cancels all explicit - tasks. */ - } - if (team) - return gomp_team_barrier_cancelled (&team->barrier); - return false; -} -ialias (GOMP_cancellation_point) - -bool -GOMP_cancel (int which, bool do_cancel) -{ - if (!gomp_cancel_var) - return false; - - if (!do_cancel) - return ialias_call (GOMP_cancellation_point) (which); - - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS)) - { - /* In orphaned worksharing region, all we want to cancel - is current thread. */ - if (team != NULL) - team->work_share_cancelled = 1; - return true; - } - else if (which & GOMP_CANCEL_TASKGROUP) - { - if (thr->task->taskgroup && !thr->task->taskgroup->cancelled) - { - gomp_mutex_lock (&team->task_lock); - thr->task->taskgroup->cancelled = true; - gomp_mutex_unlock (&team->task_lock); - } - return true; - } - team->team_cancelled = 1; - gomp_team_barrier_cancel (team); - return true; -} - -/* The public OpenMP API for thread and team related inquiries. */ - -int -omp_get_num_threads (void) -{ - struct gomp_team *team = gomp_thread ()->ts.team; - return team ? team->nthreads : 1; -} - -int -omp_get_thread_num (void) -{ - return gomp_thread ()->ts.team_id; -} - -/* This wasn't right for OpenMP 2.5. Active region used to be non-zero - when the IF clause doesn't evaluate to false, starting with OpenMP 3.0 - it is non-zero with more than one thread in the team. 
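/* Illustrative sketch (not part of this patch): the OpenMP 3.0 semantics
   described above.  A region serialized by if(0) is still a region, so the
   nesting level rises while omp_in_parallel and the active level stay 0: */

#include <omp.h>
#include <stdio.h>

int main (void)
{
#pragma omp parallel num_threads(4) if(0)
  printf ("in_parallel=%d level=%d active_level=%d\n",
          omp_in_parallel (), omp_get_level (), omp_get_active_level ());
  return 0;
}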
*/ - -int -omp_in_parallel (void) -{ - return gomp_thread ()->ts.active_level > 0; -} - -int -omp_get_level (void) -{ - return gomp_thread ()->ts.level; -} - -int -omp_get_ancestor_thread_num (int level) -{ - struct gomp_team_state *ts = &gomp_thread ()->ts; - if (level < 0 || level > ts->level) - return -1; - for (level = ts->level - level; level > 0; --level) - ts = &ts->team->prev_ts; - return ts->team_id; -} - -int -omp_get_team_size (int level) -{ - struct gomp_team_state *ts = &gomp_thread ()->ts; - if (level < 0 || level > ts->level) - return -1; - for (level = ts->level - level; level > 0; --level) - ts = &ts->team->prev_ts; - if (ts->team == NULL) - return 1; - else - return ts->team->nthreads; -} - -int -omp_get_active_level (void) -{ - return gomp_thread ()->ts.active_level; -} - -ialias (omp_get_num_threads) -ialias (omp_get_thread_num) -ialias (omp_in_parallel) -ialias (omp_get_level) -ialias (omp_get_ancestor_thread_num) -ialias (omp_get_team_size) -ialias (omp_get_active_level) diff --git a/usr/libgomp/proc.c b/usr/libgomp/proc.c deleted file mode 100644 index 2fb955179e..0000000000 --- a/usr/libgomp/proc.c +++ /dev/null @@ -1,111 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file contains system specific routines related to counting - online processors and dynamic load balancing. It is expected that - a system may well want to write special versions of each of these. - - The following implementation uses a mix of POSIX and BSD routines. */ - -#include "libgomp.h" -#include -#include -#ifdef HAVE_GETLOADAVG -# ifdef HAVE_SYS_LOADAVG_H -# include -# endif -#endif - -unsigned int get_num_cpus(void); - -/* At startup, determine the default number of threads. It would seem - this should be related to the number of cpus online. */ - -void -gomp_init_num_threads (void) -{ -#ifdef __hermit__ - gomp_global_icv.nthreads_var = get_num_cpus(); -#elif defined(_SC_NPROCESSORS_ONLN) - gomp_global_icv.nthreads_var = sysconf (_SC_NPROCESSORS_ONLN); -#endif -} - -/* When OMP_DYNAMIC is set, at thread launch determine the number of - threads we should spawn for this team. */ -/* ??? I have no idea what best practice for this is. Surely some - function of the number of processors that are *still* online and - the load average. Here I use the number of processors online - minus the 15 minute load average. 
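/* Illustrative sketch (not part of this patch): the load-average heuristic
   above only matters once dynamic adjustment is enabled, e.g.: */

#include <omp.h>
#include <stdio.h>

int main (void)
{
  omp_set_dynamic (1);          /* allow the runtime to trim the request */

#pragma omp parallel num_threads(64)
#pragma omp single
  printf ("asked for 64, got %d threads\n", omp_get_num_threads ());

  return 0;
}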
*/ - -unsigned -gomp_dynamic_max_threads (void) -{ - unsigned n_onln, loadavg; -#ifndef __hermit__ - unsigned nthreads_var = gomp_icv (false)->nthreads_var; -#endif - -#ifdef __hermit__ - n_onln = get_num_cpus(); -#elif defined(_SC_NPROCESSORS_ONLN) - n_onln = sysconf (_SC_NPROCESSORS_ONLN); - if (n_onln > nthreads_var) - n_onln = nthreads_var; -#else - n_onln = nthreads_var; -#endif - - loadavg = 0; -#ifdef HAVE_GETLOADAVG - { - double dloadavg[3]; - if (getloadavg (dloadavg, 3) == 3) - { - /* Add 0.1 to get a kind of biased rounding. */ - loadavg = dloadavg[2] + 0.1; - } - } -#endif - - if (loadavg >= n_onln) - return 1; - else - return n_onln - loadavg; -} - -int -omp_get_num_procs (void) -{ -#ifdef __hermit__ - return get_num_cpus(); -#elif defined(_SC_NPROCESSORS_ONLN); - return sysconf (_SC_NPROCESSORS_ONLN); -#else - return gomp_icv (false)->nthreads_var; -#endif -} - -ialias (omp_get_num_procs) diff --git a/usr/libgomp/ptrlock.c b/usr/libgomp/ptrlock.c deleted file mode 100644 index 39bb64da0f..0000000000 --- a/usr/libgomp/ptrlock.c +++ /dev/null @@ -1 +0,0 @@ -/* Everything is in the header. */ diff --git a/usr/libgomp/ptrlock.h b/usr/libgomp/ptrlock.h deleted file mode 100644 index 86faad780a..0000000000 --- a/usr/libgomp/ptrlock.h +++ /dev/null @@ -1,66 +0,0 @@ -/* Copyright (C) 2008-2015 Free Software Foundation, Inc. - Contributed by Jakub Jelinek . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This is a generic POSIX implementation of a mutex synchronization - mechanism for libgomp. This type is private to the library. */ - -#ifndef GOMP_PTRLOCK_H -#define GOMP_PTRLOCK_H 1 - -typedef struct { void *ptr; gomp_mutex_t lock; } gomp_ptrlock_t; - -static inline void gomp_ptrlock_init (gomp_ptrlock_t *ptrlock, void *ptr) -{ - ptrlock->ptr = ptr; - gomp_mutex_init (&ptrlock->lock); -} - -static inline void *gomp_ptrlock_get (gomp_ptrlock_t *ptrlock) -{ - if (ptrlock->ptr != NULL) - return ptrlock->ptr; - - gomp_mutex_lock (&ptrlock->lock); - if (ptrlock->ptr != NULL) - { - gomp_mutex_unlock (&ptrlock->lock); - return ptrlock->ptr; - } - - return NULL; -} - -static inline void gomp_ptrlock_set (gomp_ptrlock_t *ptrlock, void *ptr) -{ - ptrlock->ptr = ptr; - gomp_mutex_unlock (&ptrlock->lock); -} - -static inline void gomp_ptrlock_destroy (gomp_ptrlock_t *ptrlock) -{ - gomp_mutex_destroy (&ptrlock->lock); -} - -#endif /* GOMP_PTRLOCK_H */ diff --git a/usr/libgomp/sections.c b/usr/libgomp/sections.c deleted file mode 100644 index f3a17259ff..0000000000 --- a/usr/libgomp/sections.c +++ /dev/null @@ -1,182 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. 
- Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the SECTIONS construct. */ - -#include "libgomp.h" - - -/* Initialize the given work share construct from the given arguments. */ - -static inline void -gomp_sections_init (struct gomp_work_share *ws, unsigned count) -{ - ws->sched = GFS_DYNAMIC; - ws->chunk_size = 1; - ws->end = count + 1L; - ws->incr = 1; - ws->next = 1; -#ifdef HAVE_SYNC_BUILTINS - /* Prepare things to make each iteration faster. */ - if (sizeof (long) > sizeof (unsigned)) - ws->mode = 1; - else - { - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - long nthreads = team ? team->nthreads : 1; - - ws->mode = ((nthreads | ws->end) - < 1UL << (sizeof (long) * __CHAR_BIT__ / 2 - 1)); - } -#else - ws->mode = 0; -#endif -} - -/* This routine is called when first encountering a sections construct - that is not bound directly to a parallel construct. The first thread - that arrives will create the work-share construct; subsequent threads - will see the construct exists and allocate work from it. - - COUNT is the number of sections in this construct. - - Returns the 1-based section number for this thread to perform, or 0 if - all work was assigned to other threads prior to this thread's arrival. */ - -unsigned -GOMP_sections_start (unsigned count) -{ - struct gomp_thread *thr = gomp_thread (); - long s, e, ret; - - if (gomp_work_share_start (false)) - { - gomp_sections_init (thr->ts.work_share, count); - gomp_work_share_init_done (); - } - -#ifdef HAVE_SYNC_BUILTINS - if (gomp_iter_dynamic_next (&s, &e)) - ret = s; - else - ret = 0; -#else - gomp_mutex_lock (&thr->ts.work_share->lock); - if (gomp_iter_dynamic_next_locked (&s, &e)) - ret = s; - else - ret = 0; - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -/* This routine is called when the thread completes processing of the - section currently assigned to it. If the work-share construct is - bound directly to a parallel construct, then the construct may have - been set up before the parallel. In which case, this may be the - first iteration for the thread. - - Returns the 1-based section number for this thread to perform, or 0 if - all work was assigned to other threads prior to this thread's arrival. 
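/* Illustrative sketch (not part of this patch): the user-level construct the
   routines above implement.  Each section body corresponds to one of the
   1-based section numbers handed out by GOMP_sections_start and
   GOMP_sections_next: */

#include <omp.h>
#include <stdio.h>

int main (void)
{
#pragma omp parallel sections num_threads(2)
  {
#pragma omp section
    printf ("section 1 on thread %d\n", omp_get_thread_num ());
#pragma omp section
    printf ("section 2 on thread %d\n", omp_get_thread_num ());
  }
  return 0;
}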
*/ - -unsigned -GOMP_sections_next (void) -{ - long s, e, ret; - -#ifdef HAVE_SYNC_BUILTINS - if (gomp_iter_dynamic_next (&s, &e)) - ret = s; - else - ret = 0; -#else - struct gomp_thread *thr = gomp_thread (); - - gomp_mutex_lock (&thr->ts.work_share->lock); - if (gomp_iter_dynamic_next_locked (&s, &e)) - ret = s; - else - ret = 0; - gomp_mutex_unlock (&thr->ts.work_share->lock); -#endif - - return ret; -} - -/* This routine pre-initializes a work-share construct to avoid one - synchronization once we get into the loop. */ - -void -GOMP_parallel_sections_start (void (*fn) (void *), void *data, - unsigned num_threads, unsigned count) -{ - struct gomp_team *team; - - num_threads = gomp_resolve_num_threads (num_threads, count); - team = gomp_new_team (num_threads); - gomp_sections_init (&team->work_shares[0], count); - gomp_team_start (fn, data, num_threads, 0, team); -} - -ialias_redirect (GOMP_parallel_end) - -void -GOMP_parallel_sections (void (*fn) (void *), void *data, - unsigned num_threads, unsigned count, unsigned flags) -{ - struct gomp_team *team; - - num_threads = gomp_resolve_num_threads (num_threads, count); - team = gomp_new_team (num_threads); - gomp_sections_init (&team->work_shares[0], count); - gomp_team_start (fn, data, num_threads, flags, team); - fn (data); - GOMP_parallel_end (); -} - -/* The GOMP_section_end* routines are called after the thread is told - that all sections are complete. The first two versions synchronize - all threads; the nowait version does not. */ - -void -GOMP_sections_end (void) -{ - gomp_work_share_end (); -} - -bool -GOMP_sections_end_cancel (void) -{ - return gomp_work_share_end_cancel (); -} - -void -GOMP_sections_end_nowait (void) -{ - gomp_work_share_end_nowait (); -} diff --git a/usr/libgomp/sem.c b/usr/libgomp/sem.c deleted file mode 100644 index 4b8fb08315..0000000000 --- a/usr/libgomp/sem.c +++ /dev/null @@ -1,124 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This is the default POSIX 1003.1b implementation of a semaphore - synchronization mechanism for libgomp. This type is private to - the library. - - This is a bit heavy weight for what we need, in that we're not - interested in sem_wait as a cancelation point, but it's not too - bad for a default. 
*/ - -#include "libgomp.h" - -#ifdef HAVE_BROKEN_POSIX_SEMAPHORES -#include - -void gomp_sem_init (gomp_sem_t *sem, int value) -{ - int ret; - - ret = pthread_mutex_init (&sem->mutex, NULL); - if (ret) - return; - - ret = pthread_cond_init (&sem->cond, NULL); - if (ret) - return; - - sem->value = value; -} - -void gomp_sem_wait (gomp_sem_t *sem) -{ - int ret; - - ret = pthread_mutex_lock (&sem->mutex); - if (ret) - return; - - if (sem->value > 0) - { - sem->value--; - ret = pthread_mutex_unlock (&sem->mutex); - return; - } - - while (sem->value <= 0) - { - ret = pthread_cond_wait (&sem->cond, &sem->mutex); - if (ret) - { - pthread_mutex_unlock (&sem->mutex); - return; - } - } - - sem->value--; - ret = pthread_mutex_unlock (&sem->mutex); - return; -} - -void gomp_sem_post (gomp_sem_t *sem) -{ - int ret; - - ret = pthread_mutex_lock (&sem->mutex); - if (ret) - return; - - sem->value++; - - ret = pthread_mutex_unlock (&sem->mutex); - if (ret) - return; - - ret = pthread_cond_signal (&sem->cond); - - return; -} - -void gomp_sem_destroy (gomp_sem_t *sem) -{ - int ret; - - ret = pthread_mutex_destroy (&sem->mutex); - if (ret) - return; - - ret = pthread_cond_destroy (&sem->cond); - - return; -} -#else /* HAVE_BROKEN_POSIX_SEMAPHORES */ -void -gomp_sem_wait (gomp_sem_t *sem) -{ - /* With POSIX, the wait can be canceled by signals. We don't want that. - It is expected that the return value here is -1 and errno is EINTR. */ - while (sem_wait (sem) != 0) - continue; -} -#endif diff --git a/usr/libgomp/sem.h b/usr/libgomp/sem.h deleted file mode 100644 index 51ba379dad..0000000000 --- a/usr/libgomp/sem.h +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This is the default POSIX 1003.1b implementation of a semaphore - synchronization mechanism for libgomp. This type is private to - the library. - - This is a bit heavy weight for what we need, in that we're not - interested in sem_wait as a cancelation point, but it's not too - bad for a default. 
*/ - -#ifndef GOMP_SEM_H -#define GOMP_SEM_H 1 - -#ifdef HAVE_ATTRIBUTE_VISIBILITY -# pragma GCC visibility push(default) -#endif - -#include - -#ifdef HAVE_ATTRIBUTE_VISIBILITY -# pragma GCC visibility pop -#endif - -#ifdef HAVE_BROKEN_POSIX_SEMAPHORES -#include - -struct gomp_sem -{ - pthread_mutex_t mutex; - pthread_cond_t cond; - int value; -}; - -typedef struct gomp_sem gomp_sem_t; - -extern void gomp_sem_init (gomp_sem_t *sem, int value); - -extern void gomp_sem_wait (gomp_sem_t *sem); - -extern void gomp_sem_post (gomp_sem_t *sem); - -extern void gomp_sem_destroy (gomp_sem_t *sem); - -#else /* HAVE_BROKEN_POSIX_SEMAPHORES */ - -typedef sem_t gomp_sem_t; - -static inline void gomp_sem_init (gomp_sem_t *sem, int value) -{ - sem_init (sem, 0, value); -} - -extern void gomp_sem_wait (gomp_sem_t *sem); - -static inline void gomp_sem_post (gomp_sem_t *sem) -{ - sem_post (sem); -} - -static inline void gomp_sem_destroy (gomp_sem_t *sem) -{ - sem_destroy (sem); -} -#endif /* doesn't HAVE_BROKEN_POSIX_SEMAPHORES */ -#endif /* GOMP_SEM_H */ diff --git a/usr/libgomp/single.c b/usr/libgomp/single.c deleted file mode 100644 index 7cb6eed382..0000000000 --- a/usr/libgomp/single.c +++ /dev/null @@ -1,105 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the SINGLE construct. */ - -#include "libgomp.h" - - -/* This routine is called when first encountering a SINGLE construct that - doesn't have a COPYPRIVATE clause. Returns true if this is the thread - that should execute the clause. */ - -bool -GOMP_single_start (void) -{ -#ifdef HAVE_SYNC_BUILTINS - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - unsigned long single_count; - - if (__builtin_expect (team == NULL, 0)) - return true; - - single_count = thr->ts.single_count++; - return __sync_bool_compare_and_swap (&team->single_count, single_count, - single_count + 1L); -#else - bool ret = gomp_work_share_start (false); - if (ret) - gomp_work_share_init_done (); - gomp_work_share_end_nowait (); - return ret; -#endif -} - -/* This routine is called when first encountering a SINGLE construct that - does have a COPYPRIVATE clause. Returns NULL if this is the thread - that should execute the clause; otherwise the return value is pointer - given to GOMP_single_copy_end by the thread that did execute the clause. 
*/ - -void * -GOMP_single_copy_start (void) -{ - struct gomp_thread *thr = gomp_thread (); - - bool first; - void *ret; - - first = gomp_work_share_start (false); - - if (first) - { - gomp_work_share_init_done (); - ret = NULL; - } - else - { - gomp_team_barrier_wait (&thr->ts.team->barrier); - - ret = thr->ts.work_share->copyprivate; - gomp_work_share_end_nowait (); - } - - return ret; -} - -/* This routine is called when the thread that entered a SINGLE construct - with a COPYPRIVATE clause gets to the end of the construct. */ - -void -GOMP_single_copy_end (void *data) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - - if (team != NULL) - { - thr->ts.work_share->copyprivate = data; - gomp_team_barrier_wait (&team->barrier); - } - - gomp_work_share_end_nowait (); -} diff --git a/usr/libgomp/task.c b/usr/libgomp/task.c deleted file mode 100644 index 74920d5ddb..0000000000 --- a/usr/libgomp/task.c +++ /dev/null @@ -1,1216 +0,0 @@ -/* Copyright (C) 2007-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the maintainence of tasks in response to task - creation and termination. */ - -#include "libgomp.h" -#include -#include - -typedef struct gomp_task_depend_entry *hash_entry_type; - -static inline void * -htab_alloc (size_t size) -{ - return gomp_malloc (size); -} - -static inline void -htab_free (void *ptr) -{ - free (ptr); -} - -#include "hashtab.h" - -static inline hashval_t -htab_hash (hash_entry_type element) -{ - return hash_pointer (element->addr); -} - -static inline bool -htab_eq (hash_entry_type x, hash_entry_type y) -{ - return x->addr == y->addr; -} - -/* Create a new task data structure. */ - -void -gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task, - struct gomp_task_icv *prev_icv) -{ - task->parent = parent_task; - task->icv = *prev_icv; - task->kind = GOMP_TASK_IMPLICIT; - task->taskwait = NULL; - task->in_tied_task = false; - task->final_task = false; - task->copy_ctors_done = false; - task->parent_depends_on = false; - task->children = NULL; - task->taskgroup = NULL; - task->dependers = NULL; - task->depend_hash = NULL; - task->depend_count = 0; -} - -/* Clean up a task, after completing it. 
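GOMP_single_start, GOMP_single_copy_start and GOMP_single_copy_end back the single construct; a minimal sketch of the copyprivate broadcast they implement (assuming gcc -fopenmp):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  #pragma omp parallel
  {
    int token;                          /* private: one copy per thread */

    /* Exactly one thread executes the block; copyprivate then broadcasts
       its value of token to every other thread's private copy. */
    #pragma omp single copyprivate(token)
    token = omp_get_thread_num();

    printf("thread %d sees token %d\n", omp_get_thread_num(), token);
  }
  return 0;
}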
*/ - -void -gomp_end_task (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_task *task = thr->task; - - gomp_finish_task (task); - thr->task = task->parent; -} - -static inline void -gomp_clear_parent (struct gomp_task *children) -{ - struct gomp_task *task = children; - - if (task) - do - { - task->parent = NULL; - task = task->next_child; - } - while (task != children); -} - -static void gomp_task_maybe_wait_for_dependencies (void **depend); - -/* Called when encountering an explicit task directive. If IF_CLAUSE is - false, then we must not delay in executing the task. If UNTIED is true, - then the task may be executed by any member of the team. */ - -void -GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *), - long arg_size, long arg_align, bool if_clause, unsigned flags, - void **depend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - -#ifdef HAVE_BROKEN_POSIX_SEMAPHORES - /* If pthread_mutex_* is used for omp_*lock*, then each task must be - tied to one thread all the time. This means UNTIED tasks must be - tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN - might be running on different thread than FN. */ - if (cpyfn) - if_clause = false; - if (flags & 1) - flags &= ~1; -#endif - - /* If parallel or taskgroup has been cancelled, don't start new tasks. */ - if (team - && (gomp_team_barrier_cancelled (&team->barrier) - || (thr->task->taskgroup && thr->task->taskgroup->cancelled))) - return; - - if (!if_clause || team == NULL - || (thr->task && thr->task->final_task) - || team->task_count > 64 * team->nthreads) - { - struct gomp_task task; - - /* If there are depend clauses and earlier deferred sibling tasks - with depend clauses, check if there isn't a dependency. If there - is, we need to wait for them. There is no need to handle - depend clauses for non-deferred tasks other than this, because - the parent task is suspended until the child task finishes and thus - it can't start further child tasks. */ - if ((flags & 8) && thr->task && thr->task->depend_hash) - gomp_task_maybe_wait_for_dependencies (depend); - - gomp_init_task (&task, thr->task, gomp_icv (false)); - task.kind = GOMP_TASK_IFFALSE; - task.final_task = (thr->task && thr->task->final_task) || (flags & 2); - if (thr->task) - { - task.in_tied_task = thr->task->in_tied_task; - task.taskgroup = thr->task->taskgroup; - } - thr->task = &task; - if (__builtin_expect (cpyfn != NULL, 0)) - { - char buf[arg_size + arg_align - 1]; - char *arg = (char *) (((uintptr_t) buf + arg_align - 1) - & ~(uintptr_t) (arg_align - 1)); - cpyfn (arg, data); - fn (arg); - } - else - fn (data); - /* Access to "children" is normally done inside a task_lock - mutex region, but the only way this particular task.children - can be set is if this thread's task work function (fn) - creates children. So since the setter is *this* thread, we - need no barriers here when testing for non-NULL. We can have - task.children set by the current thread then changed by a - child thread, but seeing a stale non-NULL value is not a - problem. Once past the task_lock acquisition, this thread - will see the real value of task.children. 
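The undeferred path above (GOMP_TASK_IFFALSE, final_task) corresponds to the if and final clauses on a task directive; a small sketch, assuming gcc -fopenmp:

#include <omp.h>
#include <stdio.h>

int main(void)
{
  #pragma omp parallel
  #pragma omp single
  {
    /* if(0): the task is undeferred and runs immediately on the
       encountering thread (the !if_clause branch above). */
    #pragma omp task if(0)
    printf("undeferred task on thread %d\n", omp_get_thread_num());

    /* final(1): the task becomes final, so its descendant tasks are
       included and also run immediately in place. */
    #pragma omp task final(1)
    printf("inside a final task: omp_in_final() = %d\n", omp_in_final());
  }
  return 0;
}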
*/ - if (task.children != NULL) - { - gomp_mutex_lock (&team->task_lock); - gomp_clear_parent (task.children); - gomp_mutex_unlock (&team->task_lock); - } - gomp_end_task (); - } - else - { - struct gomp_task *task; - struct gomp_task *parent = thr->task; - struct gomp_taskgroup *taskgroup = parent->taskgroup; - char *arg; - bool do_wake; - size_t depend_size = 0; - - if (flags & 8) - depend_size = ((uintptr_t) depend[0] - * sizeof (struct gomp_task_depend_entry)); - task = gomp_malloc (sizeof (*task) + depend_size - + arg_size + arg_align - 1); - arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1) - & ~(uintptr_t) (arg_align - 1)); - gomp_init_task (task, parent, gomp_icv (false)); - task->kind = GOMP_TASK_IFFALSE; - task->in_tied_task = parent->in_tied_task; - task->taskgroup = taskgroup; - thr->task = task; - if (cpyfn) - { - cpyfn (arg, data); - task->copy_ctors_done = true; - } - else - memcpy (arg, data, arg_size); - thr->task = parent; - task->kind = GOMP_TASK_WAITING; - task->fn = fn; - task->fn_data = arg; - task->final_task = (flags & 2) >> 1; - gomp_mutex_lock (&team->task_lock); - /* If parallel or taskgroup has been cancelled, don't start new - tasks. */ - if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier) - || (taskgroup && taskgroup->cancelled)) - && !task->copy_ctors_done, 0)) - { - gomp_mutex_unlock (&team->task_lock); - gomp_finish_task (task); - free (task); - return; - } - if (taskgroup) - taskgroup->num_children++; - if (depend_size) - { - size_t ndepend = (uintptr_t) depend[0]; - size_t nout = (uintptr_t) depend[1]; - size_t i; - hash_entry_type ent; - - task->depend_count = ndepend; - task->num_dependees = 0; - if (parent->depend_hash == NULL) - parent->depend_hash - = htab_create (2 * ndepend > 12 ? 2 * ndepend : 12); - for (i = 0; i < ndepend; i++) - { - task->depend[i].addr = depend[2 + i]; - task->depend[i].next = NULL; - task->depend[i].prev = NULL; - task->depend[i].task = task; - task->depend[i].is_in = i >= nout; - task->depend[i].redundant = false; - task->depend[i].redundant_out = false; - - hash_entry_type *slot - = htab_find_slot (&parent->depend_hash, &task->depend[i], - INSERT); - hash_entry_type out = NULL, last = NULL; - if (*slot) - { - /* If multiple depends on the same task are the - same, all but the first one are redundant. - As inout/out come first, if any of them is - inout/out, it will win, which is the right - semantics. */ - if ((*slot)->task == task) - { - task->depend[i].redundant = true; - continue; - } - for (ent = *slot; ent; ent = ent->next) - { - if (ent->redundant_out) - break; - - last = ent; - - /* depend(in:...) doesn't depend on earlier - depend(in:...). */ - if (i >= nout && ent->is_in) - continue; - - if (!ent->is_in) - out = ent; - - struct gomp_task *tsk = ent->task; - if (tsk->dependers == NULL) - { - tsk->dependers - = gomp_malloc (sizeof (struct gomp_dependers_vec) - + 6 * sizeof (struct gomp_task *)); - tsk->dependers->n_elem = 1; - tsk->dependers->allocated = 6; - tsk->dependers->elem[0] = task; - task->num_dependees++; - continue; - } - /* We already have some other dependency on tsk - from earlier depend clause. 
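The depend-hash bookkeeping above orders sibling tasks by address; on the user side this is the depend clause. A minimal producer/consumer sketch (OpenMP 4.0 depend syntax, gcc -fopenmp):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  int x = 0;

  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp task depend(out: x)   /* producer: out dependence on &x */
    x = 42;

    #pragma omp task depend(in: x)    /* consumer: must run after the producer */
    printf("x = %d\n", x);
  }                                   /* barrier: both tasks finish here */
  return 0;
}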
*/ - else if (tsk->dependers->n_elem - && (tsk->dependers->elem[tsk->dependers->n_elem - - 1] - == task)) - continue; - else if (tsk->dependers->n_elem - == tsk->dependers->allocated) - { - tsk->dependers->allocated - = tsk->dependers->allocated * 2 + 2; - tsk->dependers - = gomp_realloc (tsk->dependers, - sizeof (struct gomp_dependers_vec) - + (tsk->dependers->allocated - * sizeof (struct gomp_task *))); - } - tsk->dependers->elem[tsk->dependers->n_elem++] = task; - task->num_dependees++; - } - task->depend[i].next = *slot; - (*slot)->prev = &task->depend[i]; - } - *slot = &task->depend[i]; - - /* There is no need to store more than one depend({,in}out:) - task per address in the hash table chain for the purpose - of creation of deferred tasks, because each out - depends on all earlier outs, thus it is enough to record - just the last depend({,in}out:). For depend(in:), we need - to keep all of the previous ones not terminated yet, because - a later depend({,in}out:) might need to depend on all of - them. So, if the new task's clause is depend({,in}out:), - we know there is at most one other depend({,in}out:) clause - in the list (out). For non-deferred tasks we want to see - all outs, so they are moved to the end of the chain, - after first redundant_out entry all following entries - should be redundant_out. */ - if (!task->depend[i].is_in && out) - { - if (out != last) - { - out->next->prev = out->prev; - out->prev->next = out->next; - out->next = last->next; - out->prev = last; - last->next = out; - if (out->next) - out->next->prev = out; - } - out->redundant_out = true; - } - } - if (task->num_dependees) - { - gomp_mutex_unlock (&team->task_lock); - return; - } - } - if (parent->children) - { - task->next_child = parent->children; - task->prev_child = parent->children->prev_child; - task->next_child->prev_child = task; - task->prev_child->next_child = task; - } - else - { - task->next_child = task; - task->prev_child = task; - } - parent->children = task; - if (taskgroup) - { - if (taskgroup->children) - { - task->next_taskgroup = taskgroup->children; - task->prev_taskgroup = taskgroup->children->prev_taskgroup; - task->next_taskgroup->prev_taskgroup = task; - task->prev_taskgroup->next_taskgroup = task; - } - else - { - task->next_taskgroup = task; - task->prev_taskgroup = task; - } - taskgroup->children = task; - } - if (team->task_queue) - { - task->next_queue = team->task_queue; - task->prev_queue = team->task_queue->prev_queue; - task->next_queue->prev_queue = task; - task->prev_queue->next_queue = task; - } - else - { - task->next_queue = task; - task->prev_queue = task; - team->task_queue = task; - } - ++team->task_count; - ++team->task_queued_count; - gomp_team_barrier_set_task_pending (&team->barrier); - do_wake = team->task_running_count + !parent->in_tied_task - < team->nthreads; - gomp_mutex_unlock (&team->task_lock); - if (do_wake) - gomp_team_barrier_wake (&team->barrier, 1); - } -} - -static inline bool -gomp_task_run_pre (struct gomp_task *child_task, struct gomp_task *parent, - struct gomp_taskgroup *taskgroup, struct gomp_team *team) -{ - if (parent) - { - if (parent->children == child_task) - parent->children = child_task->next_child; - if (__builtin_expect (child_task->parent_depends_on, 0) - && parent->taskwait->last_parent_depends_on == child_task) - { - if (child_task->prev_child->kind == GOMP_TASK_WAITING - && child_task->prev_child->parent_depends_on) - parent->taskwait->last_parent_depends_on = child_task->prev_child; - else - 
parent->taskwait->last_parent_depends_on = NULL; - } - } - if (taskgroup && taskgroup->children == child_task) - taskgroup->children = child_task->next_taskgroup; - child_task->prev_queue->next_queue = child_task->next_queue; - child_task->next_queue->prev_queue = child_task->prev_queue; - if (team->task_queue == child_task) - { - if (child_task->next_queue != child_task) - team->task_queue = child_task->next_queue; - else - team->task_queue = NULL; - } - child_task->kind = GOMP_TASK_TIED; - if (--team->task_queued_count == 0) - gomp_team_barrier_clear_task_pending (&team->barrier); - if ((gomp_team_barrier_cancelled (&team->barrier) - || (taskgroup && taskgroup->cancelled)) - && !child_task->copy_ctors_done) - return true; - return false; -} - -static void -gomp_task_run_post_handle_depend_hash (struct gomp_task *child_task) -{ - struct gomp_task *parent = child_task->parent; - size_t i; - - for (i = 0; i < child_task->depend_count; i++) - if (!child_task->depend[i].redundant) - { - if (child_task->depend[i].next) - child_task->depend[i].next->prev = child_task->depend[i].prev; - if (child_task->depend[i].prev) - child_task->depend[i].prev->next = child_task->depend[i].next; - else - { - hash_entry_type *slot - = htab_find_slot (&parent->depend_hash, &child_task->depend[i], - NO_INSERT); - if (*slot != &child_task->depend[i]) - abort (); - if (child_task->depend[i].next) - *slot = child_task->depend[i].next; - else - htab_clear_slot (parent->depend_hash, slot); - } - } -} - -static size_t -gomp_task_run_post_handle_dependers (struct gomp_task *child_task, - struct gomp_team *team) -{ - struct gomp_task *parent = child_task->parent; - size_t i, count = child_task->dependers->n_elem, ret = 0; - for (i = 0; i < count; i++) - { - struct gomp_task *task = child_task->dependers->elem[i]; - if (--task->num_dependees != 0) - continue; - - struct gomp_taskgroup *taskgroup = task->taskgroup; - if (parent) - { - if (parent->children) - { - /* If parent is in gomp_task_maybe_wait_for_dependencies - and it doesn't need to wait for this task, put it after - all ready to run tasks it needs to wait for. 
*/ - if (parent->taskwait && parent->taskwait->last_parent_depends_on - && !task->parent_depends_on) - { - struct gomp_task *last_parent_depends_on - = parent->taskwait->last_parent_depends_on; - task->next_child = last_parent_depends_on->next_child; - task->prev_child = last_parent_depends_on; - } - else - { - task->next_child = parent->children; - task->prev_child = parent->children->prev_child; - parent->children = task; - } - task->next_child->prev_child = task; - task->prev_child->next_child = task; - } - else - { - task->next_child = task; - task->prev_child = task; - parent->children = task; - } - if (parent->taskwait) - { - if (parent->taskwait->in_taskwait) - { - parent->taskwait->in_taskwait = false; - gomp_sem_post (&parent->taskwait->taskwait_sem); - } - else if (parent->taskwait->in_depend_wait) - { - parent->taskwait->in_depend_wait = false; - gomp_sem_post (&parent->taskwait->taskwait_sem); - } - if (parent->taskwait->last_parent_depends_on == NULL - && task->parent_depends_on) - parent->taskwait->last_parent_depends_on = task; - } - } - if (taskgroup) - { - if (taskgroup->children) - { - task->next_taskgroup = taskgroup->children; - task->prev_taskgroup = taskgroup->children->prev_taskgroup; - task->next_taskgroup->prev_taskgroup = task; - task->prev_taskgroup->next_taskgroup = task; - } - else - { - task->next_taskgroup = task; - task->prev_taskgroup = task; - } - taskgroup->children = task; - if (taskgroup->in_taskgroup_wait) - { - taskgroup->in_taskgroup_wait = false; - gomp_sem_post (&taskgroup->taskgroup_sem); - } - } - if (team->task_queue) - { - task->next_queue = team->task_queue; - task->prev_queue = team->task_queue->prev_queue; - task->next_queue->prev_queue = task; - task->prev_queue->next_queue = task; - } - else - { - task->next_queue = task; - task->prev_queue = task; - team->task_queue = task; - } - ++team->task_count; - ++team->task_queued_count; - ++ret; - } - free (child_task->dependers); - child_task->dependers = NULL; - if (ret > 1) - gomp_team_barrier_set_task_pending (&team->barrier); - return ret; -} - -static inline size_t -gomp_task_run_post_handle_depend (struct gomp_task *child_task, - struct gomp_team *team) -{ - if (child_task->depend_count == 0) - return 0; - - /* If parent is gone already, the hash table is freed and nothing - will use the hash table anymore, no need to remove anything from it. */ - if (child_task->parent != NULL) - gomp_task_run_post_handle_depend_hash (child_task); - - if (child_task->dependers == NULL) - return 0; - - return gomp_task_run_post_handle_dependers (child_task, team); -} - -static inline void -gomp_task_run_post_remove_parent (struct gomp_task *child_task) -{ - struct gomp_task *parent = child_task->parent; - if (parent == NULL) - return; - if (__builtin_expect (child_task->parent_depends_on, 0) - && --parent->taskwait->n_depend == 0 - && parent->taskwait->in_depend_wait) - { - parent->taskwait->in_depend_wait = false; - gomp_sem_post (&parent->taskwait->taskwait_sem); - } - child_task->prev_child->next_child = child_task->next_child; - child_task->next_child->prev_child = child_task->prev_child; - if (parent->children != child_task) - return; - if (child_task->next_child != child_task) - parent->children = child_task->next_child; - else - { - /* We access task->children in GOMP_taskwait - outside of the task lock mutex region, so - need a release barrier here to ensure memory - written by child_task->fn above is flushed - before the NULL is written. 
*/ - __atomic_store_n (&parent->children, NULL, MEMMODEL_RELEASE); - if (parent->taskwait && parent->taskwait->in_taskwait) - { - parent->taskwait->in_taskwait = false; - gomp_sem_post (&parent->taskwait->taskwait_sem); - } - } -} - -static inline void -gomp_task_run_post_remove_taskgroup (struct gomp_task *child_task) -{ - struct gomp_taskgroup *taskgroup = child_task->taskgroup; - if (taskgroup == NULL) - return; - child_task->prev_taskgroup->next_taskgroup = child_task->next_taskgroup; - child_task->next_taskgroup->prev_taskgroup = child_task->prev_taskgroup; - if (taskgroup->num_children > 1) - --taskgroup->num_children; - else - { - /* We access taskgroup->num_children in GOMP_taskgroup_end - outside of the task lock mutex region, so - need a release barrier here to ensure memory - written by child_task->fn above is flushed - before the NULL is written. */ - __atomic_store_n (&taskgroup->num_children, 0, MEMMODEL_RELEASE); - } - if (taskgroup->children != child_task) - return; - if (child_task->next_taskgroup != child_task) - taskgroup->children = child_task->next_taskgroup; - else - { - taskgroup->children = NULL; - if (taskgroup->in_taskgroup_wait) - { - taskgroup->in_taskgroup_wait = false; - gomp_sem_post (&taskgroup->taskgroup_sem); - } - } -} - -void -gomp_barrier_handle_tasks (gomp_barrier_state_t state) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_task *task = thr->task; - struct gomp_task *child_task = NULL; - struct gomp_task *to_free = NULL; - int do_wake = 0; - - gomp_mutex_lock (&team->task_lock); - if (gomp_barrier_last_thread (state)) - { - if (team->task_count == 0) - { - gomp_team_barrier_done (&team->barrier, state); - gomp_mutex_unlock (&team->task_lock); - gomp_team_barrier_wake (&team->barrier, 0); - return; - } - gomp_team_barrier_set_waiting_for_tasks (&team->barrier); - } - - while (1) - { - bool cancelled = false; - if (team->task_queue != NULL) - { - child_task = team->task_queue; - cancelled = gomp_task_run_pre (child_task, child_task->parent, - child_task->taskgroup, team); - if (__builtin_expect (cancelled, 0)) - { - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - to_free = NULL; - } - goto finish_cancelled; - } - team->task_running_count++; - child_task->in_tied_task = true; - } - gomp_mutex_unlock (&team->task_lock); - if (do_wake) - { - gomp_team_barrier_wake (&team->barrier, do_wake); - do_wake = 0; - } - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - to_free = NULL; - } - if (child_task) - { - thr->task = child_task; - child_task->fn (child_task->fn_data); - thr->task = task; - } - else - return; - gomp_mutex_lock (&team->task_lock); - if (child_task) - { - finish_cancelled:; - size_t new_tasks - = gomp_task_run_post_handle_depend (child_task, team); - gomp_task_run_post_remove_parent (child_task); - gomp_clear_parent (child_task->children); - gomp_task_run_post_remove_taskgroup (child_task); - to_free = child_task; - child_task = NULL; - if (!cancelled) - team->task_running_count--; - if (new_tasks > 1) - { - do_wake = team->nthreads - team->task_running_count; - if (do_wake > new_tasks) - do_wake = new_tasks; - } - if (--team->task_count == 0 - && gomp_team_barrier_waiting_for_tasks (&team->barrier)) - { - gomp_team_barrier_done (&team->barrier, state); - gomp_mutex_unlock (&team->task_lock); - gomp_team_barrier_wake (&team->barrier, 0); - gomp_mutex_lock (&team->task_lock); - } - } - } -} - -/* Called when encountering a taskwait directive. 
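gomp_barrier_handle_tasks is what lets threads that reach a barrier help drain the task queue instead of idling; for example (gcc -fopenmp):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  #pragma omp parallel
  {
    /* One thread creates the tasks and skips the single's barrier ... */
    #pragma omp single nowait
    {
      for (int i = 0; i < 8; i++)
        {
          #pragma omp task firstprivate(i)
          printf("task %d run by thread %d\n", i, omp_get_thread_num());
        }
    }
    /* ... all threads then execute queued tasks while waiting at the
       implicit barrier that closes the parallel region. */
  }
  return 0;
}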
*/ - -void -GOMP_taskwait (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_task *task = thr->task; - struct gomp_task *child_task = NULL; - struct gomp_task *to_free = NULL; - struct gomp_taskwait taskwait; - int do_wake = 0; - - /* The acquire barrier on load of task->children here synchronizes - with the write of a NULL in gomp_task_run_post_remove_parent. It is - not necessary that we synchronize with other non-NULL writes at - this point, but we must ensure that all writes to memory by a - child thread task work function are seen before we exit from - GOMP_taskwait. */ - if (task == NULL - || __atomic_load_n (&task->children, MEMMODEL_ACQUIRE) == NULL) - return; - - memset (&taskwait, 0, sizeof (taskwait)); - gomp_mutex_lock (&team->task_lock); - while (1) - { - bool cancelled = false; - if (task->children == NULL) - { - bool destroy_taskwait = task->taskwait != NULL; - task->taskwait = NULL; - gomp_mutex_unlock (&team->task_lock); - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - } - if (destroy_taskwait) - gomp_sem_destroy (&taskwait.taskwait_sem); - return; - } - if (task->children->kind == GOMP_TASK_WAITING) - { - child_task = task->children; - cancelled - = gomp_task_run_pre (child_task, task, child_task->taskgroup, - team); - if (__builtin_expect (cancelled, 0)) - { - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - to_free = NULL; - } - goto finish_cancelled; - } - } - else - { - /* All tasks we are waiting for are already running - in other threads. Wait for them. */ - if (task->taskwait == NULL) - { - taskwait.in_depend_wait = false; - gomp_sem_init (&taskwait.taskwait_sem, 0); - task->taskwait = &taskwait; - } - taskwait.in_taskwait = true; - } - gomp_mutex_unlock (&team->task_lock); - if (do_wake) - { - gomp_team_barrier_wake (&team->barrier, do_wake); - do_wake = 0; - } - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - to_free = NULL; - } - if (child_task) - { - thr->task = child_task; - child_task->fn (child_task->fn_data); - thr->task = task; - } - else - gomp_sem_wait (&taskwait.taskwait_sem); - gomp_mutex_lock (&team->task_lock); - if (child_task) - { - finish_cancelled:; - size_t new_tasks - = gomp_task_run_post_handle_depend (child_task, team); - child_task->prev_child->next_child = child_task->next_child; - child_task->next_child->prev_child = child_task->prev_child; - if (task->children == child_task) - { - if (child_task->next_child != child_task) - task->children = child_task->next_child; - else - task->children = NULL; - } - gomp_clear_parent (child_task->children); - gomp_task_run_post_remove_taskgroup (child_task); - to_free = child_task; - child_task = NULL; - team->task_count--; - if (new_tasks > 1) - { - do_wake = team->nthreads - team->task_running_count - - !task->in_tied_task; - if (do_wake > new_tasks) - do_wake = new_tasks; - } - } - } -} - -/* This is like GOMP_taskwait, but we only wait for tasks that the - upcoming task depends on. 
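GOMP_taskwait backs the taskwait directive, which blocks the current task until its child tasks have completed; a small sketch (gcc -fopenmp):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  int a = 0, b = 0;

  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp task shared(a)
    a = 1;

    #pragma omp task shared(b)
    b = 2;

    #pragma omp taskwait            /* wait for the two child tasks */
    printf("a + b = %d\n", a + b);  /* always prints 3 */
  }
  return 0;
}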
*/ - -static void -gomp_task_maybe_wait_for_dependencies (void **depend) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_task *task = thr->task; - struct gomp_team *team = thr->ts.team; - struct gomp_task_depend_entry elem, *ent = NULL; - struct gomp_taskwait taskwait; - struct gomp_task *last_parent_depends_on = NULL; - size_t ndepend = (uintptr_t) depend[0]; - size_t nout = (uintptr_t) depend[1]; - size_t i; - size_t num_awaited = 0; - struct gomp_task *child_task = NULL; - struct gomp_task *to_free = NULL; - int do_wake = 0; - - gomp_mutex_lock (&team->task_lock); - for (i = 0; i < ndepend; i++) - { - elem.addr = depend[i + 2]; - ent = htab_find (task->depend_hash, &elem); - for (; ent; ent = ent->next) - if (i >= nout && ent->is_in) - continue; - else - { - struct gomp_task *tsk = ent->task; - if (!tsk->parent_depends_on) - { - tsk->parent_depends_on = true; - ++num_awaited; - if (tsk->num_dependees == 0 && tsk->kind == GOMP_TASK_WAITING) - { - /* If a task we need to wait for is not already - running and is ready to be scheduled, move it - to front, so that we run it as soon as possible. */ - if (last_parent_depends_on) - { - tsk->prev_child->next_child = tsk->next_child; - tsk->next_child->prev_child = tsk->prev_child; - tsk->prev_child = last_parent_depends_on; - tsk->next_child = last_parent_depends_on->next_child; - tsk->prev_child->next_child = tsk; - tsk->next_child->prev_child = tsk; - } - else if (tsk != task->children) - { - tsk->prev_child->next_child = tsk->next_child; - tsk->next_child->prev_child = tsk->prev_child; - tsk->prev_child = task->children; - tsk->next_child = task->children->next_child; - task->children = tsk; - tsk->prev_child->next_child = tsk; - tsk->next_child->prev_child = tsk; - } - last_parent_depends_on = tsk; - } - } - } - } - if (num_awaited == 0) - { - gomp_mutex_unlock (&team->task_lock); - return; - } - - memset (&taskwait, 0, sizeof (taskwait)); - taskwait.n_depend = num_awaited; - taskwait.last_parent_depends_on = last_parent_depends_on; - gomp_sem_init (&taskwait.taskwait_sem, 0); - task->taskwait = &taskwait; - - while (1) - { - bool cancelled = false; - if (taskwait.n_depend == 0) - { - task->taskwait = NULL; - gomp_mutex_unlock (&team->task_lock); - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - } - gomp_sem_destroy (&taskwait.taskwait_sem); - return; - } - if (task->children->kind == GOMP_TASK_WAITING) - { - child_task = task->children; - cancelled - = gomp_task_run_pre (child_task, task, child_task->taskgroup, - team); - if (__builtin_expect (cancelled, 0)) - { - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - to_free = NULL; - } - goto finish_cancelled; - } - } - else - /* All tasks we are waiting for are already running - in other threads. Wait for them. 
*/ - taskwait.in_depend_wait = true; - gomp_mutex_unlock (&team->task_lock); - if (do_wake) - { - gomp_team_barrier_wake (&team->barrier, do_wake); - do_wake = 0; - } - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - to_free = NULL; - } - if (child_task) - { - thr->task = child_task; - child_task->fn (child_task->fn_data); - thr->task = task; - } - else - gomp_sem_wait (&taskwait.taskwait_sem); - gomp_mutex_lock (&team->task_lock); - if (child_task) - { - finish_cancelled:; - size_t new_tasks - = gomp_task_run_post_handle_depend (child_task, team); - if (child_task->parent_depends_on) - --taskwait.n_depend; - child_task->prev_child->next_child = child_task->next_child; - child_task->next_child->prev_child = child_task->prev_child; - if (task->children == child_task) - { - if (child_task->next_child != child_task) - task->children = child_task->next_child; - else - task->children = NULL; - } - gomp_clear_parent (child_task->children); - gomp_task_run_post_remove_taskgroup (child_task); - to_free = child_task; - child_task = NULL; - team->task_count--; - if (new_tasks > 1) - { - do_wake = team->nthreads - team->task_running_count - - !task->in_tied_task; - if (do_wake > new_tasks) - do_wake = new_tasks; - } - } - } -} - -/* Called when encountering a taskyield directive. */ - -void -GOMP_taskyield (void) -{ - /* Nothing at the moment. */ -} - -void -GOMP_taskgroup_start (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_task *task = thr->task; - struct gomp_taskgroup *taskgroup; - - /* If team is NULL, all tasks are executed as - GOMP_TASK_IFFALSE tasks and thus all children tasks of - taskgroup and their descendant tasks will be finished - by the time GOMP_taskgroup_end is called. */ - if (team == NULL) - return; - taskgroup = gomp_malloc (sizeof (struct gomp_taskgroup)); - taskgroup->prev = task->taskgroup; - taskgroup->children = NULL; - taskgroup->in_taskgroup_wait = false; - taskgroup->cancelled = false; - taskgroup->num_children = 0; - gomp_sem_init (&taskgroup->taskgroup_sem, 0); - task->taskgroup = taskgroup; -} - -void -GOMP_taskgroup_end (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_task *task = thr->task; - struct gomp_taskgroup *taskgroup; - struct gomp_task *child_task = NULL; - struct gomp_task *to_free = NULL; - int do_wake = 0; - - if (team == NULL) - return; - taskgroup = task->taskgroup; - - /* The acquire barrier on load of taskgroup->num_children here - synchronizes with the write of 0 in gomp_task_run_post_remove_taskgroup. - It is not necessary that we synchronize with other non-0 writes at - this point, but we must ensure that all writes to memory by a - child thread task work function are seen before we exit from - GOMP_taskgroup_end. 
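This wait-for-dependencies path is taken when an undeferred task carries depend clauses that refer to earlier deferred siblings; a sketch of such a case (gcc -fopenmp, OpenMP 4.0):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  int x = 0;

  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp task depend(out: x)       /* deferred producer */
    x = 1;

    /* if(0) makes this task undeferred, so the generating thread must
       first wait for the producer above before running it in place. */
    #pragma omp task if(0) depend(in: x)
    printf("x = %d\n", x);                /* always prints 1 */
  }
  return 0;
}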
*/ - if (__atomic_load_n (&taskgroup->num_children, MEMMODEL_ACQUIRE) == 0) - goto finish; - - gomp_mutex_lock (&team->task_lock); - while (1) - { - bool cancelled = false; - if (taskgroup->children == NULL) - { - if (taskgroup->num_children) - { - if (task->children == NULL) - goto do_wait; - child_task = task->children; - } - else - { - gomp_mutex_unlock (&team->task_lock); - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - } - goto finish; - } - } - else - child_task = taskgroup->children; - if (child_task->kind == GOMP_TASK_WAITING) - { - cancelled - = gomp_task_run_pre (child_task, child_task->parent, taskgroup, - team); - if (__builtin_expect (cancelled, 0)) - { - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - to_free = NULL; - } - goto finish_cancelled; - } - } - else - { - child_task = NULL; - do_wait: - /* All tasks we are waiting for are already running - in other threads. Wait for them. */ - taskgroup->in_taskgroup_wait = true; - } - gomp_mutex_unlock (&team->task_lock); - if (do_wake) - { - gomp_team_barrier_wake (&team->barrier, do_wake); - do_wake = 0; - } - if (to_free) - { - gomp_finish_task (to_free); - free (to_free); - to_free = NULL; - } - if (child_task) - { - thr->task = child_task; - child_task->fn (child_task->fn_data); - thr->task = task; - } - else - gomp_sem_wait (&taskgroup->taskgroup_sem); - gomp_mutex_lock (&team->task_lock); - if (child_task) - { - finish_cancelled:; - size_t new_tasks - = gomp_task_run_post_handle_depend (child_task, team); - gomp_task_run_post_remove_parent (child_task); - gomp_clear_parent (child_task->children); - gomp_task_run_post_remove_taskgroup (child_task); - to_free = child_task; - child_task = NULL; - team->task_count--; - if (new_tasks > 1) - { - do_wake = team->nthreads - team->task_running_count - - !task->in_tied_task; - if (do_wake > new_tasks) - do_wake = new_tasks; - } - } - } - - finish: - task->taskgroup = taskgroup->prev; - gomp_sem_destroy (&taskgroup->taskgroup_sem); - free (taskgroup); -} - -int -omp_in_final (void) -{ - struct gomp_thread *thr = gomp_thread (); - return thr->task && thr->task->final_task; -} - -ialias (omp_in_final) diff --git a/usr/libgomp/team.c b/usr/libgomp/team.c deleted file mode 100644 index 07b5ff9f6a..0000000000 --- a/usr/libgomp/team.c +++ /dev/null @@ -1,958 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - . */ - -/* This file handles the maintainence of threads in response to team - creation and termination. 
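GOMP_taskgroup_start and GOMP_taskgroup_end above implement the taskgroup construct, which waits for its child tasks and all of their descendants; a minimal example (gcc -fopenmp):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  #pragma omp parallel
  #pragma omp single
  {
    #pragma omp taskgroup
    {
      for (int i = 0; i < 4; i++)
        {
          #pragma omp task firstprivate(i)
          printf("task %d\n", i);
        }
    }                                   /* taskgroup end waits here */
    printf("all taskgroup tasks are done\n");
  }
  return 0;
}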
*/ - -#include "libgomp.h" -#include -#include - -/* This attribute contains PTHREAD_CREATE_DETACHED. */ -pthread_attr_t gomp_thread_attr; - -/* This key is for the thread destructor. */ -pthread_key_t gomp_thread_destructor; - - -/* This is the libgomp per-thread data structure. */ -#if defined HAVE_TLS || defined USE_EMUTLS -__thread struct gomp_thread* gomp_tls_data = NULL; -#else -pthread_key_t gomp_tls_key; -#endif - - -/* This structure is used to communicate across pthread_create. */ - -struct gomp_thread_start_data -{ - void (*fn) (void *); - void *fn_data; - struct gomp_team_state ts; - struct gomp_task *task; - struct gomp_thread_pool *thread_pool; - unsigned int place; - bool nested; -}; - - -/* This function is a pthread_create entry point. This contains the idle - loop in which a thread waits to be called up to become part of a team. */ - -static void * -gomp_thread_start (void *xdata) -{ - struct gomp_thread_start_data *data = xdata; - struct gomp_thread *thr; - struct gomp_thread_pool *pool; - void (*local_fn) (void *); - void *local_data; - -#if defined HAVE_TLS || defined USE_EMUTLS - thr = gomp_tls_data = (struct gomp_thread*) gomp_malloc_cleared(sizeof(struct gomp_thread)); -#else - struct gomp_thread local_thr; - thr = &local_thr; - pthread_setspecific (gomp_tls_key, thr); -#endif - gomp_sem_init (&thr->release, 0); - - /* Extract what we need from data. */ - local_fn = data->fn; - local_data = data->fn_data; - thr->thread_pool = data->thread_pool; - thr->ts = data->ts; - thr->task = data->task; - thr->place = data->place; - - thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release; - - /* Make thread pool local. */ - pool = thr->thread_pool; - - if (data->nested) - { - struct gomp_team *team = thr->ts.team; - struct gomp_task *task = thr->task; - - gomp_barrier_wait (&team->barrier); - - local_fn (local_data); - gomp_team_barrier_wait_final (&team->barrier); - gomp_finish_task (task); - gomp_barrier_wait_last (&team->barrier); - } - else - { - pool->threads[thr->ts.team_id] = thr; - - gomp_barrier_wait (&pool->threads_dock); - do - { - struct gomp_team *team = thr->ts.team; - struct gomp_task *task = thr->task; - - local_fn (local_data); - gomp_team_barrier_wait_final (&team->barrier); - gomp_finish_task (task); - - gomp_barrier_wait (&pool->threads_dock); - - local_fn = thr->fn; - local_data = thr->data; - thr->fn = NULL; - } - while (local_fn); - } - - gomp_sem_destroy (&thr->release); - thr->thread_pool = NULL; - thr->task = NULL; -#if defined HAVE_TLS || defined USE_EMUTLS - free(thr); - thr = gomp_tls_data = NULL; -#endif - return NULL; -} - - -/* Create a new team data structure. 
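gomp_thread_start distinguishes pool threads (the idle-loop path) from threads created for nested teams; nested regions only fork if nesting is enabled, for example (gcc -fopenmp):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  omp_set_nested(1);                     /* allow inner regions to create teams */

  #pragma omp parallel num_threads(2)
  {
    int outer = omp_get_thread_num();

    #pragma omp parallel num_threads(2)  /* each outer thread starts a nested team */
    printf("outer %d, inner %d\n", outer, omp_get_thread_num());
  }
  return 0;
}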
*/ - -struct gomp_team * -gomp_new_team (unsigned nthreads) -{ - struct gomp_team *team; - size_t size; - int i; - - size = sizeof (*team) + nthreads * (sizeof (team->ordered_release[0]) - + sizeof (team->implicit_task[0])); - team = gomp_malloc (size); - - team->work_share_chunk = 8; -#ifdef HAVE_SYNC_BUILTINS - team->single_count = 0; -#else - gomp_mutex_init (&team->work_share_list_free_lock); -#endif - team->work_shares_to_free = &team->work_shares[0]; - gomp_init_work_share (&team->work_shares[0], false, nthreads); - team->work_shares[0].next_alloc = NULL; - team->work_share_list_free = NULL; - team->work_share_list_alloc = &team->work_shares[1]; - for (i = 1; i < 7; i++) - team->work_shares[i].next_free = &team->work_shares[i + 1]; - team->work_shares[i].next_free = NULL; - - team->nthreads = nthreads; - gomp_barrier_init (&team->barrier, nthreads); - - gomp_sem_init (&team->master_release, 0); - team->ordered_release = (void *) &team->implicit_task[nthreads]; - team->ordered_release[0] = &team->master_release; - - gomp_mutex_init (&team->task_lock); - team->task_queue = NULL; - team->task_count = 0; - team->task_queued_count = 0; - team->task_running_count = 0; - team->work_share_cancelled = 0; - team->team_cancelled = 0; - - return team; -} - - -/* Free a team data structure. */ - -static void -free_team (struct gomp_team *team) -{ - gomp_barrier_destroy (&team->barrier); - gomp_mutex_destroy (&team->task_lock); - free (team); -} - -/* Allocate and initialize a thread pool. */ - -static struct gomp_thread_pool *gomp_new_thread_pool (void) -{ - struct gomp_thread_pool *pool - = gomp_malloc (sizeof(struct gomp_thread_pool)); - pool->threads = NULL; - pool->threads_size = 0; - pool->threads_used = 0; - pool->last_team = NULL; - return pool; -} - -static void -gomp_free_pool_helper (void *thread_pool) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_thread_pool *pool - = (struct gomp_thread_pool *) thread_pool; - gomp_barrier_wait_last (&pool->threads_dock); - gomp_sem_destroy (&thr->release); - thr->thread_pool = NULL; - thr->task = NULL; - pthread_exit (NULL); -} - -/* Free a thread pool and release its threads. */ - -void -gomp_free_thread (void *arg __attribute__((unused))) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_thread_pool *pool = thr->thread_pool; - if (pool) - { - if (pool->threads_used > 0) - { - int i; - for (i = 1; i < pool->threads_used; i++) - { - struct gomp_thread *nthr = pool->threads[i]; - nthr->fn = gomp_free_pool_helper; - nthr->data = pool; - } - /* This barrier undocks threads docked on pool->threads_dock. */ - gomp_barrier_wait (&pool->threads_dock); - /* And this waits till all threads have called gomp_barrier_wait_last - in gomp_free_pool_helper. */ - gomp_barrier_wait (&pool->threads_dock); - /* Now it is safe to destroy the barrier and free the pool. */ - gomp_barrier_destroy (&pool->threads_dock); - -#ifdef HAVE_SYNC_BUILTINS - __sync_fetch_and_add (&gomp_managed_threads, - 1L - pool->threads_used); -#else - gomp_mutex_lock (&gomp_managed_threads_lock); - gomp_managed_threads -= pool->threads_used - 1L; - gomp_mutex_unlock (&gomp_managed_threads_lock); -#endif - } - free (pool->threads); - if (pool->last_team) - free_team (pool->last_team); - free (pool); - thr->thread_pool = NULL; - } - if (thr->task != NULL) - { - struct gomp_task *task = thr->task; - gomp_end_task (); - free (task); - } -} - -/* Launch a team. 
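gomp_new_team and gomp_team_start are what a parallel directive ultimately calls into; idle workers are parked in the thread pool above and reused by the next non-nested region. For instance (gcc -fopenmp):

#include <omp.h>
#include <stdio.h>

int main(void)
{
  /* Two consecutive regions: the second reuses the pool threads created
     for the first instead of spawning new pthreads. */
  for (int round = 0; round < 2; round++)
    {
      #pragma omp parallel num_threads(4)
      {
        #pragma omp critical
        printf("round %d: thread %d of %d\n",
               round, omp_get_thread_num(), omp_get_num_threads());
      }
    }
  return 0;
}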
*/ - -void -gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads, - unsigned flags, struct gomp_team *team) -{ - struct gomp_thread_start_data *start_data; - struct gomp_thread *thr, *nthr; - struct gomp_task *task; - struct gomp_task_icv *icv; - bool nested; - struct gomp_thread_pool *pool; - unsigned i, n, old_threads_used = 0; - pthread_attr_t thread_attr, *attr; - unsigned long nthreads_var; - char bind, bind_var; - unsigned int s = 0, rest = 0, p = 0, k = 0; - unsigned int affinity_count = 0; - struct gomp_thread **affinity_thr = NULL; - - thr = gomp_thread (); - nested = thr->ts.team != NULL; - if (__builtin_expect (thr->thread_pool == NULL, 0)) - { - thr->thread_pool = gomp_new_thread_pool (); - thr->thread_pool->threads_busy = nthreads; - pthread_setspecific (gomp_thread_destructor, thr); - } - pool = thr->thread_pool; - task = thr->task; - icv = task ? &task->icv : &gomp_global_icv; - if (__builtin_expect (gomp_places_list != NULL, 0) && thr->place == 0) - gomp_init_affinity (); - - /* Always save the previous state, even if this isn't a nested team. - In particular, we should save any work share state from an outer - orphaned work share construct. */ - team->prev_ts = thr->ts; - - thr->ts.team = team; - thr->ts.team_id = 0; - ++thr->ts.level; - if (nthreads > 1) - ++thr->ts.active_level; - thr->ts.work_share = &team->work_shares[0]; - thr->ts.last_work_share = NULL; -#ifdef HAVE_SYNC_BUILTINS - thr->ts.single_count = 0; -#endif - thr->ts.static_trip = 0; - thr->task = &team->implicit_task[0]; - nthreads_var = icv->nthreads_var; - if (__builtin_expect (gomp_nthreads_var_list != NULL, 0) - && thr->ts.level < gomp_nthreads_var_list_len) - nthreads_var = gomp_nthreads_var_list[thr->ts.level]; - bind_var = icv->bind_var; - if (bind_var != omp_proc_bind_false && (flags & 7) != omp_proc_bind_false) - bind_var = flags & 7; - bind = bind_var; - if (__builtin_expect (gomp_bind_var_list != NULL, 0) - && thr->ts.level < gomp_bind_var_list_len) - bind_var = gomp_bind_var_list[thr->ts.level]; - gomp_init_task (thr->task, task, icv); - team->implicit_task[0].icv.nthreads_var = nthreads_var; - team->implicit_task[0].icv.bind_var = bind_var; - - if (nthreads == 1) - return; - - i = 1; - - if (__builtin_expect (gomp_places_list != NULL, 0)) - { - /* Depending on chosen proc_bind model, set subpartition - for the master thread and initialize helper variables - P and optionally S, K and/or REST used by later place - computation for each additional thread. */ - p = thr->place - 1; - switch (bind) - { - case omp_proc_bind_true: - case omp_proc_bind_close: - if (nthreads > thr->ts.place_partition_len) - { - /* T > P. S threads will be placed in each place, - and the final REM threads placed one by one - into the already occupied places. */ - s = nthreads / thr->ts.place_partition_len; - rest = nthreads % thr->ts.place_partition_len; - } - else - s = 1; - k = 1; - break; - case omp_proc_bind_master: - /* Each thread will be bound to master's place. */ - break; - case omp_proc_bind_spread: - if (nthreads <= thr->ts.place_partition_len) - { - /* T <= P. Each subpartition will have in between s - and s+1 places (subpartitions starting at or - after rest will have s places, earlier s+1 places), - each thread will be bound to the first place in - its subpartition (except for the master thread - that can be bound to another place in its - subpartition). 
*/ - s = thr->ts.place_partition_len / nthreads; - rest = thr->ts.place_partition_len % nthreads; - rest = (s + 1) * rest + thr->ts.place_partition_off; - if (p < rest) - { - p -= (p - thr->ts.place_partition_off) % (s + 1); - thr->ts.place_partition_len = s + 1; - } - else - { - p -= (p - rest) % s; - thr->ts.place_partition_len = s; - } - thr->ts.place_partition_off = p; - } - else - { - /* T > P. Each subpartition will have just a single - place and we'll place between s and s+1 - threads into each subpartition. */ - s = nthreads / thr->ts.place_partition_len; - rest = nthreads % thr->ts.place_partition_len; - thr->ts.place_partition_off = p; - thr->ts.place_partition_len = 1; - k = 1; - } - break; - } - } - else - bind = omp_proc_bind_false; - - /* We only allow the reuse of idle threads for non-nested PARALLEL - regions. This appears to be implied by the semantics of - threadprivate variables, but perhaps that's reading too much into - things. Certainly it does prevent any locking problems, since - only the initial program thread will modify gomp_threads. */ - if (!nested) - { - old_threads_used = pool->threads_used; - - if (nthreads <= old_threads_used) - n = nthreads; - else if (old_threads_used == 0) - { - n = 0; - gomp_barrier_init (&pool->threads_dock, nthreads); - } - else - { - n = old_threads_used; - - /* Increase the barrier threshold to make sure all new - threads arrive before the team is released. */ - gomp_barrier_reinit (&pool->threads_dock, nthreads); - } - - /* Not true yet, but soon will be. We're going to release all - threads from the dock, and those that aren't part of the - team will exit. */ - pool->threads_used = nthreads; - - /* If necessary, expand the size of the gomp_threads array. It is - expected that changes in the number of threads are rare, thus we - make no effort to expand gomp_threads_size geometrically. */ - if (nthreads >= pool->threads_size) - { - pool->threads_size = nthreads + 1; - pool->threads - = gomp_realloc (pool->threads, - pool->threads_size - * sizeof (struct gomp_thread_data *)); - } - - /* Release existing idle threads. */ - for (; i < n; ++i) - { - unsigned int place_partition_off = thr->ts.place_partition_off; - unsigned int place_partition_len = thr->ts.place_partition_len; - unsigned int place = 0; - if (__builtin_expect (gomp_places_list != NULL, 0)) - { - switch (bind) - { - case omp_proc_bind_true: - case omp_proc_bind_close: - if (k == s) - { - ++p; - if (p == (team->prev_ts.place_partition_off - + team->prev_ts.place_partition_len)) - p = team->prev_ts.place_partition_off; - k = 1; - if (i == nthreads - rest) - s = 1; - } - else - ++k; - break; - case omp_proc_bind_master: - break; - case omp_proc_bind_spread: - if (k == 0) - { - /* T <= P. */ - if (p < rest) - p += s + 1; - else - p += s; - if (p == (team->prev_ts.place_partition_off - + team->prev_ts.place_partition_len)) - p = team->prev_ts.place_partition_off; - place_partition_off = p; - if (p < rest) - place_partition_len = s + 1; - else - place_partition_len = s; - } - else - { - /* T > P. 
*/ - if (k == s) - { - ++p; - if (p == (team->prev_ts.place_partition_off - + team->prev_ts.place_partition_len)) - p = team->prev_ts.place_partition_off; - k = 1; - if (i == nthreads - rest) - s = 1; - } - else - ++k; - place_partition_off = p; - place_partition_len = 1; - } - break; - } - if (affinity_thr != NULL - || (bind != omp_proc_bind_true - && pool->threads[i]->place != p + 1) - || pool->threads[i]->place <= place_partition_off - || pool->threads[i]->place > (place_partition_off - + place_partition_len)) - { - unsigned int l; - if (affinity_thr == NULL) - { - unsigned int j; - - if (team->prev_ts.place_partition_len > 64) - affinity_thr - = gomp_malloc (team->prev_ts.place_partition_len - * sizeof (struct gomp_thread *)); - else - affinity_thr - = gomp_alloca (team->prev_ts.place_partition_len - * sizeof (struct gomp_thread *)); - memset (affinity_thr, '\0', - team->prev_ts.place_partition_len - * sizeof (struct gomp_thread *)); - for (j = i; j < old_threads_used; j++) - { - if (pool->threads[j]->place - > team->prev_ts.place_partition_off - && (pool->threads[j]->place - <= (team->prev_ts.place_partition_off - + team->prev_ts.place_partition_len))) - { - l = pool->threads[j]->place - 1 - - team->prev_ts.place_partition_off; - pool->threads[j]->data = affinity_thr[l]; - affinity_thr[l] = pool->threads[j]; - } - pool->threads[j] = NULL; - } - if (nthreads > old_threads_used) - memset (&pool->threads[old_threads_used], - '\0', ((nthreads - old_threads_used) - * sizeof (struct gomp_thread *))); - n = nthreads; - affinity_count = old_threads_used - i; - } - if (affinity_count == 0) - break; - l = p; - if (affinity_thr[l - team->prev_ts.place_partition_off] - == NULL) - { - if (bind != omp_proc_bind_true) - continue; - for (l = place_partition_off; - l < place_partition_off + place_partition_len; - l++) - if (affinity_thr[l - team->prev_ts.place_partition_off] - != NULL) - break; - if (l == place_partition_off + place_partition_len) - continue; - } - nthr = affinity_thr[l - team->prev_ts.place_partition_off]; - affinity_thr[l - team->prev_ts.place_partition_off] - = (struct gomp_thread *) nthr->data; - affinity_count--; - pool->threads[i] = nthr; - } - else - nthr = pool->threads[i]; - place = p + 1; - } - else - nthr = pool->threads[i]; - nthr->ts.team = team; - nthr->ts.work_share = &team->work_shares[0]; - nthr->ts.last_work_share = NULL; - nthr->ts.team_id = i; - nthr->ts.level = team->prev_ts.level + 1; - nthr->ts.active_level = thr->ts.active_level; - nthr->ts.place_partition_off = place_partition_off; - nthr->ts.place_partition_len = place_partition_len; -#ifdef HAVE_SYNC_BUILTINS - nthr->ts.single_count = 0; -#endif - nthr->ts.static_trip = 0; - nthr->task = &team->implicit_task[i]; - nthr->place = place; - gomp_init_task (nthr->task, task, icv); - team->implicit_task[i].icv.nthreads_var = nthreads_var; - team->implicit_task[i].icv.bind_var = bind_var; - nthr->fn = fn; - nthr->data = data; - team->ordered_release[i] = &nthr->release; - } - - if (__builtin_expect (affinity_thr != NULL, 0)) - { - /* If AFFINITY_THR is non-NULL just because we had to - permute some threads in the pool, but we've managed - to find exactly as many old threads as we'd find - without affinity, we don't need to handle this - specially anymore. */ - if (nthreads <= old_threads_used - ? 
(affinity_count == old_threads_used - nthreads) - : (i == old_threads_used)) - { - if (team->prev_ts.place_partition_len > 64) - free (affinity_thr); - affinity_thr = NULL; - affinity_count = 0; - } - else - { - i = 1; - /* We are going to compute the places/subpartitions - again from the beginning. So, we need to reinitialize - vars modified by the switch (bind) above inside - of the loop, to the state they had after the initial - switch (bind). */ - switch (bind) - { - case omp_proc_bind_true: - case omp_proc_bind_close: - if (nthreads > thr->ts.place_partition_len) - /* T > P. S has been changed, so needs - to be recomputed. */ - s = nthreads / thr->ts.place_partition_len; - k = 1; - p = thr->place - 1; - break; - case omp_proc_bind_master: - /* No vars have been changed. */ - break; - case omp_proc_bind_spread: - p = thr->ts.place_partition_off; - if (k != 0) - { - /* T > P. */ - s = nthreads / team->prev_ts.place_partition_len; - k = 1; - } - break; - } - - /* Increase the barrier threshold to make sure all new - threads and all the threads we're going to let die - arrive before the team is released. */ - if (affinity_count) - gomp_barrier_reinit (&pool->threads_dock, - nthreads + affinity_count); - } - } - - if (i == nthreads) - goto do_release; - - } - - if (__builtin_expect (nthreads + affinity_count > old_threads_used, 0)) - { - long diff = (long) (nthreads + affinity_count) - (long) old_threads_used; - - if (old_threads_used == 0) - --diff; - -#ifdef HAVE_SYNC_BUILTINS - __sync_fetch_and_add (&gomp_managed_threads, diff); -#else - gomp_mutex_lock (&gomp_managed_threads_lock); - gomp_managed_threads += diff; - gomp_mutex_unlock (&gomp_managed_threads_lock); -#endif - } - - attr = &gomp_thread_attr; - if (__builtin_expect (gomp_places_list != NULL, 0)) - { - size_t stacksize; - pthread_attr_init (&thread_attr); - pthread_attr_setdetachstate (&thread_attr, PTHREAD_CREATE_DETACHED); - if (! pthread_attr_getstacksize (&gomp_thread_attr, &stacksize)) - pthread_attr_setstacksize (&thread_attr, stacksize); - attr = &thread_attr; - } - - start_data = gomp_alloca (sizeof (struct gomp_thread_start_data) - * (nthreads-i)); - - /* Launch new threads. */ - for (; i < nthreads; ++i) - { - pthread_t pt; - int err; - - start_data->ts.place_partition_off = thr->ts.place_partition_off; - start_data->ts.place_partition_len = thr->ts.place_partition_len; - start_data->place = 0; - if (__builtin_expect (gomp_places_list != NULL, 0)) - { - switch (bind) - { - case omp_proc_bind_true: - case omp_proc_bind_close: - if (k == s) - { - ++p; - if (p == (team->prev_ts.place_partition_off - + team->prev_ts.place_partition_len)) - p = team->prev_ts.place_partition_off; - k = 1; - if (i == nthreads - rest) - s = 1; - } - else - ++k; - break; - case omp_proc_bind_master: - break; - case omp_proc_bind_spread: - if (k == 0) - { - /* T <= P. */ - if (p < rest) - p += s + 1; - else - p += s; - if (p == (team->prev_ts.place_partition_off - + team->prev_ts.place_partition_len)) - p = team->prev_ts.place_partition_off; - start_data->ts.place_partition_off = p; - if (p < rest) - start_data->ts.place_partition_len = s + 1; - else - start_data->ts.place_partition_len = s; - } - else - { - /* T > P. 
*/ - if (k == s) - { - ++p; - if (p == (team->prev_ts.place_partition_off - + team->prev_ts.place_partition_len)) - p = team->prev_ts.place_partition_off; - k = 1; - if (i == nthreads - rest) - s = 1; - } - else - ++k; - start_data->ts.place_partition_off = p; - start_data->ts.place_partition_len = 1; - } - break; - } - start_data->place = p + 1; - if (affinity_thr != NULL && pool->threads[i] != NULL) - continue; - gomp_init_thread_affinity (attr, p); - } - - start_data->fn = fn; - start_data->fn_data = data; - start_data->ts.team = team; - start_data->ts.work_share = &team->work_shares[0]; - start_data->ts.last_work_share = NULL; - start_data->ts.team_id = i; - start_data->ts.level = team->prev_ts.level + 1; - start_data->ts.active_level = thr->ts.active_level; -#ifdef HAVE_SYNC_BUILTINS - start_data->ts.single_count = 0; -#endif - start_data->ts.static_trip = 0; - start_data->task = &team->implicit_task[i]; - gomp_init_task (start_data->task, task, icv); - team->implicit_task[i].icv.nthreads_var = nthreads_var; - team->implicit_task[i].icv.bind_var = bind_var; - start_data->thread_pool = pool; - start_data->nested = nested; - - err = pthread_create (&pt, attr, gomp_thread_start, start_data++); - if (err != 0) - gomp_fatal ("Thread creation failed: %s", strerror (err)); - } - - if (__builtin_expect (gomp_places_list != NULL, 0)) - pthread_attr_destroy (&thread_attr); - - do_release: - gomp_barrier_wait (nested ? &team->barrier : &pool->threads_dock); - - /* Decrease the barrier threshold to match the number of threads - that should arrive back at the end of this team. The extra - threads should be exiting. Note that we arrange for this test - to never be true for nested teams. If AFFINITY_COUNT is non-zero, - the barrier as well as gomp_managed_threads was temporarily - set to NTHREADS + AFFINITY_COUNT. For NTHREADS < OLD_THREADS_COUNT, - AFFINITY_COUNT if non-zero will be always at least - OLD_THREADS_COUNT - NTHREADS. */ - if (__builtin_expect (nthreads < old_threads_used, 0) - || __builtin_expect (affinity_count, 0)) - { - long diff = (long) nthreads - (long) old_threads_used; - - if (affinity_count) - diff = -affinity_count; - - gomp_barrier_reinit (&pool->threads_dock, nthreads); - -#ifdef HAVE_SYNC_BUILTINS - __sync_fetch_and_add (&gomp_managed_threads, diff); -#else - gomp_mutex_lock (&gomp_managed_threads_lock); - gomp_managed_threads += diff; - gomp_mutex_unlock (&gomp_managed_threads_lock); -#endif - } - if (__builtin_expect (affinity_thr != NULL, 0) - && team->prev_ts.place_partition_len > 64) - free (affinity_thr); -} - - -/* Terminate the current team. This is only to be called by the master - thread. We assume that we must wait for the other threads. */ - -void -gomp_team_end (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - - /* This barrier handles all pending explicit threads. - As #pragma omp cancel parallel might get awaited count in - team->barrier in a inconsistent state, we need to use a different - counter here. 
*/ - gomp_team_barrier_wait_final (&team->barrier); - if (__builtin_expect (team->team_cancelled, 0)) - { - struct gomp_work_share *ws = team->work_shares_to_free; - do - { - struct gomp_work_share *next_ws = gomp_ptrlock_get (&ws->next_ws); - if (next_ws == NULL) - gomp_ptrlock_set (&ws->next_ws, ws); - gomp_fini_work_share (ws); - ws = next_ws; - } - while (ws != NULL); - } - else - gomp_fini_work_share (thr->ts.work_share); - - gomp_end_task (); - thr->ts = team->prev_ts; - - if (__builtin_expect (thr->ts.team != NULL, 0)) - { -#ifdef HAVE_SYNC_BUILTINS - __sync_fetch_and_add (&gomp_managed_threads, 1L - team->nthreads); -#else - gomp_mutex_lock (&gomp_managed_threads_lock); - gomp_managed_threads -= team->nthreads - 1L; - gomp_mutex_unlock (&gomp_managed_threads_lock); -#endif - /* This barrier has gomp_barrier_wait_last counterparts - and ensures the team can be safely destroyed. */ - gomp_barrier_wait (&team->barrier); - } - - if (__builtin_expect (team->work_shares[0].next_alloc != NULL, 0)) - { - struct gomp_work_share *ws = team->work_shares[0].next_alloc; - do - { - struct gomp_work_share *next_ws = ws->next_alloc; - free (ws); - ws = next_ws; - } - while (ws != NULL); - } - gomp_sem_destroy (&team->master_release); -#ifndef HAVE_SYNC_BUILTINS - gomp_mutex_destroy (&team->work_share_list_free_lock); -#endif - - if (__builtin_expect (thr->ts.team != NULL, 0) - || __builtin_expect (team->nthreads == 1, 0)) - free_team (team); - else - { - struct gomp_thread_pool *pool = thr->thread_pool; - if (pool->last_team) - free_team (pool->last_team); - pool->last_team = team; - } -} - - -/* Constructors for this file. */ - -static void __attribute__((constructor)) -initialize_team (void) -{ -#if !defined HAVE_TLS && !defined USE_EMUTLS - static struct gomp_thread initial_thread_tls_data; - - pthread_key_create (&gomp_tls_key, NULL); - pthread_setspecific (gomp_tls_key, &initial_thread_tls_data); -#else - gomp_tls_data = (struct gomp_thread*) gomp_malloc_cleared(sizeof(struct gomp_thread)); -#endif - - if (pthread_key_create (&gomp_thread_destructor, gomp_free_thread) != 0) - gomp_fatal ("could not create thread pool destructor."); -} - -static void __attribute__((destructor)) -team_destructor (void) -{ -#if defined HAVE_TLS || defined USE_EMUTLS - free(gomp_tls_data); -#endif - - /* Without this dlclose on libgomp could lead to subsequent - crashes. */ - pthread_key_delete (gomp_thread_destructor); -} - -struct gomp_task_icv * -gomp_new_icv (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_task *task = gomp_malloc (sizeof (struct gomp_task)); - gomp_init_task (task, NULL, &gomp_global_icv); - thr->task = task; - pthread_setspecific (gomp_thread_destructor, thr); - return &task->icv; -} diff --git a/usr/libgomp/time.c b/usr/libgomp/time.c deleted file mode 100644 index e9f9d89433..0000000000 --- a/usr/libgomp/time.c +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (C) 2015 RWTH Aachen University, Germany. - Contributed by Stefan Lankes . - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
- - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - <http://www.gnu.org/licenses/>. */ - -/* - * This file contains system specific timer routines. It is expected that - * a system may well want to write special versions of each of these. - */ - -#include "libgomp.h" -#include - -extern unsigned int get_cpufreq(void); -static unsigned long long start_tsc; - -inline static unsigned long long rdtsc(void) -{ - unsigned long lo, hi; - asm volatile ("rdtsc" : "=a"(lo), "=d"(hi) :: "memory"); - return ((unsigned long long) hi << 32ULL | (unsigned long long) lo); -} - -__attribute__((constructor)) static void timer_init() -{ - start_tsc = rdtsc(); -} - -double -omp_get_wtime (void) -{ - double ret; - - ret = (double) (rdtsc() - start_tsc) / ((double) get_cpufreq() * 1000000.0); - //printf("CPU frequency: %d MHz\n", get_cpufreq()); - - return ret; -} - -double -omp_get_wtick (void) -{ - return 1.0 / ((double) get_cpufreq() * 1000000.0); -} - -ialias (omp_get_wtime) -ialias (omp_get_wtick) diff --git a/usr/libgomp/work.c b/usr/libgomp/work.c deleted file mode 100644 index 0570b90c70..0000000000 --- a/usr/libgomp/work.c +++ /dev/null @@ -1,297 +0,0 @@ -/* Copyright (C) 2005-2015 Free Software Foundation, Inc. - Contributed by Richard Henderson . - - This file is part of the GNU Offloading and Multi Processing Library - (libgomp). - - Libgomp is free software; you can redistribute it and/or modify it - under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 3, or (at your option) - any later version. - - Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - Under Section 7 of GPL version 3, you are granted additional - permissions described in the GCC Runtime Library Exception, version - 3.1, as published by the Free Software Foundation. - - You should have received a copy of the GNU General Public License and - a copy of the GCC Runtime Library Exception along with this program; - see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - <http://www.gnu.org/licenses/>. */ - -/* This file contains routines to manage the work-share queue for a team - of threads. */ - -#include "libgomp.h" -#include <stddef.h> -#include <stdlib.h> -#include <string.h> - - -/* Allocate a new work share structure, preferably from current team's - free gomp_work_share cache. */ - -static struct gomp_work_share * -alloc_work_share (struct gomp_team *team) -{ - struct gomp_work_share *ws; - unsigned int i; - - /* This is called in a critical section. */ - if (team->work_share_list_alloc != NULL) - { - ws = team->work_share_list_alloc; - team->work_share_list_alloc = ws->next_free; - return ws; - } - -#ifdef HAVE_SYNC_BUILTINS - ws = team->work_share_list_free; - /* We need atomic read from work_share_list_free, - as free_work_share can be called concurrently.
*/ - __asm ("" : "+r" (ws)); - - if (ws && ws->next_free) - { - struct gomp_work_share *next = ws->next_free; - ws->next_free = NULL; - team->work_share_list_alloc = next->next_free; - return next; - } -#else - gomp_mutex_lock (&team->work_share_list_free_lock); - ws = team->work_share_list_free; - if (ws) - { - team->work_share_list_alloc = ws->next_free; - team->work_share_list_free = NULL; - gomp_mutex_unlock (&team->work_share_list_free_lock); - return ws; - } - gomp_mutex_unlock (&team->work_share_list_free_lock); -#endif - - team->work_share_chunk *= 2; - ws = gomp_malloc (team->work_share_chunk * sizeof (struct gomp_work_share)); - ws->next_alloc = team->work_shares[0].next_alloc; - team->work_shares[0].next_alloc = ws; - team->work_share_list_alloc = &ws[1]; - for (i = 1; i < team->work_share_chunk - 1; i++) - ws[i].next_free = &ws[i + 1]; - ws[i].next_free = NULL; - return ws; -} - -/* Initialize an already allocated struct gomp_work_share. - This shouldn't touch the next_alloc field. */ - -void -gomp_init_work_share (struct gomp_work_share *ws, bool ordered, - unsigned nthreads) -{ - gomp_mutex_init (&ws->lock); - if (__builtin_expect (ordered, 0)) - { -#define INLINE_ORDERED_TEAM_IDS_CNT \ - ((sizeof (struct gomp_work_share) \ - - offsetof (struct gomp_work_share, inline_ordered_team_ids)) \ - / sizeof (((struct gomp_work_share *) 0)->inline_ordered_team_ids[0])) - - if (nthreads > INLINE_ORDERED_TEAM_IDS_CNT) - ws->ordered_team_ids - = gomp_malloc (nthreads * sizeof (*ws->ordered_team_ids)); - else - ws->ordered_team_ids = ws->inline_ordered_team_ids; - memset (ws->ordered_team_ids, '\0', - nthreads * sizeof (*ws->ordered_team_ids)); - ws->ordered_num_used = 0; - ws->ordered_owner = -1; - ws->ordered_cur = 0; - } - else - ws->ordered_team_ids = NULL; - gomp_ptrlock_init (&ws->next_ws, NULL); - ws->threads_completed = 0; -} - -/* Do any needed destruction of gomp_work_share fields before it - is put back into free gomp_work_share cache or freed. */ - -void -gomp_fini_work_share (struct gomp_work_share *ws) -{ - gomp_mutex_destroy (&ws->lock); - if (ws->ordered_team_ids != ws->inline_ordered_team_ids) - free (ws->ordered_team_ids); - gomp_ptrlock_destroy (&ws->next_ws); -} - -/* Free a work share struct, if not orphaned, put it into current - team's free gomp_work_share cache. */ - -static inline void -free_work_share (struct gomp_team *team, struct gomp_work_share *ws) -{ - gomp_fini_work_share (ws); - if (__builtin_expect (team == NULL, 0)) - free (ws); - else - { - struct gomp_work_share *next_ws; -#ifdef HAVE_SYNC_BUILTINS - do - { - next_ws = team->work_share_list_free; - ws->next_free = next_ws; - } - while (!__sync_bool_compare_and_swap (&team->work_share_list_free, - next_ws, ws)); -#else - gomp_mutex_lock (&team->work_share_list_free_lock); - next_ws = team->work_share_list_free; - ws->next_free = next_ws; - team->work_share_list_free = ws; - gomp_mutex_unlock (&team->work_share_list_free_lock); -#endif - } -} - -/* The current thread is ready to begin the next work sharing construct. - In all cases, thr->ts.work_share is updated to point to the new - structure. In all cases the work_share lock is locked. Return true - if this was the first thread to reach this point. */ - -bool -gomp_work_share_start (bool ordered) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_work_share *ws; - - /* Work sharing constructs can be orphaned. 
*/ - if (team == NULL) - { - ws = gomp_malloc (sizeof (*ws)); - gomp_init_work_share (ws, ordered, 1); - thr->ts.work_share = ws; - return ws; - } - - ws = thr->ts.work_share; - thr->ts.last_work_share = ws; - ws = gomp_ptrlock_get (&ws->next_ws); - if (ws == NULL) - { - /* This thread encountered a new ws first. */ - struct gomp_work_share *ws = alloc_work_share (team); - gomp_init_work_share (ws, ordered, team->nthreads); - thr->ts.work_share = ws; - return true; - } - else - { - thr->ts.work_share = ws; - return false; - } -} - -/* The current thread is done with its current work sharing construct. - This version does imply a barrier at the end of the work-share. */ - -void -gomp_work_share_end (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - gomp_barrier_state_t bstate; - - /* Work sharing constructs can be orphaned. */ - if (team == NULL) - { - free_work_share (NULL, thr->ts.work_share); - thr->ts.work_share = NULL; - return; - } - - bstate = gomp_barrier_wait_start (&team->barrier); - - if (gomp_barrier_last_thread (bstate)) - { - if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) - { - team->work_shares_to_free = thr->ts.work_share; - free_work_share (team, thr->ts.last_work_share); - } - } - - gomp_team_barrier_wait_end (&team->barrier, bstate); - thr->ts.last_work_share = NULL; -} - -/* The current thread is done with its current work sharing construct. - This version implies a cancellable barrier at the end of the work-share. */ - -bool -gomp_work_share_end_cancel (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - gomp_barrier_state_t bstate; - - /* Cancellable work sharing constructs cannot be orphaned. */ - bstate = gomp_barrier_wait_cancel_start (&team->barrier); - - if (gomp_barrier_last_thread (bstate)) - { - if (__builtin_expect (thr->ts.last_work_share != NULL, 1)) - { - team->work_shares_to_free = thr->ts.work_share; - free_work_share (team, thr->ts.last_work_share); - } - } - thr->ts.last_work_share = NULL; - - return gomp_team_barrier_wait_cancel_end (&team->barrier, bstate); -} - -/* The current thread is done with its current work sharing construct. - This version does NOT imply a barrier at the end of the work-share. */ - -void -gomp_work_share_end_nowait (void) -{ - struct gomp_thread *thr = gomp_thread (); - struct gomp_team *team = thr->ts.team; - struct gomp_work_share *ws = thr->ts.work_share; - unsigned completed; - - /* Work sharing constructs can be orphaned. */ - if (team == NULL) - { - free_work_share (NULL, ws); - thr->ts.work_share = NULL; - return; - } - - if (__builtin_expect (thr->ts.last_work_share == NULL, 0)) - return; - -#ifdef HAVE_SYNC_BUILTINS - completed = __sync_add_and_fetch (&ws->threads_completed, 1); -#else - gomp_mutex_lock (&ws->lock); - completed = ++ws->threads_completed; - gomp_mutex_unlock (&ws->lock); -#endif - - if (completed == team->nthreads) - { - team->work_shares_to_free = thr->ts.work_share; - free_work_share (team, thr->ts.last_work_share); - } - thr->ts.last_work_share = NULL; -} diff --git a/usr/libomp b/usr/libomp deleted file mode 160000 index 4a4ecc1a2e..0000000000 --- a/usr/libomp +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 4a4ecc1a2ebe29ca58213e1902831b1e547e8f16
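The hunks above delete the bundled libgomp-derived OpenMP runtime sources (thread-team and work-share management, the omp_get_wtime/omp_get_wtick timing routines) together with the usr/libomp submodule. A minimal smoke test for the kind of program these runtime services back is sketched below; the cross-compiler name x86_64-hermit-gcc and the plain -fopenmp invocation are illustrative assumptions, not something this patch specifies.

/* hello_omp.c -- hypothetical smoke test, not part of this patch. */
#include <omp.h>
#include <stdio.h>

int main(void)
{
    /* Each thread of the implicit parallel team prints its id; the
       team size is controlled by OMP_NUM_THREADS at run time. */
    #pragma omp parallel
    printf("hello from thread %d of %d\n",
           omp_get_thread_num(), omp_get_num_threads());
    return 0;
}

Assumed build invocation: x86_64-hermit-gcc -fopenmp -o hello_omp hello_omp.c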