Add Travis badge to README #8

Merged: 1 commit into KhronosGroup:cl20_trunk on Oct 23, 2018

Conversation

@kpet (Contributor) commented on Oct 23, 2018

Signed-off-by: Kevin Petit <kevin.petit@arm.com>
@kpet merged commit 53dafd1 into KhronosGroup:cl20_trunk on Oct 23, 2018
@kpet deleted the travis-cl20 branch on October 23, 2018 at 17:31
gwawiork added a commit to gwawiork/OpenCL-CTS that referenced this pull request Apr 3, 2020
Fix ballot_count, ballot_inclusive_scan and ballot_exclusive_scan for non-uniform inputs
yanfeng3721 pushed a commit to yanfeng3721/OpenCL-CTS that referenced this pull request Apr 20, 2022
…hronosGroup#8)

See KhronosGroup#1092.

The atomics.zip was updated as follows:

```sh
# Extract the archive and disassemble the affected bitcode files to LLVM IR (.ll)
unzip atomics.zip
cd atomics
llvm-dis test_atomic_fn.atomic_cmpxchg_{global,local}_{u,}int.bc{32,64}
# Apply the fix to the disassembled .ll files
patch < ../cmpxchg.patch
# Reassemble each patched .ll back into its original bitcode file name
echo -n test_atomic_fn.atomic_cmpxchg_{global,local}_{u,}int.bc{32,64} | xargs -d' ' -l -i llvm-as {}.ll -o {}
rm *.ll
cd ..
# Update the modified entries inside atomics.zip
zip -v -u -r atomics.zip atomics/test_atomic_fn.atomic_cmpxchg_*
```

The cmpxchg.patch applied above is:

```diff
--- atomics/test_atomic_fn.atomic_cmpxchg_global_int.bc32.ll	2021-11-25 11:54:36.886237676 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_global_int.bc32.ll	2021-11-26 08:56:57.278765109 +0800
@@ -33,7 +33,7 @@
   %14 = add nsw i32 %13, 1
   %15 = tail call spir_func i32 @_Z14atomic_cmpxchgPVU3AS1iii(i32 addrspace(1)* %G, i32 %13, i32 %14) #0
   %16 = load volatile i32, i32 addrspace(1)* %G, align 4, !tbaa !10
-  %17 = icmp eq i32 %15, %16
+  %17 = icmp ne i32 %15, %13
   br i1 %17, label %12, label %18

 18:                                               ; preds = %12
--- atomics/test_atomic_fn.atomic_cmpxchg_global_int.bc64.ll	2021-11-25 11:54:36.887237698 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_global_int.bc64.ll	2021-11-26 08:56:57.278765109 +0800
@@ -38,7 +38,7 @@
   %18 = add nsw i32 %17, 1
   %19 = tail call spir_func i32 @_Z14atomic_cmpxchgPVU3AS1iii(i32 addrspace(1)* %G, i32 %17, i32 %18) #0
   %20 = load volatile i32, i32 addrspace(1)* %G, align 4, !tbaa !10
-  %21 = icmp eq i32 %19, %20
+  %21 = icmp ne i32 %19, %17
   br i1 %21, label %16, label %22

 22:                                               ; preds = %16
--- atomics/test_atomic_fn.atomic_cmpxchg_global_int.cl	2021-11-25 11:53:21.915589612 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_global_int.cl	2021-11-26 08:56:57.279765131 +0800
@@ -4,7 +4,7 @@
 kernel void test_atomic_fn(volatile global int *G, global int *Sum) {

   int  lid = get_local_id(0), tid =  get_global_id(0), gid = get_group_id(0);
-  int oldValue, newValue;
+  int origValue, oldValue, newValue;
   local int L[MAX_LOCAL_SIZE];

   if (gid)
@@ -16,10 +16,10 @@
   barrier(CLK_LOCAL_MEM_FENCE);

   do {
-    oldValue = G[gid];
-    newValue = oldValue + 1;
-    oldValue = atomic_cmpxchg(&G[gid], oldValue, newValue);
-  } while (oldValue == G[gid]);
+    origValue = G[gid];
+    newValue = origValue + 1;
+    oldValue = atomic_cmpxchg(&G[gid], origValue, newValue);
+  } while (oldValue != origValue);

   if (lid < MAX_LOCAL_SIZE)
     L[lid] = oldValue;
--- atomics/test_atomic_fn.atomic_cmpxchg_global_uint.bc32.ll	2021-11-25 11:54:36.886237676 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_global_uint.bc32.ll	2021-11-26 08:56:57.279765131 +0800
@@ -17,7 +17,7 @@
   %5 = add nsw i32 %4, 1
   %6 = tail call spir_func i32 @_Z14atomic_cmpxchgPVU3AS1jjj(i32 addrspace(1)* %destMemory, i32 %4, i32 %5) #0
   %7 = load volatile i32, i32 addrspace(1)* %destMemory, align 4, !tbaa !10
-  %8 = icmp eq i32 %6, %7
+  %8 = icmp ne i32 %6, %4
   br i1 %8, label %.preheader, label %9

 9:                                                ; preds = %.preheader
--- atomics/test_atomic_fn.atomic_cmpxchg_global_uint.bc64.ll	2021-11-25 11:54:36.886237676 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_global_uint.bc64.ll	2021-11-26 08:56:57.279765131 +0800
@@ -19,7 +19,7 @@
   %7 = add nsw i32 %6, 1
   %8 = tail call spir_func i32 @_Z14atomic_cmpxchgPVU3AS1jjj(i32 addrspace(1)* %destMemory, i32 %6, i32 %7) #0
   %9 = load volatile i32, i32 addrspace(1)* %destMemory, align 4, !tbaa !10
-  %10 = icmp eq i32 %8, %9
+  %10 = icmp ne i32 %8, %6
   br i1 %10, label %.preheader, label %11

 11:                                               ; preds = %.preheader
--- atomics/test_atomic_fn.atomic_cmpxchg_global_uint.cl	2021-11-25 11:53:21.915589612 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_global_uint.cl	2021-11-26 08:56:57.279765131 +0800
@@ -3,17 +3,17 @@
 kernel void test_atomic_fn(volatile global uint *destMemory, global uint *Sum) {

   int  tid = get_local_id(0), gid = get_group_id(0);
-  int oldValue, newValue;
+  int origValue, oldValue, newValue;
   local uint localValues[MAX_LOCAL_SIZE];

   if (gid)
     return;

   do {
-    oldValue = destMemory[gid];
-    newValue = oldValue + 1;
-    oldValue = atomic_cmpxchg(&destMemory[gid], oldValue, newValue);
-  } while (oldValue == destMemory[gid]);
+    origValue = destMemory[gid];
+    newValue = origValue + 1;
+    oldValue = atomic_cmpxchg(&destMemory[gid], origValue, newValue);
+  } while (oldValue != origValue);

   if (tid < MAX_LOCAL_SIZE)
     localValues[tid] = oldValue;
--- atomics/test_atomic_fn.atomic_cmpxchg_local_int.bc32.ll	2021-11-25 11:54:36.887237698 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_local_int.bc32.ll	2021-11-26 09:01:57.481364572 +0800
@@ -17,18 +17,18 @@
   %8 = getelementptr inbounds i32, i32 addrspace(3)* %L, i32 %1
   store i32 %7, i32 addrspace(3)* %8, align 4, !tbaa !10
   tail call spir_func void @_Z7barrierj(i32 1) #0
-  %.pre = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
   br label %9

 9:                                                ; preds = %9, %4
-  %10 = phi i32 [ %12, %9 ], [ %.pre, %4 ]
+  %10 = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
   %11 = add nsw i32 %10, 1
   %12 = tail call spir_func i32 @_Z14atomic_cmpxchgPVU3AS3iii(i32 addrspace(3)* %L, i32 %10, i32 %11) #0
   %13 = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
-  %14 = icmp eq i32 %12, %13
+  %14 = icmp ne i32 %12, %10
   br i1 %14, label %9, label %15

 15:                                               ; preds = %9
+  tail call spir_func void @_Z7barrierj(i32 1) #0
   store i32 %12, i32 addrspace(3)* %8, align 4, !tbaa !10
   tail call spir_func void @_Z7barrierj(i32 1) #0
   %16 = icmp eq i32 %1, 0
--- atomics/test_atomic_fn.atomic_cmpxchg_local_int.bc64.ll	2021-11-25 11:54:36.887237698 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_local_int.bc64.ll	2021-11-26 09:01:57.481364572 +0800
@@ -22,18 +22,18 @@
   %12 = getelementptr inbounds i32, i32 addrspace(3)* %L, i64 %11
   store i32 %10, i32 addrspace(3)* %12, align 4, !tbaa !10
   tail call spir_func void @_Z7barrierj(i32 1) #0
-  %.pre = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
   br label %13

 13:                                               ; preds = %13, %6
-  %14 = phi i32 [ %16, %13 ], [ %.pre, %6 ]
+  %14 = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
   %15 = add nsw i32 %14, 1
   %16 = tail call spir_func i32 @_Z14atomic_cmpxchgPVU3AS3iii(i32 addrspace(3)* %L, i32 %14, i32 %15) #0
   %17 = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
-  %18 = icmp eq i32 %16, %17
+  %18 = icmp ne i32 %16, %14
   br i1 %18, label %13, label %19

 19:                                               ; preds = %13
+  tail call spir_func void @_Z7barrierj(i32 1) #0
   store i32 %16, i32 addrspace(3)* %12, align 4, !tbaa !10
   tail call spir_func void @_Z7barrierj(i32 1) #0
   %20 = icmp eq i32 %2, 0
--- atomics/test_atomic_fn.atomic_cmpxchg_local_int.cl	2021-11-25 11:53:21.915589612 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_local_int.cl	2021-11-26 09:00:05.862910823 +0800
@@ -2,7 +2,7 @@

 kernel void test_atomic_fn(volatile global int *G, local int *L, local int *S) {
   int  lid = get_local_id(0), tid = get_global_id(0), gid = get_group_id(0);
-  int oldValue, newValue;
+  int origValue, oldValue, newValue;
   int numIters, i;

   if (gid)
@@ -13,10 +13,13 @@
   barrier(CLK_LOCAL_MEM_FENCE);

   do {
-    oldValue = L[0];
-    newValue = oldValue + 1;
-    oldValue = atomic_cmpxchg(&L[0], oldValue, newValue);
-  } while (oldValue == L[0]);
+    origValue = L[0];
+    newValue = origValue + 1;
+    oldValue = atomic_cmpxchg(&L[0], origValue, newValue);
+  } while (oldValue != origValue);
+
+  // Avoid data race on L[0] with work item 0 and other work items.
+  barrier(CLK_LOCAL_MEM_FENCE);

   // Assign the old value, if it is safe to do so.
   L[lid] = oldValue;
--- atomics/test_atomic_fn.atomic_cmpxchg_local_uint.bc32.ll	2021-11-25 11:54:36.887237698 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_local_uint.bc32.ll	2021-11-26 09:01:57.482364594 +0800
@@ -15,18 +15,18 @@
   %6 = getelementptr inbounds i32, i32 addrspace(3)* %L, i32 %1
   store i32 %5, i32 addrspace(3)* %6, align 4, !tbaa !10
   tail call spir_func void @_Z7barrierj(i32 1) #0
-  %.pre = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
   br label %7

 7:                                                ; preds = %7, %4
-  %8 = phi i32 [ %10, %7 ], [ %.pre, %4 ]
+  %8 = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
   %9 = add i32 %8, 1
   %10 = tail call spir_func i32 @_Z14atomic_cmpxchgPVU3AS3jjj(i32 addrspace(3)* %L, i32 %8, i32 %9) #0
   %11 = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
-  %12 = icmp eq i32 %10, %11
+  %12 = icmp ne i32 %10, %8
   br i1 %12, label %7, label %13

 13:                                               ; preds = %7
+  tail call spir_func void @_Z7barrierj(i32 1) #0
   store i32 %10, i32 addrspace(3)* %6, align 4, !tbaa !10
   tail call spir_func void @_Z7barrierj(i32 1) #0
   %14 = icmp eq i32 %1, 0
--- atomics/test_atomic_fn.atomic_cmpxchg_local_uint.bc64.ll	2021-11-25 11:54:36.887237698 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_local_uint.bc64.ll	2021-11-26 09:01:57.482364594 +0800
@@ -18,18 +18,18 @@
   %9 = getelementptr inbounds i32, i32 addrspace(3)* %L, i64 %8
   store i32 %7, i32 addrspace(3)* %9, align 4, !tbaa !10
   tail call spir_func void @_Z7barrierj(i32 1) #0
-  %.pre = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
   br label %10

 10:                                               ; preds = %10, %6
-  %11 = phi i32 [ %13, %10 ], [ %.pre, %6 ]
+  %11 = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
   %12 = add i32 %11, 1
   %13 = tail call spir_func i32 @_Z14atomic_cmpxchgPVU3AS3jjj(i32 addrspace(3)* %L, i32 %11, i32 %12) #0
   %14 = load i32, i32 addrspace(3)* %L, align 4, !tbaa !10
-  %15 = icmp eq i32 %13, %14
+  %15 = icmp ne i32 %13, %11
   br i1 %15, label %10, label %16

 16:                                               ; preds = %10
+  tail call spir_func void @_Z7barrierj(i32 1) #0
   store i32 %13, i32 addrspace(3)* %9, align 4, !tbaa !10
   tail call spir_func void @_Z7barrierj(i32 1) #0
   %17 = icmp eq i32 %2, 0
--- atomics/test_atomic_fn.atomic_cmpxchg_local_uint.cl	2021-11-25 11:53:21.915589612 +0800
+++ atomics/test_atomic_fn.atomic_cmpxchg_local_uint.cl	2021-11-26 09:00:13.888087245 +0800
@@ -2,7 +2,7 @@

 kernel void test_atomic_fn(volatile global uint *G, local uint *L, global uint *S) {
   int  lid = get_local_id(0), tid = get_global_id(0), gid = get_group_id(0);
-  uint oldValue, newValue;
+  uint origValue, oldValue, newValue;
   int i;

   if (gid)
@@ -13,10 +13,13 @@
   barrier(CLK_LOCAL_MEM_FENCE);

   do {
-    oldValue = L[0];
-    newValue = oldValue + 1;
-    oldValue = atomic_cmpxchg(&L[0], oldValue, newValue);
-  } while (oldValue == L[0]);
+    origValue = L[0];
+    newValue = origValue + 1;
+    oldValue = atomic_cmpxchg(&L[0], origValue, newValue);
+  } while (oldValue != origValue);
+
+  // Avoid data race on L[0] with work item 0 and other work items.
+  barrier(CLK_LOCAL_MEM_FENCE);

   // Assign the old value to the local buffer.
   L[lid] = oldValue;
```
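
The essence of the change is that a compare-and-swap retry loop must decide success by comparing the value returned by `atomic_cmpxchg` with the expected value it passed in, rather than by re-reading the destination, which other work items may update concurrently. A minimal sketch of the same retry pattern in C11 (the function name and the use of `<stdatomic.h>` are illustrative, not part of the CTS):

```c
#include <stdatomic.h>

/* Illustrative only: the same retry pattern as the patched kernels,
 * expressed with C11 atomics. The loop exits once the compare-exchange
 * succeeds, i.e. the value observed by the CAS equals the value the
 * increment was based on. */
int fetch_increment_with_cas(atomic_int *counter)
{
    int origValue, newValue;
    do {
        origValue = atomic_load(counter);
        newValue = origValue + 1;
        /* On failure, atomic_compare_exchange_strong stores the current
         * value into origValue, and the loop retries from a fresh read. */
    } while (!atomic_compare_exchange_strong(counter, &origValue, newValue));
    return origValue; /* the value this thread actually replaced */
}
```

The original loops instead exited when the re-read location differed from the CAS result, a test that can give the wrong answer whenever another work item modifies the location between the CAS and the re-read; comparing against `origValue` makes the success check independent of concurrent updates.
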
kbenzie added a commit to kbenzie/OpenCL-CTS that referenced this pull request Oct 28, 2022
While testing an OpenCL driver with ThreadSanitizer enabled, the
OpenCL-CTS suffers from thread leaks in the conversions and bruteforce
tests on POSIX systems. This is because `pthread_join` is never called in
`ThreadPool_Exit` for the `pthread_t`s created by the thread pool.
Instead, the threads are only told to stop waiting on the condition
variable, which unblocks each worker thread but does not clean up
after it.

```
ThreadPool: thread 1 exiting.
ThreadPool: thread 5 exiting.
ThreadPool: thread 4 exiting.
ThreadPool: thread 2 exiting.
ThreadPool: thread 7 exiting.
ThreadPool: thread 0 exiting.
ThreadPool: thread 3 exiting.
ThreadPool: thread 6 exiting.
Thread pool exited in a orderly fashion.
==================
WARNING: ThreadSanitizer: thread leak (pid=2292842)
  Thread T9 (tid=2292855, finished) created by main thread at:
    #0 pthread_create ../../../../src/libsanitizer/tsan/tsan_interceptors_posix.cpp:969 (libtsan.so.0+0x5ad75)
    #1 ThreadPool_Init() <null> (test_conversions+0x35b2c)
    #2 pthread_once ../../../../src/libsanitizer/tsan/tsan_interceptors_posix.cpp:1449 (libtsan.so.0+0x4057c)
    #3 GetThreadCount() <null> (test_conversions+0x36262)
    #4 DoTest(_cl_device_id*, Type, Type, SaturationMode, RoundingMode, _MTdata*) [clone .isra.0] <null> (test_conversions+0x10555)
    #5 test_conversions(_cl_device_id*, _cl_context*, _cl_command_queue*, int) <null> (test_conversions+0x13226)
    #6 callSingleTestFunction(test_definition, _cl_device_id*, int, int, unsigned long) <null> (test_conversions+0x2e66d)
    #7 parseAndCallCommandLineTests(int, char const**, _cl_device_id*, int, test_definition*, int, unsigned long, int) <null> (test_conversions+0x2fb3a)
    #8 runTestHarnessWithCheck(int, char const**, int, test_definition*, int, unsigned long, test_status (*)(_cl_device_id*)) <null> (test_conversions+0x349d8)
    #9 main <null> (test_conversions+0xd725)

  And 7 more similar thread leaks.

SUMMARY: ThreadSanitizer: thread leak (OpenCL-CTS/buildbin/conversions/test_conversions+0x35b2c) in ThreadPool_Init()
```

This patch adds global state to keep track of the `pthread_t`s created
by `pthread_create` in `ThreadPool_Init`. The list of `pthread_t`s is
then used by `ThreadPool_Exit` to call `pthread_join` and clean up the
threads correctly.

A near-identical example, with additional explanation, can be found on
[Stack Overflow](https://stackoverflow.com/questions/72435574/thread-leak-detected-when-using-condition-variable-instead-of-join-with-pthrea).

On the Windows path, a similar change is not necessary because
`_beginthread` is used, which automatically cleans up after itself when
the worker thread function returns.
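
As a rough illustration of the bookkeeping the commit describes (the names and the fixed-size array here are hypothetical, not the harness's actual code), the pool records every `pthread_t` it creates and joins them all on shutdown:

```c
#include <pthread.h>

#define MAX_POOL_THREADS 64

/* Hypothetical global state mirroring the patch description: remember every
 * thread the pool creates so it can be joined when the pool shuts down. */
static pthread_t g_pool_threads[MAX_POOL_THREADS];
static unsigned g_pool_thread_count = 0;

/* Called right after a successful pthread_create in the pool's init path. */
static void pool_remember_thread(pthread_t tid)
{
    if (g_pool_thread_count < MAX_POOL_THREADS)
        g_pool_threads[g_pool_thread_count++] = tid;
}

/* Called from the pool's exit path, after the workers have been signalled
 * to stop waiting on the condition variable. Joining reclaims the thread
 * resources that ThreadSanitizer otherwise reports as leaked. */
static void pool_join_all_threads(void)
{
    for (unsigned i = 0; i < g_pool_thread_count; ++i)
        pthread_join(g_pool_threads[i], NULL);
    g_pool_thread_count = 0;
}
```
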
kbenzie added a commit to kbenzie/OpenCL-CTS that referenced this pull request Nov 2, 2022
bashbaug pushed a commit that referenced this pull request Nov 8, 2022