@@ -254,7 +254,6 @@ static int pooled_thread_func(void *void_arg) {
 API_FUNC hw_pool_init_status hw_pool_init(uint32_t num_threads) {
   if unlikely (!num_threads) return POOL_INIT_NO_THREADS_SPECIFIED;
   uint32_t old = 0u;
-  assert(num_threads < UINT32_MAX);
   if unlikely (!atomic_compare_exchange_strong_explicit(&hw_pool.num_threads,
                                                         &old,
                                                         num_threads,
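With the assert gone, the only guard left is the compare-and-swap itself: the first caller to swap `num_threads` from 0 to a nonzero count wins initialization, and any later or concurrent caller fails the exchange. A minimal standalone sketch of that one-shot-latch pattern, with illustrative names rather than the pool's real API (the memory orders are placeholders; the hunk above truncates before the real ones):

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t pool_size; /* zero-initialized: "not yet initialized" */

/* Returns 1 if this caller won the right to initialize, 0 otherwise. */
static int claim_init(uint32_t requested) {
  uint32_t expected = 0u;
  /* Only the caller that swaps 0 -> requested performs initialization;
     everyone else sees a nonzero value and backs off. */
  return atomic_compare_exchange_strong_explicit(&pool_size,
                                                 &expected,
                                                 requested,
                                                 memory_order_relaxed,
                                                 memory_order_relaxed);
}

int main(void) {
  printf("first claim: %d\n", claim_init(4u));  /* 1: wins */
  printf("second claim: %d\n", claim_init(8u)); /* 0: already initialized */
}
```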
@@ -284,16 +283,21 @@ API_FUNC hw_pool_init_status hw_pool_init(uint32_t num_threads) {
     pooled_thread_control *thread_control =
       (pooled_thread_control *)(buffer + alignment * (size_t)i);
     init_thread_control(thread_control, i, &hw_pool);
-    int status;
+    if (i) {
+      int status;
 #ifdef QPOOL_USE_PTHREADS
-    status = pthread_create(
-      &thread_control->thread, &attr, pooled_thread_func, thread_control);
-    if unlikely (status) goto cleanup_threads;
+      status = pthread_create(
+        &thread_control->thread, &attr, pooled_thread_func, thread_control);
+      if unlikely (status) goto cleanup_threads;
 #else
-    status =
-      thrd_create(&thread_control->thread, pooled_thread_func, thread_control);
-    if unlikely (status != thrd_success) goto cleanup_threads;
+      status = thrd_create(
+        &thread_control->thread, pooled_thread_func, thread_control);
+      if unlikely (status != thrd_success) goto cleanup_threads;
 #endif
+    }
+    // Leave the thread object uninitialized for thread 0.
+    // It needs to be there for the sake of alignment,
+    // but other than that it's unused.
     ++i;
   }
 #ifdef QPOOL_USE_PTHREADS
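The substantive change in this hunk: every slot still gets an initialized `pooled_thread_control` (the buffer layout and alignment depend on it), but `pthread_create`/`thrd_create` is skipped for index 0, because the calling thread will act as worker 0 itself. A reduced, self-contained sketch of that launch shape using C11 threads (all names here are hypothetical stand-ins, not the pool's internals):

```c
#include <stdint.h>
#include <stdio.h>
#include <threads.h>

enum { NUM_WORKERS = 4 };

typedef struct {
  uint32_t id;
  thrd_t thread; /* left uninitialized in slot 0 */
} worker_slot;

static int worker_main(void *arg) {
  worker_slot *slot = arg;
  printf("worker %u running on its own thread\n", (unsigned)slot->id);
  return 0;
}

int main(void) {
  worker_slot slots[NUM_WORKERS] = {0};
  for (uint32_t i = 0u; i < NUM_WORKERS; i++) {
    slots[i].id = i;
    /* Slot 0 is reserved for the calling thread: keep the struct for
       uniform indexing, but never spawn an OS thread for it. */
    if (i &&
        thrd_create(&slots[i].thread, worker_main, &slots[i]) != thrd_success)
      return 1;
  }
  printf("worker 0 is the main thread itself\n");
  for (uint32_t i = 1u; i < NUM_WORKERS; i++) thrd_join(slots[i].thread, NULL);
}
```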
@@ -303,7 +307,13 @@ API_FUNC hw_pool_init_status hw_pool_init(uint32_t num_threads) {
   return POOL_INIT_SUCCESS;
 cleanup_threads:
   if (i) {
+    // The last thread failed to launch, so no need to clean it up.
+    // If an error was raised, it must have been at an iteration
+    // above 0 in the thread-creation loop, since no thread is
+    // created at index 0.
     uint32_t j = --i;
+    // The current thread does the work of worker zero, so there is
+    // no need to signal or join for that one.
     while (i) {
       // TODO: fix deinit to match new layout and interrupt mechanism.
       pooled_thread_control *thread_control =
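The cleanup comments encode an invariant worth spelling out: a create failure implies `i >= 1`, slots `1..i-1` hold live threads, and slot 0 never does, so the unwind must stop before reaching it. A toy fragment of that backward unwind under those assumptions (`worker_slot` is a made-up stand-in, as in the sketch above):

```c
#include <stdint.h>
#include <threads.h>

typedef struct {
  thrd_t thread;
} worker_slot;

/* Unwind after a failed create at index 'failed' (always >= 1 here):
   slots 1..failed-1 hold live threads to join; slot 0 belongs to the
   calling thread and is never joined. */
void unwind_partial_launch(worker_slot *slots, uint32_t failed) {
  uint32_t i = failed;
  while (i > 1u) { /* stop before slot 0 */
    --i;
    thrd_join(slots[i].thread, NULL);
  }
}
```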
@@ -347,17 +357,17 @@ API_FUNC __attribute__((no_sanitize("memory"))) void hw_pool_destroy() {
     atomic_load_explicit(&hw_pool.num_threads, memory_order_relaxed);
   char *buffer = atomic_load_explicit(&hw_pool.threads, memory_order_relaxed);
   size_t alignment = QTHREAD_MAX((size_t)64u, get_cache_line_size());
-  uint32_t i = num_threads;
+  uint32_t i = num_threads - 1u;
+  // Current thread is thread 0, so no need to notify/join that one.
   while (i) {
-    --i;
     // TODO: fix deinit to match new layout and interrupt mechanism.
     pooled_thread_control *thread_control =
       (pooled_thread_control *)(buffer + alignment * (size_t)i);
     notify_worker_of_termination(thread_control);
+    --i;
   }
-  i = num_threads;
+  i = num_threads - 1u;
   while (i) {
-    --i;
     pooled_thread_control *thread_control =
       (pooled_thread_control *)(buffer + alignment * (size_t)i);
     // TODO: crash informatively if join fails somehow.
@@ -366,6 +376,7 @@ API_FUNC __attribute__((no_sanitize("memory"))) void hw_pool_destroy() {
 #else
     thrd_join(thread_control->thread, NULL);
 #endif
+    --i;
   }
 
   atomic_store_explicit(&hw_pool.threads, NULL, memory_order_relaxed);
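Both destroy loops now start at `num_threads - 1u` and decrement at the bottom of the body, so the `while (i)` test stops the walk before slot 0 is ever visited; this assumes `num_threads >= 1`, which `hw_pool_init` guarantees. A tiny demonstration of the indexing:

```c
#include <stdint.h>
#include <stdio.h>

/* Visiting slots N-1 .. 1 by decrementing at the *end* of the loop body:
   the 'while (i)' test then terminates before slot 0 is reached. */
int main(void) {
  uint32_t num_threads = 4u;
  uint32_t i = num_threads - 1u;
  while (i) {
    printf("notify/join slot %u\n", (unsigned)i);
    --i;
  }
  /* prints 3, 2, 1 -- slot 0 (the calling thread) is skipped */
}
```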
@@ -379,31 +390,40 @@ API_FUNC uint32_t get_num_delegated_threads() {
   return 1;
 }
 
-// TODO: have the main thread fill the role of thread 0.
-// Instead of having the main thread wait/resume, swap in its thread-locals
-// then have it run the per-thread function.
-// This will avoid the suspend/resume OS overheads for at least that thread.
+// Note: the current thread fills the role of thread zero in the pool.
 
 API_FUNC void
 pool_run_on_all(pool_header *pool, qt_threadpool_func_type func, void *arg) {
   uint32_t num_threads =
     atomic_load_explicit(&pool->num_threads, memory_order_relaxed);
   assert(num_threads);
-  assert(num_threads < UINT32_MAX);
-  char *buffer =
-    (char *)atomic_load_explicit(&pool->threads, memory_order_relaxed);
-  atomic_store_explicit(
-    &pool->num_active_threads, num_threads, memory_order_relaxed);
-  init_main_sync(pool);
-  size_t alignment = QTHREAD_MAX((size_t)64u, get_cache_line_size());
-  for (uint32_t i = 0u;
-       i < atomic_load_explicit(&pool->num_threads, memory_order_relaxed);
-       i++) {
-    pooled_thread_control *thread_control =
-      (pooled_thread_control *)(buffer + alignment * (size_t)i);
-    launch_work_on_thread(thread_control, func, arg);
+  if (num_threads > 1u) {
+    char *buffer =
+      (char *)atomic_load_explicit(&pool->threads, memory_order_relaxed);
+    atomic_store_explicit(
+      &pool->num_active_threads, num_threads - 1u, memory_order_relaxed);
+    init_main_sync(pool);
+    size_t alignment = QTHREAD_MAX((size_t)64u, get_cache_line_size());
+    for (uint32_t i = 1u;
+         i < atomic_load_explicit(&pool->num_threads, memory_order_relaxed);
+         i++) {
+      pooled_thread_control *thread_control =
+        (pooled_thread_control *)(buffer + alignment * (size_t)i);
+      launch_work_on_thread(thread_control, func, arg);
+    }
+  }
+  uint32_t outer_index = context_index;
+  context_index = 0u;
+  pool_header *outer_delegated_pool = delegated_pool;
+  delegated_pool = NULL;
+  func(arg);
+  delegated_pool = outer_delegated_pool;
+  context_index = outer_index;
+  if (num_threads > 1u) {
+    // Some loops may have threads that take dramatically longer,
+    // so we still suspend, but potentially for much less time.
+    suspend_main_while_working(pool);
   }
-  suspend_main_while_working(pool);
 }
 
 API_FUNC void run_on_current_pool(qt_threadpool_func_type func, void *arg) {
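The restructured `pool_run_on_all` boils down to: launch helpers for slots 1..N-1, temporarily retarget the calling thread's thread-locals so it looks like worker 0, run `func` inline, restore the outer identity, and only then wait, and only if helpers exist. A condensed sketch of that save-impersonate-restore pattern (the thread-locals here are stand-ins for the diff's `context_index` and `delegated_pool`; everything else is invented for illustration):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the pool's per-thread state. */
static _Thread_local uint32_t context_index;
static _Thread_local void *delegated_pool;

static void work(void *arg) {
  (void)arg;
  printf("running as worker %u\n", (unsigned)context_index); /* prints 0 */
}

int main(void) {
  int some_pool;       /* dummy stand-in for an enclosing pool */
  context_index = 7u;  /* pretend we arrived here as worker 7 elsewhere */
  delegated_pool = &some_pool;
  /* Save the caller's identity, impersonate worker 0, run inline, restore. */
  uint32_t outer_index = context_index;
  void *outer_pool = delegated_pool;
  context_index = 0u;
  delegated_pool = NULL;
  work(NULL);
  delegated_pool = outer_pool;
  context_index = outer_index;
  printf("restored to context %u\n", (unsigned)context_index); /* prints 7 */
}
```

Restoring the outer values is what keeps the call re-entrant: a pool run launched from inside another pool's worker gets its original identity back once the nested run finishes.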