/*
 * thread: Add a function to pop and call all registered cleanup handlers
 * [strongswan.git] / src / libstrongswan / tests / suites / test_threading.c
 */
1 /*
2 * Copyright (C) 2013 Tobias Brunner
3 * Copyright (C) 2008 Martin Willi
4 * Hochschule fuer Technik Rapperswil
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16
17 #include "test_suite.h"
18
19 #include <unistd.h>
20
21 #include <threading/thread.h>
22 #include <threading/mutex.h>
23 #include <threading/condvar.h>
24 #include <threading/rwlock.h>
25 #include <threading/rwlock_condvar.h>
26 #include <threading/spinlock.h>
27 #include <threading/semaphore.h>
28 #include <threading/thread_value.h>
29
30 /*******************************************************************************
31 * recursive mutex test
32 */
33
34 #define THREADS 20
35
36 /**
37 * Thread barrier data
38 */
39 typedef struct {
40 mutex_t *mutex;
41 condvar_t *cond;
42 int count;
43 int current;
44 bool active;
45 } barrier_t;
46
47 /**
48 * Create a thread barrier for count threads
49 */
50 static barrier_t* barrier_create(int count)
51 {
52 barrier_t *this;
53
54 INIT(this,
55 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
56 .cond = condvar_create(CONDVAR_TYPE_DEFAULT),
57 .count = count,
58 );
59
60 return this;
61 }
62
63 /**
64 * Destroy a thread barrier
65 */
66 static void barrier_destroy(barrier_t *this)
67 {
68 this->mutex->destroy(this->mutex);
69 this->cond->destroy(this->cond);
70 free(this);
71 }
72
73 /**
74 * Wait to have configured number of threads in barrier
75 */
76 static bool barrier_wait(barrier_t *this)
77 {
78 bool winner = FALSE;
79
80 this->mutex->lock(this->mutex);
81 if (!this->active)
82 { /* first, reset */
83 this->active = TRUE;
84 this->current = 0;
85 }
86
87 this->current++;
88 while (this->current < this->count)
89 {
90 this->cond->wait(this->cond, this->mutex);
91 }
92 if (this->active)
93 { /* first, win */
94 winner = TRUE;
95 this->active = FALSE;
96 }
97 this->mutex->unlock(this->mutex);
98 this->cond->broadcast(this->cond);
99 sched_yield();
100
101 return winner;
102 }
103
104 /**
105 * Barrier for some tests
106 */
107 static barrier_t *barrier;
108
109 /**
110 * A mutex for tests requiring one
111 */
112 static mutex_t *mutex;
113
114 /**
115 * A condvar for tests requiring one
116 */
117 static condvar_t *condvar;
118
119 /**
120 * A counter for signaling
121 */
122 static int sigcount;
123
124 static void *mutex_run(void *data)
125 {
126 int locked = 0;
127 int i;
128
129 /* wait for all threads before getting in action */
130 barrier_wait(barrier);
131
132 for (i = 0; i < 100; i++)
133 {
134 mutex->lock(mutex);
135 mutex->lock(mutex);
136 mutex->lock(mutex);
137 locked++;
138 sched_yield();
139 if (locked > 1)
140 {
141 fail("two threads locked the mutex concurrently");
142 }
143 locked--;
144 mutex->unlock(mutex);
145 mutex->unlock(mutex);
146 mutex->unlock(mutex);
147 }
148 return NULL;
149 }
150
/**
 * Check recursive locking/unlocking in a single thread, then race THREADS
 * workers over the same recursive mutex.
 */
START_TEST(test_mutex)
{
	thread_t *threads[THREADS];
	int i;

	barrier = barrier_create(THREADS);
	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);

	/* balanced lock/unlock pairs */
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
		mutex->unlock(mutex);
	}
	/* 10 recursive locks, then 10 unlocks */
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->unlock(mutex);
	}

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(mutex_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	barrier_destroy(barrier);
}
END_TEST
186
187 /**
188 * Spinlock for testing
189 */
190 static spinlock_t *spinlock;
191
/**
 * Worker: hammer the spinlock; data points to a counter shared by all
 * threads that must never exceed 1 inside the critical section.
 */
static void *spinlock_run(void *data)
{
	int i, *locked = (int*)data;

	barrier_wait(barrier);

	for (i = 0; i < 1000; i++)
	{
		spinlock->lock(spinlock);
		(*locked)++;
		ck_assert_int_eq(*locked, 1);
		(*locked)--;
		spinlock->unlock(spinlock);
	}
	return NULL;
}
208
/**
 * Race THREADS workers over a spinlock protecting a shared counter.
 */
START_TEST(test_spinlock)
{
	thread_t *threads[THREADS];
	int i, locked = 0;

	barrier = barrier_create(THREADS);
	spinlock = spinlock_create();

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(spinlock_run, &locked);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	spinlock->destroy(spinlock);
	barrier_destroy(barrier);
}
END_TEST
230
/**
 * Worker: increment sigcount under the mutex and signal the condvar.
 */
static void *condvar_run(void *data)
{
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	return NULL;
}
239
/**
 * Wait on the condvar until all THREADS workers have signaled once.
 */
START_TEST(test_condvar)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run, NULL);
	}

	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
270
/**
 * Worker: signal sigcount while holding the recursive mutex three deep.
 */
static void *condvar_recursive_run(void *data)
{
	mutex->lock(mutex);
	mutex->lock(mutex);
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	return NULL;
}
283
/**
 * Check condvar waits on a recursively-held mutex: wait() must release all
 * recursive acquisitions so the workers can make progress.
 */
START_TEST(test_condvar_recursive)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	/* hold the mutex while spawning, workers block until we wait below */
	mutex->lock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_recursive_run, NULL);
	}

	/* wait with the mutex held three levels deep */
	mutex->lock(mutex);
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
319
320 static void *condvar_run_broad(void *data)
321 {
322 mutex->lock(mutex);
323 while (sigcount < 0)
324 {
325 condvar->wait(condvar, mutex);
326 }
327 mutex->unlock(mutex);
328 return NULL;
329 }
330
/**
 * Wake all waiting workers with a single broadcast.
 */
START_TEST(test_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run_broad, NULL);
	}

	/* give the workers a chance to reach their wait */
	sched_yield();

	mutex->lock(mutex);
	sigcount = 1;
	condvar->broadcast(condvar);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
361
/**
 * Check that a relative timed wait (a) times out no earlier than requested
 * and (b) returns FALSE when woken by a signal before the timeout.
 */
START_TEST(test_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	mutex->lock(mutex);
	/* retry on spurious wakeups until the wait actually times out */
	while (TRUE)
	{
		time_monotonic(&start);
		if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
		{
			break;
		}
	}
	time_monotonic(&end);
	mutex->unlock(mutex);
	timersub(&end, &start, &end);
	/* NOTE(review): %u assumes 32-bit tv_sec/tv_usec; on LP64 platforms
	 * struct timeval members are long - confirm timeval_t's definition */
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(condvar_run, NULL);

	mutex->lock(mutex);
	while (sigcount == 0)
	{
		/* a 1s timeout must not expire before the worker signals */
		ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
	}
	mutex->unlock(mutex);

	thread->join(thread);
	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
400
401 START_TEST(test_condvar_timed_abs)
402 {
403 thread_t *thread;
404 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
405
406 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
407 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
408 sigcount = 0;
409
410 mutex->lock(mutex);
411 while (TRUE)
412 {
413 time_monotonic(&start);
414 timeradd(&start, &diff, &abso);
415 if (condvar->timed_wait_abs(condvar, mutex, abso))
416 {
417 break;
418 }
419 }
420 time_monotonic(&end);
421 mutex->unlock(mutex);
422 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
423 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
424
425 thread = thread_create(condvar_run, NULL);
426
427 time_monotonic(&start);
428 diff.tv_sec = 1;
429 timeradd(&start, &diff, &abso);
430 mutex->lock(mutex);
431 while (sigcount == 0)
432 {
433 ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
434 }
435 mutex->unlock(mutex);
436
437 thread->join(thread);
438 mutex->destroy(mutex);
439 condvar->destroy(condvar);
440 }
441 END_TEST
442
/**
 * Worker: signal arrival, then wait on the condvar until cancelled; a
 * cleanup handler releases the mutex reacquired during cancellation.
 */
static void *condvar_cancel_run(void *data)
{
	/* don't get cancelled before we registered the cleanup handler */
	thread_cancelability(FALSE);

	mutex->lock(mutex);

	sigcount++;
	condvar->broadcast(condvar);

	/* wait() reacquires the mutex on cancellation, unlock it via cleanup */
	thread_cleanup_push((void*)mutex->unlock, mutex);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		condvar->wait(condvar, mutex);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}
462
/**
 * Cancel THREADS workers blocked in condvar->wait().
 */
START_TEST(test_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
498
499 /**
500 * RWlock for different tests
501 */
502 static rwlock_t *rwlock;
503
/**
 * Worker: exercise read, try-write, write and recursive read locking; refs
 * counts concurrent readers, so it must be 0 whenever we hold the write lock.
 */
static void *rwlock_run(refcount_t *refs)
{
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);

	if (rwlock->try_write_lock(rwlock))
	{
		/* no readers may be active while we hold the write lock */
		ck_assert_int_eq(*refs, 0);
		sched_yield();
		rwlock->unlock(rwlock);
	}

	rwlock->write_lock(rwlock);
	ck_assert_int_eq(*refs, 0);
	sched_yield();
	rwlock->unlock(rwlock);

	/* recursive read locking */
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	return NULL;
}
534
/**
 * Race THREADS workers over a read/write lock.
 */
START_TEST(test_rwlock)
{
	thread_t *threads[THREADS];
	refcount_t refs = 0;
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create((void*)rwlock_run, &refs);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
}
END_TEST
555
556 static void *rwlock_try_run(void *param)
557 {
558 if (rwlock->try_write_lock(rwlock))
559 {
560 rwlock->unlock(rwlock);
561 return param;
562 }
563 return NULL;
564 }
565
/**
 * Check try_write_lock(): succeeds on an unheld lock, fails while the lock
 * is read-held (even recursively) or write-held.
 */
START_TEST(test_rwlock_try)
{
	uintptr_t magic = 0xcafebabe;
	thread_t *thread;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* unheld: try must succeed and return our magic */
	thread = thread_create(rwlock_try_run, (void*)magic);
	ck_assert_int_eq((uintptr_t)thread->join(thread), magic);

	/* read-held: try must fail */
	rwlock->read_lock(rwlock);
	thread = thread_create(rwlock_try_run, (void*)magic);
	ck_assert(thread->join(thread) == NULL);
	rwlock->unlock(rwlock);

	/* recursively read-held: try must fail */
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	thread = thread_create(rwlock_try_run, (void*)magic);
	ck_assert(thread->join(thread) == NULL);
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	/* write-held: try must fail */
	rwlock->write_lock(rwlock);
	thread = thread_create(rwlock_try_run, (void*)magic);
	ck_assert(thread->join(thread) == NULL);
	rwlock->unlock(rwlock);

	rwlock->destroy(rwlock);
}
END_TEST
598
599 /**
600 * Rwlock condvar
601 */
602 static rwlock_condvar_t *rwcond;
603
/**
 * Worker: increment sigcount under the write lock and signal the rwcond.
 */
static void *rwlock_condvar_run(void *data)
{
	rwlock->write_lock(rwlock);
	sigcount++;
	rwcond->signal(rwcond);
	rwlock->unlock(rwlock);
	return NULL;
}
612
/**
 * Wait on the rwlock condvar until all THREADS workers have signaled once.
 */
START_TEST(test_rwlock_condvar)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run, NULL);
	}

	rwlock->write_lock(rwlock);
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
643
644 static void *rwlock_condvar_run_broad(void *data)
645 {
646 rwlock->write_lock(rwlock);
647 while (sigcount < 0)
648 {
649 rwcond->wait(rwcond, rwlock);
650 }
651 rwlock->unlock(rwlock);
652 return NULL;
653 }
654
/**
 * Wake all workers waiting on the rwlock condvar with a single broadcast.
 */
START_TEST(test_rwlock_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run_broad, NULL);
	}

	/* give the workers a chance to reach their wait */
	sched_yield();

	rwlock->write_lock(rwlock);
	sigcount = 1;
	rwcond->broadcast(rwcond);
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
685
/**
 * Check relative timed waits on a rwlock condvar: timeout not early, and a
 * signal wakes the wait before a 1s timeout expires.
 */
START_TEST(test_rwlock_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	rwlock->write_lock(rwlock);
	/* retry on spurious wakeups until the wait actually times out */
	while (TRUE)
	{
		time_monotonic(&start);
		if (rwcond->timed_wait(rwcond, rwlock, diff.tv_usec / 1000))
		{
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	timersub(&end, &start, &end);
	/* NOTE(review): %u assumes 32-bit tv_sec/tv_usec members */
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(rwlock_condvar_run, NULL);

	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait(rwcond, rwlock, 1000));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
724
/**
 * Check absolute timed waits on a rwlock condvar: timeout not before the
 * deadline, and a signal wakes the wait before a 1s deadline.
 */
START_TEST(test_rwlock_condvar_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	rwlock->write_lock(rwlock);
	/* retry on spurious wakeups until the wait actually times out */
	while (TRUE)
	{
		time_monotonic(&start);
		timeradd(&start, &diff, &abso);
		if (rwcond->timed_wait_abs(rwcond, rwlock, abso))
		{
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	/* end is absolute, so compare against the absolute deadline */
	ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
				  end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);

	thread = thread_create(rwlock_condvar_run, NULL);

	time_monotonic(&start);
	diff.tv_sec = 1;
	timeradd(&start, &diff, &abso);
	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait_abs(rwcond, rwlock, abso));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
766
/**
 * Worker: signal arrival, then wait on the rwcond until cancelled; a
 * cleanup handler releases the write lock reacquired during cancellation.
 */
static void *rwlock_condvar_cancel_run(void *data)
{
	/* don't get cancelled before we registered the cleanup handler */
	thread_cancelability(FALSE);

	rwlock->write_lock(rwlock);

	sigcount++;
	rwcond->broadcast(rwcond);

	/* wait() reacquires the lock on cancellation, unlock it via cleanup */
	thread_cleanup_push((void*)rwlock->unlock, rwlock);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		rwcond->wait(rwcond, rwlock);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}
786
/**
 * Cancel THREADS workers blocked in rwcond->wait().
 */
START_TEST(test_rwlock_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	rwlock->write_lock(rwlock);
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
822
823 /**
824 * Semaphore for different tests
825 */
826 static semaphore_t *semaphore;
827
/**
 * Worker: post the semaphore once.
 */
static void *semaphore_run(void *data)
{
	semaphore->post(semaphore);
	return NULL;
}
833
/**
 * Consume the initial semaphore value plus one post per worker thread.
 */
START_TEST(test_semaphore)
{
	thread_t *threads[THREADS];
	int i, initial = 5;

	semaphore = semaphore_create(initial);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(semaphore_run, NULL);
	}
	/* initial count plus one post per thread */
	for (i = 0; i < THREADS + initial; i++)
	{
		semaphore->wait(semaphore);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	semaphore->destroy(semaphore);
}
END_TEST
857
/**
 * Check relative timed semaphore waits: timeout not early, and a post wakes
 * the wait before a 1s timeout expires.
 */
START_TEST(test_semaphore_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	semaphore = semaphore_create(0);

	time_monotonic(&start);
	/* must time out, the semaphore has no tokens */
	ck_assert(semaphore->timed_wait(semaphore, diff.tv_usec / 1000));
	time_monotonic(&end);
	timersub(&end, &start, &end);
	/* NOTE(review): %u assumes 32-bit tv_sec/tv_usec members */
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(semaphore_run, NULL);

	/* the worker's post must wake us before the timeout */
	ck_assert(!semaphore->timed_wait(semaphore, 1000));

	thread->join(thread);
	semaphore->destroy(semaphore);
}
END_TEST
880
/**
 * Check absolute timed semaphore waits: timeout not before the deadline,
 * and a post wakes the wait before a 1s deadline.
 */
START_TEST(test_semaphore_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	semaphore = semaphore_create(0);

	time_monotonic(&start);
	timeradd(&start, &diff, &abso);
	/* must time out, the semaphore has no tokens */
	ck_assert(semaphore->timed_wait_abs(semaphore, abso));
	time_monotonic(&end);
	ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
				  end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);

	thread = thread_create(semaphore_run, NULL);

	time_monotonic(&start);
	diff.tv_sec = 1;
	timeradd(&start, &diff, &abso);
	/* the worker's post must wake us before the deadline */
	ck_assert(!semaphore->timed_wait_abs(semaphore, abso));

	thread->join(thread);
	semaphore->destroy(semaphore);
}
END_TEST
906
/**
 * Worker: report readiness, then block forever in semaphore->wait() until
 * cancelled; reaching the assertion means cancellation failed.
 */
static void *semaphore_cancel_run(void *data)
{
	refcount_t *ready = (refcount_t*)data;

	thread_cancelability(FALSE);
	ref_get(ready);

	thread_cancelability(TRUE);
	semaphore->wait(semaphore);

	/* only reached if the wait was not cancelled */
	ck_assert(FALSE);
	return NULL;
}
920
/**
 * Cancel THREADS workers blocked in semaphore->wait().
 */
START_TEST(test_semaphore_cancel)
{
	thread_t *threads[THREADS];
	refcount_t ready = 0;
	int i;

	semaphore = semaphore_create(0);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(semaphore_cancel_run, &ready);
	}
	/* wait until all workers disabled cancelability and are about to wait */
	while (ready < THREADS)
	{
		sched_yield();
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	semaphore->destroy(semaphore);
}
END_TEST
949
950 static void *join_run(void *data)
951 {
952 /* force some context switches */
953 sched_yield();
954 return (void*)((uintptr_t)data + THREADS);
955 }
956
/**
 * Check that join() returns the value the worker returned.
 */
START_TEST(test_join)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST
972
/**
 * Worker: terminate via thread_exit() instead of returning.
 */
static void *exit_join_run(void *data)
{
	sched_yield();
	thread_exit((void*)((uintptr_t)data + THREADS));
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
981
/**
 * Check that join() returns the value passed to thread_exit().
 */
START_TEST(test_join_exit)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST
997
/**
 * Worker: decrement the shared running counter and terminate.
 */
static void *detach_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	return NULL;
}
1005
/**
 * Detach THREADS workers and wait until all have run to completion.
 */
START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. give the threads time to clean up. */
	usleep(10000);
}
END_TEST
1030
/**
 * Worker: decrement the shared running counter, then terminate via
 * thread_exit() instead of returning.
 */
static void *detach_exit_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
1041
/**
 * Detach THREADS workers that terminate via thread_exit().
 */
START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. give the threads time to clean up. */
	usleep(10000);
}
END_TEST
1066
/**
 * Worker: sleep forever; sleep() is a cancellation point, so cancel()
 * terminates the thread.
 */
static void *cancel_run(void *data)
{
	/* default cancellability should be TRUE, so don't change it */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}
1076
/**
 * Cancel THREADS workers blocked in sleep().
 */
START_TEST(test_cancel)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
1096
/**
 * Worker: disable cancelability during a sleep, then re-enable it and block;
 * data flags to the main thread whether cancellation is currently allowed.
 */
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	thread_cancelability(FALSE);
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* but here */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}
1117
/**
 * Check that cancellation is deferred while a thread has cancelability
 * disabled and delivered once it is re-enabled.
 */
START_TEST(test_cancel_onoff)
{
	thread_t *threads[THREADS];
	bool cancellable[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		cancellable[i] = TRUE;
		threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* wait until thread has cleared its cancellability */
		while (cancellable[i])
		{
			sched_yield();
		}
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* the thread re-enabled cancelability before it got cancelled */
		ck_assert(cancellable[i]);
	}
}
END_TEST
1145
/**
 * Worker: spin on thread_cancellation_point() with cancelability otherwise
 * disabled, so cancellation can only be delivered at the explicit point.
 */
static void *cancel_point_run(void *data)
{
	thread_cancelability(FALSE);
	while (TRUE)
	{
		/* implicitly enables cancellability */
		thread_cancellation_point();
	}
	return NULL;
}
1156
/**
 * Cancel THREADS workers spinning on an explicit cancellation point.
 */
START_TEST(test_cancel_point)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_point_run, NULL);
	}
	sched_yield();
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
1177
/**
 * Cleanup handler closing the file descriptor the argument points to.
 */
static void close_fd_ptr(void *fd)
{
	int *fdp = (int*)fd;

	close(*fdp);
}
1182
/**
 * Block in recv() on a socketpair that never receives data; cleanup
 * handlers close both descriptors when the thread is cancelled.
 */
static void cancellation_recv()
{
	int sv[2];
	char buf[1];

	ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);

	thread_cleanup_push(close_fd_ptr, &sv[0]);
	thread_cleanup_push(close_fd_ptr, &sv[1]);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		ck_assert(recv(sv[0], buf, sizeof(buf), 0) == 1);
	}
}
1199
/**
 * Block in read() on a socketpair that never receives data; cleanup
 * handlers close both descriptors when the thread is cancelled.
 */
static void cancellation_read()
{
	int sv[2];
	char buf[1];

	ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);

	thread_cleanup_push(close_fd_ptr, &sv[0]);
	thread_cleanup_push(close_fd_ptr, &sv[1]);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		ck_assert(read(sv[0], buf, sizeof(buf)) == 1);
	}
}
1216
/**
 * Block in select() on a socketpair that never becomes readable; cleanup
 * handlers close both descriptors when the thread is cancelled.
 */
static void cancellation_select()
{
	int sv[2];
	fd_set set;

	ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);

	thread_cleanup_push(close_fd_ptr, &sv[0]);
	thread_cleanup_push(close_fd_ptr, &sv[1]);

	FD_ZERO(&set);
	FD_SET(sv[0], &set);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		ck_assert(select(sv[0] + 1, &set, NULL, NULL, NULL) == 1);
	}
}
1235
/**
 * Block in poll() on a socketpair that never becomes readable; cleanup
 * handlers close both descriptors when the thread is cancelled.
 */
static void cancellation_poll()
{
	int sv[2];
	struct pollfd pfd;

	ck_assert(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);

	thread_cleanup_push(close_fd_ptr, &sv[0]);
	thread_cleanup_push(close_fd_ptr, &sv[1]);

	pfd.fd = sv[0];
	pfd.events = POLLIN;
	thread_cancelability(TRUE);
	while (TRUE)
	{
		ck_assert(poll(&pfd, 1, -1) == 1);
	}
}
1254
/**
 * Block in accept() on a loopback listener that never gets connections; a
 * cleanup handler closes the listening socket when the thread is cancelled.
 */
static void cancellation_accept()
{
	host_t *host;
	int fd, c;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	ck_assert(fd >= 0);
	/* bind to an ephemeral port on loopback */
	host = host_create_from_string("127.0.0.1", 0);
	ck_assert_msg(bind(fd, host->get_sockaddr(host),
				  *host->get_sockaddr_len(host)) == 0, "%m");
	host->destroy(host);
	ck_assert(listen(fd, 5) == 0);

	thread_cleanup_push(close_fd_ptr, &fd);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		c = accept(fd, NULL, NULL);
		ck_assert(c >= 0);
		close(c);
	}
}
1278
/**
 * Block in condvar->wait() on a private mutex/condvar pair; cleanup
 * handlers destroy both when the thread is cancelled.
 */
static void cancellation_cond()
{
	mutex_t *mutex;
	condvar_t *cond;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	cond = condvar_create(CONDVAR_TYPE_DEFAULT);
	mutex->lock(mutex);

	thread_cleanup_push((void*)mutex->destroy, mutex);
	thread_cleanup_push((void*)cond->destroy, cond);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		cond->wait(cond, mutex);
	}
}
1297
/**
 * Block in rwlock condvar wait() on a private lock/condvar pair; cleanup
 * handlers destroy both when the thread is cancelled.
 */
static void cancellation_rwcond()
{
	rwlock_t *lock;
	rwlock_condvar_t *cond;

	lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	cond = rwlock_condvar_create();
	lock->write_lock(lock);

	thread_cleanup_push((void*)lock->destroy, lock);
	thread_cleanup_push((void*)cond->destroy, cond);

	thread_cancelability(TRUE);
	while (TRUE)
	{
		cond->wait(cond, lock);
	}
}
1316
/**
 * Blocking calls exercised as cancellation points by the looped tests below
 */
static void (*cancellation_points[])() = {
	cancellation_read,
	cancellation_recv,
	cancellation_select,
	cancellation_poll,
	cancellation_accept,
	cancellation_cond,
	cancellation_rwcond,
};
1326
/**
 * Thread entry invoking the given cancellation point function directly.
 */
static void* run_cancellation_point(void (*fn)())
{
	fn();
	return NULL;
}
1332
/**
 * Thread entry that delays before the cancellation point, so the main
 * thread can issue the cancel before the blocking call is entered.
 */
static void* run_cancellation_point_pre(void (*fn)())
{
	usleep(5000);
	fn();
	return NULL;
}
1339
/**
 * Cancel a thread while it blocks in cancellation_points[_i] (looped test,
 * _i is check's loop index).
 */
START_TEST(test_cancellation_point)
{
	thread_t *thread;

	thread = thread_create((void*)run_cancellation_point,
						   cancellation_points[_i]);
	/* give the thread time to enter the blocking call */
	usleep(5000);
	thread->cancel(thread);
	thread->join(thread);
}
END_TEST
1351
/**
 * Cancel a thread before it reaches cancellation_points[_i]; the pending
 * cancellation must be delivered once the blocking call is entered.
 */
START_TEST(test_cancellation_point_pre)
{
	thread_t *thread;

	thread = thread_create((void*)run_cancellation_point_pre,
						   cancellation_points[_i]);
	thread->cancel(thread);
	thread->join(thread);
}
END_TEST
1362
/**
 * First cleanup handler: expects the counter at 1, advances it to 2.
 */
static void cleanup1(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	ck_assert_int_eq(*counter, 1);
	(*counter)++;
}
1370
/**
 * Second cleanup handler: expects the counter at 2, advances it to 3.
 */
static void cleanup2(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	ck_assert_int_eq(*counter, 2);
	(*counter)++;
}
1378
/**
 * Third cleanup handler: expects the counter at 3, advances it to 4.
 */
static void cleanup3(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	ck_assert_int_eq(*counter, 3);
	(*counter)++;
}
1386
/**
 * Worker: push three cleanup handlers and return; they must run in
 * reverse push order (cleanup1, cleanup2, cleanup3) on termination.
 */
static void *cleanup_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	return NULL;
}
1394
/**
 * Check that pushed cleanup handlers run on normal thread return.
 */
START_TEST(test_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* all three handlers incremented the counter: 1 -> 4 */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
1413
/**
 * Worker: push three cleanup handlers, then terminate via thread_exit();
 * the handlers must run just as on a normal return.
 */
static void *cleanup_exit_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
1423
/**
 * Check that pushed cleanup handlers run on thread_exit().
 */
START_TEST(test_cleanup_exit)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_exit_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* all three handlers incremented the counter: 1 -> 4 */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
1442
/**
 * Worker: push three cleanup handlers, then sleep until cancelled; the
 * handlers must run during cancellation.
 */
static void *cleanup_cancel_run(void *data)
{
	/* don't get cancelled before the handlers are registered */
	thread_cancelability(FALSE);

	barrier_wait(barrier);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	while (TRUE)
	{
		sleep(1);
	}
	return NULL;
}
1461
/**
 * Check that pushed cleanup handlers run on cancellation.
 */
START_TEST(test_cleanup_cancel)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	/* +1 as the main thread joins the barrier, too */
	barrier = barrier_create(THREADS+1);
	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_cancel_run, &values[i]);
	}
	barrier_wait(barrier);
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* all three handlers incremented the counter: 1 -> 4 */
		ck_assert_int_eq(values[i], 4);
	}
	barrier_destroy(barrier);
}
END_TEST
1487
/**
 * Exercise thread_cleanup_pop(): a handler popped with FALSE is discarded,
 * one popped with TRUE runs immediately; the remaining handlers run on
 * thread termination.
 */
static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	/* push an extra handler, then pop it without executing it */
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	/* pop the topmost handler (cleanup1) and execute it right away */
	thread_cleanup_pop(TRUE);
	return NULL;
}
1500
1501 START_TEST(test_cleanup_pop)
1502 {
1503 thread_t *threads[THREADS];
1504 uintptr_t values[THREADS];
1505 int i;
1506
1507 for (i = 0; i < THREADS; i++)
1508 {
1509 values[i] = 1;
1510 threads[i] = thread_create(cleanup_pop_run, &values[i]);
1511 }
1512 for (i = 0; i < THREADS; i++)
1513 {
1514 threads[i]->join(threads[i]);
1515 ck_assert_int_eq(values[i], 4);
1516 }
1517 }
1518 END_TEST
1519
1520 static void *cleanup_popall_run(void *data)
1521 {
1522 thread_cleanup_push(cleanup3, data);
1523 thread_cleanup_push(cleanup2, data);
1524 thread_cleanup_push(cleanup1, data);
1525
1526 thread_cleanup_popall();
1527 return NULL;
1528 }
1529
1530 START_TEST(test_cleanup_popall)
1531 {
1532 thread_t *threads[THREADS];
1533 uintptr_t values[THREADS];
1534 int i;
1535
1536 for (i = 0; i < THREADS; i++)
1537 {
1538 values[i] = 1;
1539 threads[i] = thread_create(cleanup_popall_run, &values[i]);
1540 }
1541 for (i = 0; i < THREADS; i++)
1542 {
1543 threads[i]->join(threads[i]);
1544 ck_assert_int_eq(values[i], 4);
1545 }
1546 }
1547 END_TEST
1548
1549
1550 static thread_value_t *tls[10];
1551
1552 static void *tls_run(void *data)
1553 {
1554 uintptr_t value = (uintptr_t)data;
1555 int i, j;
1556
1557 for (i = 0; i < countof(tls); i++)
1558 {
1559 ck_assert(tls[i]->get(tls[i]) == NULL);
1560 }
1561 for (i = 0; i < countof(tls); i++)
1562 {
1563 tls[i]->set(tls[i], (void*)(value * i));
1564 }
1565 for (j = 0; j < 1000; j++)
1566 {
1567 for (i = 0; i < countof(tls); i++)
1568 {
1569 tls[i]->set(tls[i], (void*)(value * i));
1570 ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
1571 }
1572 sched_yield();
1573 }
1574 for (i = 0; i < countof(tls); i++)
1575 {
1576 ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
1577 }
1578 return (void*)(value + 1);
1579 }
1580
1581 START_TEST(test_tls)
1582 {
1583 thread_t *threads[THREADS];
1584 int i;
1585
1586 for (i = 0; i < countof(tls); i++)
1587 {
1588 tls[i] = thread_value_create(NULL);
1589 }
1590 for (i = 0; i < THREADS; i++)
1591 {
1592 threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
1593 }
1594
1595 ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
1596 THREADS + 2);
1597
1598 for (i = 0; i < THREADS; i++)
1599 {
1600 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
1601 }
1602 for (i = 0; i < countof(tls); i++)
1603 {
1604 tls[i]->destroy(tls[i]);
1605 }
1606 }
1607 END_TEST
1608
1609 static void tls_cleanup(void *data)
1610 {
1611 uintptr_t *value = (uintptr_t*)data;
1612
1613 (*value)--;
1614 }
1615
1616 static void *tls_cleanup_run(void *data)
1617 {
1618 int i;
1619
1620 for (i = 0; i < countof(tls); i++)
1621 {
1622 tls[i]->set(tls[i], data);
1623 }
1624 return NULL;
1625 }
1626
1627 START_TEST(test_tls_cleanup)
1628 {
1629 thread_t *threads[THREADS];
1630 uintptr_t values[THREADS], main_value = countof(tls);
1631 int i;
1632
1633 for (i = 0; i < countof(tls); i++)
1634 {
1635 tls[i] = thread_value_create(tls_cleanup);
1636 }
1637 for (i = 0; i < THREADS; i++)
1638 {
1639 values[i] = countof(tls);
1640 threads[i] = thread_create(tls_cleanup_run, &values[i]);
1641 }
1642
1643 tls_cleanup_run(&main_value);
1644
1645 for (i = 0; i < THREADS; i++)
1646 {
1647 threads[i]->join(threads[i]);
1648 ck_assert_int_eq(values[i], 0);
1649 }
1650 for (i = 0; i < countof(tls); i++)
1651 {
1652 tls[i]->destroy(tls[i]);
1653 }
1654 ck_assert_int_eq(main_value, 0);
1655 }
1656 END_TEST
1657
1658 Suite *threading_suite_create()
1659 {
1660 Suite *s;
1661 TCase *tc;
1662
1663 s = suite_create("threading");
1664
1665 tc = tcase_create("recursive mutex");
1666 tcase_add_test(tc, test_mutex);
1667 suite_add_tcase(s, tc);
1668
1669 tc = tcase_create("spinlock");
1670 tcase_add_test(tc, test_spinlock);
1671 suite_add_tcase(s, tc);
1672
1673 tc = tcase_create("condvar");
1674 tcase_add_test(tc, test_condvar);
1675 tcase_add_test(tc, test_condvar_recursive);
1676 tcase_add_test(tc, test_condvar_broad);
1677 tcase_add_test(tc, test_condvar_timed);
1678 tcase_add_test(tc, test_condvar_timed_abs);
1679 tcase_add_test(tc, test_condvar_cancel);
1680 suite_add_tcase(s, tc);
1681
1682 tc = tcase_create("rwlock");
1683 tcase_add_test(tc, test_rwlock);
1684 tcase_add_test(tc, test_rwlock_try);
1685 suite_add_tcase(s, tc);
1686
1687 tc = tcase_create("rwlock condvar");
1688 tcase_add_test(tc, test_rwlock_condvar);
1689 tcase_add_test(tc, test_rwlock_condvar_broad);
1690 tcase_add_test(tc, test_rwlock_condvar_timed);
1691 tcase_add_test(tc, test_rwlock_condvar_timed_abs);
1692 tcase_add_test(tc, test_rwlock_condvar_cancel);
1693 suite_add_tcase(s, tc);
1694
1695 tc = tcase_create("semaphore");
1696 tcase_add_test(tc, test_semaphore);
1697 tcase_add_test(tc, test_semaphore_timed);
1698 tcase_add_test(tc, test_semaphore_timed_abs);
1699 tcase_add_test(tc, test_semaphore_cancel);
1700 suite_add_tcase(s, tc);
1701
1702 tc = tcase_create("thread joining");
1703 tcase_add_test(tc, test_join);
1704 tcase_add_test(tc, test_join_exit);
1705 suite_add_tcase(s, tc);
1706
1707 tc = tcase_create("thread detaching");
1708 tcase_add_test(tc, test_detach);
1709 tcase_add_test(tc, test_detach_exit);
1710 suite_add_tcase(s, tc);
1711
1712 tc = tcase_create("thread cancellation");
1713 tcase_add_test(tc, test_cancel);
1714 tcase_add_test(tc, test_cancel_onoff);
1715 tcase_add_test(tc, test_cancel_point);
1716 suite_add_tcase(s, tc);
1717
1718 tc = tcase_create("thread cancellation point");
1719 tcase_add_loop_test(tc, test_cancellation_point,
1720 0, countof(cancellation_points));
1721 tcase_add_loop_test(tc, test_cancellation_point_pre,
1722 0, countof(cancellation_points));
1723 suite_add_tcase(s, tc);
1724
1725 tc = tcase_create("thread cleanup");
1726 tcase_add_test(tc, test_cleanup);
1727 tcase_add_test(tc, test_cleanup_exit);
1728 tcase_add_test(tc, test_cleanup_cancel);
1729 tcase_add_test(tc, test_cleanup_pop);
1730 tcase_add_test(tc, test_cleanup_popall);
1731 suite_add_tcase(s, tc);
1732
1733 tc = tcase_create("thread local storage");
1734 tcase_add_test(tc, test_tls);
1735 tcase_add_test(tc, test_tls_cleanup);
1736 suite_add_tcase(s, tc);
1737
1738 return s;
1739 }