37b2bd05dacffd463bfb8e0035fcbd43842bd43e
[strongswan.git] / src / libstrongswan / tests / suites / test_threading.c
1 /*
2 * Copyright (C) 2013 Tobias Brunner
3 * Copyright (C) 2008 Martin Willi
4 * Hochschule fuer Technik Rapperswil
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16
17 #include "test_suite.h"
18
19 #include <sched.h>
20 #include <unistd.h>
21
22 #include <threading/thread.h>
23 #include <threading/mutex.h>
24 #include <threading/condvar.h>
25 #include <threading/rwlock.h>
26 #include <threading/thread_value.h>
27
28 /*******************************************************************************
29 * recursive mutex test
30 */
31
32 #define THREADS 20
33
34 /**
35 * Thread barrier data
36 */
37 typedef struct {
38 mutex_t *mutex;
39 condvar_t *cond;
40 int count;
41 int current;
42 bool active;
43 } barrier_t;
44
45 /**
46 * Create a thread barrier for count threads
47 */
48 static barrier_t* barrier_create(int count)
49 {
50 barrier_t *this;
51
52 INIT(this,
53 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
54 .cond = condvar_create(CONDVAR_TYPE_DEFAULT),
55 .count = count,
56 );
57
58 return this;
59 }
60
61 /**
62 * Destroy a thread barrier
63 */
64 static void barrier_destroy(barrier_t *this)
65 {
66 this->mutex->destroy(this->mutex);
67 this->cond->destroy(this->cond);
68 free(this);
69 }
70
71 /**
72 * Wait to have configured number of threads in barrier
73 */
74 static bool barrier_wait(barrier_t *this)
75 {
76 bool winner = FALSE;
77
78 this->mutex->lock(this->mutex);
79 if (!this->active)
80 { /* first, reset */
81 this->active = TRUE;
82 this->current = 0;
83 }
84
85 this->current++;
86 while (this->current < this->count)
87 {
88 this->cond->wait(this->cond, this->mutex);
89 }
90 if (this->active)
91 { /* first, win */
92 winner = TRUE;
93 this->active = FALSE;
94 }
95 this->mutex->unlock(this->mutex);
96 this->cond->broadcast(this->cond);
97 sched_yield();
98
99 return winner;
100 }
101
102 /**
103 * Barrier for some tests
104 */
105 static barrier_t *barrier;
106
107 /**
108 * A mutex for tests requiring one
109 */
110 static mutex_t *mutex;
111
112 /**
113 * A condvar for tests requiring one
114 */
115 static condvar_t *condvar;
116
117 /**
118 * A counter for signaling
119 */
120 static int sigcount;
121
122 static void *mutex_run(void *data)
123 {
124 int locked = 0;
125 int i;
126
127 /* wait for all threads before getting in action */
128 barrier_wait(barrier);
129
130 for (i = 0; i < 100; i++)
131 {
132 mutex->lock(mutex);
133 mutex->lock(mutex);
134 mutex->lock(mutex);
135 locked++;
136 sched_yield();
137 if (locked > 1)
138 {
139 fail("two threads locked the mutex concurrently");
140 }
141 locked--;
142 mutex->unlock(mutex);
143 mutex->unlock(mutex);
144 mutex->unlock(mutex);
145 }
146 return NULL;
147 }
148
START_TEST(test_mutex)
{
	thread_t *threads[THREADS];
	int i;

	barrier = barrier_create(THREADS);
	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);

	/* simple lock/unlock pairs from a single thread */
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
		mutex->unlock(mutex);
	}
	/* recursive locking: acquire 10 times, then release 10 times */
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->unlock(mutex);
	}

	/* let THREADS workers contend for the recursive mutex */
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(mutex_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	barrier_destroy(barrier);
}
END_TEST
184
/**
 * Increment the shared signal counter and wake the waiting main thread
 */
static void *condvar_run(void *data)
{
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	return NULL;
}
193
START_TEST(test_condvar)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run, NULL);
	}

	/* wait until every worker has incremented sigcount and signaled */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
224
/**
 * Recursively lock the shared mutex before signaling the main thread
 */
static void *condvar_recursive_run(void *data)
{
	mutex->lock(mutex);
	mutex->lock(mutex);
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	return NULL;
}
237
START_TEST(test_condvar_recursive)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	/* hold the mutex while spawning the workers */
	mutex->lock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_recursive_run, NULL);
	}

	/* lock recursively (three times in total); wait() is expected to
	 * release the recursively held mutex so the workers can acquire it
	 * (NOTE(review): assumes the condvar implementation handles recursive
	 * mutexes — confirm against condvar_t) */
	mutex->lock(mutex);
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
273
274 static void *condvar_run_broad(void *data)
275 {
276 mutex->lock(mutex);
277 while (sigcount < 0)
278 {
279 condvar->wait(condvar, mutex);
280 }
281 mutex->unlock(mutex);
282 return NULL;
283 }
284
START_TEST(test_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run_broad, NULL);
	}

	/* give the workers a chance to start blocking on the condvar */
	sched_yield();

	/* release all waiters with a single broadcast */
	mutex->lock(mutex);
	sigcount = 1;
	condvar->broadcast(condvar);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
315
316 START_TEST(test_condvar_timed)
317 {
318 thread_t *thread;
319 timeval_t start, end, diff = { .tv_usec = 50000 };
320
321 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
322 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
323 sigcount = 0;
324
325 mutex->lock(mutex);
326 while (TRUE)
327 {
328 time_monotonic(&start);
329 if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
330 {
331 break;
332 }
333 }
334 time_monotonic(&end);
335 mutex->unlock(mutex);
336 timersub(&end, &start, &end);
337 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
338 end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);
339
340 thread = thread_create(condvar_run, NULL);
341
342 mutex->lock(mutex);
343 while (sigcount == 0)
344 {
345 ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
346 }
347 mutex->unlock(mutex);
348
349 thread->join(thread);
350 mutex->destroy(mutex);
351 condvar->destroy(condvar);
352 }
353 END_TEST
354
355 START_TEST(test_condvar_timed_abs)
356 {
357 thread_t *thread;
358 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
359
360 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
361 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
362 sigcount = 0;
363
364 mutex->lock(mutex);
365 while (TRUE)
366 {
367 time_monotonic(&start);
368 timeradd(&start, &diff, &abso);
369 if (condvar->timed_wait_abs(condvar, mutex, abso))
370 {
371 break;
372 }
373 }
374 time_monotonic(&end);
375 mutex->unlock(mutex);
376 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
377 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
378
379 thread = thread_create(condvar_run, NULL);
380
381 time_monotonic(&start);
382 diff.tv_sec = 1;
383 timeradd(&start, &diff, &abso);
384 mutex->lock(mutex);
385 while (sigcount == 0)
386 {
387 ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
388 }
389 mutex->unlock(mutex);
390
391 thread->join(thread);
392 mutex->destroy(mutex);
393 condvar->destroy(condvar);
394 }
395 END_TEST
396
/**
 * Signal the main thread, then block on the condvar until cancelled
 */
static void *condvar_cancel_run(void *data)
{
	/* keep cancellation disabled until the cleanup handler is installed */
	thread_cancelability(FALSE);

	mutex->lock(mutex);

	sigcount++;
	condvar->broadcast(condvar);

	/* release the mutex when this thread gets cancelled inside wait() */
	thread_cleanup_push((void*)mutex->unlock, mutex);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		condvar->wait(condvar, mutex);
	}
	/* never reached, but balances the push above */
	thread_cleanup_pop(TRUE);

	return NULL;
}
416
START_TEST(test_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	/* all workers now block in wait(); cancellation must wake them and
	 * run the cleanup handler that releases the mutex */
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
452
453 /**
454 * RWlock for different tests
455 */
456 static rwlock_t *rwlock;
457
/**
 * Exercise shared and exclusive locking; refs counts the active readers
 */
static void *rwlock_run(refcount_t *refs)
{
	/* multiple readers may hold the lock concurrently */
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);

	if (rwlock->try_write_lock(rwlock))
	{
		/* with the write lock held, no reader may be active */
		ck_assert_int_eq(*refs, 0);
		sched_yield();
		rwlock->unlock(rwlock);
	}

	rwlock->write_lock(rwlock);
	ck_assert_int_eq(*refs, 0);
	sched_yield();
	rwlock->unlock(rwlock);

	/* read locks may be acquired recursively */
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	return NULL;
}
488
START_TEST(test_rwlock)
{
	thread_t *threads[THREADS];
	refcount_t refs = 0;
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	/* the assertions happen inside rwlock_run() */
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create((void*)rwlock_run, &refs);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
}
END_TEST
509
510 static void *join_run(void *data)
511 {
512 /* force some context switches */
513 sched_yield();
514 return (void*)((uintptr_t)data + THREADS);
515 }
516
517 START_TEST(test_join)
518 {
519 thread_t *threads[THREADS];
520 int i;
521
522 for (i = 0; i < THREADS; i++)
523 {
524 threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
525 }
526 for (i = 0; i < THREADS; i++)
527 {
528 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
529 }
530 }
531 END_TEST
532
/**
 * Like join_run(), but terminate via thread_exit() instead of returning
 */
static void *exit_join_run(void *data)
{
	sched_yield();
	thread_exit((void*)((uintptr_t)data + THREADS));
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
541
START_TEST(test_join_exit)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* the value passed to thread_exit() must be returned by join() */
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST
557
558 static void *detach_run(void *data)
559 {
560 refcount_t *running = (refcount_t*)data;
561
562 ignore_result(ref_put(running));
563 return NULL;
564 }
565
START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	/* busy-wait until every detached thread announced termination */
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
589
/**
 * Like detach_run(), but terminate via thread_exit() instead of returning
 */
static void *detach_exit_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
600
START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	/* busy-wait until every detached thread announced termination */
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
624
/**
 * Sleep forever until cancelled, relying on the default cancellability
 */
static void *cancel_run(void *data)
{
	/* default cancellability should be TRUE, so don't change it */
	for (;;)
	{
		sleep(10);
	}
	return NULL;
}
634
635 START_TEST(test_cancel)
636 {
637 thread_t *threads[THREADS];
638 int i;
639
640 for (i = 0; i < THREADS; i++)
641 {
642 threads[i] = thread_create(cancel_run, NULL);
643 }
644 for (i = 0; i < THREADS; i++)
645 {
646 threads[i]->cancel(threads[i]);
647 }
648 for (i = 0; i < THREADS; i++)
649 {
650 threads[i]->join(threads[i]);
651 }
652 }
653 END_TEST
654
/**
 * Toggle cancellability; *cancellable mirrors the thread's current state
 */
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	thread_cancelability(FALSE);
	/* only clear the flag after cancellability is actually disabled */
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* but here */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}
675
START_TEST(test_cancel_onoff)
{
	thread_t *threads[THREADS];
	bool cancellable[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		cancellable[i] = TRUE;
		threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* wait until thread has cleared its cancellability */
		while (cancellable[i])
		{
			sched_yield();
		}
		/* the cancel request must be deferred until the worker re-enables
		 * cancellability */
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* the worker sets the flag before re-enabling cancellability, so
		 * it must not have been cancelled during usleep() */
		ck_assert(cancellable[i]);
	}
}
END_TEST
703
/**
 * Spin on explicit cancellation points with cancellability disabled
 */
static void *cancel_point_run(void *data)
{
	thread_cancelability(FALSE);
	while (TRUE)
	{
		/* implicitly enables cancellability */
		thread_cancellation_point();
	}
	return NULL;
}
714
START_TEST(test_cancel_point)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_point_run, NULL);
	}
	sched_yield();
	/* workers are cancellable only inside thread_cancellation_point() */
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
735
/**
 * First cleanup handler to run: expects the counter at 1, bumps it to 2
 */
static void cleanup1(void *data)
{
	uintptr_t *counter = data;

	ck_assert_int_eq(*counter, 1);
	++*counter;
}
743
/**
 * Second cleanup handler to run: expects the counter at 2, bumps it to 3
 */
static void cleanup2(void *data)
{
	uintptr_t *counter = data;

	ck_assert_int_eq(*counter, 2);
	++*counter;
}
751
/**
 * Third cleanup handler to run: expects the counter at 3, bumps it to 4
 */
static void cleanup3(void *data)
{
	uintptr_t *counter = data;

	ck_assert_int_eq(*counter, 3);
	++*counter;
}
759
/**
 * Install the three cleanup handlers; on return they run in reverse
 * push order: cleanup1, cleanup2, cleanup3
 */
static void *cleanup_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	return NULL;
}
767
START_TEST(test_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* cleanup1/2/3 each incremented the value once, in order */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
786
/**
 * Like cleanup_run(), but terminate via thread_exit()
 */
static void *cleanup_exit_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
796
START_TEST(test_cleanup_exit)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_exit_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* handlers must also run when the thread calls thread_exit() */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
815
/**
 * Install cleanup handlers, then sleep until cancelled
 */
static void *cleanup_cancel_run(void *data)
{
	/* don't allow cancellation until all handlers are installed */
	thread_cancelability(FALSE);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	while (TRUE)
	{
		sleep(1);
	}
	return NULL;
}
832
START_TEST(test_cleanup_cancel)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_cancel_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* handlers must also run when the thread gets cancelled */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
855
/**
 * Push cleanup handlers and pop explicitly: one discarded, one executed
 */
static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	/* pushed and popped again without executing */
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	/* pop and run cleanup1 now; cleanup2/cleanup3 run on termination */
	thread_cleanup_pop(TRUE);
	return NULL;
}
868
START_TEST(test_cleanup_pop)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_pop_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* the extra cleanup2 push was popped without running, so each of
		 * the three handlers incremented the value exactly once */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
887
/* thread specific value slots shared by the TLS tests */
static thread_value_t *tls[10];
889
/**
 * Store and verify thread specific values in all slots; the argument
 * seeds the per-thread pattern, argument + 1 is returned
 */
static void *tls_run(void *data)
{
	uintptr_t value = (uintptr_t)data;
	int i, j;

	/* all slots must start out unset for this thread */
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == NULL);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], (void*)(value * i));
	}
	/* repeatedly overwrite and read back while other threads do the same */
	for (j = 0; j < 1000; j++)
	{
		for (i = 0; i < countof(tls); i++)
		{
			tls[i]->set(tls[i], (void*)(value * i));
			ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
		}
		sched_yield();
	}
	/* values must survive the context switches above */
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
	}
	return (void*)(value + 1);
}
918
START_TEST(test_tls)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
	}

	/* the main thread has its own independent slots, too */
	ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
					 THREADS + 2);

	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
}
END_TEST
946
/**
 * TLS destructor: decrement the per-thread counter to record one call
 */
static void tls_cleanup(void *data)
{
	--*(uintptr_t*)data;
}
953
/**
 * Point every TLS slot at the thread's counter, so tls_cleanup() runs
 * once per slot when the thread terminates
 */
static void *tls_cleanup_run(void *data)
{
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], data);
	}
	return NULL;
}
964
START_TEST(test_tls_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS], main_value = countof(tls);
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(tls_cleanup);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* the destructor decrements once per slot, down to zero */
		values[i] = countof(tls);
		threads[i] = thread_create(tls_cleanup_run, &values[i]);
	}

	tls_cleanup_run(&main_value);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 0);
	}
	/* destroying the value objects runs the cleanup for the main thread */
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
	ck_assert_int_eq(main_value, 0);
}
END_TEST
995
/**
 * Create the test suite covering mutexes, condvars, rwlocks, thread
 * lifecycle (join/detach), cancellation, cleanup handlers and TLS.
 */
Suite *threading_suite_create()
{
	Suite *s;
	TCase *tc;

	s = suite_create("threading");

	tc = tcase_create("recursive mutex");
	tcase_add_test(tc, test_mutex);
	suite_add_tcase(s, tc);

	tc = tcase_create("condvar");
	tcase_add_test(tc, test_condvar);
	tcase_add_test(tc, test_condvar_recursive);
	tcase_add_test(tc, test_condvar_broad);
	tcase_add_test(tc, test_condvar_timed);
	tcase_add_test(tc, test_condvar_timed_abs);
	tcase_add_test(tc, test_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock");
	tcase_add_test(tc, test_rwlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread joining");
	tcase_add_test(tc, test_join);
	tcase_add_test(tc, test_join_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread detaching");
	tcase_add_test(tc, test_detach);
	tcase_add_test(tc, test_detach_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation");
	tcase_add_test(tc, test_cancel);
	tcase_add_test(tc, test_cancel_onoff);
	tcase_add_test(tc, test_cancel_point);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cleanup");
	tcase_add_test(tc, test_cleanup);
	tcase_add_test(tc, test_cleanup_exit);
	tcase_add_test(tc, test_cleanup_cancel);
	tcase_add_test(tc, test_cleanup_pop);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread local storage");
	tcase_add_test(tc, test_tls);
	tcase_add_test(tc, test_tls_cleanup);
	suite_add_tcase(s, tc);

	return s;
}