unit-tests: Add a rwlock condvar test
[strongswan.git] / src/libstrongswan/tests/suites/test_threading.c
/*
 * Copyright (C) 2013 Tobias Brunner
 * Copyright (C) 2008 Martin Willi
 * Hochschule fuer Technik Rapperswil
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include "test_suite.h"

#include <sched.h>
#include <unistd.h>

#include <threading/thread.h>
#include <threading/mutex.h>
#include <threading/condvar.h>
#include <threading/rwlock.h>
#include <threading/rwlock_condvar.h>
#include <threading/thread_value.h>

/*******************************************************************************
 * recursive mutex test
 */

#define THREADS 20

/**
 * Thread barrier data
 */
typedef struct {
	mutex_t *mutex;
	condvar_t *cond;
	int count;
	int current;
	bool active;
} barrier_t;

/**
 * Create a thread barrier for count threads
 */
static barrier_t* barrier_create(int count)
{
	barrier_t *this;

	INIT(this,
		.mutex = mutex_create(MUTEX_TYPE_DEFAULT),
		.cond = condvar_create(CONDVAR_TYPE_DEFAULT),
		.count = count,
	);

	return this;
}

/**
 * Destroy a thread barrier
 */
static void barrier_destroy(barrier_t *this)
{
	this->mutex->destroy(this->mutex);
	this->cond->destroy(this->cond);
	free(this);
}

/**
 * Wait to have configured number of threads in barrier
 */
static bool barrier_wait(barrier_t *this)
{
	bool winner = FALSE;

	this->mutex->lock(this->mutex);
	if (!this->active)
	{	/* first, reset */
		this->active = TRUE;
		this->current = 0;
	}

	this->current++;
	while (this->current < this->count)
	{
		this->cond->wait(this->cond, this->mutex);
	}
	if (this->active)
	{	/* first, win */
		winner = TRUE;
		this->active = FALSE;
	}
	this->mutex->unlock(this->mutex);
	this->cond->broadcast(this->cond);
	sched_yield();

	return winner;
}

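/*
 * Typical usage, as in mutex_run() below: each of the THREADS workers calls
 * barrier_wait(barrier) once and continues only after all of them arrived;
 * exactly one caller per round gets TRUE back (unused by the tests here).
 */
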
/**
 * Barrier for some tests
 */
static barrier_t *barrier;

/**
 * A mutex for tests requiring one
 */
static mutex_t *mutex;

/**
 * A condvar for tests requiring one
 */
static condvar_t *condvar;

/**
 * A counter for signaling
 */
static int sigcount;

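/**
 * Recursively lock the mutex and verify that no two threads ever hold it at
 * the same time
 */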
static void *mutex_run(void *data)
{
	int locked = 0;
	int i;

	/* wait for all threads before getting in action */
	barrier_wait(barrier);

	for (i = 0; i < 100; i++)
	{
		mutex->lock(mutex);
		mutex->lock(mutex);
		mutex->lock(mutex);
		locked++;
		sched_yield();
		if (locked > 1)
		{
			fail("two threads locked the mutex concurrently");
		}
		locked--;
		mutex->unlock(mutex);
		mutex->unlock(mutex);
		mutex->unlock(mutex);
	}
	return NULL;
}

START_TEST(test_mutex)
{
	thread_t *threads[THREADS];
	int i;

	barrier = barrier_create(THREADS);
	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);

	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
		mutex->unlock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->unlock(mutex);
	}

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(mutex_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	barrier_destroy(barrier);
}
END_TEST

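/**
 * Increment the signal counter under the mutex and signal the condvar once
 */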
static void *condvar_run(void *data)
{
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	return NULL;
}

START_TEST(test_condvar)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run, NULL);
	}

	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST

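/**
 * Same as condvar_run(), but with the recursive mutex locked multiple times
 */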
static void *condvar_recursive_run(void *data)
{
	mutex->lock(mutex);
	mutex->lock(mutex);
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	return NULL;
}

START_TEST(test_condvar_recursive)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	mutex->lock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_recursive_run, NULL);
	}

	mutex->lock(mutex);
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST

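/**
 * Wait on the condvar until the main thread broadcasts
 */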
static void *condvar_run_broad(void *data)
{
	mutex->lock(mutex);
	while (sigcount < 1)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	return NULL;
}

START_TEST(test_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run_broad, NULL);
	}

	sched_yield();

	mutex->lock(mutex);
	sigcount = 1;
	condvar->broadcast(condvar);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST

START_TEST(test_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	mutex->lock(mutex);
	while (TRUE)
	{
		time_monotonic(&start);
		if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
		{
			break;
		}
	}
	time_monotonic(&end);
	mutex->unlock(mutex);
	timersub(&end, &start, &end);
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(condvar_run, NULL);

	mutex->lock(mutex);
	while (sigcount == 0)
	{
		ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
	}
	mutex->unlock(mutex);

	thread->join(thread);
	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST

START_TEST(test_condvar_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	mutex->lock(mutex);
	while (TRUE)
	{
		time_monotonic(&start);
		timeradd(&start, &diff, &abso);
		if (condvar->timed_wait_abs(condvar, mutex, abso))
		{
			break;
		}
	}
	time_monotonic(&end);
	mutex->unlock(mutex);
	ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
				  end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);

	thread = thread_create(condvar_run, NULL);

	time_monotonic(&start);
	diff.tv_sec = 1;
	timeradd(&start, &diff, &abso);
	mutex->lock(mutex);
	while (sigcount == 0)
	{
		ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
	}
	mutex->unlock(mutex);

	thread->join(thread);
	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST

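/**
 * Announce that this thread is up, then wait on the condvar until cancelled,
 * releasing the mutex through a cleanup handler
 */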
static void *condvar_cancel_run(void *data)
{
	thread_cancelability(FALSE);

	mutex->lock(mutex);

	sigcount++;
	condvar->broadcast(condvar);

	thread_cleanup_push((void*)mutex->unlock, mutex);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		condvar->wait(condvar, mutex);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}

START_TEST(test_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST

/**
 * RWlock for different tests
 */
static rwlock_t *rwlock;

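/**
 * Exercise read, try-write and write locking; the reference counter tracks
 * concurrent readers and must be zero whenever a write lock is held
 */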
static void *rwlock_run(refcount_t *refs)
{
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);

	if (rwlock->try_write_lock(rwlock))
	{
		ck_assert_int_eq(*refs, 0);
		sched_yield();
		rwlock->unlock(rwlock);
	}

	rwlock->write_lock(rwlock);
	ck_assert_int_eq(*refs, 0);
	sched_yield();
	rwlock->unlock(rwlock);

	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	return NULL;
}

START_TEST(test_rwlock)
{
	thread_t *threads[THREADS];
	refcount_t refs = 0;
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create((void*)rwlock_run, &refs);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
}
END_TEST

/**
 * Rwlock condvar
 */
static rwlock_condvar_t *rwcond;

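/**
 * Increment the signal counter under the write lock and signal the rwlock
 * condvar once
 */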
static void *rwlock_condvar_run(void *data)
{
	rwlock->write_lock(rwlock);
	sigcount++;
	rwcond->signal(rwcond);
	rwlock->unlock(rwlock);
	return NULL;
}

START_TEST(test_rwlock_condvar)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run, NULL);
	}

	rwlock->write_lock(rwlock);
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST

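/**
 * Return a value derived from the argument so the joining thread can check it
 */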
static void *join_run(void *data)
{
	/* force some context switches */
	sched_yield();
	return (void*)((uintptr_t)data + THREADS);
}

START_TEST(test_join)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST

static void *exit_join_run(void *data)
{
	sched_yield();
	thread_exit((void*)((uintptr_t)data + THREADS));
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_join_exit)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST

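/**
 * Decrement the running counter so the main thread sees that this detached
 * thread has finished
 */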
static void *detach_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	return NULL;
}

START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST

static void *detach_exit_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST

static void *cancel_run(void *data)
{
	/* default cancellability should be TRUE, so don't change it */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}

START_TEST(test_cancel)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST

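/**
 * Disable cancellation while sleeping, then enable it again and block until
 * the main thread cancels us
 */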
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	thread_cancelability(FALSE);
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* but here */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}

START_TEST(test_cancel_onoff)
{
	thread_t *threads[THREADS];
	bool cancellable[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		cancellable[i] = TRUE;
		threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* wait until thread has cleared its cancellability */
		while (cancellable[i])
		{
			sched_yield();
		}
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert(cancellable[i]);
	}
}
END_TEST

static void *cancel_point_run(void *data)
{
	thread_cancelability(FALSE);
	while (TRUE)
	{
		/* implicitly enables cancellability */
		thread_cancellation_point();
	}
	return NULL;
}

START_TEST(test_cancel_point)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_point_run, NULL);
	}
	sched_yield();
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST

static void cleanup1(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 1);
	(*value)++;
}

static void cleanup2(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 2);
	(*value)++;
}

static void cleanup3(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 3);
	(*value)++;
}

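/**
 * Push three cleanup handlers that must run in reverse order when the thread
 * returns
 */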
static void *cleanup_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	return NULL;
}

START_TEST(test_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

static void *cleanup_exit_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	thread_exit(NULL);
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_cleanup_exit)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_exit_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

static void *cleanup_cancel_run(void *data)
{
	thread_cancelability(FALSE);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	while (TRUE)
	{
		sleep(1);
	}
	return NULL;
}

START_TEST(test_cleanup_cancel)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_cancel_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	thread_cleanup_pop(TRUE);
	return NULL;
}

START_TEST(test_cleanup_pop)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_pop_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

static thread_value_t *tls[10];

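/**
 * Store thread-specific values in all TLS slots and verify that reads return
 * what this thread last wrote
 */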
static void *tls_run(void *data)
{
	uintptr_t value = (uintptr_t)data;
	int i, j;

	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == NULL);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], (void*)(value * i));
	}
	for (j = 0; j < 1000; j++)
	{
		for (i = 0; i < countof(tls); i++)
		{
			tls[i]->set(tls[i], (void*)(value * i));
			ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
		}
		sched_yield();
	}
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
	}
	return (void*)(value + 1);
}

START_TEST(test_tls)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
	}

	ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
					 THREADS + 2);

	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
}
END_TEST

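/**
 * TLS cleanup handler: decrement the counter passed as value
 */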
static void tls_cleanup(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	(*value)--;
}

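/**
 * Point all TLS slots at the passed counter so each registered cleanup
 * handler decrements it when the thread terminates
 */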
static void *tls_cleanup_run(void *data)
{
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], data);
	}
	return NULL;
}

START_TEST(test_tls_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS], main_value = countof(tls);
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(tls_cleanup);
	}
	for (i = 0; i < THREADS; i++)
	{
		values[i] = countof(tls);
		threads[i] = thread_create(tls_cleanup_run, &values[i]);
	}

	tls_cleanup_run(&main_value);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 0);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
	ck_assert_int_eq(main_value, 0);
}
END_TEST

Suite *threading_suite_create()
{
	Suite *s;
	TCase *tc;

	s = suite_create("threading");

	tc = tcase_create("recursive mutex");
	tcase_add_test(tc, test_mutex);
	suite_add_tcase(s, tc);

	tc = tcase_create("condvar");
	tcase_add_test(tc, test_condvar);
	tcase_add_test(tc, test_condvar_recursive);
	tcase_add_test(tc, test_condvar_broad);
	tcase_add_test(tc, test_condvar_timed);
	tcase_add_test(tc, test_condvar_timed_abs);
	tcase_add_test(tc, test_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock");
	tcase_add_test(tc, test_rwlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock condvar");
	tcase_add_test(tc, test_rwlock_condvar);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread joining");
	tcase_add_test(tc, test_join);
	tcase_add_test(tc, test_join_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread detaching");
	tcase_add_test(tc, test_detach);
	tcase_add_test(tc, test_detach_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation");
	tcase_add_test(tc, test_cancel);
	tcase_add_test(tc, test_cancel_onoff);
	tcase_add_test(tc, test_cancel_point);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cleanup");
	tcase_add_test(tc, test_cleanup);
	tcase_add_test(tc, test_cleanup_exit);
	tcase_add_test(tc, test_cleanup_cancel);
	tcase_add_test(tc, test_cleanup_pop);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread local storage");
	tcase_add_test(tc, test_tls);
	tcase_add_test(tc, test_tls_cleanup);
	suite_add_tcase(s, tc);

	return s;
}