unit-tests: Add a simple semaphore test
[strongswan.git] / src / libstrongswan / tests / suites / test_threading.c
1 /*
2 * Copyright (C) 2013 Tobias Brunner
3 * Copyright (C) 2008 Martin Willi
4 * Hochschule fuer Technik Rapperswil
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16
17 #include "test_suite.h"
18
19 #include <sched.h>
20 #include <unistd.h>
21
22 #include <threading/thread.h>
23 #include <threading/mutex.h>
24 #include <threading/condvar.h>
25 #include <threading/rwlock.h>
26 #include <threading/rwlock_condvar.h>
27 #include <threading/spinlock.h>
28 #include <threading/semaphore.h>
29 #include <threading/thread_value.h>
30
/*******************************************************************************
 * recursive mutex test
 */

/* number of worker threads spawned by each of the tests below */
#define THREADS 20
36
37 /**
38 * Thread barrier data
39 */
40 typedef struct {
41 mutex_t *mutex;
42 condvar_t *cond;
43 int count;
44 int current;
45 bool active;
46 } barrier_t;
47
48 /**
49 * Create a thread barrier for count threads
50 */
51 static barrier_t* barrier_create(int count)
52 {
53 barrier_t *this;
54
55 INIT(this,
56 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
57 .cond = condvar_create(CONDVAR_TYPE_DEFAULT),
58 .count = count,
59 );
60
61 return this;
62 }
63
64 /**
65 * Destroy a thread barrier
66 */
67 static void barrier_destroy(barrier_t *this)
68 {
69 this->mutex->destroy(this->mutex);
70 this->cond->destroy(this->cond);
71 free(this);
72 }
73
74 /**
75 * Wait to have configured number of threads in barrier
76 */
77 static bool barrier_wait(barrier_t *this)
78 {
79 bool winner = FALSE;
80
81 this->mutex->lock(this->mutex);
82 if (!this->active)
83 { /* first, reset */
84 this->active = TRUE;
85 this->current = 0;
86 }
87
88 this->current++;
89 while (this->current < this->count)
90 {
91 this->cond->wait(this->cond, this->mutex);
92 }
93 if (this->active)
94 { /* first, win */
95 winner = TRUE;
96 this->active = FALSE;
97 }
98 this->mutex->unlock(this->mutex);
99 this->cond->broadcast(this->cond);
100 sched_yield();
101
102 return winner;
103 }
104
105 /**
106 * Barrier for some tests
107 */
108 static barrier_t *barrier;
109
110 /**
111 * A mutex for tests requiring one
112 */
113 static mutex_t *mutex;
114
115 /**
116 * A condvar for tests requiring one
117 */
118 static condvar_t *condvar;
119
120 /**
121 * A counter for signaling
122 */
123 static int sigcount;
124
125 static void *mutex_run(void *data)
126 {
127 int locked = 0;
128 int i;
129
130 /* wait for all threads before getting in action */
131 barrier_wait(barrier);
132
133 for (i = 0; i < 100; i++)
134 {
135 mutex->lock(mutex);
136 mutex->lock(mutex);
137 mutex->lock(mutex);
138 locked++;
139 sched_yield();
140 if (locked > 1)
141 {
142 fail("two threads locked the mutex concurrently");
143 }
144 locked--;
145 mutex->unlock(mutex);
146 mutex->unlock(mutex);
147 mutex->unlock(mutex);
148 }
149 return NULL;
150 }
151
152 START_TEST(test_mutex)
153 {
154 thread_t *threads[THREADS];
155 int i;
156
157 barrier = barrier_create(THREADS);
158 mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
159
160 for (i = 0; i < 10; i++)
161 {
162 mutex->lock(mutex);
163 mutex->unlock(mutex);
164 }
165 for (i = 0; i < 10; i++)
166 {
167 mutex->lock(mutex);
168 }
169 for (i = 0; i < 10; i++)
170 {
171 mutex->unlock(mutex);
172 }
173
174 for (i = 0; i < THREADS; i++)
175 {
176 threads[i] = thread_create(mutex_run, NULL);
177 }
178 for (i = 0; i < THREADS; i++)
179 {
180 threads[i]->join(threads[i]);
181 }
182
183 mutex->destroy(mutex);
184 barrier_destroy(barrier);
185 }
186 END_TEST
187
188 /**
189 * Spinlock for testing
190 */
191 static spinlock_t *spinlock;
192
193 static void *spinlock_run(void *data)
194 {
195 int i, *locked = (int*)data;
196
197 barrier_wait(barrier);
198
199 for (i = 0; i < 1000; i++)
200 {
201 spinlock->lock(spinlock);
202 (*locked)++;
203 ck_assert_int_eq(*locked, 1);
204 (*locked)--;
205 spinlock->unlock(spinlock);
206 }
207 return NULL;
208 }
209
210 START_TEST(test_spinlock)
211 {
212 thread_t *threads[THREADS];
213 int i, locked = 0;
214
215 barrier = barrier_create(THREADS);
216 spinlock = spinlock_create();
217
218 for (i = 0; i < THREADS; i++)
219 {
220 threads[i] = thread_create(spinlock_run, &locked);
221 }
222 for (i = 0; i < THREADS; i++)
223 {
224 threads[i]->join(threads[i]);
225 }
226
227 spinlock->destroy(spinlock);
228 barrier_destroy(barrier);
229 }
230 END_TEST
231
232 static void *condvar_run(void *data)
233 {
234 mutex->lock(mutex);
235 sigcount++;
236 condvar->signal(condvar);
237 mutex->unlock(mutex);
238 return NULL;
239 }
240
241 START_TEST(test_condvar)
242 {
243 thread_t *threads[THREADS];
244 int i;
245
246 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
247 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
248 sigcount = 0;
249
250 for (i = 0; i < THREADS; i++)
251 {
252 threads[i] = thread_create(condvar_run, NULL);
253 }
254
255 mutex->lock(mutex);
256 while (sigcount < THREADS)
257 {
258 condvar->wait(condvar, mutex);
259 }
260 mutex->unlock(mutex);
261
262 for (i = 0; i < THREADS; i++)
263 {
264 threads[i]->join(threads[i]);
265 }
266
267 mutex->destroy(mutex);
268 condvar->destroy(condvar);
269 }
270 END_TEST
271
272 static void *condvar_recursive_run(void *data)
273 {
274 mutex->lock(mutex);
275 mutex->lock(mutex);
276 mutex->lock(mutex);
277 sigcount++;
278 condvar->signal(condvar);
279 mutex->unlock(mutex);
280 mutex->unlock(mutex);
281 mutex->unlock(mutex);
282 return NULL;
283 }
284
285 START_TEST(test_condvar_recursive)
286 {
287 thread_t *threads[THREADS];
288 int i;
289
290 mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
291 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
292 sigcount = 0;
293
294 mutex->lock(mutex);
295
296 for (i = 0; i < THREADS; i++)
297 {
298 threads[i] = thread_create(condvar_recursive_run, NULL);
299 }
300
301 mutex->lock(mutex);
302 mutex->lock(mutex);
303 while (sigcount < THREADS)
304 {
305 condvar->wait(condvar, mutex);
306 }
307 mutex->unlock(mutex);
308 mutex->unlock(mutex);
309 mutex->unlock(mutex);
310
311 for (i = 0; i < THREADS; i++)
312 {
313 threads[i]->join(threads[i]);
314 }
315
316 mutex->destroy(mutex);
317 condvar->destroy(condvar);
318 }
319 END_TEST
320
321 static void *condvar_run_broad(void *data)
322 {
323 mutex->lock(mutex);
324 while (sigcount < 0)
325 {
326 condvar->wait(condvar, mutex);
327 }
328 mutex->unlock(mutex);
329 return NULL;
330 }
331
332 START_TEST(test_condvar_broad)
333 {
334 thread_t *threads[THREADS];
335 int i;
336
337 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
338 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
339 sigcount = 0;
340
341 for (i = 0; i < THREADS; i++)
342 {
343 threads[i] = thread_create(condvar_run_broad, NULL);
344 }
345
346 sched_yield();
347
348 mutex->lock(mutex);
349 sigcount = 1;
350 condvar->broadcast(condvar);
351 mutex->unlock(mutex);
352
353 for (i = 0; i < THREADS; i++)
354 {
355 threads[i]->join(threads[i]);
356 }
357
358 mutex->destroy(mutex);
359 condvar->destroy(condvar);
360 }
361 END_TEST
362
363 START_TEST(test_condvar_timed)
364 {
365 thread_t *thread;
366 timeval_t start, end, diff = { .tv_usec = 50000 };
367
368 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
369 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
370 sigcount = 0;
371
372 mutex->lock(mutex);
373 while (TRUE)
374 {
375 time_monotonic(&start);
376 if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
377 {
378 break;
379 }
380 }
381 time_monotonic(&end);
382 mutex->unlock(mutex);
383 timersub(&end, &start, &end);
384 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
385 end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);
386
387 thread = thread_create(condvar_run, NULL);
388
389 mutex->lock(mutex);
390 while (sigcount == 0)
391 {
392 ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
393 }
394 mutex->unlock(mutex);
395
396 thread->join(thread);
397 mutex->destroy(mutex);
398 condvar->destroy(condvar);
399 }
400 END_TEST
401
402 START_TEST(test_condvar_timed_abs)
403 {
404 thread_t *thread;
405 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
406
407 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
408 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
409 sigcount = 0;
410
411 mutex->lock(mutex);
412 while (TRUE)
413 {
414 time_monotonic(&start);
415 timeradd(&start, &diff, &abso);
416 if (condvar->timed_wait_abs(condvar, mutex, abso))
417 {
418 break;
419 }
420 }
421 time_monotonic(&end);
422 mutex->unlock(mutex);
423 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
424 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
425
426 thread = thread_create(condvar_run, NULL);
427
428 time_monotonic(&start);
429 diff.tv_sec = 1;
430 timeradd(&start, &diff, &abso);
431 mutex->lock(mutex);
432 while (sigcount == 0)
433 {
434 ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
435 }
436 mutex->unlock(mutex);
437
438 thread->join(thread);
439 mutex->destroy(mutex);
440 condvar->destroy(condvar);
441 }
442 END_TEST
443
444 static void *condvar_cancel_run(void *data)
445 {
446 thread_cancelability(FALSE);
447
448 mutex->lock(mutex);
449
450 sigcount++;
451 condvar->broadcast(condvar);
452
453 thread_cleanup_push((void*)mutex->unlock, mutex);
454 thread_cancelability(TRUE);
455 while (TRUE)
456 {
457 condvar->wait(condvar, mutex);
458 }
459 thread_cleanup_pop(TRUE);
460
461 return NULL;
462 }
463
464 START_TEST(test_condvar_cancel)
465 {
466 thread_t *threads[THREADS];
467 int i;
468
469 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
470 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
471 sigcount = 0;
472
473 for (i = 0; i < THREADS; i++)
474 {
475 threads[i] = thread_create(condvar_cancel_run, NULL);
476 }
477
478 /* wait for all threads */
479 mutex->lock(mutex);
480 while (sigcount < THREADS)
481 {
482 condvar->wait(condvar, mutex);
483 }
484 mutex->unlock(mutex);
485
486 for (i = 0; i < THREADS; i++)
487 {
488 threads[i]->cancel(threads[i]);
489 }
490 for (i = 0; i < THREADS; i++)
491 {
492 threads[i]->join(threads[i]);
493 }
494
495 mutex->destroy(mutex);
496 condvar->destroy(condvar);
497 }
498 END_TEST
499
500 /**
501 * RWlock for different tests
502 */
503 static rwlock_t *rwlock;
504
505 static void *rwlock_run(refcount_t *refs)
506 {
507 rwlock->read_lock(rwlock);
508 ref_get(refs);
509 sched_yield();
510 ignore_result(ref_put(refs));
511 rwlock->unlock(rwlock);
512
513 if (rwlock->try_write_lock(rwlock))
514 {
515 ck_assert_int_eq(*refs, 0);
516 sched_yield();
517 rwlock->unlock(rwlock);
518 }
519
520 rwlock->write_lock(rwlock);
521 ck_assert_int_eq(*refs, 0);
522 sched_yield();
523 rwlock->unlock(rwlock);
524
525 rwlock->read_lock(rwlock);
526 rwlock->read_lock(rwlock);
527 ref_get(refs);
528 sched_yield();
529 ignore_result(ref_put(refs));
530 rwlock->unlock(rwlock);
531 rwlock->unlock(rwlock);
532
533 return NULL;
534 }
535
536 START_TEST(test_rwlock)
537 {
538 thread_t *threads[THREADS];
539 refcount_t refs = 0;
540 int i;
541
542 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
543
544 for (i = 0; i < THREADS; i++)
545 {
546 threads[i] = thread_create((void*)rwlock_run, &refs);
547 }
548 for (i = 0; i < THREADS; i++)
549 {
550 threads[i]->join(threads[i]);
551 }
552
553 rwlock->destroy(rwlock);
554 }
555 END_TEST
556
557 /**
558 * Rwlock condvar
559 */
560 static rwlock_condvar_t *rwcond;
561
562 static void *rwlock_condvar_run(void *data)
563 {
564 rwlock->write_lock(rwlock);
565 sigcount++;
566 rwcond->signal(rwcond);
567 rwlock->unlock(rwlock);
568 return NULL;
569 }
570
571 START_TEST(test_rwlock_condvar)
572 {
573 thread_t *threads[THREADS];
574 int i;
575
576 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
577 rwcond = rwlock_condvar_create();
578 sigcount = 0;
579
580 for (i = 0; i < THREADS; i++)
581 {
582 threads[i] = thread_create(rwlock_condvar_run, NULL);
583 }
584
585 rwlock->write_lock(rwlock);
586 while (sigcount < THREADS)
587 {
588 rwcond->wait(rwcond, rwlock);
589 }
590 rwlock->unlock(rwlock);
591
592 for (i = 0; i < THREADS; i++)
593 {
594 threads[i]->join(threads[i]);
595 }
596
597 rwlock->destroy(rwlock);
598 rwcond->destroy(rwcond);
599 }
600 END_TEST
601
602 static void *rwlock_condvar_run_broad(void *data)
603 {
604 rwlock->write_lock(rwlock);
605 while (sigcount < 0)
606 {
607 rwcond->wait(rwcond, rwlock);
608 }
609 rwlock->unlock(rwlock);
610 return NULL;
611 }
612
613 START_TEST(test_rwlock_condvar_broad)
614 {
615 thread_t *threads[THREADS];
616 int i;
617
618 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
619 rwcond = rwlock_condvar_create();
620 sigcount = 0;
621
622 for (i = 0; i < THREADS; i++)
623 {
624 threads[i] = thread_create(rwlock_condvar_run_broad, NULL);
625 }
626
627 sched_yield();
628
629 rwlock->write_lock(rwlock);
630 sigcount = 1;
631 rwcond->broadcast(rwcond);
632 rwlock->unlock(rwlock);
633
634 for (i = 0; i < THREADS; i++)
635 {
636 threads[i]->join(threads[i]);
637 }
638
639 rwlock->destroy(rwlock);
640 rwcond->destroy(rwcond);
641 }
642 END_TEST
643
644 START_TEST(test_rwlock_condvar_timed)
645 {
646 thread_t *thread;
647 timeval_t start, end, diff = { .tv_usec = 50000 };
648
649 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
650 rwcond = rwlock_condvar_create();
651 sigcount = 0;
652
653 rwlock->write_lock(rwlock);
654 while (TRUE)
655 {
656 time_monotonic(&start);
657 if (rwcond->timed_wait(rwcond, rwlock, diff.tv_usec / 1000))
658 {
659 break;
660 }
661 }
662 rwlock->unlock(rwlock);
663 time_monotonic(&end);
664 timersub(&end, &start, &end);
665 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
666 end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);
667
668 thread = thread_create(rwlock_condvar_run, NULL);
669
670 rwlock->write_lock(rwlock);
671 while (sigcount == 0)
672 {
673 ck_assert(!rwcond->timed_wait(rwcond, rwlock, 1000));
674 }
675 rwlock->unlock(rwlock);
676
677 thread->join(thread);
678 rwlock->destroy(rwlock);
679 rwcond->destroy(rwcond);
680 }
681 END_TEST
682
683 START_TEST(test_rwlock_condvar_timed_abs)
684 {
685 thread_t *thread;
686 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
687
688 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
689 rwcond = rwlock_condvar_create();
690 sigcount = 0;
691
692 rwlock->write_lock(rwlock);
693 while (TRUE)
694 {
695 time_monotonic(&start);
696 timeradd(&start, &diff, &abso);
697 if (rwcond->timed_wait_abs(rwcond, rwlock, abso))
698 {
699 break;
700 }
701 }
702 rwlock->unlock(rwlock);
703 time_monotonic(&end);
704 ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
705 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
706
707 thread = thread_create(rwlock_condvar_run, NULL);
708
709 time_monotonic(&start);
710 diff.tv_sec = 1;
711 timeradd(&start, &diff, &abso);
712 rwlock->write_lock(rwlock);
713 while (sigcount == 0)
714 {
715 ck_assert(!rwcond->timed_wait_abs(rwcond, rwlock, abso));
716 }
717 rwlock->unlock(rwlock);
718
719 thread->join(thread);
720 rwlock->destroy(rwlock);
721 rwcond->destroy(rwcond);
722 }
723 END_TEST
724
725 static void *rwlock_condvar_cancel_run(void *data)
726 {
727 thread_cancelability(FALSE);
728
729 rwlock->write_lock(rwlock);
730
731 sigcount++;
732 rwcond->broadcast(rwcond);
733
734 thread_cleanup_push((void*)rwlock->unlock, rwlock);
735 thread_cancelability(TRUE);
736 while (TRUE)
737 {
738 rwcond->wait(rwcond, rwlock);
739 }
740 thread_cleanup_pop(TRUE);
741
742 return NULL;
743 }
744
745 START_TEST(test_rwlock_condvar_cancel)
746 {
747 thread_t *threads[THREADS];
748 int i;
749
750 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
751 rwcond = rwlock_condvar_create();
752 sigcount = 0;
753
754 for (i = 0; i < THREADS; i++)
755 {
756 threads[i] = thread_create(rwlock_condvar_cancel_run, NULL);
757 }
758
759 /* wait for all threads */
760 rwlock->write_lock(rwlock);
761 while (sigcount < THREADS)
762 {
763 rwcond->wait(rwcond, rwlock);
764 }
765 rwlock->unlock(rwlock);
766
767 for (i = 0; i < THREADS; i++)
768 {
769 threads[i]->cancel(threads[i]);
770 }
771 for (i = 0; i < THREADS; i++)
772 {
773 threads[i]->join(threads[i]);
774 }
775
776 rwlock->destroy(rwlock);
777 rwcond->destroy(rwcond);
778 }
779 END_TEST
780
781 /**
782 * Semaphore for different tests
783 */
784 static semaphore_t *semaphore;
785
786 static void *semaphore_run(void *data)
787 {
788 semaphore->post(semaphore);
789 return NULL;
790 }
791
792 START_TEST(test_semaphore)
793 {
794 thread_t *threads[THREADS];
795 int i, initial = 5;
796
797 semaphore = semaphore_create(initial);
798
799 for (i = 0; i < THREADS; i++)
800 {
801 threads[i] = thread_create(semaphore_run, NULL);
802 }
803 for (i = 0; i < THREADS + initial; i++)
804 {
805 semaphore->wait(semaphore);
806 }
807 for (i = 0; i < THREADS; i++)
808 {
809 threads[i]->join(threads[i]);
810 }
811
812 semaphore->destroy(semaphore);
813 }
814 END_TEST
815
816 static void *join_run(void *data)
817 {
818 /* force some context switches */
819 sched_yield();
820 return (void*)((uintptr_t)data + THREADS);
821 }
822
823 START_TEST(test_join)
824 {
825 thread_t *threads[THREADS];
826 int i;
827
828 for (i = 0; i < THREADS; i++)
829 {
830 threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
831 }
832 for (i = 0; i < THREADS; i++)
833 {
834 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
835 }
836 }
837 END_TEST
838
839 static void *exit_join_run(void *data)
840 {
841 sched_yield();
842 thread_exit((void*)((uintptr_t)data + THREADS));
843 /* not reached */
844 ck_assert(FALSE);
845 return NULL;
846 }
847
848 START_TEST(test_join_exit)
849 {
850 thread_t *threads[THREADS];
851 int i;
852
853 for (i = 0; i < THREADS; i++)
854 {
855 threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
856 }
857 for (i = 0; i < THREADS; i++)
858 {
859 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
860 }
861 }
862 END_TEST
863
864 static void *detach_run(void *data)
865 {
866 refcount_t *running = (refcount_t*)data;
867
868 ignore_result(ref_put(running));
869 return NULL;
870 }
871
872 START_TEST(test_detach)
873 {
874 thread_t *threads[THREADS];
875 int i;
876 refcount_t running = 0;
877
878 for (i = 0; i < THREADS; i++)
879 {
880 ref_get(&running);
881 threads[i] = thread_create(detach_run, &running);
882 }
883 for (i = 0; i < THREADS; i++)
884 {
885 threads[i]->detach(threads[i]);
886 }
887 while (running > 0)
888 {
889 sched_yield();
890 }
891 /* no checks done here, but we check that thread state gets cleaned
892 * up with leak detective. */
893 }
894 END_TEST
895
896 static void *detach_exit_run(void *data)
897 {
898 refcount_t *running = (refcount_t*)data;
899
900 ignore_result(ref_put(running));
901 thread_exit(NULL);
902 /* not reached */
903 ck_assert(FALSE);
904 return NULL;
905 }
906
907 START_TEST(test_detach_exit)
908 {
909 thread_t *threads[THREADS];
910 int i;
911 refcount_t running = 0;
912
913 for (i = 0; i < THREADS; i++)
914 {
915 ref_get(&running);
916 threads[i] = thread_create(detach_exit_run, &running);
917 }
918 for (i = 0; i < THREADS; i++)
919 {
920 threads[i]->detach(threads[i]);
921 }
922 while (running > 0)
923 {
924 sched_yield();
925 }
926 /* no checks done here, but we check that thread state gets cleaned
927 * up with leak detective. */
928 }
929 END_TEST
930
931 static void *cancel_run(void *data)
932 {
933 /* default cancellability should be TRUE, so don't change it */
934 while (TRUE)
935 {
936 sleep(10);
937 }
938 return NULL;
939 }
940
941 START_TEST(test_cancel)
942 {
943 thread_t *threads[THREADS];
944 int i;
945
946 for (i = 0; i < THREADS; i++)
947 {
948 threads[i] = thread_create(cancel_run, NULL);
949 }
950 for (i = 0; i < THREADS; i++)
951 {
952 threads[i]->cancel(threads[i]);
953 }
954 for (i = 0; i < THREADS; i++)
955 {
956 threads[i]->join(threads[i]);
957 }
958 }
959 END_TEST
960
961 static void *cancel_onoff_run(void *data)
962 {
963 bool *cancellable = (bool*)data;
964
965 thread_cancelability(FALSE);
966 *cancellable = FALSE;
967
968 /* we should not get cancelled here */
969 usleep(50000);
970
971 *cancellable = TRUE;
972 thread_cancelability(TRUE);
973
974 /* but here */
975 while (TRUE)
976 {
977 sleep(10);
978 }
979 return NULL;
980 }
981
982 START_TEST(test_cancel_onoff)
983 {
984 thread_t *threads[THREADS];
985 bool cancellable[THREADS];
986 int i;
987
988 for (i = 0; i < THREADS; i++)
989 {
990 cancellable[i] = TRUE;
991 threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
992 }
993 for (i = 0; i < THREADS; i++)
994 {
995 /* wait until thread has cleared its cancellability */
996 while (cancellable[i])
997 {
998 sched_yield();
999 }
1000 threads[i]->cancel(threads[i]);
1001 }
1002 for (i = 0; i < THREADS; i++)
1003 {
1004 threads[i]->join(threads[i]);
1005 ck_assert(cancellable[i]);
1006 }
1007 }
1008 END_TEST
1009
1010 static void *cancel_point_run(void *data)
1011 {
1012 thread_cancelability(FALSE);
1013 while (TRUE)
1014 {
1015 /* implicitly enables cancellability */
1016 thread_cancellation_point();
1017 }
1018 return NULL;
1019 }
1020
1021 START_TEST(test_cancel_point)
1022 {
1023 thread_t *threads[THREADS];
1024 int i;
1025
1026 for (i = 0; i < THREADS; i++)
1027 {
1028 threads[i] = thread_create(cancel_point_run, NULL);
1029 }
1030 sched_yield();
1031 for (i = 0; i < THREADS; i++)
1032 {
1033 threads[i]->cancel(threads[i]);
1034 }
1035 for (i = 0; i < THREADS; i++)
1036 {
1037 threads[i]->join(threads[i]);
1038 }
1039 }
1040 END_TEST
1041
/**
 * Cleanup handlers; each verifies it runs in the expected order by
 * checking the counter before advancing it (1 -> 2 -> 3 -> 4).
 */
static void cleanup1(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	ck_assert_int_eq(*counter, 1);
	(*counter)++;
}

static void cleanup2(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	ck_assert_int_eq(*counter, 2);
	(*counter)++;
}

static void cleanup3(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	ck_assert_int_eq(*counter, 3);
	(*counter)++;
}
1065
1066 static void *cleanup_run(void *data)
1067 {
1068 thread_cleanup_push(cleanup3, data);
1069 thread_cleanup_push(cleanup2, data);
1070 thread_cleanup_push(cleanup1, data);
1071 return NULL;
1072 }
1073
1074 START_TEST(test_cleanup)
1075 {
1076 thread_t *threads[THREADS];
1077 uintptr_t values[THREADS];
1078 int i;
1079
1080 for (i = 0; i < THREADS; i++)
1081 {
1082 values[i] = 1;
1083 threads[i] = thread_create(cleanup_run, &values[i]);
1084 }
1085 for (i = 0; i < THREADS; i++)
1086 {
1087 threads[i]->join(threads[i]);
1088 ck_assert_int_eq(values[i], 4);
1089 }
1090 }
1091 END_TEST
1092
1093 static void *cleanup_exit_run(void *data)
1094 {
1095 thread_cleanup_push(cleanup3, data);
1096 thread_cleanup_push(cleanup2, data);
1097 thread_cleanup_push(cleanup1, data);
1098 thread_exit(NULL);
1099 ck_assert(FALSE);
1100 return NULL;
1101 }
1102
1103 START_TEST(test_cleanup_exit)
1104 {
1105 thread_t *threads[THREADS];
1106 uintptr_t values[THREADS];
1107 int i;
1108
1109 for (i = 0; i < THREADS; i++)
1110 {
1111 values[i] = 1;
1112 threads[i] = thread_create(cleanup_exit_run, &values[i]);
1113 }
1114 for (i = 0; i < THREADS; i++)
1115 {
1116 threads[i]->join(threads[i]);
1117 ck_assert_int_eq(values[i], 4);
1118 }
1119 }
1120 END_TEST
1121
1122 static void *cleanup_cancel_run(void *data)
1123 {
1124 thread_cancelability(FALSE);
1125
1126 thread_cleanup_push(cleanup3, data);
1127 thread_cleanup_push(cleanup2, data);
1128 thread_cleanup_push(cleanup1, data);
1129
1130 thread_cancelability(TRUE);
1131
1132 while (TRUE)
1133 {
1134 sleep(1);
1135 }
1136 return NULL;
1137 }
1138
1139 START_TEST(test_cleanup_cancel)
1140 {
1141 thread_t *threads[THREADS];
1142 uintptr_t values[THREADS];
1143 int i;
1144
1145 for (i = 0; i < THREADS; i++)
1146 {
1147 values[i] = 1;
1148 threads[i] = thread_create(cleanup_cancel_run, &values[i]);
1149 }
1150 for (i = 0; i < THREADS; i++)
1151 {
1152 threads[i]->cancel(threads[i]);
1153 }
1154 for (i = 0; i < THREADS; i++)
1155 {
1156 threads[i]->join(threads[i]);
1157 ck_assert_int_eq(values[i], 4);
1158 }
1159 }
1160 END_TEST
1161
1162 static void *cleanup_pop_run(void *data)
1163 {
1164 thread_cleanup_push(cleanup3, data);
1165 thread_cleanup_push(cleanup2, data);
1166 thread_cleanup_push(cleanup1, data);
1167
1168 thread_cleanup_push(cleanup2, data);
1169 thread_cleanup_pop(FALSE);
1170
1171 thread_cleanup_pop(TRUE);
1172 return NULL;
1173 }
1174
1175 START_TEST(test_cleanup_pop)
1176 {
1177 thread_t *threads[THREADS];
1178 uintptr_t values[THREADS];
1179 int i;
1180
1181 for (i = 0; i < THREADS; i++)
1182 {
1183 values[i] = 1;
1184 threads[i] = thread_create(cleanup_pop_run, &values[i]);
1185 }
1186 for (i = 0; i < THREADS; i++)
1187 {
1188 threads[i]->join(threads[i]);
1189 ck_assert_int_eq(values[i], 4);
1190 }
1191 }
1192 END_TEST
1193
1194 static thread_value_t *tls[10];
1195
1196 static void *tls_run(void *data)
1197 {
1198 uintptr_t value = (uintptr_t)data;
1199 int i, j;
1200
1201 for (i = 0; i < countof(tls); i++)
1202 {
1203 ck_assert(tls[i]->get(tls[i]) == NULL);
1204 }
1205 for (i = 0; i < countof(tls); i++)
1206 {
1207 tls[i]->set(tls[i], (void*)(value * i));
1208 }
1209 for (j = 0; j < 1000; j++)
1210 {
1211 for (i = 0; i < countof(tls); i++)
1212 {
1213 tls[i]->set(tls[i], (void*)(value * i));
1214 ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
1215 }
1216 sched_yield();
1217 }
1218 for (i = 0; i < countof(tls); i++)
1219 {
1220 ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
1221 }
1222 return (void*)(value + 1);
1223 }
1224
1225 START_TEST(test_tls)
1226 {
1227 thread_t *threads[THREADS];
1228 int i;
1229
1230 for (i = 0; i < countof(tls); i++)
1231 {
1232 tls[i] = thread_value_create(NULL);
1233 }
1234 for (i = 0; i < THREADS; i++)
1235 {
1236 threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
1237 }
1238
1239 ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
1240 THREADS + 2);
1241
1242 for (i = 0; i < THREADS; i++)
1243 {
1244 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
1245 }
1246 for (i = 0; i < countof(tls); i++)
1247 {
1248 tls[i]->destroy(tls[i]);
1249 }
1250 }
1251 END_TEST
1252
/**
 * TLS destructor: decrement the per-thread counter once per slot
 */
static void tls_cleanup(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	(*counter)--;
}
1259
1260 static void *tls_cleanup_run(void *data)
1261 {
1262 int i;
1263
1264 for (i = 0; i < countof(tls); i++)
1265 {
1266 tls[i]->set(tls[i], data);
1267 }
1268 return NULL;
1269 }
1270
1271 START_TEST(test_tls_cleanup)
1272 {
1273 thread_t *threads[THREADS];
1274 uintptr_t values[THREADS], main_value = countof(tls);
1275 int i;
1276
1277 for (i = 0; i < countof(tls); i++)
1278 {
1279 tls[i] = thread_value_create(tls_cleanup);
1280 }
1281 for (i = 0; i < THREADS; i++)
1282 {
1283 values[i] = countof(tls);
1284 threads[i] = thread_create(tls_cleanup_run, &values[i]);
1285 }
1286
1287 tls_cleanup_run(&main_value);
1288
1289 for (i = 0; i < THREADS; i++)
1290 {
1291 threads[i]->join(threads[i]);
1292 ck_assert_int_eq(values[i], 0);
1293 }
1294 for (i = 0; i < countof(tls); i++)
1295 {
1296 tls[i]->destroy(tls[i]);
1297 }
1298 ck_assert_int_eq(main_value, 0);
1299 }
1300 END_TEST
1301
1302 Suite *threading_suite_create()
1303 {
1304 Suite *s;
1305 TCase *tc;
1306
1307 s = suite_create("threading");
1308
1309 tc = tcase_create("recursive mutex");
1310 tcase_add_test(tc, test_mutex);
1311 suite_add_tcase(s, tc);
1312
1313 tc = tcase_create("spinlock");
1314 tcase_add_test(tc, test_spinlock);
1315 suite_add_tcase(s, tc);
1316
1317 tc = tcase_create("condvar");
1318 tcase_add_test(tc, test_condvar);
1319 tcase_add_test(tc, test_condvar_recursive);
1320 tcase_add_test(tc, test_condvar_broad);
1321 tcase_add_test(tc, test_condvar_timed);
1322 tcase_add_test(tc, test_condvar_timed_abs);
1323 tcase_add_test(tc, test_condvar_cancel);
1324 suite_add_tcase(s, tc);
1325
1326 tc = tcase_create("rwlock");
1327 tcase_add_test(tc, test_rwlock);
1328 suite_add_tcase(s, tc);
1329
1330 tc = tcase_create("rwlock condvar");
1331 tcase_add_test(tc, test_rwlock_condvar);
1332 tcase_add_test(tc, test_rwlock_condvar_broad);
1333 tcase_add_test(tc, test_rwlock_condvar_timed);
1334 tcase_add_test(tc, test_rwlock_condvar_timed_abs);
1335 tcase_add_test(tc, test_rwlock_condvar_cancel);
1336 suite_add_tcase(s, tc);
1337
1338 tc = tcase_create("semaphore");
1339 tcase_add_test(tc, test_semaphore);
1340 suite_add_tcase(s, tc);
1341
1342 tc = tcase_create("thread joining");
1343 tcase_add_test(tc, test_join);
1344 tcase_add_test(tc, test_join_exit);
1345 suite_add_tcase(s, tc);
1346
1347 tc = tcase_create("thread detaching");
1348 tcase_add_test(tc, test_detach);
1349 tcase_add_test(tc, test_detach_exit);
1350 suite_add_tcase(s, tc);
1351
1352 tc = tcase_create("thread cancellation");
1353 tcase_add_test(tc, test_cancel);
1354 tcase_add_test(tc, test_cancel_onoff);
1355 tcase_add_test(tc, test_cancel_point);
1356 suite_add_tcase(s, tc);
1357
1358 tc = tcase_create("thread cleanup");
1359 tcase_add_test(tc, test_cleanup);
1360 tcase_add_test(tc, test_cleanup_exit);
1361 tcase_add_test(tc, test_cleanup_cancel);
1362 tcase_add_test(tc, test_cleanup_pop);
1363 suite_add_tcase(s, tc);
1364
1365 tc = tcase_create("thread local storage");
1366 tcase_add_test(tc, test_tls);
1367 tcase_add_test(tc, test_tls_cleanup);
1368 suite_add_tcase(s, tc);
1369
1370 return s;
1371 }