unit-tests: Add a semaphore wait cancel test
[strongswan.git] / src / libstrongswan / tests / suites / test_threading.c
1 /*
2 * Copyright (C) 2013 Tobias Brunner
3 * Copyright (C) 2008 Martin Willi
4 * Hochschule fuer Technik Rapperswil
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16
17 #include "test_suite.h"
18
19 #include <sched.h>
20 #include <unistd.h>
21
22 #include <threading/thread.h>
23 #include <threading/mutex.h>
24 #include <threading/condvar.h>
25 #include <threading/rwlock.h>
26 #include <threading/rwlock_condvar.h>
27 #include <threading/spinlock.h>
28 #include <threading/semaphore.h>
29 #include <threading/thread_value.h>
30
31 /*******************************************************************************
32 * recursive mutex test
33 */
34
35 #define THREADS 20
36
37 /**
38 * Thread barrier data
39 */
40 typedef struct {
41 mutex_t *mutex;
42 condvar_t *cond;
43 int count;
44 int current;
45 bool active;
46 } barrier_t;
47
48 /**
49 * Create a thread barrier for count threads
50 */
51 static barrier_t* barrier_create(int count)
52 {
53 barrier_t *this;
54
55 INIT(this,
56 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
57 .cond = condvar_create(CONDVAR_TYPE_DEFAULT),
58 .count = count,
59 );
60
61 return this;
62 }
63
64 /**
65 * Destroy a thread barrier
66 */
67 static void barrier_destroy(barrier_t *this)
68 {
69 this->mutex->destroy(this->mutex);
70 this->cond->destroy(this->cond);
71 free(this);
72 }
73
/**
 * Wait to have configured number of threads in barrier
 *
 * Blocks until this->count threads entered the barrier, then releases them
 * all. Exactly one of the released threads (the first to enter the round)
 * returns TRUE, the others return FALSE.
 */
static bool barrier_wait(barrier_t *this)
{
	bool winner = FALSE;

	this->mutex->lock(this->mutex);
	if (!this->active)
	{	/* first, reset */
		this->active = TRUE;
		this->current = 0;
	}

	this->current++;
	while (this->current < this->count)
	{
		this->cond->wait(this->cond, this->mutex);
	}
	if (this->active)
	{	/* first, win */
		winner = TRUE;
		this->active = FALSE;
	}
	this->mutex->unlock(this->mutex);
	/* wake threads still blocked waiting for the counter to reach count */
	this->cond->broadcast(this->cond);
	sched_yield();

	return winner;
}
104
105 /**
106 * Barrier for some tests
107 */
108 static barrier_t *barrier;
109
110 /**
111 * A mutex for tests requiring one
112 */
113 static mutex_t *mutex;
114
115 /**
116 * A condvar for tests requiring one
117 */
118 static condvar_t *condvar;
119
120 /**
121 * A counter for signaling
122 */
123 static int sigcount;
124
/**
 * Thread function hammering the recursive mutex, verifying that no two
 * threads ever hold it at the same time via the shared 'locked' check.
 */
static void *mutex_run(void *data)
{
	int locked = 0;
	int i;

	/* wait for all threads before getting in action */
	barrier_wait(barrier);

	for (i = 0; i < 100; i++)
	{
		/* recursive mutex: lock three times, unlock three times */
		mutex->lock(mutex);
		mutex->lock(mutex);
		mutex->lock(mutex);
		locked++;
		sched_yield();
		if (locked > 1)
		{
			fail("two threads locked the mutex concurrently");
		}
		locked--;
		mutex->unlock(mutex);
		mutex->unlock(mutex);
		mutex->unlock(mutex);
	}
	return NULL;
}
151
/**
 * Check single-threaded and contended recursive mutex operation
 */
START_TEST(test_mutex)
{
	thread_t *threads[THREADS];
	int i;

	barrier = barrier_create(THREADS);
	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);

	/* sanity check: repeated lock/unlock pairs in a single thread */
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
		mutex->unlock(mutex);
	}
	/* recursive locking: ten nested locks, then ten unlocks */
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->unlock(mutex);
	}

	/* let THREADS threads contend for the mutex */
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(mutex_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	barrier_destroy(barrier);
}
END_TEST
187
188 /**
189 * Spinlock for testing
190 */
191 static spinlock_t *spinlock;
192
/**
 * Thread function incrementing/decrementing a shared counter under the
 * spinlock; the counter must always be exactly 1 inside the critical section.
 */
static void *spinlock_run(void *data)
{
	int i, *locked = (int*)data;

	/* start all threads simultaneously to maximize contention */
	barrier_wait(barrier);

	for (i = 0; i < 1000; i++)
	{
		spinlock->lock(spinlock);
		(*locked)++;
		ck_assert_int_eq(*locked, 1);
		(*locked)--;
		spinlock->unlock(spinlock);
	}
	return NULL;
}
209
/**
 * Check mutual exclusion of the spinlock under contention
 */
START_TEST(test_spinlock)
{
	thread_t *threads[THREADS];
	int i, locked = 0;

	barrier = barrier_create(THREADS);
	spinlock = spinlock_create();

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(spinlock_run, &locked);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	spinlock->destroy(spinlock);
	barrier_destroy(barrier);
}
END_TEST
231
/**
 * Thread function incrementing sigcount and signaling the condvar once,
 * with both done under the mutex so the waiter can't miss the update.
 */
static void *condvar_run(void *data)
{
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	return NULL;
}
240
/**
 * Check basic condvar signaling: wait until all threads signaled once
 */
START_TEST(test_condvar)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run, NULL);
	}

	/* wait until every thread has incremented sigcount */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
271
/**
 * Thread function signaling the condvar while holding the recursive mutex
 * three times, verifying signal/unlock work with nested lock counts.
 */
static void *condvar_recursive_run(void *data)
{
	mutex->lock(mutex);
	mutex->lock(mutex);
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	return NULL;
}
284
/**
 * Check condvar waiting with a recursively locked mutex: wait() must
 * release the full lock count and reacquire it afterwards.
 */
START_TEST(test_condvar_recursive)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	/* hold the mutex while creating threads; they block in their first
	 * lock() until our wait() below releases all recursive locks */
	mutex->lock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_recursive_run, NULL);
	}

	/* acquire two more recursive locks, three in total */
	mutex->lock(mutex);
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
320
321 static void *condvar_run_broad(void *data)
322 {
323 mutex->lock(mutex);
324 while (sigcount < 0)
325 {
326 condvar->wait(condvar, mutex);
327 }
328 mutex->unlock(mutex);
329 return NULL;
330 }
331
/**
 * Check that a single broadcast wakes all threads waiting on the condvar
 */
START_TEST(test_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run_broad, NULL);
	}

	/* give the threads a chance to start waiting */
	sched_yield();

	mutex->lock(mutex);
	sigcount = 1;
	condvar->broadcast(condvar);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
362
/**
 * Check relative timed condvar waiting: the timeout must not fire early,
 * and a signal must terminate the wait before the timeout.
 */
START_TEST(test_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	/* loop to guard against spurious wakeups; timed_wait() returns TRUE
	 * only on actual timeout */
	mutex->lock(mutex);
	while (TRUE)
	{
		time_monotonic(&start);
		if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
		{
			break;
		}
	}
	time_monotonic(&end);
	mutex->unlock(mutex);
	/* elapsed time must exceed the requested timeout */
	timersub(&end, &start, &end);
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(condvar_run, NULL);

	/* the signal must arrive well within the 1s timeout */
	mutex->lock(mutex);
	while (sigcount == 0)
	{
		ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
	}
	mutex->unlock(mutex);

	thread->join(thread);
	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
401
402 START_TEST(test_condvar_timed_abs)
403 {
404 thread_t *thread;
405 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
406
407 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
408 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
409 sigcount = 0;
410
411 mutex->lock(mutex);
412 while (TRUE)
413 {
414 time_monotonic(&start);
415 timeradd(&start, &diff, &abso);
416 if (condvar->timed_wait_abs(condvar, mutex, abso))
417 {
418 break;
419 }
420 }
421 time_monotonic(&end);
422 mutex->unlock(mutex);
423 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
424 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
425
426 thread = thread_create(condvar_run, NULL);
427
428 time_monotonic(&start);
429 diff.tv_sec = 1;
430 timeradd(&start, &diff, &abso);
431 mutex->lock(mutex);
432 while (sigcount == 0)
433 {
434 ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
435 }
436 mutex->unlock(mutex);
437
438 thread->join(thread);
439 mutex->destroy(mutex);
440 condvar->destroy(condvar);
441 }
442 END_TEST
443
/**
 * Thread function blocking in condvar->wait() until cancelled; registers a
 * cleanup handler so the mutex gets released on cancellation.
 */
static void *condvar_cancel_run(void *data)
{
	/* don't get cancelled before we announced ourselves via sigcount */
	thread_cancelability(FALSE);

	mutex->lock(mutex);

	sigcount++;
	condvar->broadcast(condvar);

	/* wait() is a cancellation point; the cleanup handler unlocks the mutex
	 * that wait() reacquires when the thread gets cancelled */
	thread_cleanup_push((void*)mutex->unlock, mutex);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		condvar->wait(condvar, mutex);
	}
	/* not reached, but balances the cleanup push */
	thread_cleanup_pop(TRUE);

	return NULL;
}
463
/**
 * Check that threads blocked in condvar->wait() can be cancelled
 */
START_TEST(test_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
499
500 /**
501 * RWlock for different tests
502 */
503 static rwlock_t *rwlock;
504
/**
 * Thread function exercising read, try-write, write and nested read locking;
 * refs counts concurrent readers and must be 0 whenever a writer holds the
 * lock exclusively.
 */
static void *rwlock_run(refcount_t *refs)
{
	/* shared read lock: multiple readers may bump refs concurrently */
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);

	/* non-blocking write attempt: if it succeeds, no readers may be active */
	if (rwlock->try_write_lock(rwlock))
	{
		ck_assert_int_eq(*refs, 0);
		sched_yield();
		rwlock->unlock(rwlock);
	}

	/* blocking exclusive write lock */
	rwlock->write_lock(rwlock);
	ck_assert_int_eq(*refs, 0);
	sched_yield();
	rwlock->unlock(rwlock);

	/* recursive read locking */
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	return NULL;
}
535
/**
 * Check reader/writer exclusion of the rwlock under contention
 */
START_TEST(test_rwlock)
{
	thread_t *threads[THREADS];
	refcount_t refs = 0;
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create((void*)rwlock_run, &refs);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
}
END_TEST
556
557 /**
558 * Rwlock condvar
559 */
560 static rwlock_condvar_t *rwcond;
561
/**
 * Thread function incrementing sigcount and signaling the rwlock condvar,
 * both done while holding the write lock.
 */
static void *rwlock_condvar_run(void *data)
{
	rwlock->write_lock(rwlock);
	sigcount++;
	rwcond->signal(rwcond);
	rwlock->unlock(rwlock);
	return NULL;
}
570
/**
 * Check basic rwlock condvar signaling: wait until all threads signaled once
 */
START_TEST(test_rwlock_condvar)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run, NULL);
	}

	/* wait until every thread has incremented sigcount */
	rwlock->write_lock(rwlock);
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
601
602 static void *rwlock_condvar_run_broad(void *data)
603 {
604 rwlock->write_lock(rwlock);
605 while (sigcount < 0)
606 {
607 rwcond->wait(rwcond, rwlock);
608 }
609 rwlock->unlock(rwlock);
610 return NULL;
611 }
612
/**
 * Check that a single broadcast wakes all threads waiting on the rwlock
 * condvar
 */
START_TEST(test_rwlock_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run_broad, NULL);
	}

	/* give the threads a chance to start waiting */
	sched_yield();

	rwlock->write_lock(rwlock);
	sigcount = 1;
	rwcond->broadcast(rwcond);
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
643
/**
 * Check relative timed rwlock condvar waiting: the timeout must not fire
 * early, and a signal must terminate the wait before the timeout.
 */
START_TEST(test_rwlock_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	/* loop to guard against spurious wakeups; timed_wait() returns TRUE
	 * only on actual timeout */
	rwlock->write_lock(rwlock);
	while (TRUE)
	{
		time_monotonic(&start);
		if (rwcond->timed_wait(rwcond, rwlock, diff.tv_usec / 1000))
		{
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	/* elapsed time must exceed the requested timeout */
	timersub(&end, &start, &end);
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(rwlock_condvar_run, NULL);

	/* the signal must arrive well within the 1s timeout */
	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait(rwcond, rwlock, 1000));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
682
/**
 * Check absolute timed rwlock condvar waiting: the deadline must not fire
 * early, and a signal must terminate the wait before the deadline.
 */
START_TEST(test_rwlock_condvar_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	/* loop to guard against spurious wakeups; timed_wait_abs() returns TRUE
	 * only on actual timeout */
	rwlock->write_lock(rwlock);
	while (TRUE)
	{
		time_monotonic(&start);
		timeradd(&start, &diff, &abso);
		if (rwcond->timed_wait_abs(rwcond, rwlock, abso))
		{
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	/* the absolute end time must lie past the absolute deadline */
	ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
				  end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);

	thread = thread_create(rwlock_condvar_run, NULL);

	/* the signal must arrive well before the 1s deadline */
	time_monotonic(&start);
	diff.tv_sec = 1;
	timeradd(&start, &diff, &abso);
	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait_abs(rwcond, rwlock, abso));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
724
/**
 * Thread function blocking in rwcond->wait() until cancelled; registers a
 * cleanup handler so the write lock gets released on cancellation.
 */
static void *rwlock_condvar_cancel_run(void *data)
{
	/* don't get cancelled before we announced ourselves via sigcount */
	thread_cancelability(FALSE);

	rwlock->write_lock(rwlock);

	sigcount++;
	rwcond->broadcast(rwcond);

	/* wait() is a cancellation point; the cleanup handler unlocks the lock
	 * that wait() reacquires when the thread gets cancelled */
	thread_cleanup_push((void*)rwlock->unlock, rwlock);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		rwcond->wait(rwcond, rwlock);
	}
	/* not reached, but balances the cleanup push */
	thread_cleanup_pop(TRUE);

	return NULL;
}
744
/**
 * Check that threads blocked in rwcond->wait() can be cancelled
 */
START_TEST(test_rwlock_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	rwlock->write_lock(rwlock);
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
780
781 /**
782 * Semaphore for different tests
783 */
784 static semaphore_t *semaphore;
785
/**
 * Thread function posting the semaphore exactly once
 */
static void *semaphore_run(void *data)
{
	semaphore->post(semaphore);
	return NULL;
}
791
/**
 * Check semaphore counting: initial value plus one post per thread must
 * allow exactly THREADS + initial waits.
 */
START_TEST(test_semaphore)
{
	thread_t *threads[THREADS];
	int i, initial = 5;

	semaphore = semaphore_create(initial);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(semaphore_run, NULL);
	}
	/* consume the initial count plus one post per thread */
	for (i = 0; i < THREADS + initial; i++)
	{
		semaphore->wait(semaphore);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	semaphore->destroy(semaphore);
}
END_TEST
815
/**
 * Check relative timed semaphore waiting: the timeout must not fire early,
 * and a post must terminate the wait before the timeout.
 */
START_TEST(test_semaphore_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	semaphore = semaphore_create(0);

	/* timed_wait() returns TRUE on timeout */
	time_monotonic(&start);
	ck_assert(semaphore->timed_wait(semaphore, diff.tv_usec / 1000));
	time_monotonic(&end);
	/* elapsed time must exceed the requested timeout */
	timersub(&end, &start, &end);
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(semaphore_run, NULL);

	/* the post must arrive well within the 1s timeout */
	ck_assert(!semaphore->timed_wait(semaphore, 1000));

	thread->join(thread);
	semaphore->destroy(semaphore);
}
END_TEST
838
/**
 * Check absolute timed semaphore waiting: the deadline must not fire early,
 * and a post must terminate the wait before the deadline.
 */
START_TEST(test_semaphore_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	semaphore = semaphore_create(0);

	/* timed_wait_abs() returns TRUE on timeout */
	time_monotonic(&start);
	timeradd(&start, &diff, &abso);
	ck_assert(semaphore->timed_wait_abs(semaphore, abso));
	time_monotonic(&end);
	/* the absolute end time must lie past the absolute deadline */
	ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
				  end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);

	thread = thread_create(semaphore_run, NULL);

	/* the post must arrive well before the 1s deadline */
	time_monotonic(&start);
	diff.tv_sec = 1;
	timeradd(&start, &diff, &abso);
	ck_assert(!semaphore->timed_wait_abs(semaphore, abso));

	thread->join(thread);
	semaphore->destroy(semaphore);
}
END_TEST
864
/**
 * Thread function blocking in semaphore->wait() until cancelled; signals
 * readiness via the shared refcount before enabling cancellation.
 */
static void *semaphore_cancel_run(void *data)
{
	refcount_t *ready = (refcount_t*)data;

	/* don't get cancelled before we announced ourselves */
	thread_cancelability(FALSE);
	ref_get(ready);

	/* wait() is a cancellation point and never gets posted in this test */
	thread_cancelability(TRUE);
	semaphore->wait(semaphore);

	/* only reached if cancellation failed */
	ck_assert(FALSE);
	return NULL;
}
878
/**
 * Check that threads blocked in semaphore->wait() can be cancelled
 */
START_TEST(test_semaphore_cancel)
{
	thread_t *threads[THREADS];
	refcount_t ready = 0;
	int i;

	semaphore = semaphore_create(0);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(semaphore_cancel_run, &ready);
	}
	/* spin until all threads disabled cancelability and are about to wait */
	while (ready < THREADS)
	{
		sched_yield();
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	semaphore->destroy(semaphore);
}
END_TEST
907
908 static void *join_run(void *data)
909 {
910 /* force some context switches */
911 sched_yield();
912 return (void*)((uintptr_t)data + THREADS);
913 }
914
/**
 * Check that join() returns the value returned by the thread function
 */
START_TEST(test_join)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST
930
931 static void *exit_join_run(void *data)
932 {
933 sched_yield();
934 thread_exit((void*)((uintptr_t)data + THREADS));
935 /* not reached */
936 ck_assert(FALSE);
937 return NULL;
938 }
939
/**
 * Check that join() returns the value passed to thread_exit()
 */
START_TEST(test_join_exit)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST
955
956 static void *detach_run(void *data)
957 {
958 refcount_t *running = (refcount_t*)data;
959
960 ignore_result(ref_put(running));
961 return NULL;
962 }
963
/**
 * Check that detached threads terminate and clean up on their own
 */
START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	/* spin until every detached thread dropped its reference */
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
987
988 static void *detach_exit_run(void *data)
989 {
990 refcount_t *running = (refcount_t*)data;
991
992 ignore_result(ref_put(running));
993 thread_exit(NULL);
994 /* not reached */
995 ck_assert(FALSE);
996 return NULL;
997 }
998
/**
 * Check that detached threads using thread_exit() clean up on their own
 */
START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	/* spin until every detached thread dropped its reference */
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
1022
/**
 * Thread function spinning in sleep(), a cancellation point, until the
 * thread gets cancelled
 */
static void *cancel_run(void *data)
{
	/* default cancellability should be TRUE, so don't change it */
	for (;;)
	{
		sleep(10);
	}
	return NULL;
}
1032
/**
 * Check basic thread cancellation of threads blocked in sleep()
 */
START_TEST(test_cancel)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
1052
/**
 * Thread function toggling cancelability: cancellation requested during the
 * disabled phase must be deferred until cancelability is re-enabled.
 */
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	thread_cancelability(FALSE);
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	/* re-enable before signaling, so a pending cancel takes effect here;
	 * the test checks this flag is TRUE again after join */
	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* but here */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}
1073
/**
 * Check that cancellation is deferred while cancelability is disabled
 */
START_TEST(test_cancel_onoff)
{
	thread_t *threads[THREADS];
	bool cancellable[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		cancellable[i] = TRUE;
		threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* wait until thread has cleared its cancellability */
		while (cancellable[i])
		{
			sched_yield();
		}
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* the thread must only die after re-enabling cancelability */
		ck_assert(cancellable[i]);
	}
}
END_TEST
1101
/**
 * Thread function spinning over explicit cancellation points while having
 * cancelability disabled otherwise
 */
static void *cancel_point_run(void *data)
{
	thread_cancelability(FALSE);
	while (TRUE)
	{
		/* implicitly enables cancellability */
		thread_cancellation_point();
	}
	return NULL;
}
1112
/**
 * Check cancellation via explicit thread_cancellation_point() calls
 */
START_TEST(test_cancel_point)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_point_run, NULL);
	}
	sched_yield();
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
1133
/**
 * Cleanup handler expected to run first: checks the counter is 1, bumps it
 */
static void cleanup1(void *data)
{
	uintptr_t *counter = data;

	ck_assert_int_eq(*counter, 1);
	(*counter)++;
}
1141
/**
 * Cleanup handler expected to run second: checks the counter is 2, bumps it
 */
static void cleanup2(void *data)
{
	uintptr_t *counter = data;

	ck_assert_int_eq(*counter, 2);
	(*counter)++;
}
1149
/**
 * Cleanup handler expected to run third: checks the counter is 3, bumps it
 */
static void cleanup3(void *data)
{
	uintptr_t *counter = data;

	ck_assert_int_eq(*counter, 3);
	(*counter)++;
}
1157
/**
 * Thread function registering three cleanup handlers; they pop in LIFO
 * order (cleanup1, cleanup2, cleanup3) when the thread returns.
 */
static void *cleanup_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	return NULL;
}
1165
/**
 * Check that cleanup handlers run in LIFO order on normal thread return
 */
START_TEST(test_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* each of the three handlers incremented the value once */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
1184
/**
 * Thread function registering three cleanup handlers, then terminating via
 * thread_exit(); handlers must still run in LIFO order.
 */
static void *cleanup_exit_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
1194
/**
 * Check that cleanup handlers run in LIFO order on thread_exit()
 */
START_TEST(test_cleanup_exit)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_exit_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* each of the three handlers incremented the value once */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
1213
/**
 * Thread function registering three cleanup handlers while cancelability is
 * disabled, then blocking in sleep() until cancelled; handlers must run in
 * LIFO order on cancellation.
 */
static void *cleanup_cancel_run(void *data)
{
	thread_cancelability(FALSE);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	/* sleep() is a cancellation point */
	while (TRUE)
	{
		sleep(1);
	}
	return NULL;
}
1230
/**
 * Check that cleanup handlers run in LIFO order on thread cancellation
 */
START_TEST(test_cleanup_cancel)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_cancel_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* each of the three handlers incremented the value once */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
1253
/**
 * Thread function checking thread_cleanup_pop(): a handler popped with
 * FALSE must not run, one popped with TRUE runs immediately; the remaining
 * two handlers run on thread return.
 */
static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	/* push and pop without executing: must have no effect */
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	/* pops and runs cleanup1 right here */
	thread_cleanup_pop(TRUE);
	return NULL;
}
1266
/**
 * Check explicit thread_cleanup_pop() with and without handler execution
 */
START_TEST(test_cleanup_pop)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_pop_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* cleanup1 (popped explicitly), cleanup2 and cleanup3 each ran once */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
1285
1286 static thread_value_t *tls[10];
1287
/**
 * Thread function storing thread-specific values in all tls slots and
 * verifying they remain stable across context switches; returns value + 1
 * so the caller can verify the return path.
 */
static void *tls_run(void *data)
{
	uintptr_t value = (uintptr_t)data;
	int i, j;

	/* slots must start out empty for this thread */
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == NULL);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], (void*)(value * i));
	}
	/* repeatedly set and verify, yielding to interleave with other threads */
	for (j = 0; j < 1000; j++)
	{
		for (i = 0; i < countof(tls); i++)
		{
			tls[i]->set(tls[i], (void*)(value * i));
			ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
		}
		sched_yield();
	}
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
	}
	return (void*)(value + 1);
}
1316
/**
 * Check thread-specific values, also from the main thread
 */
START_TEST(test_tls)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
	}

	/* the main thread uses the slots concurrently with the workers */
	ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
					 THREADS + 2);

	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
}
END_TEST
1344
/**
 * TLS destructor decrementing the per-thread counter, once per value slot
 */
static void tls_cleanup(void *data)
{
	uintptr_t *counter = data;

	(*counter)--;
}
1351
/**
 * Thread function storing the same pointer in all tls slots, so the
 * destructor runs countof(tls) times per thread on termination.
 */
static void *tls_cleanup_run(void *data)
{
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], data);
	}
	return NULL;
}
1362
/**
 * Check that TLS destructors run once per slot, for worker threads on
 * termination and for the main thread on thread_value destruction
 */
START_TEST(test_tls_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS], main_value = countof(tls);
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(tls_cleanup);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* each counter starts at countof(tls) and gets decremented to 0 */
		values[i] = countof(tls);
		threads[i] = thread_create(tls_cleanup_run, &values[i]);
	}

	tls_cleanup_run(&main_value);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 0);
	}
	/* destroying the thread values runs the destructor for the main thread */
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
	ck_assert_int_eq(main_value, 0);
}
END_TEST
1393
/**
 * Assemble the threading test suite, one test case per primitive
 */
Suite *threading_suite_create()
{
	Suite *s;
	TCase *tc;

	s = suite_create("threading");

	tc = tcase_create("recursive mutex");
	tcase_add_test(tc, test_mutex);
	suite_add_tcase(s, tc);

	tc = tcase_create("spinlock");
	tcase_add_test(tc, test_spinlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("condvar");
	tcase_add_test(tc, test_condvar);
	tcase_add_test(tc, test_condvar_recursive);
	tcase_add_test(tc, test_condvar_broad);
	tcase_add_test(tc, test_condvar_timed);
	tcase_add_test(tc, test_condvar_timed_abs);
	tcase_add_test(tc, test_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock");
	tcase_add_test(tc, test_rwlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock condvar");
	tcase_add_test(tc, test_rwlock_condvar);
	tcase_add_test(tc, test_rwlock_condvar_broad);
	tcase_add_test(tc, test_rwlock_condvar_timed);
	tcase_add_test(tc, test_rwlock_condvar_timed_abs);
	tcase_add_test(tc, test_rwlock_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("semaphore");
	tcase_add_test(tc, test_semaphore);
	tcase_add_test(tc, test_semaphore_timed);
	tcase_add_test(tc, test_semaphore_timed_abs);
	tcase_add_test(tc, test_semaphore_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread joining");
	tcase_add_test(tc, test_join);
	tcase_add_test(tc, test_join_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread detaching");
	tcase_add_test(tc, test_detach);
	tcase_add_test(tc, test_detach_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation");
	tcase_add_test(tc, test_cancel);
	tcase_add_test(tc, test_cancel_onoff);
	tcase_add_test(tc, test_cancel_point);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cleanup");
	tcase_add_test(tc, test_cleanup);
	tcase_add_test(tc, test_cleanup_exit);
	tcase_add_test(tc, test_cleanup_cancel);
	tcase_add_test(tc, test_cleanup_pop);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread local storage");
	tcase_add_test(tc, test_tls);
	tcase_add_test(tc, test_tls_cleanup);
	suite_add_tcase(s, tc);

	return s;
}