5938bd9598fdec6600bdd424a31c74f9f1dc0271
[strongswan.git] / src / libstrongswan / tests / suites / test_threading.c
1 /*
2 * Copyright (C) 2013 Tobias Brunner
3 * Copyright (C) 2008 Martin Willi
4 * Hochschule fuer Technik Rapperswil
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16
17 #include "test_suite.h"
18
19 #include <sched.h>
20 #include <unistd.h>
21
22 #include <threading/thread.h>
23 #include <threading/mutex.h>
24 #include <threading/condvar.h>
25 #include <threading/rwlock.h>
26 #include <threading/rwlock_condvar.h>
27 #include <threading/spinlock.h>
28 #include <threading/semaphore.h>
29 #include <threading/thread_value.h>
30
31 /*******************************************************************************
32 * recursive mutex test
33 */
34
/* number of concurrent worker threads used by the tests below */
#define THREADS 20

/**
 * Thread barrier data
 */
typedef struct {
	/* protects all fields below */
	mutex_t *mutex;
	/* waited on until all threads arrived */
	condvar_t *cond;
	/* number of threads the barrier releases at once */
	int count;
	/* threads currently waiting in the barrier */
	int current;
	/* TRUE while a barrier round is in progress; used to elect a winner */
	bool active;
} barrier_t;
47
48 /**
49 * Create a thread barrier for count threads
50 */
51 static barrier_t* barrier_create(int count)
52 {
53 barrier_t *this;
54
55 INIT(this,
56 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
57 .cond = condvar_create(CONDVAR_TYPE_DEFAULT),
58 .count = count,
59 );
60
61 return this;
62 }
63
64 /**
65 * Destroy a thread barrier
66 */
67 static void barrier_destroy(barrier_t *this)
68 {
69 this->mutex->destroy(this->mutex);
70 this->cond->destroy(this->cond);
71 free(this);
72 }
73
74 /**
75 * Wait to have configured number of threads in barrier
76 */
/**
 * Wait to have configured number of threads in barrier.
 *
 * Blocks until count threads have entered the barrier.  Exactly one thread
 * per round returns TRUE (the "winner"): the first thread to leave the wait
 * loop clears the active flag, all later threads see it cleared.
 */
static bool barrier_wait(barrier_t *this)
{
	bool winner = FALSE;

	this->mutex->lock(this->mutex);
	if (!this->active)
	{ /* first, reset */
		this->active = TRUE;
		this->current = 0;
	}

	this->current++;
	while (this->current < this->count)
	{
		/* loop guards against spurious wakeups */
		this->cond->wait(this->cond, this->mutex);
	}
	if (this->active)
	{ /* first, win */
		winner = TRUE;
		this->active = FALSE;
	}
	this->mutex->unlock(this->mutex);
	/* wake the remaining waiters; broadcasting without the mutex held
	 * is permitted for condvars */
	this->cond->broadcast(this->cond);
	sched_yield();

	return winner;
}
104
/**
 * Barrier for some tests (shared between main and worker threads)
 */
static barrier_t *barrier;

/**
 * A mutex for tests requiring one
 */
static mutex_t *mutex;

/**
 * A condvar for tests requiring one
 */
static condvar_t *condvar;

/**
 * A counter for signaling (always accessed with the relevant lock held)
 */
static int sigcount;
124
/**
 * Worker: recursively lock the shared mutex three levels deep and check
 * mutual exclusion via the shared `locked` counter.
 */
static void *mutex_run(void *data)
{
	int locked = 0;
	int i;

	/* wait for all threads before getting in action */
	barrier_wait(barrier);

	for (i = 0; i < 100; i++)
	{
		/* the mutex is recursive, so triple-locking must succeed */
		mutex->lock(mutex);
		mutex->lock(mutex);
		mutex->lock(mutex);
		locked++;
		sched_yield();
		if (locked > 1)
		{
			fail("two threads locked the mutex concurrently");
		}
		locked--;
		mutex->unlock(mutex);
		mutex->unlock(mutex);
		mutex->unlock(mutex);
	}
	return NULL;
}
151
152 START_TEST(test_mutex)
153 {
154 thread_t *threads[THREADS];
155 int i;
156
157 barrier = barrier_create(THREADS);
158 mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
159
160 for (i = 0; i < 10; i++)
161 {
162 mutex->lock(mutex);
163 mutex->unlock(mutex);
164 }
165 for (i = 0; i < 10; i++)
166 {
167 mutex->lock(mutex);
168 }
169 for (i = 0; i < 10; i++)
170 {
171 mutex->unlock(mutex);
172 }
173
174 for (i = 0; i < THREADS; i++)
175 {
176 threads[i] = thread_create(mutex_run, NULL);
177 }
178 for (i = 0; i < THREADS; i++)
179 {
180 threads[i]->join(threads[i]);
181 }
182
183 mutex->destroy(mutex);
184 barrier_destroy(barrier);
185 }
186 END_TEST
187
/**
 * Spinlock for testing
 */
static spinlock_t *spinlock;

/**
 * Worker: hammer the spinlock-protected counter and verify mutual
 * exclusion (the counter must never exceed 1 while the lock is held).
 */
static void *spinlock_run(void *data)
{
	int i, *locked = (int*)data;

	/* release all workers at once to maximize contention */
	barrier_wait(barrier);

	for (i = 0; i < 1000; i++)
	{
		spinlock->lock(spinlock);
		(*locked)++;
		ck_assert_int_eq(*locked, 1);
		(*locked)--;
		spinlock->unlock(spinlock);
	}
	return NULL;
}
209
210 START_TEST(test_spinlock)
211 {
212 thread_t *threads[THREADS];
213 int i, locked = 0;
214
215 barrier = barrier_create(THREADS);
216 spinlock = spinlock_create();
217
218 for (i = 0; i < THREADS; i++)
219 {
220 threads[i] = thread_create(spinlock_run, &locked);
221 }
222 for (i = 0; i < THREADS; i++)
223 {
224 threads[i]->join(threads[i]);
225 }
226
227 spinlock->destroy(spinlock);
228 barrier_destroy(barrier);
229 }
230 END_TEST
231
232 static void *condvar_run(void *data)
233 {
234 mutex->lock(mutex);
235 sigcount++;
236 condvar->signal(condvar);
237 mutex->unlock(mutex);
238 return NULL;
239 }
240
241 START_TEST(test_condvar)
242 {
243 thread_t *threads[THREADS];
244 int i;
245
246 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
247 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
248 sigcount = 0;
249
250 for (i = 0; i < THREADS; i++)
251 {
252 threads[i] = thread_create(condvar_run, NULL);
253 }
254
255 mutex->lock(mutex);
256 while (sigcount < THREADS)
257 {
258 condvar->wait(condvar, mutex);
259 }
260 mutex->unlock(mutex);
261
262 for (i = 0; i < THREADS; i++)
263 {
264 threads[i]->join(threads[i]);
265 }
266
267 mutex->destroy(mutex);
268 condvar->destroy(condvar);
269 }
270 END_TEST
271
272 static void *condvar_recursive_run(void *data)
273 {
274 mutex->lock(mutex);
275 mutex->lock(mutex);
276 mutex->lock(mutex);
277 sigcount++;
278 condvar->signal(condvar);
279 mutex->unlock(mutex);
280 mutex->unlock(mutex);
281 mutex->unlock(mutex);
282 return NULL;
283 }
284
/**
 * Waiting on a condvar with a recursive mutex held multiple levels deep:
 * wait() must release all recursions (so workers can make progress) and
 * reacquire them before returning.
 */
START_TEST(test_condvar_recursive)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	/* hold the mutex while spawning, workers block until we wait() */
	mutex->lock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_recursive_run, NULL);
	}

	/* two more recursions; wait() below must release all three */
	mutex->lock(mutex);
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
320
321 static void *condvar_run_broad(void *data)
322 {
323 mutex->lock(mutex);
324 while (sigcount < 0)
325 {
326 condvar->wait(condvar, mutex);
327 }
328 mutex->unlock(mutex);
329 return NULL;
330 }
331
332 START_TEST(test_condvar_broad)
333 {
334 thread_t *threads[THREADS];
335 int i;
336
337 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
338 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
339 sigcount = 0;
340
341 for (i = 0; i < THREADS; i++)
342 {
343 threads[i] = thread_create(condvar_run_broad, NULL);
344 }
345
346 sched_yield();
347
348 mutex->lock(mutex);
349 sigcount = 1;
350 condvar->broadcast(condvar);
351 mutex->unlock(mutex);
352
353 for (i = 0; i < THREADS; i++)
354 {
355 threads[i]->join(threads[i]);
356 }
357
358 mutex->destroy(mutex);
359 condvar->destroy(condvar);
360 }
361 END_TEST
362
363 START_TEST(test_condvar_timed)
364 {
365 thread_t *thread;
366 timeval_t start, end, diff = { .tv_usec = 50000 };
367
368 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
369 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
370 sigcount = 0;
371
372 mutex->lock(mutex);
373 while (TRUE)
374 {
375 time_monotonic(&start);
376 if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
377 {
378 break;
379 }
380 }
381 time_monotonic(&end);
382 mutex->unlock(mutex);
383 timersub(&end, &start, &end);
384 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
385 end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);
386
387 thread = thread_create(condvar_run, NULL);
388
389 mutex->lock(mutex);
390 while (sigcount == 0)
391 {
392 ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
393 }
394 mutex->unlock(mutex);
395
396 thread->join(thread);
397 mutex->destroy(mutex);
398 condvar->destroy(condvar);
399 }
400 END_TEST
401
402 START_TEST(test_condvar_timed_abs)
403 {
404 thread_t *thread;
405 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
406
407 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
408 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
409 sigcount = 0;
410
411 mutex->lock(mutex);
412 while (TRUE)
413 {
414 time_monotonic(&start);
415 timeradd(&start, &diff, &abso);
416 if (condvar->timed_wait_abs(condvar, mutex, abso))
417 {
418 break;
419 }
420 }
421 time_monotonic(&end);
422 mutex->unlock(mutex);
423 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
424 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
425
426 thread = thread_create(condvar_run, NULL);
427
428 time_monotonic(&start);
429 diff.tv_sec = 1;
430 timeradd(&start, &diff, &abso);
431 mutex->lock(mutex);
432 while (sigcount == 0)
433 {
434 ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
435 }
436 mutex->unlock(mutex);
437
438 thread->join(thread);
439 mutex->destroy(mutex);
440 condvar->destroy(condvar);
441 }
442 END_TEST
443
/**
 * Worker: announce readiness, then wait forever on the condvar until
 * cancelled.  Checks that cancellation inside wait() runs the cleanup
 * handler that releases the mutex.
 */
static void *condvar_cancel_run(void *data)
{
	/* keep cancellation disabled until the cleanup handler is in place */
	thread_cancelability(FALSE);

	mutex->lock(mutex);

	/* tell the main thread we are up */
	sigcount++;
	condvar->broadcast(condvar);

	/* if cancelled inside wait(), this unlocks the mutex */
	thread_cleanup_push((void*)mutex->unlock, mutex);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		condvar->wait(condvar, mutex);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}
463
464 START_TEST(test_condvar_cancel)
465 {
466 thread_t *threads[THREADS];
467 int i;
468
469 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
470 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
471 sigcount = 0;
472
473 for (i = 0; i < THREADS; i++)
474 {
475 threads[i] = thread_create(condvar_cancel_run, NULL);
476 }
477
478 /* wait for all threads */
479 mutex->lock(mutex);
480 while (sigcount < THREADS)
481 {
482 condvar->wait(condvar, mutex);
483 }
484 mutex->unlock(mutex);
485
486 for (i = 0; i < THREADS; i++)
487 {
488 threads[i]->cancel(threads[i]);
489 }
490 for (i = 0; i < THREADS; i++)
491 {
492 threads[i]->join(threads[i]);
493 }
494
495 mutex->destroy(mutex);
496 condvar->destroy(condvar);
497 }
498 END_TEST
499
/**
 * RWlock for different tests
 */
static rwlock_t *rwlock;

/**
 * Worker: exercise read, try-write, write and recursive read locking.
 * The shared refcount counts concurrent readers; it must be zero whenever
 * a write lock is held.
 */
static void *rwlock_run(refcount_t *refs)
{
	/* multiple readers may hold the lock concurrently */
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);

	/* non-blocking write attempt; on success we must be exclusive */
	if (rwlock->try_write_lock(rwlock))
	{
		ck_assert_int_eq(*refs, 0);
		sched_yield();
		rwlock->unlock(rwlock);
	}

	/* blocking write lock; again no reader may be active */
	rwlock->write_lock(rwlock);
	ck_assert_int_eq(*refs, 0);
	sched_yield();
	rwlock->unlock(rwlock);

	/* read locks may be taken recursively */
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	return NULL;
}
535
536 START_TEST(test_rwlock)
537 {
538 thread_t *threads[THREADS];
539 refcount_t refs = 0;
540 int i;
541
542 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
543
544 for (i = 0; i < THREADS; i++)
545 {
546 threads[i] = thread_create((void*)rwlock_run, &refs);
547 }
548 for (i = 0; i < THREADS; i++)
549 {
550 threads[i]->join(threads[i]);
551 }
552
553 rwlock->destroy(rwlock);
554 }
555 END_TEST
556
557 /**
558 * Rwlock condvar
559 */
560 static rwlock_condvar_t *rwcond;
561
562 static void *rwlock_condvar_run(void *data)
563 {
564 rwlock->write_lock(rwlock);
565 sigcount++;
566 rwcond->signal(rwcond);
567 rwlock->unlock(rwlock);
568 return NULL;
569 }
570
571 START_TEST(test_rwlock_condvar)
572 {
573 thread_t *threads[THREADS];
574 int i;
575
576 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
577 rwcond = rwlock_condvar_create();
578 sigcount = 0;
579
580 for (i = 0; i < THREADS; i++)
581 {
582 threads[i] = thread_create(rwlock_condvar_run, NULL);
583 }
584
585 rwlock->write_lock(rwlock);
586 while (sigcount < THREADS)
587 {
588 rwcond->wait(rwcond, rwlock);
589 }
590 rwlock->unlock(rwlock);
591
592 for (i = 0; i < THREADS; i++)
593 {
594 threads[i]->join(threads[i]);
595 }
596
597 rwlock->destroy(rwlock);
598 rwcond->destroy(rwcond);
599 }
600 END_TEST
601
602 static void *rwlock_condvar_run_broad(void *data)
603 {
604 rwlock->write_lock(rwlock);
605 while (sigcount < 0)
606 {
607 rwcond->wait(rwcond, rwlock);
608 }
609 rwlock->unlock(rwlock);
610 return NULL;
611 }
612
613 START_TEST(test_rwlock_condvar_broad)
614 {
615 thread_t *threads[THREADS];
616 int i;
617
618 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
619 rwcond = rwlock_condvar_create();
620 sigcount = 0;
621
622 for (i = 0; i < THREADS; i++)
623 {
624 threads[i] = thread_create(rwlock_condvar_run_broad, NULL);
625 }
626
627 sched_yield();
628
629 rwlock->write_lock(rwlock);
630 sigcount = 1;
631 rwcond->broadcast(rwcond);
632 rwlock->unlock(rwlock);
633
634 for (i = 0; i < THREADS; i++)
635 {
636 threads[i]->join(threads[i]);
637 }
638
639 rwlock->destroy(rwlock);
640 rwcond->destroy(rwcond);
641 }
642 END_TEST
643
644 START_TEST(test_rwlock_condvar_timed)
645 {
646 thread_t *thread;
647 timeval_t start, end, diff = { .tv_usec = 50000 };
648
649 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
650 rwcond = rwlock_condvar_create();
651 sigcount = 0;
652
653 rwlock->write_lock(rwlock);
654 while (TRUE)
655 {
656 time_monotonic(&start);
657 if (rwcond->timed_wait(rwcond, rwlock, diff.tv_usec / 1000))
658 {
659 break;
660 }
661 }
662 rwlock->unlock(rwlock);
663 time_monotonic(&end);
664 timersub(&end, &start, &end);
665 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
666 end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);
667
668 thread = thread_create(rwlock_condvar_run, NULL);
669
670 rwlock->write_lock(rwlock);
671 while (sigcount == 0)
672 {
673 ck_assert(!rwcond->timed_wait(rwcond, rwlock, 1000));
674 }
675 rwlock->unlock(rwlock);
676
677 thread->join(thread);
678 rwlock->destroy(rwlock);
679 rwcond->destroy(rwcond);
680 }
681 END_TEST
682
683 START_TEST(test_rwlock_condvar_timed_abs)
684 {
685 thread_t *thread;
686 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
687
688 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
689 rwcond = rwlock_condvar_create();
690 sigcount = 0;
691
692 rwlock->write_lock(rwlock);
693 while (TRUE)
694 {
695 time_monotonic(&start);
696 timeradd(&start, &diff, &abso);
697 if (rwcond->timed_wait_abs(rwcond, rwlock, abso))
698 {
699 break;
700 }
701 }
702 rwlock->unlock(rwlock);
703 time_monotonic(&end);
704 ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
705 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
706
707 thread = thread_create(rwlock_condvar_run, NULL);
708
709 time_monotonic(&start);
710 diff.tv_sec = 1;
711 timeradd(&start, &diff, &abso);
712 rwlock->write_lock(rwlock);
713 while (sigcount == 0)
714 {
715 ck_assert(!rwcond->timed_wait_abs(rwcond, rwlock, abso));
716 }
717 rwlock->unlock(rwlock);
718
719 thread->join(thread);
720 rwlock->destroy(rwlock);
721 rwcond->destroy(rwcond);
722 }
723 END_TEST
724
/**
 * Worker: announce readiness, then wait forever on the rwlock condvar
 * until cancelled.  Checks that cancellation inside wait() runs the
 * cleanup handler that releases the write lock.
 */
static void *rwlock_condvar_cancel_run(void *data)
{
	/* keep cancellation disabled until the cleanup handler is in place */
	thread_cancelability(FALSE);

	rwlock->write_lock(rwlock);

	/* tell the main thread we are up */
	sigcount++;
	rwcond->broadcast(rwcond);

	/* if cancelled inside wait(), this unlocks the rwlock */
	thread_cleanup_push((void*)rwlock->unlock, rwlock);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		rwcond->wait(rwcond, rwlock);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}
744
745 START_TEST(test_rwlock_condvar_cancel)
746 {
747 thread_t *threads[THREADS];
748 int i;
749
750 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
751 rwcond = rwlock_condvar_create();
752 sigcount = 0;
753
754 for (i = 0; i < THREADS; i++)
755 {
756 threads[i] = thread_create(rwlock_condvar_cancel_run, NULL);
757 }
758
759 /* wait for all threads */
760 rwlock->write_lock(rwlock);
761 while (sigcount < THREADS)
762 {
763 rwcond->wait(rwcond, rwlock);
764 }
765 rwlock->unlock(rwlock);
766
767 for (i = 0; i < THREADS; i++)
768 {
769 threads[i]->cancel(threads[i]);
770 }
771 for (i = 0; i < THREADS; i++)
772 {
773 threads[i]->join(threads[i]);
774 }
775
776 rwlock->destroy(rwlock);
777 rwcond->destroy(rwcond);
778 }
779 END_TEST
780
781 /**
782 * Semaphore for different tests
783 */
784 static semaphore_t *semaphore;
785
786 static void *semaphore_run(void *data)
787 {
788 semaphore->post(semaphore);
789 return NULL;
790 }
791
792 START_TEST(test_semaphore)
793 {
794 thread_t *threads[THREADS];
795 int i, initial = 5;
796
797 semaphore = semaphore_create(initial);
798
799 for (i = 0; i < THREADS; i++)
800 {
801 threads[i] = thread_create(semaphore_run, NULL);
802 }
803 for (i = 0; i < THREADS + initial; i++)
804 {
805 semaphore->wait(semaphore);
806 }
807 for (i = 0; i < THREADS; i++)
808 {
809 threads[i]->join(threads[i]);
810 }
811
812 semaphore->destroy(semaphore);
813 }
814 END_TEST
815
816 START_TEST(test_semaphore_timed)
817 {
818 thread_t *thread;
819 timeval_t start, end, diff = { .tv_usec = 50000 };
820
821 semaphore = semaphore_create(0);
822
823 time_monotonic(&start);
824 ck_assert(semaphore->timed_wait(semaphore, diff.tv_usec / 1000));
825 time_monotonic(&end);
826 timersub(&end, &start, &end);
827 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
828 end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);
829
830 thread = thread_create(semaphore_run, NULL);
831
832 ck_assert(!semaphore->timed_wait(semaphore, 1000));
833
834 thread->join(thread);
835 semaphore->destroy(semaphore);
836 }
837 END_TEST
838
839 START_TEST(test_semaphore_timed_abs)
840 {
841 thread_t *thread;
842 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
843
844 semaphore = semaphore_create(0);
845
846 time_monotonic(&start);
847 timeradd(&start, &diff, &abso);
848 ck_assert(semaphore->timed_wait_abs(semaphore, abso));
849 time_monotonic(&end);
850 ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
851 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
852
853 thread = thread_create(semaphore_run, NULL);
854
855 time_monotonic(&start);
856 diff.tv_sec = 1;
857 timeradd(&start, &diff, &abso);
858 ck_assert(!semaphore->timed_wait_abs(semaphore, abso));
859
860 thread->join(thread);
861 semaphore->destroy(semaphore);
862 }
863 END_TEST
864
865 static void *join_run(void *data)
866 {
867 /* force some context switches */
868 sched_yield();
869 return (void*)((uintptr_t)data + THREADS);
870 }
871
872 START_TEST(test_join)
873 {
874 thread_t *threads[THREADS];
875 int i;
876
877 for (i = 0; i < THREADS; i++)
878 {
879 threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
880 }
881 for (i = 0; i < THREADS; i++)
882 {
883 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
884 }
885 }
886 END_TEST
887
888 static void *exit_join_run(void *data)
889 {
890 sched_yield();
891 thread_exit((void*)((uintptr_t)data + THREADS));
892 /* not reached */
893 ck_assert(FALSE);
894 return NULL;
895 }
896
897 START_TEST(test_join_exit)
898 {
899 thread_t *threads[THREADS];
900 int i;
901
902 for (i = 0; i < THREADS; i++)
903 {
904 threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
905 }
906 for (i = 0; i < THREADS; i++)
907 {
908 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
909 }
910 }
911 END_TEST
912
913 static void *detach_run(void *data)
914 {
915 refcount_t *running = (refcount_t*)data;
916
917 ignore_result(ref_put(running));
918 return NULL;
919 }
920
/**
 * Detached threads must clean their state up on their own; verified
 * externally by running the suite under leak detective.
 */
START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	/* NOTE(review): busy-waits on `running` without a lock; presumably
	 * ref_get/ref_put are atomic and provide the needed visibility -
	 * confirm against refcount_t's implementation */
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
944
945 static void *detach_exit_run(void *data)
946 {
947 refcount_t *running = (refcount_t*)data;
948
949 ignore_result(ref_put(running));
950 thread_exit(NULL);
951 /* not reached */
952 ck_assert(FALSE);
953 return NULL;
954 }
955
/**
 * Like test_detach, but the detached threads terminate via thread_exit();
 * state cleanup is again verified externally with leak detective.
 */
START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	/* NOTE(review): unlocked busy-wait; assumes ref_get/ref_put are
	 * atomic - confirm against refcount_t's implementation */
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
979
/**
 * Worker: sleep forever; relies on sleep() being a cancellation point.
 */
static void *cancel_run(void *arg)
{
	/* default cancellability should be TRUE, so don't change it */
	for (;;)
	{
		sleep(10);
	}
	return NULL;
}
989
990 START_TEST(test_cancel)
991 {
992 thread_t *threads[THREADS];
993 int i;
994
995 for (i = 0; i < THREADS; i++)
996 {
997 threads[i] = thread_create(cancel_run, NULL);
998 }
999 for (i = 0; i < THREADS; i++)
1000 {
1001 threads[i]->cancel(threads[i]);
1002 }
1003 for (i = 0; i < THREADS; i++)
1004 {
1005 threads[i]->join(threads[i]);
1006 }
1007 }
1008 END_TEST
1009
/**
 * Worker: disable cancellability for a while, then enable it and block.
 * The flag passed as data mirrors the thread's current cancellability so
 * the main thread knows when it is safe/effective to cancel.
 */
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	thread_cancelability(FALSE);
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* but here */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}
1030
1031 START_TEST(test_cancel_onoff)
1032 {
1033 thread_t *threads[THREADS];
1034 bool cancellable[THREADS];
1035 int i;
1036
1037 for (i = 0; i < THREADS; i++)
1038 {
1039 cancellable[i] = TRUE;
1040 threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
1041 }
1042 for (i = 0; i < THREADS; i++)
1043 {
1044 /* wait until thread has cleared its cancellability */
1045 while (cancellable[i])
1046 {
1047 sched_yield();
1048 }
1049 threads[i]->cancel(threads[i]);
1050 }
1051 for (i = 0; i < THREADS; i++)
1052 {
1053 threads[i]->join(threads[i]);
1054 ck_assert(cancellable[i]);
1055 }
1056 }
1057 END_TEST
1058
1059 static void *cancel_point_run(void *data)
1060 {
1061 thread_cancelability(FALSE);
1062 while (TRUE)
1063 {
1064 /* implicitly enables cancellability */
1065 thread_cancellation_point();
1066 }
1067 return NULL;
1068 }
1069
1070 START_TEST(test_cancel_point)
1071 {
1072 thread_t *threads[THREADS];
1073 int i;
1074
1075 for (i = 0; i < THREADS; i++)
1076 {
1077 threads[i] = thread_create(cancel_point_run, NULL);
1078 }
1079 sched_yield();
1080 for (i = 0; i < THREADS; i++)
1081 {
1082 threads[i]->cancel(threads[i]);
1083 }
1084 for (i = 0; i < THREADS; i++)
1085 {
1086 threads[i]->join(threads[i]);
1087 }
1088 }
1089 END_TEST
1090
/*
 * Cleanup handlers used by the cleanup tests.  Each expects the shared
 * counter at a specific value and increments it, so the tests can verify
 * that handlers run exactly once and in reverse push order.
 */
static void cleanup1(void *arg)
{
	uintptr_t *counter = (uintptr_t*)arg;

	ck_assert_int_eq(*counter, 1);
	(*counter)++;
}

static void cleanup2(void *arg)
{
	uintptr_t *counter = (uintptr_t*)arg;

	ck_assert_int_eq(*counter, 2);
	(*counter)++;
}

static void cleanup3(void *arg)
{
	uintptr_t *counter = (uintptr_t*)arg;

	ck_assert_int_eq(*counter, 3);
	(*counter)++;
}
1114
1115 static void *cleanup_run(void *data)
1116 {
1117 thread_cleanup_push(cleanup3, data);
1118 thread_cleanup_push(cleanup2, data);
1119 thread_cleanup_push(cleanup1, data);
1120 return NULL;
1121 }
1122
1123 START_TEST(test_cleanup)
1124 {
1125 thread_t *threads[THREADS];
1126 uintptr_t values[THREADS];
1127 int i;
1128
1129 for (i = 0; i < THREADS; i++)
1130 {
1131 values[i] = 1;
1132 threads[i] = thread_create(cleanup_run, &values[i]);
1133 }
1134 for (i = 0; i < THREADS; i++)
1135 {
1136 threads[i]->join(threads[i]);
1137 ck_assert_int_eq(values[i], 4);
1138 }
1139 }
1140 END_TEST
1141
1142 static void *cleanup_exit_run(void *data)
1143 {
1144 thread_cleanup_push(cleanup3, data);
1145 thread_cleanup_push(cleanup2, data);
1146 thread_cleanup_push(cleanup1, data);
1147 thread_exit(NULL);
1148 ck_assert(FALSE);
1149 return NULL;
1150 }
1151
1152 START_TEST(test_cleanup_exit)
1153 {
1154 thread_t *threads[THREADS];
1155 uintptr_t values[THREADS];
1156 int i;
1157
1158 for (i = 0; i < THREADS; i++)
1159 {
1160 values[i] = 1;
1161 threads[i] = thread_create(cleanup_exit_run, &values[i]);
1162 }
1163 for (i = 0; i < THREADS; i++)
1164 {
1165 threads[i]->join(threads[i]);
1166 ck_assert_int_eq(values[i], 4);
1167 }
1168 }
1169 END_TEST
1170
1171 static void *cleanup_cancel_run(void *data)
1172 {
1173 thread_cancelability(FALSE);
1174
1175 thread_cleanup_push(cleanup3, data);
1176 thread_cleanup_push(cleanup2, data);
1177 thread_cleanup_push(cleanup1, data);
1178
1179 thread_cancelability(TRUE);
1180
1181 while (TRUE)
1182 {
1183 sleep(1);
1184 }
1185 return NULL;
1186 }
1187
1188 START_TEST(test_cleanup_cancel)
1189 {
1190 thread_t *threads[THREADS];
1191 uintptr_t values[THREADS];
1192 int i;
1193
1194 for (i = 0; i < THREADS; i++)
1195 {
1196 values[i] = 1;
1197 threads[i] = thread_create(cleanup_cancel_run, &values[i]);
1198 }
1199 for (i = 0; i < THREADS; i++)
1200 {
1201 threads[i]->cancel(threads[i]);
1202 }
1203 for (i = 0; i < THREADS; i++)
1204 {
1205 threads[i]->join(threads[i]);
1206 ck_assert_int_eq(values[i], 4);
1207 }
1208 }
1209 END_TEST
1210
/**
 * Worker: verify thread_cleanup_pop() semantics.  A handler pushed and
 * popped with FALSE must not execute and must not disturb the remaining
 * stack; pop with TRUE executes the topmost handler immediately.
 */
static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	/* pushed and popped without execution */
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	/* executes cleanup1 now; cleanup2/3 run at thread termination */
	thread_cleanup_pop(TRUE);
	return NULL;
}
1223
1224 START_TEST(test_cleanup_pop)
1225 {
1226 thread_t *threads[THREADS];
1227 uintptr_t values[THREADS];
1228 int i;
1229
1230 for (i = 0; i < THREADS; i++)
1231 {
1232 values[i] = 1;
1233 threads[i] = thread_create(cleanup_pop_run, &values[i]);
1234 }
1235 for (i = 0; i < THREADS; i++)
1236 {
1237 threads[i]->join(threads[i]);
1238 ck_assert_int_eq(values[i], 4);
1239 }
1240 }
1241 END_TEST
1242
/* thread-local storage slots shared by the TLS tests */
static thread_value_t *tls[10];

/**
 * Worker: store a per-thread value (value * slot index) in each TLS slot
 * and repeatedly verify that other threads' writes do not leak in.
 * Returns value + 1 so the joiner can identify the thread.
 */
static void *tls_run(void *data)
{
	uintptr_t value = (uintptr_t)data;
	int i, j;

	/* slots must start out empty for this thread */
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == NULL);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], (void*)(value * i));
	}
	for (j = 0; j < 1000; j++)
	{
		for (i = 0; i < countof(tls); i++)
		{
			/* rewrite and reread under contention */
			tls[i]->set(tls[i], (void*)(value * i));
			ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
		}
		sched_yield();
	}
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
	}
	return (void*)(value + 1);
}
1273
1274 START_TEST(test_tls)
1275 {
1276 thread_t *threads[THREADS];
1277 int i;
1278
1279 for (i = 0; i < countof(tls); i++)
1280 {
1281 tls[i] = thread_value_create(NULL);
1282 }
1283 for (i = 0; i < THREADS; i++)
1284 {
1285 threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
1286 }
1287
1288 ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
1289 THREADS + 2);
1290
1291 for (i = 0; i < THREADS; i++)
1292 {
1293 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
1294 }
1295 for (i = 0; i < countof(tls); i++)
1296 {
1297 tls[i]->destroy(tls[i]);
1298 }
1299 }
1300 END_TEST
1301
1302 static void tls_cleanup(void *data)
1303 {
1304 uintptr_t *value = (uintptr_t*)data;
1305
1306 (*value)--;
1307 }
1308
1309 static void *tls_cleanup_run(void *data)
1310 {
1311 int i;
1312
1313 for (i = 0; i < countof(tls); i++)
1314 {
1315 tls[i]->set(tls[i], data);
1316 }
1317 return NULL;
1318 }
1319
1320 START_TEST(test_tls_cleanup)
1321 {
1322 thread_t *threads[THREADS];
1323 uintptr_t values[THREADS], main_value = countof(tls);
1324 int i;
1325
1326 for (i = 0; i < countof(tls); i++)
1327 {
1328 tls[i] = thread_value_create(tls_cleanup);
1329 }
1330 for (i = 0; i < THREADS; i++)
1331 {
1332 values[i] = countof(tls);
1333 threads[i] = thread_create(tls_cleanup_run, &values[i]);
1334 }
1335
1336 tls_cleanup_run(&main_value);
1337
1338 for (i = 0; i < THREADS; i++)
1339 {
1340 threads[i]->join(threads[i]);
1341 ck_assert_int_eq(values[i], 0);
1342 }
1343 for (i = 0; i < countof(tls); i++)
1344 {
1345 tls[i]->destroy(tls[i]);
1346 }
1347 ck_assert_int_eq(main_value, 0);
1348 }
1349 END_TEST
1350
/**
 * Assemble the threading test suite: one test case per primitive group
 * (mutex, spinlock, condvar, rwlock, rwlock condvar, semaphore) plus
 * thread lifecycle (join/detach/cancel/cleanup) and TLS cases.
 */
Suite *threading_suite_create()
{
	Suite *s;
	TCase *tc;

	s = suite_create("threading");

	tc = tcase_create("recursive mutex");
	tcase_add_test(tc, test_mutex);
	suite_add_tcase(s, tc);

	tc = tcase_create("spinlock");
	tcase_add_test(tc, test_spinlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("condvar");
	tcase_add_test(tc, test_condvar);
	tcase_add_test(tc, test_condvar_recursive);
	tcase_add_test(tc, test_condvar_broad);
	tcase_add_test(tc, test_condvar_timed);
	tcase_add_test(tc, test_condvar_timed_abs);
	tcase_add_test(tc, test_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock");
	tcase_add_test(tc, test_rwlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock condvar");
	tcase_add_test(tc, test_rwlock_condvar);
	tcase_add_test(tc, test_rwlock_condvar_broad);
	tcase_add_test(tc, test_rwlock_condvar_timed);
	tcase_add_test(tc, test_rwlock_condvar_timed_abs);
	tcase_add_test(tc, test_rwlock_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("semaphore");
	tcase_add_test(tc, test_semaphore);
	tcase_add_test(tc, test_semaphore_timed);
	tcase_add_test(tc, test_semaphore_timed_abs);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread joining");
	tcase_add_test(tc, test_join);
	tcase_add_test(tc, test_join_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread detaching");
	tcase_add_test(tc, test_detach);
	tcase_add_test(tc, test_detach_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation");
	tcase_add_test(tc, test_cancel);
	tcase_add_test(tc, test_cancel_onoff);
	tcase_add_test(tc, test_cancel_point);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cleanup");
	tcase_add_test(tc, test_cleanup);
	tcase_add_test(tc, test_cleanup_exit);
	tcase_add_test(tc, test_cleanup_cancel);
	tcase_add_test(tc, test_cleanup_pop);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread local storage");
	tcase_add_test(tc, test_tls);
	tcase_add_test(tc, test_tls_cleanup);
	suite_add_tcase(s, tc);

	return s;
}