unit-tests: Add a rwlock condvar absolute timed wait test
[strongswan.git] / src / libstrongswan / tests / suites / test_threading.c
1 /*
2 * Copyright (C) 2013 Tobias Brunner
3 * Copyright (C) 2008 Martin Willi
4 * Hochschule fuer Technik Rapperswil
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16
17 #include "test_suite.h"
18
19 #include <sched.h>
20 #include <unistd.h>
21
22 #include <threading/thread.h>
23 #include <threading/mutex.h>
24 #include <threading/condvar.h>
25 #include <threading/rwlock.h>
26 #include <threading/rwlock_condvar.h>
27 #include <threading/thread_value.h>
28
29 /*******************************************************************************
30 * recursive mutex test
31 */
32
33 #define THREADS 20
34
/**
 * Thread barrier data
 */
typedef struct {
	/* protects the barrier state below */
	mutex_t *mutex;
	/* threads block on this until the barrier is full */
	condvar_t *cond;
	/* number of threads the barrier waits for */
	int count;
	/* number of threads that have currently arrived */
	int current;
	/* TRUE while a barrier round is in progress; used to elect a winner */
	bool active;
} barrier_t;
45
/**
 * Create a thread barrier for count threads.
 *
 * current/active are zero-initialized by INIT.
 */
static barrier_t* barrier_create(int count)
{
	barrier_t *this;

	INIT(this,
		.mutex = mutex_create(MUTEX_TYPE_DEFAULT),
		.cond = condvar_create(CONDVAR_TYPE_DEFAULT),
		.count = count,
	);

	return this;
}
61
62 /**
63 * Destroy a thread barrier
64 */
65 static void barrier_destroy(barrier_t *this)
66 {
67 this->mutex->destroy(this->mutex);
68 this->cond->destroy(this->cond);
69 free(this);
70 }
71
/**
 * Wait to have configured number of threads in barrier.
 *
 * @return	TRUE for exactly one of the participating threads per round
 */
static bool barrier_wait(barrier_t *this)
{
	bool winner = FALSE;

	this->mutex->lock(this->mutex);
	if (!this->active)
	{ /* first, reset */
		this->active = TRUE;
		this->current = 0;
	}

	this->current++;
	/* wait() releases the mutex while blocked, so other threads can enter */
	while (this->current < this->count)
	{
		this->cond->wait(this->cond, this->mutex);
	}
	/* threads leave one at a time; the first to re-acquire the mutex after
	 * the barrier filled up clears active and becomes the winner */
	if (this->active)
	{ /* first, win */
		winner = TRUE;
		this->active = FALSE;
	}
	this->mutex->unlock(this->mutex);
	/* wake the threads still blocked in the wait loop above */
	this->cond->broadcast(this->cond);
	sched_yield();

	return winner;
}
102
/**
 * Barrier for some tests
 */
static barrier_t *barrier;

/**
 * A mutex for tests requiring one
 */
static mutex_t *mutex;

/**
 * A condvar for tests requiring one
 */
static condvar_t *condvar;

/**
 * A counter for signaling; only accessed with the test's mutex/rwlock held
 */
static int sigcount;
122
123 static void *mutex_run(void *data)
124 {
125 int locked = 0;
126 int i;
127
128 /* wait for all threads before getting in action */
129 barrier_wait(barrier);
130
131 for (i = 0; i < 100; i++)
132 {
133 mutex->lock(mutex);
134 mutex->lock(mutex);
135 mutex->lock(mutex);
136 locked++;
137 sched_yield();
138 if (locked > 1)
139 {
140 fail("two threads locked the mutex concurrently");
141 }
142 locked--;
143 mutex->unlock(mutex);
144 mutex->unlock(mutex);
145 mutex->unlock(mutex);
146 }
147 return NULL;
148 }
149
/**
 * Check single and recursive locking in the main thread, then let THREADS
 * workers hammer the mutex concurrently
 */
START_TEST(test_mutex)
{
	thread_t *threads[THREADS];
	int i;

	barrier = barrier_create(THREADS);
	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);

	/* simple lock/unlock pairs */
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
		mutex->unlock(mutex);
	}
	/* lock recursively ten times, then release all ten holds */
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->unlock(mutex);
	}

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(mutex_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	barrier_destroy(barrier);
}
END_TEST
185
/**
 * Increment sigcount under the mutex and signal the waiting thread
 */
static void *condvar_run(void *data)
{
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	return NULL;
}
194
/**
 * Wait on a condvar until every worker thread has signaled once
 */
START_TEST(test_condvar)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run, NULL);
	}

	mutex->lock(mutex);
	/* wait() atomically releases the mutex while blocked, letting the
	 * workers in to increment sigcount */
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
225
/**
 * Like condvar_run, but acquire the recursive mutex three times
 */
static void *condvar_recursive_run(void *data)
{
	mutex->lock(mutex);
	mutex->lock(mutex);
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	return NULL;
}
238
/**
 * Check condvar waits on a recursively held mutex: wait() has to release all
 * recursive holds, otherwise the workers could never lock and this would
 * deadlock
 */
START_TEST(test_condvar_recursive)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	/* hold the mutex while creating the threads, they block right away */
	mutex->lock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_recursive_run, NULL);
	}

	/* two more recursive holds, three in total during the wait below */
	mutex->lock(mutex);
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
274
275 static void *condvar_run_broad(void *data)
276 {
277 mutex->lock(mutex);
278 while (sigcount < 0)
279 {
280 condvar->wait(condvar, mutex);
281 }
282 mutex->unlock(mutex);
283 return NULL;
284 }
285
/**
 * Wake all waiting threads with a single broadcast
 */
START_TEST(test_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run_broad, NULL);
	}

	/* give the workers a chance to reach their wait */
	sched_yield();

	mutex->lock(mutex);
	sigcount = 1;
	condvar->broadcast(condvar);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
316
/**
 * Check that a relative timed wait actually times out after the requested
 * interval, and that a signal terminates it early
 */
START_TEST(test_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	mutex->lock(mutex);
	/* nobody signals, so the 50ms wait must time out; restart the clock on
	 * spurious wakeups */
	while (TRUE)
	{
		time_monotonic(&start);
		/* timed_wait() returns TRUE on timeout */
		if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
		{
			break;
		}
	}
	time_monotonic(&end);
	mutex->unlock(mutex);
	/* the elapsed time must exceed the requested timeout */
	timersub(&end, &start, &end);
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(condvar_run, NULL);

	mutex->lock(mutex);
	/* a full second should be ample time for the signal to arrive */
	while (sigcount == 0)
	{
		ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
	}
	mutex->unlock(mutex);

	thread->join(thread);
	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
355
356 START_TEST(test_condvar_timed_abs)
357 {
358 thread_t *thread;
359 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
360
361 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
362 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
363 sigcount = 0;
364
365 mutex->lock(mutex);
366 while (TRUE)
367 {
368 time_monotonic(&start);
369 timeradd(&start, &diff, &abso);
370 if (condvar->timed_wait_abs(condvar, mutex, abso))
371 {
372 break;
373 }
374 }
375 time_monotonic(&end);
376 mutex->unlock(mutex);
377 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
378 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
379
380 thread = thread_create(condvar_run, NULL);
381
382 time_monotonic(&start);
383 diff.tv_sec = 1;
384 timeradd(&start, &diff, &abso);
385 mutex->lock(mutex);
386 while (sigcount == 0)
387 {
388 ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
389 }
390 mutex->unlock(mutex);
391
392 thread->join(thread);
393 mutex->destroy(mutex);
394 condvar->destroy(condvar);
395 }
396 END_TEST
397
/**
 * Announce readiness, then block on the condvar until cancelled
 */
static void *condvar_cancel_run(void *data)
{
	/* stay non-cancellable while setting up */
	thread_cancelability(FALSE);

	mutex->lock(mutex);

	/* tell the main thread we are up */
	sigcount++;
	condvar->broadcast(condvar);

	/* ensure the mutex gets released when the wait below is cancelled */
	thread_cleanup_push((void*)mutex->unlock, mutex);
	thread_cancelability(TRUE);
	/* nobody signals us, we only leave the loop via cancellation, with
	 * wait() acting as cancellation point */
	while (TRUE)
	{
		condvar->wait(condvar, mutex);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}
417
/**
 * Cancel threads blocked in a condvar wait and check they terminate cleanly
 */
START_TEST(test_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
453
/**
 * RWlock shared by the rwlock and rwlock-condvar tests
 */
static rwlock_t *rwlock;
458
/**
 * Exercise read, try-write and write locking; refs counts concurrent readers
 */
static void *rwlock_run(refcount_t *refs)
{
	/* multiple readers may hold the lock concurrently */
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);

	if (rwlock->try_write_lock(rwlock))
	{
		/* with the write lock held there must be no active readers */
		ck_assert_int_eq(*refs, 0);
		sched_yield();
		rwlock->unlock(rwlock);
	}

	rwlock->write_lock(rwlock);
	ck_assert_int_eq(*refs, 0);
	sched_yield();
	rwlock->unlock(rwlock);

	/* read locks may be held recursively */
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	return NULL;
}
489
/**
 * Let THREADS workers contend on the shared rwlock
 */
START_TEST(test_rwlock)
{
	thread_t *threads[THREADS];
	refcount_t refs = 0;
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create((void*)rwlock_run, &refs);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
}
END_TEST
510
/**
 * Rwlock condvar shared by the rwlock-condvar tests
 */
static rwlock_condvar_t *rwcond;
515
/**
 * Increment sigcount under the write lock and signal the waiting thread
 */
static void *rwlock_condvar_run(void *data)
{
	rwlock->write_lock(rwlock);
	sigcount++;
	rwcond->signal(rwcond);
	rwlock->unlock(rwlock);
	return NULL;
}
524
/**
 * Wait on a rwlock condvar until every worker thread has signaled once
 */
START_TEST(test_rwlock_condvar)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run, NULL);
	}

	rwlock->write_lock(rwlock);
	/* wait() releases the write lock while blocked */
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
555
556 static void *rwlock_condvar_run_broad(void *data)
557 {
558 rwlock->write_lock(rwlock);
559 while (sigcount < 0)
560 {
561 rwcond->wait(rwcond, rwlock);
562 }
563 rwlock->unlock(rwlock);
564 return NULL;
565 }
566
/**
 * Wake all threads waiting on the rwlock condvar with a single broadcast
 */
START_TEST(test_rwlock_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run_broad, NULL);
	}

	/* give the workers a chance to reach their wait */
	sched_yield();

	rwlock->write_lock(rwlock);
	sigcount = 1;
	rwcond->broadcast(rwcond);
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
597
/**
 * Check that a relative timed wait on the rwlock condvar times out, and that
 * a signal terminates it early
 */
START_TEST(test_rwlock_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	rwlock->write_lock(rwlock);
	/* nobody signals, so the 50ms wait must time out; restart the clock on
	 * spurious wakeups */
	while (TRUE)
	{
		time_monotonic(&start);
		/* timed_wait() returns TRUE on timeout */
		if (rwcond->timed_wait(rwcond, rwlock, diff.tv_usec / 1000))
		{
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	/* the elapsed time must exceed the requested timeout */
	timersub(&end, &start, &end);
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(rwlock_condvar_run, NULL);

	rwlock->write_lock(rwlock);
	/* a full second should be ample time for the signal to arrive */
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait(rwcond, rwlock, 1000));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
636
/**
 * Check that an absolute timed wait on the rwlock condvar blocks until the
 * given deadline, and that a signal terminates it early
 */
START_TEST(test_rwlock_condvar_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	rwlock->write_lock(rwlock);
	/* nobody signals, so the wait must run into its absolute deadline;
	 * recompute the deadline on spurious wakeups */
	while (TRUE)
	{
		time_monotonic(&start);
		timeradd(&start, &diff, &abso);
		/* timed_wait_abs() returns TRUE on timeout */
		if (rwcond->timed_wait_abs(rwcond, rwlock, abso))
		{
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	/* the absolute end time must lie past the absolute deadline */
	ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
				  end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);

	thread = thread_create(rwlock_condvar_run, NULL);

	/* the signal must end the wait well before this one second deadline */
	time_monotonic(&start);
	diff.tv_sec = 1;
	timeradd(&start, &diff, &abso);
	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait_abs(rwcond, rwlock, abso));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
678
679 static void *join_run(void *data)
680 {
681 /* force some context switches */
682 sched_yield();
683 return (void*)((uintptr_t)data + THREADS);
684 }
685
/**
 * Check that join() returns the value the thread function returned
 */
START_TEST(test_join)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST
701
/**
 * Terminate via thread_exit() instead of returning; the code after it must
 * never execute
 */
static void *exit_join_run(void *data)
{
	sched_yield();
	thread_exit((void*)((uintptr_t)data + THREADS));
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
710
/**
 * Check that join() returns the value passed to thread_exit()
 */
START_TEST(test_join_exit)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST
726
727 static void *detach_run(void *data)
728 {
729 refcount_t *running = (refcount_t*)data;
730
731 ignore_result(ref_put(running));
732 return NULL;
733 }
734
/**
 * Detach threads and busy-wait until all of them reported termination
 */
START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	/* workers drop their reference atomically via ref_put() */
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
758
/**
 * Like detach_run, but terminate via thread_exit(); the code after it must
 * never execute
 */
static void *detach_exit_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
769
/**
 * Detach threads that terminate via thread_exit() and wait for them
 */
START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
793
/**
 * Sleep forever; the thread only terminates through cancellation
 */
static void *cancel_run(void *data)
{
	/* default cancellability should be TRUE, so don't change it */
	for (;;)
	{
		sleep(10);
	}
	return NULL;
}
803
/**
 * Cancel sleeping threads and check they can be joined afterwards
 */
START_TEST(test_cancel)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
823
/**
 * Toggle cancellability: the usleep() must survive cancellation, the final
 * sleep loop must not
 */
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	thread_cancelability(FALSE);
	/* tell the main thread it may issue the cancel now */
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* but here */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}
844
/**
 * Check that cancellation only takes effect once a thread re-enables it
 */
START_TEST(test_cancel_onoff)
{
	thread_t *threads[THREADS];
	bool cancellable[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		cancellable[i] = TRUE;
		threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* wait until thread has cleared its cancellability */
		while (cancellable[i])
		{
			sched_yield();
		}
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* TRUE means the thread got past its non-cancellable section */
		ck_assert(cancellable[i]);
	}
}
END_TEST
872
/**
 * Spin on explicit cancellation points with cancellability disabled otherwise
 */
static void *cancel_point_run(void *data)
{
	thread_cancelability(FALSE);
	while (TRUE)
	{
		/* implicitly enables cancellability */
		thread_cancellation_point();
	}
	return NULL;
}
883
/**
 * Cancel threads spinning on thread_cancellation_point()
 */
START_TEST(test_cancel_point)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_point_run, NULL);
	}
	sched_yield();
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
904
/**
 * First cleanup handler to run: expects the counter at its initial value 1
 */
static void cleanup1(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	ck_assert_int_eq(*counter, 1);
	(*counter)++;
}
912
/**
 * Second cleanup handler to run: expects the counter already bumped to 2
 */
static void cleanup2(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	ck_assert_int_eq(*counter, 2);
	(*counter)++;
}
920
/**
 * Third cleanup handler to run: expects the counter already bumped to 3
 */
static void cleanup3(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	ck_assert_int_eq(*counter, 3);
	(*counter)++;
}
928
/**
 * Push three cleanup handlers and return; they must run in reverse push
 * order (cleanup1, cleanup2, cleanup3) on thread termination
 */
static void *cleanup_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	return NULL;
}
936
/**
 * Check cleanup handlers run in reverse order when a thread returns
 */
START_TEST(test_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* three handlers incremented 1 to 4 */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
955
/**
 * Like cleanup_run, but terminate via thread_exit(); handlers must still run
 */
static void *cleanup_exit_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
965
/**
 * Check cleanup handlers run when a thread terminates via thread_exit()
 */
START_TEST(test_cleanup_exit)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_exit_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
984
/**
 * Push cleanup handlers, then sleep until cancelled; the handlers must run
 * during cancellation
 */
static void *cleanup_cancel_run(void *data)
{
	/* don't get cancelled while pushing the handlers */
	thread_cancelability(FALSE);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	while (TRUE)
	{
		sleep(1);
	}
	return NULL;
}
1001
/**
 * Check cleanup handlers run when a thread is cancelled
 */
START_TEST(test_cleanup_cancel)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_cancel_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
1024
/**
 * Check thread_cleanup_pop(): pop one handler without executing it, execute
 * another explicitly; the remaining two run on thread termination
 */
static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	/* push and discard without running */
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	/* pop and run cleanup1 right here */
	thread_cleanup_pop(TRUE);
	return NULL;
}
1037
/**
 * Check explicit popping of cleanup handlers still yields the full sequence
 */
START_TEST(test_cleanup_pop)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_pop_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* cleanup1 (popped), then cleanup2 and cleanup3 on termination */
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
1056
/**
 * Thread-local storage slots shared by the TLS tests
 */
static thread_value_t *tls[10];
1058
/**
 * Store per-thread values in all TLS slots and verify they are not clobbered
 * by other threads across many context switches
 */
static void *tls_run(void *data)
{
	uintptr_t value = (uintptr_t)data;
	int i, j;

	/* unset slots must read as NULL */
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == NULL);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], (void*)(value * i));
	}
	for (j = 0; j < 1000; j++)
	{
		/* rewrite and reread while yielding to other threads */
		for (i = 0; i < countof(tls); i++)
		{
			tls[i]->set(tls[i], (void*)(value * i));
			ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
		}
		sched_yield();
	}
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
	}
	return (void*)(value + 1);
}
1087
/**
 * Run the TLS check in worker threads and in the main thread concurrently
 */
START_TEST(test_tls)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
	}

	/* the main thread participates with a value distinct from all workers */
	ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
					 THREADS + 2);

	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
}
END_TEST
1115
/**
 * TLS destructor: decrement the per-thread counter once per slot cleaned up
 */
static void tls_cleanup(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	(*counter)--;
}
1122
/**
 * Point all TLS slots at the thread's counter so each slot's cleanup
 * decrements it once on thread termination
 */
static void *tls_cleanup_run(void *data)
{
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], data);
	}
	return NULL;
}
1133
/**
 * Check TLS cleanup handlers run once per slot: counters start at
 * countof(tls) and must reach 0
 */
START_TEST(test_tls_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS], main_value = countof(tls);
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(tls_cleanup);
	}
	for (i = 0; i < THREADS; i++)
	{
		values[i] = countof(tls);
		threads[i] = thread_create(tls_cleanup_run, &values[i]);
	}

	/* the main thread's values get cleaned up on destroy() below */
	tls_cleanup_run(&main_value);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 0);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
	ck_assert_int_eq(main_value, 0);
}
END_TEST
1164
1165 Suite *threading_suite_create()
1166 {
1167 Suite *s;
1168 TCase *tc;
1169
1170 s = suite_create("threading");
1171
1172 tc = tcase_create("recursive mutex");
1173 tcase_add_test(tc, test_mutex);
1174 suite_add_tcase(s, tc);
1175
1176 tc = tcase_create("condvar");
1177 tcase_add_test(tc, test_condvar);
1178 tcase_add_test(tc, test_condvar_recursive);
1179 tcase_add_test(tc, test_condvar_broad);
1180 tcase_add_test(tc, test_condvar_timed);
1181 tcase_add_test(tc, test_condvar_timed_abs);
1182 tcase_add_test(tc, test_condvar_cancel);
1183 suite_add_tcase(s, tc);
1184
1185 tc = tcase_create("rwlock");
1186 tcase_add_test(tc, test_rwlock);
1187 suite_add_tcase(s, tc);
1188
1189 tc = tcase_create("rwlock condvar");
1190 tcase_add_test(tc, test_rwlock_condvar);
1191 tcase_add_test(tc, test_rwlock_condvar_broad);
1192 tcase_add_test(tc, test_rwlock_condvar_timed);
1193 tcase_add_test(tc, test_rwlock_condvar_timed_abs);
1194 suite_add_tcase(s, tc);
1195
1196 tc = tcase_create("thread joining");
1197 tcase_add_test(tc, test_join);
1198 tcase_add_test(tc, test_join_exit);
1199 suite_add_tcase(s, tc);
1200
1201 tc = tcase_create("thread detaching");
1202 tcase_add_test(tc, test_detach);
1203 tcase_add_test(tc, test_detach_exit);
1204 suite_add_tcase(s, tc);
1205
1206 tc = tcase_create("thread cancellation");
1207 tcase_add_test(tc, test_cancel);
1208 tcase_add_test(tc, test_cancel_onoff);
1209 tcase_add_test(tc, test_cancel_point);
1210 suite_add_tcase(s, tc);
1211
1212 tc = tcase_create("thread cleanup");
1213 tcase_add_test(tc, test_cleanup);
1214 tcase_add_test(tc, test_cleanup_exit);
1215 tcase_add_test(tc, test_cleanup_cancel);
1216 tcase_add_test(tc, test_cleanup_pop);
1217 suite_add_tcase(s, tc);
1218
1219 tc = tcase_create("thread local storage");
1220 tcase_add_test(tc, test_tls);
1221 tcase_add_test(tc, test_tls_cleanup);
1222 suite_add_tcase(s, tc);
1223
1224 return s;
1225 }