a8de5c3d639ecbf07e8f42ffdcf5eca34aa45611
[strongswan.git] / src / libstrongswan / tests / suites / test_threading.c
1 /*
2 * Copyright (C) 2013 Tobias Brunner
3 * Copyright (C) 2008 Martin Willi
4 * Hochschule fuer Technik Rapperswil
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
13 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 */
16
17 #include "test_suite.h"
18
19 #include <sched.h>
20 #include <unistd.h>
21
22 #include <threading/thread.h>
23 #include <threading/mutex.h>
24 #include <threading/condvar.h>
25 #include <threading/rwlock.h>
26 #include <threading/rwlock_condvar.h>
27 #include <threading/thread_value.h>
28
29 /*******************************************************************************
30 * recursive mutex test
31 */
32
33 #define THREADS 20
34
35 /**
36 * Thread barrier data
37 */
typedef struct {
	/* protects the counters below */
	mutex_t *mutex;
	/* announces the arrival of additional threads */
	condvar_t *cond;
	/* number of threads the barrier waits for */
	int count;
	/* number of threads that arrived in the current round */
	int current;
	/* TRUE while a round is in progress, cleared by the winner */
	bool active;
} barrier_t;
45
46 /**
47 * Create a thread barrier for count threads
48 */
49 static barrier_t* barrier_create(int count)
50 {
51 barrier_t *this;
52
53 INIT(this,
54 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
55 .cond = condvar_create(CONDVAR_TYPE_DEFAULT),
56 .count = count,
57 );
58
59 return this;
60 }
61
62 /**
63 * Destroy a thread barrier
64 */
65 static void barrier_destroy(barrier_t *this)
66 {
67 this->mutex->destroy(this->mutex);
68 this->cond->destroy(this->cond);
69 free(this);
70 }
71
72 /**
73 * Wait to have configured number of threads in barrier
74 */
static bool barrier_wait(barrier_t *this)
{
	bool winner = FALSE;

	this->mutex->lock(this->mutex);
	if (!this->active)
	{	/* first, reset */
		this->active = TRUE;
		this->current = 0;
	}

	this->current++;
	/* block until the configured number of threads has arrived */
	while (this->current < this->count)
	{
		this->cond->wait(this->cond, this->mutex);
	}
	if (this->active)
	{	/* first, win */
		winner = TRUE;
		this->active = FALSE;
	}
	this->mutex->unlock(this->mutex);
	/* wake the threads still blocked in the wait loop above */
	this->cond->broadcast(this->cond);
	sched_yield();

	return winner;
}
102
/**
 * Barrier shared by the tests below
 */
static barrier_t *barrier;

/**
 * A mutex for tests requiring one, created per test case
 */
static mutex_t *mutex;

/**
 * A condvar for tests requiring one, created per test case
 */
static condvar_t *condvar;

/**
 * A counter for signaling, accessed under the respective test's lock
 */
static int sigcount;
122
static void *mutex_run(void *data)
{
	int locked = 0;
	int i;

	/* wait for all threads before getting in action */
	barrier_wait(barrier);

	for (i = 0; i < 100; i++)
	{
		/* recursive locking: test_mutex() creates the mutex with
		 * MUTEX_TYPE_RECURSIVE */
		mutex->lock(mutex);
		mutex->lock(mutex);
		mutex->lock(mutex);
		/* while we hold the lock, no other thread may observe
		 * locked != 0, even across a forced context switch */
		locked++;
		sched_yield();
		if (locked > 1)
		{
			fail("two threads locked the mutex concurrently");
		}
		locked--;
		mutex->unlock(mutex);
		mutex->unlock(mutex);
		mutex->unlock(mutex);
	}
	return NULL;
}
149
150 START_TEST(test_mutex)
151 {
152 thread_t *threads[THREADS];
153 int i;
154
155 barrier = barrier_create(THREADS);
156 mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
157
158 for (i = 0; i < 10; i++)
159 {
160 mutex->lock(mutex);
161 mutex->unlock(mutex);
162 }
163 for (i = 0; i < 10; i++)
164 {
165 mutex->lock(mutex);
166 }
167 for (i = 0; i < 10; i++)
168 {
169 mutex->unlock(mutex);
170 }
171
172 for (i = 0; i < THREADS; i++)
173 {
174 threads[i] = thread_create(mutex_run, NULL);
175 }
176 for (i = 0; i < THREADS; i++)
177 {
178 threads[i]->join(threads[i]);
179 }
180
181 mutex->destroy(mutex);
182 barrier_destroy(barrier);
183 }
184 END_TEST
185
static void *condvar_run(void *data)
{
	mutex->lock(mutex);
	sigcount++;
	/* signal while holding the mutex so the waiter can't miss it */
	condvar->signal(condvar);
	mutex->unlock(mutex);
	return NULL;
}
194
195 START_TEST(test_condvar)
196 {
197 thread_t *threads[THREADS];
198 int i;
199
200 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
201 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
202 sigcount = 0;
203
204 for (i = 0; i < THREADS; i++)
205 {
206 threads[i] = thread_create(condvar_run, NULL);
207 }
208
209 mutex->lock(mutex);
210 while (sigcount < THREADS)
211 {
212 condvar->wait(condvar, mutex);
213 }
214 mutex->unlock(mutex);
215
216 for (i = 0; i < THREADS; i++)
217 {
218 threads[i]->join(threads[i]);
219 }
220
221 mutex->destroy(mutex);
222 condvar->destroy(condvar);
223 }
224 END_TEST
225
static void *condvar_recursive_run(void *data)
{
	/* acquire the recursive mutex several times before signaling */
	mutex->lock(mutex);
	mutex->lock(mutex);
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	return NULL;
}
238
START_TEST(test_condvar_recursive)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	/* lock once before creating the threads... */
	mutex->lock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_recursive_run, NULL);
	}

	/* ...and twice more, so we hold the mutex three times recursively
	 * while waiting */
	mutex->lock(mutex);
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		/* this only works if wait() fully releases the recursively
		 * held mutex, which is what this test verifies */
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
274
275 static void *condvar_run_broad(void *data)
276 {
277 mutex->lock(mutex);
278 while (sigcount < 0)
279 {
280 condvar->wait(condvar, mutex);
281 }
282 mutex->unlock(mutex);
283 return NULL;
284 }
285
286 START_TEST(test_condvar_broad)
287 {
288 thread_t *threads[THREADS];
289 int i;
290
291 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
292 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
293 sigcount = 0;
294
295 for (i = 0; i < THREADS; i++)
296 {
297 threads[i] = thread_create(condvar_run_broad, NULL);
298 }
299
300 sched_yield();
301
302 mutex->lock(mutex);
303 sigcount = 1;
304 condvar->broadcast(condvar);
305 mutex->unlock(mutex);
306
307 for (i = 0; i < THREADS; i++)
308 {
309 threads[i]->join(threads[i]);
310 }
311
312 mutex->destroy(mutex);
313 condvar->destroy(condvar);
314 }
315 END_TEST
316
317 START_TEST(test_condvar_timed)
318 {
319 thread_t *thread;
320 timeval_t start, end, diff = { .tv_usec = 50000 };
321
322 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
323 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
324 sigcount = 0;
325
326 mutex->lock(mutex);
327 while (TRUE)
328 {
329 time_monotonic(&start);
330 if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
331 {
332 break;
333 }
334 }
335 time_monotonic(&end);
336 mutex->unlock(mutex);
337 timersub(&end, &start, &end);
338 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
339 end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);
340
341 thread = thread_create(condvar_run, NULL);
342
343 mutex->lock(mutex);
344 while (sigcount == 0)
345 {
346 ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
347 }
348 mutex->unlock(mutex);
349
350 thread->join(thread);
351 mutex->destroy(mutex);
352 condvar->destroy(condvar);
353 }
354 END_TEST
355
356 START_TEST(test_condvar_timed_abs)
357 {
358 thread_t *thread;
359 timeval_t start, end, abso, diff = { .tv_usec = 50000 };
360
361 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
362 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
363 sigcount = 0;
364
365 mutex->lock(mutex);
366 while (TRUE)
367 {
368 time_monotonic(&start);
369 timeradd(&start, &diff, &abso);
370 if (condvar->timed_wait_abs(condvar, mutex, abso))
371 {
372 break;
373 }
374 }
375 time_monotonic(&end);
376 mutex->unlock(mutex);
377 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
378 end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);
379
380 thread = thread_create(condvar_run, NULL);
381
382 time_monotonic(&start);
383 diff.tv_sec = 1;
384 timeradd(&start, &diff, &abso);
385 mutex->lock(mutex);
386 while (sigcount == 0)
387 {
388 ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
389 }
390 mutex->unlock(mutex);
391
392 thread->join(thread);
393 mutex->destroy(mutex);
394 condvar->destroy(condvar);
395 }
396 END_TEST
397
static void *condvar_cancel_run(void *data)
{
	/* don't get canceled before we signaled our arrival */
	thread_cancelability(FALSE);

	mutex->lock(mutex);

	sigcount++;
	condvar->broadcast(condvar);

	/* make sure the mutex gets released when we get canceled while
	 * blocked in the condvar wait below */
	thread_cleanup_push((void*)mutex->unlock, mutex);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		condvar->wait(condvar, mutex);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}
417
418 START_TEST(test_condvar_cancel)
419 {
420 thread_t *threads[THREADS];
421 int i;
422
423 mutex = mutex_create(MUTEX_TYPE_DEFAULT);
424 condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
425 sigcount = 0;
426
427 for (i = 0; i < THREADS; i++)
428 {
429 threads[i] = thread_create(condvar_cancel_run, NULL);
430 }
431
432 /* wait for all threads */
433 mutex->lock(mutex);
434 while (sigcount < THREADS)
435 {
436 condvar->wait(condvar, mutex);
437 }
438 mutex->unlock(mutex);
439
440 for (i = 0; i < THREADS; i++)
441 {
442 threads[i]->cancel(threads[i]);
443 }
444 for (i = 0; i < THREADS; i++)
445 {
446 threads[i]->join(threads[i]);
447 }
448
449 mutex->destroy(mutex);
450 condvar->destroy(condvar);
451 }
452 END_TEST
453
/**
 * RWlock shared by the rwlock tests below, created per test case
 */
static rwlock_t *rwlock;
458
static void *rwlock_run(refcount_t *refs)
{
	/* hold a read lock concurrently with other readers */
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);

	/* a write lock may only be held while no reader is active */
	if (rwlock->try_write_lock(rwlock))
	{
		ck_assert_int_eq(*refs, 0);
		sched_yield();
		rwlock->unlock(rwlock);
	}

	/* same exclusivity for a blocking write lock */
	rwlock->write_lock(rwlock);
	ck_assert_int_eq(*refs, 0);
	sched_yield();
	rwlock->unlock(rwlock);

	/* read locks are acquired recursively here */
	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	return NULL;
}
489
490 START_TEST(test_rwlock)
491 {
492 thread_t *threads[THREADS];
493 refcount_t refs = 0;
494 int i;
495
496 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
497
498 for (i = 0; i < THREADS; i++)
499 {
500 threads[i] = thread_create((void*)rwlock_run, &refs);
501 }
502 for (i = 0; i < THREADS; i++)
503 {
504 threads[i]->join(threads[i]);
505 }
506
507 rwlock->destroy(rwlock);
508 }
509 END_TEST
510
/**
 * Condvar used in conjunction with the rwlock above
 */
static rwlock_condvar_t *rwcond;
515
static void *rwlock_condvar_run(void *data)
{
	rwlock->write_lock(rwlock);
	sigcount++;
	/* signal while holding the write lock so the waiter can't miss it */
	rwcond->signal(rwcond);
	rwlock->unlock(rwlock);
	return NULL;
}
524
525 START_TEST(test_rwlock_condvar)
526 {
527 thread_t *threads[THREADS];
528 int i;
529
530 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
531 rwcond = rwlock_condvar_create();
532 sigcount = 0;
533
534 for (i = 0; i < THREADS; i++)
535 {
536 threads[i] = thread_create(rwlock_condvar_run, NULL);
537 }
538
539 rwlock->write_lock(rwlock);
540 while (sigcount < THREADS)
541 {
542 rwcond->wait(rwcond, rwlock);
543 }
544 rwlock->unlock(rwlock);
545
546 for (i = 0; i < THREADS; i++)
547 {
548 threads[i]->join(threads[i]);
549 }
550
551 rwlock->destroy(rwlock);
552 rwcond->destroy(rwcond);
553 }
554 END_TEST
555
556 static void *rwlock_condvar_run_broad(void *data)
557 {
558 rwlock->write_lock(rwlock);
559 while (sigcount < 0)
560 {
561 rwcond->wait(rwcond, rwlock);
562 }
563 rwlock->unlock(rwlock);
564 return NULL;
565 }
566
567 START_TEST(test_rwlock_condvar_broad)
568 {
569 thread_t *threads[THREADS];
570 int i;
571
572 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
573 rwcond = rwlock_condvar_create();
574 sigcount = 0;
575
576 for (i = 0; i < THREADS; i++)
577 {
578 threads[i] = thread_create(rwlock_condvar_run_broad, NULL);
579 }
580
581 sched_yield();
582
583 rwlock->write_lock(rwlock);
584 sigcount = 1;
585 rwcond->broadcast(rwcond);
586 rwlock->unlock(rwlock);
587
588 for (i = 0; i < THREADS; i++)
589 {
590 threads[i]->join(threads[i]);
591 }
592
593 rwlock->destroy(rwlock);
594 rwcond->destroy(rwcond);
595 }
596 END_TEST
597
598 START_TEST(test_rwlock_condvar_timed)
599 {
600 thread_t *thread;
601 timeval_t start, end, diff = { .tv_usec = 50000 };
602
603 rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
604 rwcond = rwlock_condvar_create();
605 sigcount = 0;
606
607 rwlock->write_lock(rwlock);
608 while (TRUE)
609 {
610 time_monotonic(&start);
611 if (rwcond->timed_wait(rwcond, rwlock, diff.tv_usec / 1000))
612 {
613 break;
614 }
615 }
616 rwlock->unlock(rwlock);
617 time_monotonic(&end);
618 timersub(&end, &start, &end);
619 ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
620 end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);
621
622 thread = thread_create(rwlock_condvar_run, NULL);
623
624 rwlock->write_lock(rwlock);
625 while (sigcount == 0)
626 {
627 ck_assert(!rwcond->timed_wait(rwcond, rwlock, 1000));
628 }
629 rwlock->unlock(rwlock);
630
631 thread->join(thread);
632 rwlock->destroy(rwlock);
633 rwcond->destroy(rwcond);
634 }
635 END_TEST
636
637 static void *join_run(void *data)
638 {
639 /* force some context switches */
640 sched_yield();
641 return (void*)((uintptr_t)data + THREADS);
642 }
643
644 START_TEST(test_join)
645 {
646 thread_t *threads[THREADS];
647 int i;
648
649 for (i = 0; i < THREADS; i++)
650 {
651 threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
652 }
653 for (i = 0; i < THREADS; i++)
654 {
655 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
656 }
657 }
658 END_TEST
659
660 static void *exit_join_run(void *data)
661 {
662 sched_yield();
663 thread_exit((void*)((uintptr_t)data + THREADS));
664 /* not reached */
665 ck_assert(FALSE);
666 return NULL;
667 }
668
669 START_TEST(test_join_exit)
670 {
671 thread_t *threads[THREADS];
672 int i;
673
674 for (i = 0; i < THREADS; i++)
675 {
676 threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
677 }
678 for (i = 0; i < THREADS; i++)
679 {
680 ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
681 }
682 }
683 END_TEST
684
685 static void *detach_run(void *data)
686 {
687 refcount_t *running = (refcount_t*)data;
688
689 ignore_result(ref_put(running));
690 return NULL;
691 }
692
START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	/* busy-wait until every detached thread dropped its reference */
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
716
717 static void *detach_exit_run(void *data)
718 {
719 refcount_t *running = (refcount_t*)data;
720
721 ignore_result(ref_put(running));
722 thread_exit(NULL);
723 /* not reached */
724 ck_assert(FALSE);
725 return NULL;
726 }
727
START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	/* busy-wait until every detached thread dropped its reference */
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
751
static void *cancel_run(void *data)
{
	/* cancellability is enabled by default, so leave it untouched and
	 * block in a cancellation point until we get canceled */
	for (;;)
	{
		sleep(10);
	}
	return NULL;
}
761
762 START_TEST(test_cancel)
763 {
764 thread_t *threads[THREADS];
765 int i;
766
767 for (i = 0; i < THREADS; i++)
768 {
769 threads[i] = thread_create(cancel_run, NULL);
770 }
771 for (i = 0; i < THREADS; i++)
772 {
773 threads[i]->cancel(threads[i]);
774 }
775 for (i = 0; i < THREADS; i++)
776 {
777 threads[i]->join(threads[i]);
778 }
779 }
780 END_TEST
781
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	/* disable cancellation and tell the main thread about it */
	thread_cancelability(FALSE);
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	/* set the flag before re-enabling, so the main thread's final
	 * check sees TRUE only if we got past the sleep */
	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* but here */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}
802
START_TEST(test_cancel_onoff)
{
	thread_t *threads[THREADS];
	bool cancellable[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		cancellable[i] = TRUE;
		threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* wait until thread has cleared its cancellability */
		while (cancellable[i])
		{
			sched_yield();
		}
		/* the cancel may only take effect once the thread re-enables
		 * cancellability after its usleep() */
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		/* the thread sets this right before re-enabling cancellability,
		 * proving it survived the non-cancellable section */
		ck_assert(cancellable[i]);
	}
}
END_TEST
830
static void *cancel_point_run(void *data)
{
	/* disable cancellation, then spin on an explicit cancellation point */
	thread_cancelability(FALSE);
	while (TRUE)
	{
		/* implicitly enables cancellability */
		thread_cancellation_point();
	}
	return NULL;
}
841
842 START_TEST(test_cancel_point)
843 {
844 thread_t *threads[THREADS];
845 int i;
846
847 for (i = 0; i < THREADS; i++)
848 {
849 threads[i] = thread_create(cancel_point_run, NULL);
850 }
851 sched_yield();
852 for (i = 0; i < THREADS; i++)
853 {
854 threads[i]->cancel(threads[i]);
855 }
856 for (i = 0; i < THREADS; i++)
857 {
858 threads[i]->join(threads[i]);
859 }
860 }
861 END_TEST
862
static void cleanup1(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	/* must run first: counter still has its initial value */
	ck_assert_int_eq(*counter, 1);
	(*counter)++;
}
870
static void cleanup2(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	/* must run after cleanup1 */
	ck_assert_int_eq(*counter, 2);
	(*counter)++;
}
878
static void cleanup3(void *data)
{
	uintptr_t *counter = (uintptr_t*)data;

	/* must run last */
	ck_assert_int_eq(*counter, 3);
	(*counter)++;
}
886
static void *cleanup_run(void *data)
{
	/* push in reverse order; test_cleanup() asserts the counter reaches
	 * 4, i.e. the handlers ran as cleanup1, cleanup2, cleanup3 after
	 * the thread returned */
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	return NULL;
}
894
895 START_TEST(test_cleanup)
896 {
897 thread_t *threads[THREADS];
898 uintptr_t values[THREADS];
899 int i;
900
901 for (i = 0; i < THREADS; i++)
902 {
903 values[i] = 1;
904 threads[i] = thread_create(cleanup_run, &values[i]);
905 }
906 for (i = 0; i < THREADS; i++)
907 {
908 threads[i]->join(threads[i]);
909 ck_assert_int_eq(values[i], 4);
910 }
911 }
912 END_TEST
913
static void *cleanup_exit_run(void *data)
{
	/* handlers must also run when terminating via thread_exit() */
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}
923
924 START_TEST(test_cleanup_exit)
925 {
926 thread_t *threads[THREADS];
927 uintptr_t values[THREADS];
928 int i;
929
930 for (i = 0; i < THREADS; i++)
931 {
932 values[i] = 1;
933 threads[i] = thread_create(cleanup_exit_run, &values[i]);
934 }
935 for (i = 0; i < THREADS; i++)
936 {
937 threads[i]->join(threads[i]);
938 ck_assert_int_eq(values[i], 4);
939 }
940 }
941 END_TEST
942
static void *cleanup_cancel_run(void *data)
{
	/* don't get canceled while installing the handlers */
	thread_cancelability(FALSE);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	/* block in a cancellation point until we get canceled */
	while (TRUE)
	{
		sleep(1);
	}
	return NULL;
}
959
960 START_TEST(test_cleanup_cancel)
961 {
962 thread_t *threads[THREADS];
963 uintptr_t values[THREADS];
964 int i;
965
966 for (i = 0; i < THREADS; i++)
967 {
968 values[i] = 1;
969 threads[i] = thread_create(cleanup_cancel_run, &values[i]);
970 }
971 for (i = 0; i < THREADS; i++)
972 {
973 threads[i]->cancel(threads[i]);
974 }
975 for (i = 0; i < THREADS; i++)
976 {
977 threads[i]->join(threads[i]);
978 ck_assert_int_eq(values[i], 4);
979 }
980 }
981 END_TEST
982
static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	/* pushed and popped without executing: must have no effect */
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	/* pop and execute cleanup1 immediately; cleanup2/cleanup3 run on
	 * thread termination */
	thread_cleanup_pop(TRUE);
	return NULL;
}
995
996 START_TEST(test_cleanup_pop)
997 {
998 thread_t *threads[THREADS];
999 uintptr_t values[THREADS];
1000 int i;
1001
1002 for (i = 0; i < THREADS; i++)
1003 {
1004 values[i] = 1;
1005 threads[i] = thread_create(cleanup_pop_run, &values[i]);
1006 }
1007 for (i = 0; i < THREADS; i++)
1008 {
1009 threads[i]->join(threads[i]);
1010 ck_assert_int_eq(values[i], 4);
1011 }
1012 }
1013 END_TEST
1014
/**
 * Thread-specific value slots shared by the TLS tests below
 */
static thread_value_t *tls[10];
1016
static void *tls_run(void *data)
{
	uintptr_t value = (uintptr_t)data;
	int i, j;

	/* initially, all thread-specific values are NULL */
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == NULL);
	}
	/* store a value unique to this thread in each slot */
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], (void*)(value * i));
	}
	/* repeatedly set and read back while forcing context switches, to
	 * verify the values are not shared between threads */
	for (j = 0; j < 1000; j++)
	{
		for (i = 0; i < countof(tls); i++)
		{
			tls[i]->set(tls[i], (void*)(value * i));
			ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
		}
		sched_yield();
	}
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
	}
	return (void*)(value + 1);
}
1045
START_TEST(test_tls)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
	}

	/* exercise the thread-specific values in the main thread, too */
	ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
					 THREADS + 2);

	/* each thread returns its argument + 1 */
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
}
END_TEST
1073
static void tls_cleanup(void *data)
{
	/* one tick down per thread-specific value cleaned up */
	(*(uintptr_t*)data)--;
}
1080
1081 static void *tls_cleanup_run(void *data)
1082 {
1083 int i;
1084
1085 for (i = 0; i < countof(tls); i++)
1086 {
1087 tls[i]->set(tls[i], data);
1088 }
1089 return NULL;
1090 }
1091
START_TEST(test_tls_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS], main_value = countof(tls);
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(tls_cleanup);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* one tick per slot, counted down by tls_cleanup() */
		values[i] = countof(tls);
		threads[i] = thread_create(tls_cleanup_run, &values[i]);
	}

	/* register the main thread's values as well */
	tls_cleanup_run(&main_value);

	/* cleanup must run for each value on thread termination... */
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 0);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
	/* ...and on destroy() for the remaining (main) thread's values */
	ck_assert_int_eq(main_value, 0);
}
END_TEST
1122
/**
 * Create the test suite covering the threading primitives
 */
Suite *threading_suite_create()
{
	Suite *s;
	TCase *tc;

	s = suite_create("threading");

	tc = tcase_create("recursive mutex");
	tcase_add_test(tc, test_mutex);
	suite_add_tcase(s, tc);

	tc = tcase_create("condvar");
	tcase_add_test(tc, test_condvar);
	tcase_add_test(tc, test_condvar_recursive);
	tcase_add_test(tc, test_condvar_broad);
	tcase_add_test(tc, test_condvar_timed);
	tcase_add_test(tc, test_condvar_timed_abs);
	tcase_add_test(tc, test_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock");
	tcase_add_test(tc, test_rwlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock condvar");
	tcase_add_test(tc, test_rwlock_condvar);
	tcase_add_test(tc, test_rwlock_condvar_broad);
	tcase_add_test(tc, test_rwlock_condvar_timed);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread joining");
	tcase_add_test(tc, test_join);
	tcase_add_test(tc, test_join_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread detaching");
	tcase_add_test(tc, test_detach);
	tcase_add_test(tc, test_detach_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation");
	tcase_add_test(tc, test_cancel);
	tcase_add_test(tc, test_cancel_onoff);
	tcase_add_test(tc, test_cancel_point);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cleanup");
	tcase_add_test(tc, test_cleanup);
	tcase_add_test(tc, test_cleanup_exit);
	tcase_add_test(tc, test_cleanup_cancel);
	tcase_add_test(tc, test_cleanup_pop);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread local storage");
	tcase_add_test(tc, test_tls);
	tcase_add_test(tc, test_tls_cleanup);
	suite_add_tcase(s, tc);

	return s;
}