/*
 * Copyright (C) 2013 Tobias Brunner
 * Copyright (C) 2008 Martin Willi
 * Hochschule fuer Technik Rapperswil
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.  See <http://www.fsf.org/copyleft/gpl.txt>.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */
#include "test_suite.h"

#include <sched.h>
#include <unistd.h>

#include <threading/thread.h>
#include <threading/mutex.h>
#include <threading/condvar.h>
#include <threading/rwlock.h>
#include <threading/rwlock_condvar.h>
#include <threading/spinlock.h>
#include <threading/semaphore.h>
#include <threading/thread_value.h>
/*******************************************************************************
 * recursive mutex test
 */

#define THREADS 20

/**
 * Thread barrier data
 */
typedef struct {
	mutex_t *mutex;
	condvar_t *cond;
	int count;
	int current;
	bool active;
} barrier_t;

/**
 * Create a thread barrier for count threads
 */
static barrier_t *barrier_create(int count)
{
	barrier_t *this;

	INIT(this,
		.mutex = mutex_create(MUTEX_TYPE_DEFAULT),
		.cond = condvar_create(CONDVAR_TYPE_DEFAULT),
		.count = count,
	);

	return this;
}

/**
 * Destroy a thread barrier
 */
static void barrier_destroy(barrier_t *this)
{
	this->mutex->destroy(this->mutex);
	this->cond->destroy(this->cond);
	free(this);
}

/**
 * Wait to have configured number of threads in barrier
 */
static bool barrier_wait(barrier_t *this)
{
	bool winner = FALSE;

	this->mutex->lock(this->mutex);
	if (!this->active)
	{	/* first arrival resets the barrier */
		this->active = TRUE;
		this->current = 0;
	}
	this->current++;

	while (this->current < this->count)
	{
		this->cond->wait(this->cond, this->mutex);
	}
	if (this->active)
	{	/* first thread to leave wins */
		this->active = FALSE;
		winner = TRUE;
	}
	this->mutex->unlock(this->mutex);
	this->cond->broadcast(this->cond);

	return winner;
}
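
/* A minimal usage sketch, not part of the test suite itself: worker threads
 * call barrier_wait() first so that they all start exercising the primitive
 * under test at roughly the same time, which makes races more likely to
 * surface.  The worker() name below is illustrative only.
 *
 *   static void *worker(void *data)
 *   {
 *       barrier_wait(barrier);   // blocks until all THREADS have arrived
 *       // ... hammer the primitive under contention ...
 *       return NULL;
 *   }
 */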
/**
 * Barrier for some tests
 */
static barrier_t *barrier;

/**
 * A mutex for tests requiring one
 */
static mutex_t *mutex;

/**
 * A condvar for tests requiring one
 */
static condvar_t *condvar;

/**
 * A counter for signaling
 */
static int sigcount;

static void *mutex_run(void *data)
{
	/* shared by all workers, only modified with the mutex held */
	static int locked = 0;
	int i;

	/* wait for all threads before getting in action */
	barrier_wait(barrier);

	for (i = 0; i < 100; i++)
	{
		mutex->lock(mutex);
		mutex->lock(mutex);
		mutex->lock(mutex);
		locked++;
		sched_yield();
		if (locked > 1)
		{
			fail("two threads locked the mutex concurrently");
		}
		locked--;
		mutex->unlock(mutex);
		mutex->unlock(mutex);
		mutex->unlock(mutex);
	}
	return NULL;
}
START_TEST(test_mutex)
{
	thread_t *threads[THREADS];
	int i;

	barrier = barrier_create(THREADS);
	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);

	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
		mutex->unlock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->unlock(mutex);
	}

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(mutex_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	barrier_destroy(barrier);
}
END_TEST
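
/* Note on MUTEX_TYPE_RECURSIVE, as exercised above: the same thread may
 * acquire the mutex several times, but it only becomes available to other
 * threads once unlock() has been called as often as lock().  Illustrative
 * sketch only:
 *
 *   mutex->lock(mutex);
 *   mutex->lock(mutex);     // re-entrant, does not deadlock
 *   mutex->unlock(mutex);   // still held by this thread
 *   mutex->unlock(mutex);   // now released
 */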
/**
 * Spinlock for testing
 */
static spinlock_t *spinlock;

static void *spinlock_run(void *data)
{
	int i, *locked = (int*)data;

	barrier_wait(barrier);

	for (i = 0; i < 1000; i++)
	{
		spinlock->lock(spinlock);
		(*locked)++;
		ck_assert_int_eq(*locked, 1);
		(*locked)--;
		spinlock->unlock(spinlock);
	}
	return NULL;
}

START_TEST(test_spinlock)
{
	thread_t *threads[THREADS];
	int i, locked = 0;

	barrier = barrier_create(THREADS);
	spinlock = spinlock_create();

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(spinlock_run, &locked);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	spinlock->destroy(spinlock);
	barrier_destroy(barrier);
}
END_TEST
static void *condvar_run(void *data)
{
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	return NULL;
}

START_TEST(test_condvar)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run, NULL);
	}

	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
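
/* Typical condvar pattern used throughout this file: the predicate (here
 * sigcount) is re-checked in a loop while holding the mutex, because wait()
 * may return spuriously and a signal can arrive before the waiter actually
 * blocks.
 */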
static void *condvar_recursive_run(void *data)
{
	mutex->lock(mutex);
	mutex->lock(mutex);
	mutex->lock(mutex);
	sigcount++;
	condvar->signal(condvar);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	return NULL;
}

START_TEST(test_condvar_recursive)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	mutex->lock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_recursive_run, NULL);
	}

	mutex->lock(mutex);
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	mutex->unlock(mutex);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
static void *condvar_run_broad(void *data)
{
	mutex->lock(mutex);
	while (sigcount == 0)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);
	return NULL;
}

START_TEST(test_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_run_broad, NULL);
	}

	sched_yield();

	mutex->lock(mutex);
	sigcount = 1;
	condvar->broadcast(condvar);
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
START_TEST(test_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	mutex->lock(mutex);
	while (TRUE)
	{
		time_monotonic(&start);
		if (condvar->timed_wait(condvar, mutex, diff.tv_usec / 1000))
		{	/* timed out */
			break;
		}
	}
	time_monotonic(&end);
	mutex->unlock(mutex);
	timersub(&end, &start, &end);
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(condvar_run, NULL);

	mutex->lock(mutex);
	while (sigcount == 0)
	{
		ck_assert(!condvar->timed_wait(condvar, mutex, 1000));
	}
	mutex->unlock(mutex);

	thread->join(thread);
	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
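
/* Relative vs. absolute timed waits, as exercised above and below: with
 * timed_wait() the timeout is a duration in milliseconds that restarts with
 * every call, while timed_wait_abs() takes an absolute monotonic timestamp,
 * so a retry loop does not drift.  Illustrative sketch only:
 *
 *   timeval_t now, deadline = { .tv_usec = 50000 };
 *   time_monotonic(&now);
 *   timeradd(&now, &deadline, &deadline);
 *   while (!predicate)
 *   {
 *       if (condvar->timed_wait_abs(condvar, mutex, deadline))
 *       {   // deadline passed, no matter how often we woke up in between
 *           break;
 *       }
 *   }
 */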
START_TEST(test_condvar_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	mutex->lock(mutex);
	while (TRUE)
	{
		time_monotonic(&start);
		timeradd(&start, &diff, &abso);
		if (condvar->timed_wait_abs(condvar, mutex, abso))
		{	/* timed out */
			break;
		}
	}
	time_monotonic(&end);
	mutex->unlock(mutex);
	ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
				  end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);

	thread = thread_create(condvar_run, NULL);

	time_monotonic(&start);
	timeradd(&start, &diff, &abso);
	mutex->lock(mutex);
	while (sigcount == 0)
	{
		ck_assert(!condvar->timed_wait_abs(condvar, mutex, abso));
	}
	mutex->unlock(mutex);

	thread->join(thread);
	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
static void *condvar_cancel_run(void *data)
{
	thread_cancelability(FALSE);

	mutex->lock(mutex);

	sigcount++;
	condvar->broadcast(condvar);

	thread_cleanup_push((void*)mutex->unlock, mutex);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		condvar->wait(condvar, mutex);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}
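
/* The ordering above is what makes cancellation safe: wait() is a
 * cancellation point, so the unlock is registered with thread_cleanup_push()
 * before cancelability is re-enabled.  If the thread is cancelled while
 * waiting, the pushed handler releases the mutex and the cancelling thread
 * cannot deadlock on it.
 */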
START_TEST(test_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	mutex = mutex_create(MUTEX_TYPE_DEFAULT);
	condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	mutex->lock(mutex);
	while (sigcount < THREADS)
	{
		condvar->wait(condvar, mutex);
	}
	mutex->unlock(mutex);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	condvar->destroy(condvar);
}
END_TEST
/**
 * RWlock for different tests
 */
static rwlock_t *rwlock;

static void *rwlock_run(refcount_t *refs)
{
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);

	if (rwlock->try_write_lock(rwlock))
	{
		ck_assert_int_eq(*refs, 0);
		sched_yield();
		rwlock->unlock(rwlock);
	}

	rwlock->write_lock(rwlock);
	ck_assert_int_eq(*refs, 0);
	sched_yield();
	rwlock->unlock(rwlock);

	rwlock->read_lock(rwlock);
	rwlock->read_lock(rwlock);
	ref_get(refs);
	sched_yield();
	ignore_result(ref_put(refs));
	rwlock->unlock(rwlock);
	rwlock->unlock(rwlock);

	return NULL;
}

START_TEST(test_rwlock)
{
	thread_t *threads[THREADS];
	refcount_t refs = 0;
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create((void*)rwlock_run, &refs);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
}
END_TEST
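
/* rwlock_t allows any number of concurrent readers or a single writer.
 * try_write_lock() does not block; it simply fails while readers or another
 * writer hold the lock, which is why rwlock_run() only checks the refcount
 * when the attempt succeeded.
 */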
/**
 * Condvar for rwlock tests
 */
static rwlock_condvar_t *rwcond;

static void *rwlock_condvar_run(void *data)
{
	rwlock->write_lock(rwlock);
	sigcount++;
	rwcond->signal(rwcond);
	rwlock->unlock(rwlock);
	return NULL;
}

START_TEST(test_rwlock_condvar)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run, NULL);
	}

	rwlock->write_lock(rwlock);
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
static void *rwlock_condvar_run_broad(void *data)
{
	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);
	return NULL;
}

START_TEST(test_rwlock_condvar_broad)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_run_broad, NULL);
	}

	sched_yield();

	rwlock->write_lock(rwlock);
	sigcount = 1;
	rwcond->broadcast(rwcond);
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
START_TEST(test_rwlock_condvar_timed)
{
	thread_t *thread;
	timeval_t start, end, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	rwlock->write_lock(rwlock);
	while (TRUE)
	{
		time_monotonic(&start);
		if (rwcond->timed_wait(rwcond, rwlock, diff.tv_usec / 1000))
		{	/* timed out */
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	timersub(&end, &start, &end);
	ck_assert_msg(timercmp(&end, &diff, >), "end: %u.%u, diff: %u.%u",
				  end.tv_sec, end.tv_usec, diff.tv_sec, diff.tv_usec);

	thread = thread_create(rwlock_condvar_run, NULL);

	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait(rwcond, rwlock, 1000));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
START_TEST(test_rwlock_condvar_timed_abs)
{
	thread_t *thread;
	timeval_t start, end, abso, diff = { .tv_usec = 50000 };

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	rwlock->write_lock(rwlock);
	while (TRUE)
	{
		time_monotonic(&start);
		timeradd(&start, &diff, &abso);
		if (rwcond->timed_wait_abs(rwcond, rwlock, abso))
		{	/* timed out */
			break;
		}
	}
	rwlock->unlock(rwlock);
	time_monotonic(&end);
	ck_assert_msg(timercmp(&end, &abso, >), "end: %u.%u, abso: %u.%u",
				  end.tv_sec, end.tv_usec, abso.tv_sec, abso.tv_usec);

	thread = thread_create(rwlock_condvar_run, NULL);

	time_monotonic(&start);
	timeradd(&start, &diff, &abso);
	rwlock->write_lock(rwlock);
	while (sigcount == 0)
	{
		ck_assert(!rwcond->timed_wait_abs(rwcond, rwlock, abso));
	}
	rwlock->unlock(rwlock);

	thread->join(thread);
	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
static void *rwlock_condvar_cancel_run(void *data)
{
	thread_cancelability(FALSE);

	rwlock->write_lock(rwlock);

	sigcount++;
	rwcond->broadcast(rwcond);

	thread_cleanup_push((void*)rwlock->unlock, rwlock);
	thread_cancelability(TRUE);
	while (TRUE)
	{
		rwcond->wait(rwcond, rwlock);
	}
	thread_cleanup_pop(TRUE);

	return NULL;
}

START_TEST(test_rwlock_condvar_cancel)
{
	thread_t *threads[THREADS];
	int i;

	rwlock = rwlock_create(RWLOCK_TYPE_DEFAULT);
	rwcond = rwlock_condvar_create();
	sigcount = 0;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(rwlock_condvar_cancel_run, NULL);
	}

	/* wait for all threads */
	rwlock->write_lock(rwlock);
	while (sigcount < THREADS)
	{
		rwcond->wait(rwcond, rwlock);
	}
	rwlock->unlock(rwlock);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	rwlock->destroy(rwlock);
	rwcond->destroy(rwcond);
}
END_TEST
/**
 * Semaphore for different tests
 */
static semaphore_t *semaphore;

static void *semaphore_run(void *data)
{
	semaphore->post(semaphore);
	return NULL;
}

START_TEST(test_semaphore)
{
	thread_t *threads[THREADS];
	int i, initial = 5;

	semaphore = semaphore_create(initial);

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(semaphore_run, NULL);
	}
	for (i = 0; i < THREADS + initial; i++)
	{
		semaphore->wait(semaphore);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	semaphore->destroy(semaphore);
}
END_TEST
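
/* Semaphore semantics exercised here: the semaphore starts with `initial`
 * tokens, every post() adds one and every wait() consumes one, so exactly
 * THREADS + initial wait() calls can complete.  Illustrative sketch only:
 *
 *   semaphore_t *sem = semaphore_create(1);
 *   sem->wait(sem);    // consumes the initial token
 *   sem->post(sem);    // hands it back, the next wait() won't block
 *   sem->destroy(sem);
 */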
static void *join_run(void *data)
{
	/* force some context switches */
	sched_yield();
	return (void*)((uintptr_t)data + THREADS);
}

START_TEST(test_join)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST

static void *exit_join_run(void *data)
{
	sched_yield();
	thread_exit((void*)((uintptr_t)data + THREADS));
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_join_exit)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST
static void *detach_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	return NULL;
}

START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST

static void *detach_exit_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but we check that thread state gets cleaned
	 * up with leak detective. */
}
END_TEST
static void *cancel_run(void *data)
{
	/* default cancellability should be TRUE, so don't change it */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}

START_TEST(test_cancel)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	thread_cancelability(FALSE);
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* here we can get cancelled */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}

START_TEST(test_cancel_onoff)
{
	thread_t *threads[THREADS];
	bool cancellable[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		cancellable[i] = TRUE;
		threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* wait until thread has cleared its cancellability */
		while (cancellable[i])
		{
			sched_yield();
		}
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert(cancellable[i]);
	}
}
END_TEST
static void *cancel_point_run(void *data)
{
	thread_cancelability(FALSE);
	while (TRUE)
	{
		/* implicitly enables cancellability */
		thread_cancellation_point();
	}
	return NULL;
}

START_TEST(test_cancel_point)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_point_run, NULL);
	}
	sched_yield();
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST
static void cleanup1(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 1);
	(*value)++;
}

static void cleanup2(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 2);
	(*value)++;
}

static void cleanup3(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 3);
	(*value)++;
}

static void *cleanup_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	return NULL;
}

START_TEST(test_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
static void *cleanup_exit_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_cleanup_exit)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_exit_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
static void *cleanup_cancel_run(void *data)
{
	thread_cancelability(FALSE);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}

START_TEST(test_cleanup_cancel)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_cancel_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
static void *cleanup_pop_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cleanup_push(cleanup2, data);
	thread_cleanup_pop(FALSE);

	thread_cleanup_pop(TRUE);
	return NULL;
}
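
/* thread_cleanup_pop(FALSE) removes the most recently pushed handler without
 * running it, thread_cleanup_pop(TRUE) removes and runs it; handlers still
 * registered when the thread terminates are run in reverse push order.  That
 * is why the values checked below still end up at 4: the extra cleanup2 push
 * is discarded again by the pop(FALSE) right after it.
 */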
START_TEST(test_cleanup_pop)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_pop_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST
static thread_value_t *tls[10];

static void *tls_run(void *data)
{
	uintptr_t value = (uintptr_t)data;
	int i, j;

	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == NULL);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], (void*)(value * i));
	}
	for (j = 0; j < 1000; j++)
	{
		for (i = 0; i < countof(tls); i++)
		{
			tls[i]->set(tls[i], (void*)(value * i));
			ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
		}
		sched_yield();
	}
	for (i = 0; i < countof(tls); i++)
	{
		ck_assert(tls[i]->get(tls[i]) == (void*)(value * i));
	}
	return (void*)(value + 1);
}

START_TEST(test_tls)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(tls_run, (void*)(uintptr_t)i);
	}

	ck_assert_int_eq((uintptr_t)tls_run((void*)(uintptr_t)(THREADS + 1)),
					 THREADS + 2);

	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + 1);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
}
END_TEST
static void tls_cleanup(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	(*value)--;
}

static void *tls_cleanup_run(void *data)
{
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->set(tls[i], data);
	}
	return NULL;
}

START_TEST(test_tls_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS], main_value = countof(tls);
	int i;

	for (i = 0; i < countof(tls); i++)
	{
		tls[i] = thread_value_create(tls_cleanup);
	}
	for (i = 0; i < THREADS; i++)
	{
		values[i] = countof(tls);
		threads[i] = thread_create(tls_cleanup_run, &values[i]);
	}

	tls_cleanup_run(&main_value);

	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 0);
	}
	for (i = 0; i < countof(tls); i++)
	{
		tls[i]->destroy(tls[i]);
	}
	ck_assert_int_eq(main_value, 0);
}
END_TEST
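
/* thread_value_create() optionally takes a cleanup callback that is invoked
 * with the stored value when a thread terminates (and, as asserted above for
 * the main thread, when the value object is destroyed while a value is still
 * set).  Illustrative sketch only:
 *
 *   thread_value_t *val = thread_value_create(free);
 *   val->set(val, strdup("per-thread copy"));   // freed automatically later
 *   ...
 *   val->destroy(val);
 */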
Suite *threading_suite_create()
{
	Suite *s;
	TCase *tc;

	s = suite_create("threading");

	tc = tcase_create("recursive mutex");
	tcase_add_test(tc, test_mutex);
	suite_add_tcase(s, tc);

	tc = tcase_create("spinlock");
	tcase_add_test(tc, test_spinlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("condvar");
	tcase_add_test(tc, test_condvar);
	tcase_add_test(tc, test_condvar_recursive);
	tcase_add_test(tc, test_condvar_broad);
	tcase_add_test(tc, test_condvar_timed);
	tcase_add_test(tc, test_condvar_timed_abs);
	tcase_add_test(tc, test_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock");
	tcase_add_test(tc, test_rwlock);
	suite_add_tcase(s, tc);

	tc = tcase_create("rwlock condvar");
	tcase_add_test(tc, test_rwlock_condvar);
	tcase_add_test(tc, test_rwlock_condvar_broad);
	tcase_add_test(tc, test_rwlock_condvar_timed);
	tcase_add_test(tc, test_rwlock_condvar_timed_abs);
	tcase_add_test(tc, test_rwlock_condvar_cancel);
	suite_add_tcase(s, tc);

	tc = tcase_create("semaphore");
	tcase_add_test(tc, test_semaphore);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread joining");
	tcase_add_test(tc, test_join);
	tcase_add_test(tc, test_join_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread detaching");
	tcase_add_test(tc, test_detach);
	tcase_add_test(tc, test_detach_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation");
	tcase_add_test(tc, test_cancel);
	tcase_add_test(tc, test_cancel_onoff);
	tcase_add_test(tc, test_cancel_point);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cleanup");
	tcase_add_test(tc, test_cleanup);
	tcase_add_test(tc, test_cleanup_exit);
	tcase_add_test(tc, test_cleanup_cancel);
	tcase_add_test(tc, test_cleanup_pop);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread local storage");
	tcase_add_test(tc, test_tls);
	tcase_add_test(tc, test_tls_cleanup);
	suite_add_tcase(s, tc);

	return s;
}