unit-tests: Add cleanup test cases for different thread exit situations
strongswan.git: src/libstrongswan/tests/suites/test_threading.c
/*
 * Copyright (C) 2013 Tobias Brunner
 * Copyright (C) 2008 Martin Willi
 * Hochschule fuer Technik Rapperswil
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include "test_suite.h"

#include <sched.h>
#include <unistd.h>

#include <threading/thread.h>
#include <threading/mutex.h>
#include <threading/condvar.h>

/*******************************************************************************
 * recursive mutex test
 */

#define THREADS 20

/**
 * Thread barrier data
 */
typedef struct {
	mutex_t *mutex;
	condvar_t *cond;
	int count;
	int current;
	bool active;
} barrier_t;

/**
 * Create a thread barrier for count threads
 */
static barrier_t* barrier_create(int count)
{
	barrier_t *this;

	INIT(this,
		.mutex = mutex_create(MUTEX_TYPE_DEFAULT),
		.cond = condvar_create(CONDVAR_TYPE_DEFAULT),
		.count = count,
	);

	return this;
}

/**
 * Destroy a thread barrier
 */
static void barrier_destroy(barrier_t *this)
{
	this->mutex->destroy(this->mutex);
	this->cond->destroy(this->cond);
	free(this);
}

/**
 * Wait until the configured number of threads has reached the barrier,
 * returns TRUE for exactly one of them
 */
static bool barrier_wait(barrier_t *this)
{
	bool winner = FALSE;

	this->mutex->lock(this->mutex);
	if (!this->active)
	{	/* first, reset */
		this->active = TRUE;
		this->current = 0;
	}

	this->current++;
	while (this->current < this->count)
	{
		this->cond->wait(this->cond, this->mutex);
	}
	if (this->active)
	{	/* first, win */
		winner = TRUE;
		this->active = FALSE;
	}
	this->mutex->unlock(this->mutex);
	this->cond->broadcast(this->cond);
	sched_yield();

	return winner;
}

/**
 * Barrier for some tests
 */
static barrier_t *barrier;

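/**
 * Recursively lock and unlock the mutex while checking that no two threads
 * ever hold it at the same time
 */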
static void *mutex_run(void *data)
{
	mutex_t *mutex = (mutex_t*)data;
	static int locked = 0;
	int i;

	/* wait for all threads before going into action */
	barrier_wait(barrier);

	for (i = 0; i < 100; i++)
	{
		mutex->lock(mutex);
		mutex->lock(mutex);
		mutex->lock(mutex);
		locked++;
		sched_yield();
		if (locked > 1)
		{
			fail("two threads locked the mutex concurrently");
		}
		locked--;
		mutex->unlock(mutex);
		mutex->unlock(mutex);
		mutex->unlock(mutex);
	}
	return NULL;
}

START_TEST(test_mutex)
{
	thread_t *threads[THREADS];
	mutex_t *mutex;
	int i;

	barrier = barrier_create(THREADS);
	mutex = mutex_create(MUTEX_TYPE_RECURSIVE);

	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
		mutex->unlock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->lock(mutex);
	}
	for (i = 0; i < 10; i++)
	{
		mutex->unlock(mutex);
	}

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(mutex_run, mutex);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}

	mutex->destroy(mutex);
	barrier_destroy(barrier);
}
END_TEST

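/**
 * Return a value derived from the thread's argument so test_join can verify
 * the result passed to join()
 */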
static void *join_run(void *data)
{
	/* force some context switches */
	sched_yield();
	return (void*)((uintptr_t)data + THREADS);
}

START_TEST(test_join)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST

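/**
 * Like join_run, but pass the return value via thread_exit()
 */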
static void *exit_join_run(void *data)
{
	sched_yield();
	thread_exit((void*)((uintptr_t)data + THREADS));
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_join_exit)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(exit_join_run, (void*)(uintptr_t)i);
	}
	for (i = 0; i < THREADS; i++)
	{
		ck_assert_int_eq((uintptr_t)threads[i]->join(threads[i]), i + THREADS);
	}
}
END_TEST

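/**
 * Decrement the running counter to signal that this detached thread is done
 */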
static void *detach_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	return NULL;
}

START_TEST(test_detach)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but leak detective verifies that the thread
	 * state gets cleaned up. */
}
END_TEST

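/**
 * Like detach_run, but terminate via thread_exit()
 */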
static void *detach_exit_run(void *data)
{
	refcount_t *running = (refcount_t*)data;

	ignore_result(ref_put(running));
	thread_exit(NULL);
	/* not reached */
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_detach_exit)
{
	thread_t *threads[THREADS];
	int i;
	refcount_t running = 0;

	for (i = 0; i < THREADS; i++)
	{
		ref_get(&running);
		threads[i] = thread_create(detach_exit_run, &running);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->detach(threads[i]);
	}
	while (running > 0)
	{
		sched_yield();
	}
	/* no checks done here, but leak detective verifies that the thread
	 * state gets cleaned up. */
}
END_TEST

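/**
 * Sleep until the main thread cancels us
 */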
static void *cancel_run(void *data)
{
	/* default cancellability should be TRUE, so don't change it */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}

START_TEST(test_cancel)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_run, NULL);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST

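/**
 * Disable cancellability for a while, then re-enable it and wait for
 * cancellation
 */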
static void *cancel_onoff_run(void *data)
{
	bool *cancellable = (bool*)data;

	thread_cancelability(FALSE);
	*cancellable = FALSE;

	/* we should not get cancelled here */
	usleep(50000);

	*cancellable = TRUE;
	thread_cancelability(TRUE);

	/* but here */
	while (TRUE)
	{
		sleep(10);
	}
	return NULL;
}

START_TEST(test_cancel_onoff)
{
	thread_t *threads[THREADS];
	bool cancellable[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		cancellable[i] = TRUE;
		threads[i] = thread_create(cancel_onoff_run, &cancellable[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		/* wait until thread has cleared its cancellability */
		while (cancellable[i])
		{
			sched_yield();
		}
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert(cancellable[i]);
	}
}
END_TEST

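/**
 * Spin on thread_cancellation_point() with cancellability otherwise disabled
 */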
static void *cancel_point_run(void *data)
{
	thread_cancelability(FALSE);
	while (TRUE)
	{
		/* implicitly enables cancellability */
		thread_cancellation_point();
	}
	return NULL;
}

START_TEST(test_cancel_point)
{
	thread_t *threads[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		threads[i] = thread_create(cancel_point_run, NULL);
	}
	sched_yield();
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
	}
}
END_TEST

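/**
 * Cleanup handlers that check and increment a counter to verify they run in
 * the expected order
 */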
static void cleanup1(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 1);
	(*value)++;
}

static void cleanup2(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 2);
	(*value)++;
}

static void cleanup3(void *data)
{
	uintptr_t *value = (uintptr_t*)data;

	ck_assert_int_eq(*value, 3);
	(*value)++;
}

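/**
 * Push the cleanup handlers and return normally, the test expects them to
 * get popped and executed anyway
 */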
static void *cleanup_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	return NULL;
}

START_TEST(test_cleanup)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

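/**
 * Like cleanup_run, but terminate via thread_exit()
 */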
static void *cleanup_exit_run(void *data)
{
	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);
	thread_exit(NULL);
	ck_assert(FALSE);
	return NULL;
}

START_TEST(test_cleanup_exit)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_exit_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

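/**
 * Like cleanup_run, but wait for cancellation instead of returning
 */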
static void *cleanup_cancel_run(void *data)
{
	thread_cancelability(FALSE);

	thread_cleanup_push(cleanup3, data);
	thread_cleanup_push(cleanup2, data);
	thread_cleanup_push(cleanup1, data);

	thread_cancelability(TRUE);

	while (TRUE)
	{
		sleep(1);
	}
	return NULL;
}

START_TEST(test_cleanup_cancel)
{
	thread_t *threads[THREADS];
	uintptr_t values[THREADS];
	int i;

	for (i = 0; i < THREADS; i++)
	{
		values[i] = 1;
		threads[i] = thread_create(cleanup_cancel_run, &values[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->cancel(threads[i]);
	}
	for (i = 0; i < THREADS; i++)
	{
		threads[i]->join(threads[i]);
		ck_assert_int_eq(values[i], 4);
	}
}
END_TEST

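/**
 * Create the test suite with test cases for mutexes, joining, detaching,
 * cancellation and cleanup handlers
 */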
Suite *threading_suite_create()
{
	Suite *s;
	TCase *tc;

	s = suite_create("threading");

	tc = tcase_create("recursive mutex");
	tcase_add_test(tc, test_mutex);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread joining");
	tcase_add_test(tc, test_join);
	tcase_add_test(tc, test_join_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread detaching");
	tcase_add_test(tc, test_detach);
	tcase_add_test(tc, test_detach_exit);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cancellation");
	tcase_add_test(tc, test_cancel);
	tcase_add_test(tc, test_cancel_onoff);
	tcase_add_test(tc, test_cancel_point);
	suite_add_tcase(s, tc);

	tc = tcase_create("thread cleanup");
	tcase_add_test(tc, test_cleanup);
	tcase_add_test(tc, test_cleanup_exit);
	tcase_add_test(tc, test_cleanup_cancel);
	suite_add_tcase(s, tc);

	return s;
}