src/libcharon/sa/ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31
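/*
 * Illustrative usage sketch (not part of this file): other daemon components
 * access the manager through charon->ike_sa_manager, check an IKE_SA out for
 * exclusive use and check it back in when done, e.g.:
 *
 *   ike_sa_t *ike_sa;
 *
 *   ike_sa = charon->ike_sa_manager->checkout_by_message(
 *                                          charon->ike_sa_manager, message);
 *   if (ike_sa)
 *   {
 *       // ... process the message with exclusive access to the IKE_SA ...
 *       charon->ike_sa_manager->checkin(charon->ike_sa_manager, ike_sa);
 *   }
 */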
32 /* the default size of the hash table (MUST be a power of 2) */
33 #define DEFAULT_HASHTABLE_SIZE 1
34
35 /* the maximum size of the hash table (MUST be a power of 2) */
36 #define MAX_HASHTABLE_SIZE (1 << 30)
37
38 /* the default number of segments (MUST be a power of 2) */
39 #define DEFAULT_SEGMENT_COUNT 1
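/* Power-of-two sizes allow hashes to be mapped to table rows and rows to
 * segments with a simple bitwise AND against table_mask/segment_mask (see
 * put_entry() below) instead of a modulo operation. */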
40
41 typedef struct entry_t entry_t;
42
43 /**
44 * An entry in the linked list, contains IKE_SA, locking and lookup data.
45 */
46 struct entry_t {
47
48 /**
49 * Number of threads waiting for this ike_sa_t object.
50 */
51 int waiting_threads;
52
53 /**
54 * Condvar where threads can wait until ike_sa_t object is free for use again.
55 */
56 condvar_t *condvar;
57
58 /**
59 * Is this ike_sa currently checked out?
60 */
61 bool checked_out;
62
63 /**
64 * Does this SA drive out new threads?
65 */
66 bool driveout_new_threads;
67
68 /**
69 * Does this SA drive out waiting threads?
70 */
71 bool driveout_waiting_threads;
72
73 /**
74 * Identification of an IKE_SA (SPIs).
75 */
76 ike_sa_id_t *ike_sa_id;
77
78 /**
79 * The contained ike_sa_t object.
80 */
81 ike_sa_t *ike_sa;
82
83 /**
84 * hash of the IKE_SA_INIT message, used to detect retransmissions
85 */
86 chunk_t init_hash;
87
88 /**
89 * remote host address, required for DoS detection and duplicate
90 * checking (host with same my_id and other_id is *not* considered
91 * a duplicate if the address family differs)
92 */
93 host_t *other;
94
95 /**
96 * As responder: Is this SA half-open?
97 */
98 bool half_open;
99
100 /**
101 * own identity, required for duplicate checking
102 */
103 identification_t *my_id;
104
105 /**
106 * remote identity, required for duplicate checking
107 */
108 identification_t *other_id;
109
110 /**
111 * message ID currently processing, if any
112 */
113 u_int32_t message_id;
114 };
115
116 /**
117 * Implementation of entry_t.destroy.
118 */
119 static status_t entry_destroy(entry_t *this)
120 {
121 /* also destroy IKE SA */
122 this->ike_sa->destroy(this->ike_sa);
123 this->ike_sa_id->destroy(this->ike_sa_id);
124 chunk_free(&this->init_hash);
125 DESTROY_IF(this->other);
126 DESTROY_IF(this->my_id);
127 DESTROY_IF(this->other_id);
128 this->condvar->destroy(this->condvar);
129 free(this);
130 return SUCCESS;
131 }
132
133 /**
134 * Creates a new entry for the ike_sa_t list.
135 */
136 static entry_t *entry_create()
137 {
138 entry_t *this = malloc_thing(entry_t);
139
140 this->waiting_threads = 0;
141 this->condvar = condvar_create(CONDVAR_TYPE_DEFAULT);
142
143 /* the checked_out flag is only set when we actually hand the entry out */
144 this->checked_out = FALSE;
145 this->driveout_new_threads = FALSE;
146 this->driveout_waiting_threads = FALSE;
147 this->message_id = -1;
148 this->init_hash = chunk_empty;
149 this->other = NULL;
150 this->half_open = FALSE;
151 this->my_id = NULL;
152 this->other_id = NULL;
153 this->ike_sa_id = NULL;
154 this->ike_sa = NULL;
155
156 return this;
157 }
158
159 /**
160 * Function that matches entry_t objects by ike_sa_id_t.
161 */
162 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
163 {
164 if (id->equals(id, entry->ike_sa_id))
165 {
166 return TRUE;
167 }
168 if ((id->get_responder_spi(id) == 0 ||
169 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
170 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
171 {
172 /* this is TRUE for IKE_SAs that we initiated but for which we have not yet received a response */
173 return TRUE;
174 }
175 return FALSE;
176 }
177
178 /**
179 * Function that matches entry_t objects by ike_sa_t pointers.
180 */
181 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
182 {
183 return entry->ike_sa == ike_sa;
184 }
185
186 /**
187 * Hash function for ike_sa_id_t objects.
188 */
189 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
190 {
191 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
192 * locally unique, so we use our randomly allocated SPI whether we are
193 * initiator or responder to ensure a good distribution. The latter is not
194 * possible for IKEv1 as we don't know whether we are original initiator or
195 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
196 * SPIs (Cookies) to be allocated near random (we allocate them randomly
197 * anyway) it seems safe to always use the initiator SPI. */
198 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
199 ike_sa_id->is_initiator(ike_sa_id))
200 {
201 return ike_sa_id->get_initiator_spi(ike_sa_id);
202 }
203 return ike_sa_id->get_responder_spi(ike_sa_id);
204 }
205
206 typedef struct half_open_t half_open_t;
207
208 /**
209 * Struct to manage half-open IKE_SAs per peer.
210 */
211 struct half_open_t {
212 /** chunk of remote host address */
213 chunk_t other;
214
215 /** the number of half-open IKE_SAs with that host */
216 u_int count;
217 };
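/* The per-address counts kept here back get_half_open_count() below, which
 * other components consult for DoS detection. */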
218
219 /**
220 * Destroys a half_open_t object.
221 */
222 static void half_open_destroy(half_open_t *this)
223 {
224 chunk_free(&this->other);
225 free(this);
226 }
227
228 typedef struct connected_peers_t connected_peers_t;
229
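/**
 * Struct to manage the IKE_SAs between two peers (used for duplicate checking).
 */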
230 struct connected_peers_t {
231 /** own identity */
232 identification_t *my_id;
233
234 /** remote identity */
235 identification_t *other_id;
236
237 /** ip address family of peer */
238 int family;
239
240 /** list of ike_sa_id_t objects of IKE_SAs between the two identities */
241 linked_list_t *sas;
242 };
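/* This table backs the duplicate handling: create_id_enumerator(),
 * has_contact() and check_uniqueness() below look up entries by the two
 * identities and, optionally, the address family. */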
243
244 static void connected_peers_destroy(connected_peers_t *this)
245 {
246 this->my_id->destroy(this->my_id);
247 this->other_id->destroy(this->other_id);
248 this->sas->destroy(this->sas);
249 free(this);
250 }
251
252 /**
253 * Function that matches connected_peers_t objects by the given ids.
254 */
255 static inline bool connected_peers_match(connected_peers_t *connected_peers,
256 identification_t *my_id, identification_t *other_id,
257 int family)
258 {
259 return my_id->equals(my_id, connected_peers->my_id) &&
260 other_id->equals(other_id, connected_peers->other_id) &&
261 (!family || family == connected_peers->family);
262 }
263
264 typedef struct init_hash_t init_hash_t;
265
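/**
 * Struct to store the hash of an initial IKE message (retransmit detection).
 */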
266 struct init_hash_t {
267 /** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
268 chunk_t hash;
269
270 /** our SPI allocated for the IKE_SA based on this message */
271 u_int64_t our_spi;
272 };
273
274 typedef struct segment_t segment_t;
275
276 /**
277 * Struct to manage segments of the hash table.
278 */
279 struct segment_t {
280 /** mutex to access a segment exclusively */
281 mutex_t *mutex;
282
283 /** the number of entries in this segment */
284 u_int count;
285 };
286
287 typedef struct shareable_segment_t shareable_segment_t;
288
289 /**
290 * Struct to manage segments of the "half-open" and "connected peers" hash tables.
291 */
292 struct shareable_segment_t {
293 /** rwlock to access a segment non-/exclusively */
294 rwlock_t *lock;
295
296 /** the number of entries in this segment - in case of the "half-open table"
297 * it's the sum of all half_open_t.count in a segment. */
298 u_int count;
299 };
300
301 typedef struct table_item_t table_item_t;
302
303 /**
304 * Instead of using linked_list_t for each bucket we store the data in our own
305 * list to save memory.
306 */
307 struct table_item_t {
308 /** data of this item */
309 void *value;
310
311 /** next item in the overflow list */
312 table_item_t *next;
313 };
314
315 typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;
316
317 /**
318 * Additional private members of ike_sa_manager_t.
319 */
320 struct private_ike_sa_manager_t {
321 /**
322 * Public interface of ike_sa_manager_t.
323 */
324 ike_sa_manager_t public;
325
326 /**
327 * Hash table with entries for the ike_sa_t objects.
328 */
329 table_item_t **ike_sa_table;
330
331 /**
332 * The size of the hash table.
333 */
334 u_int table_size;
335
336 /**
337 * Mask to map the hashes to table rows.
338 */
339 u_int table_mask;
340
341 /**
342 * Segments of the hash table.
343 */
344 segment_t *segments;
345
346 /**
347 * The number of segments.
348 */
349 u_int segment_count;
350
351 /**
352 * Mask to map a table row to a segment.
353 */
354 u_int segment_mask;
355
356 /**
357 * Hash table with half_open_t objects.
358 */
359 table_item_t **half_open_table;
360
361 /**
362 * Segments of the "half-open" hash table.
363 */
364 shareable_segment_t *half_open_segments;
365
366 /**
367 * Hash table with connected_peers_t objects.
368 */
369 table_item_t **connected_peers_table;
370
371 /**
372 * Segments of the "connected peers" hash table.
373 */
374 shareable_segment_t *connected_peers_segments;
375
376 /**
377 * Hash table with init_hash_t objects.
378 */
379 table_item_t **init_hashes_table;
380
381 /**
382 * Segments of the "hashes" hash table.
383 */
384 segment_t *init_hashes_segments;
385
386 /**
387 * RNG to get random SPIs for our side
388 */
389 rng_t *rng;
390
391 /**
392 * SHA1 hasher for IKE_SA_INIT retransmit detection
393 */
394 hasher_t *hasher;
395
396 /**
397 * reuse existing IKE_SAs in checkout_by_config
398 */
399 bool reuse_ikesa;
400 };
401
402 /**
403 * Acquire a lock to access the segment of the table row with the given index.
404 * It also works with the segment index directly.
405 */
406 static inline void lock_single_segment(private_ike_sa_manager_t *this,
407 u_int index)
408 {
409 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
410 lock->lock(lock);
411 }
412
413 /**
414 * Release the lock required to access the segment of the table row with the given index.
415 * It also works with the segment index directly.
416 */
417 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
418 u_int index)
419 {
420 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
421 lock->unlock(lock);
422 }
423
424 /**
425 * Lock all segments
426 */
427 static void lock_all_segments(private_ike_sa_manager_t *this)
428 {
429 u_int i;
430
431 for (i = 0; i < this->segment_count; i++)
432 {
433 this->segments[i].mutex->lock(this->segments[i].mutex);
434 }
435 }
436
437 /**
438 * Unlock all segments
439 */
440 static void unlock_all_segments(private_ike_sa_manager_t *this)
441 {
442 u_int i;
443
444 for (i = 0; i < this->segment_count; i++)
445 {
446 this->segments[i].mutex->unlock(this->segments[i].mutex);
447 }
448 }
449
450 typedef struct private_enumerator_t private_enumerator_t;
451
452 /**
453 * hash table enumerator implementation
454 */
455 struct private_enumerator_t {
456
457 /**
458 * implements enumerator interface
459 */
460 enumerator_t enumerator;
461
462 /**
463 * associated ike_sa_manager_t
464 */
465 private_ike_sa_manager_t *manager;
466
467 /**
468 * current segment index
469 */
470 u_int segment;
471
472 /**
473 * currently enumerating entry
474 */
475 entry_t *entry;
476
477 /**
478 * current table row index
479 */
480 u_int row;
481
482 /**
483 * current table item
484 */
485 table_item_t *current;
486
487 /**
488 * previous table item
489 */
490 table_item_t *prev;
491 };
492
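/* Entries are enumerated segment by segment: segment i owns the table rows
 * with index i, i + segment_count, i + 2 * segment_count, ...  The segment
 * lock is acquired while one of its rows is traversed, stays held across
 * calls while an entry is handed out to the caller, and is released when the
 * row is exhausted or the enumerator is destroyed. */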
493 METHOD(enumerator_t, enumerate, bool,
494 private_enumerator_t *this, entry_t **entry, u_int *segment)
495 {
496 if (this->entry)
497 {
498 this->entry->condvar->signal(this->entry->condvar);
499 this->entry = NULL;
500 }
501 while (this->segment < this->manager->segment_count)
502 {
503 while (this->row < this->manager->table_size)
504 {
505 this->prev = this->current;
506 if (this->current)
507 {
508 this->current = this->current->next;
509 }
510 else
511 {
512 lock_single_segment(this->manager, this->segment);
513 this->current = this->manager->ike_sa_table[this->row];
514 }
515 if (this->current)
516 {
517 *entry = this->entry = this->current->value;
518 *segment = this->segment;
519 return TRUE;
520 }
521 unlock_single_segment(this->manager, this->segment);
522 this->row += this->manager->segment_count;
523 }
524 this->segment++;
525 this->row = this->segment;
526 }
527 return FALSE;
528 }
529
530 METHOD(enumerator_t, enumerator_destroy, void,
531 private_enumerator_t *this)
532 {
533 if (this->entry)
534 {
535 this->entry->condvar->signal(this->entry->condvar);
536 }
537 if (this->current)
538 {
539 unlock_single_segment(this->manager, this->segment);
540 }
541 free(this);
542 }
543
544 /**
545 * Creates an enumerator to enumerate the entries in the hash table.
546 */
547 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
548 {
549 private_enumerator_t *enumerator;
550
551 INIT(enumerator,
552 .enumerator = {
553 .enumerate = (void*)_enumerate,
554 .destroy = _enumerator_destroy,
555 },
556 .manager = this,
557 );
558 return &enumerator->enumerator;
559 }
560
561 /**
562 * Put an entry into the hash table.
563 * Note: The caller has to unlock the returned segment.
564 */
565 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
566 {
567 table_item_t *current, *item;
568 u_int row, segment;
569
570 INIT(item,
571 .value = entry,
572 );
573
574 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
575 segment = row & this->segment_mask;
576
577 lock_single_segment(this, segment);
578 current = this->ike_sa_table[row];
579 if (current)
580 { /* insert at the front of current bucket */
581 item->next = current;
582 }
583 this->ike_sa_table[row] = item;
584 this->segments[segment].count++;
585 return segment;
586 }
587
588 /**
589 * Remove an entry from the hash table.
590 * Note: The caller MUST have a lock on the segment of this entry.
591 */
592 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
593 {
594 table_item_t *item, *prev = NULL;
595 u_int row, segment;
596
597 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
598 segment = row & this->segment_mask;
599 item = this->ike_sa_table[row];
600 while (item)
601 {
602 if (item->value == entry)
603 {
604 if (prev)
605 {
606 prev->next = item->next;
607 }
608 else
609 {
610 this->ike_sa_table[row] = item->next;
611 }
612 this->segments[segment].count--;
613 free(item);
614 break;
615 }
616 prev = item;
617 item = item->next;
618 }
619 }
620
621 /**
622 * Remove the entry at the current enumerator position.
623 */
624 static void remove_entry_at(private_enumerator_t *this)
625 {
626 this->entry = NULL;
627 if (this->current)
628 {
629 table_item_t *current = this->current;
630
631 this->manager->segments[this->segment].count--;
632 this->current = this->prev;
633
634 if (this->prev)
635 {
636 this->prev->next = current->next;
637 }
638 else
639 {
640 this->manager->ike_sa_table[this->row] = current->next;
641 unlock_single_segment(this->manager, this->segment);
642 }
643 free(current);
644 }
645 }
646
647 /**
648 * Find an entry using the provided match function to compare the entries for
649 * equality.
650 */
651 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
652 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
653 linked_list_match_t match, void *param)
654 {
655 table_item_t *item;
656 u_int row, seg;
657
658 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
659 seg = row & this->segment_mask;
660
661 lock_single_segment(this, seg);
662 item = this->ike_sa_table[row];
663 while (item)
664 {
665 if (match(item->value, param))
666 {
667 *entry = item->value;
668 *segment = seg;
669 /* the locked segment has to be unlocked by the caller */
670 return SUCCESS;
671 }
672 item = item->next;
673 }
674 unlock_single_segment(this, seg);
675 return NOT_FOUND;
676 }
677
678 /**
679 * Find an entry by ike_sa_id_t.
680 * Note: On SUCCESS, the caller has to unlock the segment.
681 */
682 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
683 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
684 {
685 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
686 (linked_list_match_t)entry_match_by_id, ike_sa_id);
687 }
688
689 /**
690 * Find an entry by IKE_SA pointer.
691 * Note: On SUCCESS, the caller has to unlock the segment.
692 */
693 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
694 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
695 {
696 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
697 (linked_list_match_t)entry_match_by_sa, ike_sa);
698 }
699
700 /**
701 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
702 * acquirable.
703 */
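/* Note: callers must already hold the segment's mutex; the condvar wait below
 * atomically releases it while sleeping and re-acquires it before returning. */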
704 static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
705 u_int segment)
706 {
707 if (entry->driveout_new_threads)
708 {
709 /* we are not allowed to get this */
710 return FALSE;
711 }
712 while (entry->checked_out && !entry->driveout_waiting_threads)
713 {
714 /* wait until the entry becomes available for us;
715 * we register ourselves as waiting. */
716 entry->waiting_threads++;
717 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
718 entry->waiting_threads--;
719 }
720 /* a deletion request forbids us from getting this SA; the caller has to try the next one */
721 if (entry->driveout_waiting_threads)
722 {
723 /* we must signal here, others may be waiting on it, too */
724 entry->condvar->signal(entry->condvar);
725 return FALSE;
726 }
727 return TRUE;
728 }
729
730 /**
731 * Put a half-open SA into the hash table.
732 */
733 static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
734 {
735 table_item_t *item;
736 u_int row, segment;
737 rwlock_t *lock;
738 half_open_t *half_open;
739 chunk_t addr;
740
741 addr = entry->other->get_address(entry->other);
742 row = chunk_hash(addr) & this->table_mask;
743 segment = row & this->segment_mask;
744 lock = this->half_open_segments[segment].lock;
745 lock->write_lock(lock);
746 item = this->half_open_table[row];
747 while (item)
748 {
749 half_open = item->value;
750
751 if (chunk_equals(addr, half_open->other))
752 {
753 half_open->count++;
754 break;
755 }
756 item = item->next;
757 }
758
759 if (!item)
760 {
761 INIT(half_open,
762 .other = chunk_clone(addr),
763 .count = 1,
764 );
765 INIT(item,
766 .value = half_open,
767 .next = this->half_open_table[row],
768 );
769 this->half_open_table[row] = item;
770 }
771 this->half_open_segments[segment].count++;
772 lock->unlock(lock);
773 }
774
775 /**
776 * Remove a half-open SA from the hash table.
777 */
778 static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
779 {
780 table_item_t *item, *prev = NULL;
781 u_int row, segment;
782 rwlock_t *lock;
783 chunk_t addr;
784
785 addr = entry->other->get_address(entry->other);
786 row = chunk_hash(addr) & this->table_mask;
787 segment = row & this->segment_mask;
788 lock = this->half_open_segments[segment].lock;
789 lock->write_lock(lock);
790 item = this->half_open_table[row];
791 while (item)
792 {
793 half_open_t *half_open = item->value;
794
795 if (chunk_equals(addr, half_open->other))
796 {
797 if (--half_open->count == 0)
798 {
799 if (prev)
800 {
801 prev->next = item->next;
802 }
803 else
804 {
805 this->half_open_table[row] = item->next;
806 }
807 half_open_destroy(half_open);
808 free(item);
809 }
810 this->half_open_segments[segment].count--;
811 break;
812 }
813 prev = item;
814 item = item->next;
815 }
816 lock->unlock(lock);
817 }
818
819 /**
820 * Put an SA between two peers into the hash table.
821 */
822 static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
823 {
824 table_item_t *item;
825 u_int row, segment;
826 rwlock_t *lock;
827 connected_peers_t *connected_peers;
828 chunk_t my_id, other_id;
829 int family;
830
831 my_id = entry->my_id->get_encoding(entry->my_id);
832 other_id = entry->other_id->get_encoding(entry->other_id);
833 family = entry->other->get_family(entry->other);
834 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
835 segment = row & this->segment_mask;
836 lock = this->connected_peers_segments[segment].lock;
837 lock->write_lock(lock);
838 item = this->connected_peers_table[row];
839 while (item)
840 {
841 connected_peers = item->value;
842
843 if (connected_peers_match(connected_peers, entry->my_id,
844 entry->other_id, family))
845 {
846 if (connected_peers->sas->find_first(connected_peers->sas,
847 (linked_list_match_t)entry->ike_sa_id->equals,
848 NULL, entry->ike_sa_id) == SUCCESS)
849 {
850 lock->unlock(lock);
851 return;
852 }
853 break;
854 }
855 item = item->next;
856 }
857
858 if (!item)
859 {
860 INIT(connected_peers,
861 .my_id = entry->my_id->clone(entry->my_id),
862 .other_id = entry->other_id->clone(entry->other_id),
863 .family = family,
864 .sas = linked_list_create(),
865 );
866 INIT(item,
867 .value = connected_peers,
868 .next = this->connected_peers_table[row],
869 );
870 this->connected_peers_table[row] = item;
871 }
872 connected_peers->sas->insert_last(connected_peers->sas,
873 entry->ike_sa_id->clone(entry->ike_sa_id));
874 this->connected_peers_segments[segment].count++;
875 lock->unlock(lock);
876 }
877
878 /**
879 * Remove an SA between two peers from the hash table.
880 */
881 static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
882 {
883 table_item_t *item, *prev = NULL;
884 u_int row, segment;
885 rwlock_t *lock;
886 chunk_t my_id, other_id;
887 int family;
888
889 my_id = entry->my_id->get_encoding(entry->my_id);
890 other_id = entry->other_id->get_encoding(entry->other_id);
891 family = entry->other->get_family(entry->other);
892
893 row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
894 segment = row & this->segment_mask;
895
896 lock = this->connected_peers_segments[segment].lock;
897 lock->write_lock(lock);
898 item = this->connected_peers_table[row];
899 while (item)
900 {
901 connected_peers_t *current = item->value;
902
903 if (connected_peers_match(current, entry->my_id, entry->other_id,
904 family))
905 {
906 enumerator_t *enumerator;
907 ike_sa_id_t *ike_sa_id;
908
909 enumerator = current->sas->create_enumerator(current->sas);
910 while (enumerator->enumerate(enumerator, &ike_sa_id))
911 {
912 if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
913 {
914 current->sas->remove_at(current->sas, enumerator);
915 ike_sa_id->destroy(ike_sa_id);
916 this->connected_peers_segments[segment].count--;
917 break;
918 }
919 }
920 enumerator->destroy(enumerator);
921 if (current->sas->get_count(current->sas) == 0)
922 {
923 if (prev)
924 {
925 prev->next = item->next;
926 }
927 else
928 {
929 this->connected_peers_table[row] = item->next;
930 }
931 connected_peers_destroy(current);
932 free(item);
933 }
934 break;
935 }
936 prev = item;
937 item = item->next;
938 }
939 lock->unlock(lock);
940 }
941
942 /**
943 * Get a random SPI for new IKE_SAs
944 */
945 static u_int64_t get_spi(private_ike_sa_manager_t *this)
946 {
947 u_int64_t spi;
948
949 if (this->rng &&
950 this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
951 {
952 return spi;
953 }
954 return 0;
955 }
956
957 /**
958 * Calculate the hash of the initial IKE message. Memory for the hash is
959 * allocated on success.
960 *
961 * @returns TRUE on success
962 */
963 static bool get_init_hash(private_ike_sa_manager_t *this, message_t *message,
964 chunk_t *hash)
965 {
966 if (!this->hasher)
967 { /* this might be the case when flush() has been called */
968 return FALSE;
969 }
970 if (message->get_exchange_type(message) == ID_PROT)
971 { /* include the source for Main Mode as the hash will be the same if
972 * SPIs are reused by two initiators that use the same proposal */
973 host_t *src = message->get_source(message);
974
975 if (!this->hasher->allocate_hash(this->hasher,
976 src->get_address(src), NULL))
977 {
978 return FALSE;
979 }
980 }
981 return this->hasher->allocate_hash(this->hasher,
982 message->get_packet_data(message), hash);
983 }
984
985 /**
986 * Check if we already have created an IKE_SA based on the initial IKE message
987 * with the given hash.
988 * If not, the hash is stored; note that the hash data is not(!) cloned.
989 *
990 * Also, the local SPI is returned. In case of a retransmit this is already
991 * stored together with the hash, otherwise it is newly allocated and should
992 * be used to create the IKE_SA.
993 *
994 * @returns ALREADY_DONE if the message with the given hash has been seen before
995 * NOT_FOUND if the message hash was not found
996 * FAILED if the SPI allocation failed
997 */
998 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
999 chunk_t init_hash, u_int64_t *our_spi)
1000 {
1001 table_item_t *item;
1002 u_int row, segment;
1003 mutex_t *mutex;
1004 init_hash_t *init;
1005 u_int64_t spi;
1006
1007 row = chunk_hash(init_hash) & this->table_mask;
1008 segment = row & this->segment_mask;
1009 mutex = this->init_hashes_segments[segment].mutex;
1010 mutex->lock(mutex);
1011 item = this->init_hashes_table[row];
1012 while (item)
1013 {
1014 init_hash_t *current = item->value;
1015
1016 if (chunk_equals(init_hash, current->hash))
1017 {
1018 *our_spi = current->our_spi;
1019 mutex->unlock(mutex);
1020 return ALREADY_DONE;
1021 }
1022 item = item->next;
1023 }
1024
1025 spi = get_spi(this);
1026 if (!spi)
1027 {
/* unlock the init-hashes mutex acquired above before bailing out */
mutex->unlock(mutex);
1028 return FAILED;
1029 }
1030
1031 INIT(init,
1032 .hash = {
1033 .len = init_hash.len,
1034 .ptr = init_hash.ptr,
1035 },
1036 .our_spi = spi,
1037 );
1038 INIT(item,
1039 .value = init,
1040 .next = this->init_hashes_table[row],
1041 );
1042 this->init_hashes_table[row] = item;
1043 *our_spi = init->our_spi;
1044 mutex->unlock(mutex);
1045 return NOT_FOUND;
1046 }
1047
1048 /**
1049 * Remove the hash of an initial IKE message from the cache.
1050 */
1051 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1052 {
1053 table_item_t *item, *prev = NULL;
1054 u_int row, segment;
1055 mutex_t *mutex;
1056
1057 row = chunk_hash(init_hash) & this->table_mask;
1058 segment = row & this->segment_mask;
1059 mutex = this->init_hashes_segments[segment].mutex;
1060 mutex->lock(mutex);
1061 item = this->init_hashes_table[row];
1062 while (item)
1063 {
1064 init_hash_t *current = item->value;
1065
1066 if (chunk_equals(init_hash, current->hash))
1067 {
1068 if (prev)
1069 {
1070 prev->next = item->next;
1071 }
1072 else
1073 {
1074 this->init_hashes_table[row] = item->next;
1075 }
1076 free(current);
1077 free(item);
1078 break;
1079 }
1080 prev = item;
1081 item = item->next;
1082 }
1083 mutex->unlock(mutex);
1084 }
1085
1086 METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
1087 private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
1088 {
1089 ike_sa_t *ike_sa = NULL;
1090 entry_t *entry;
1091 u_int segment;
1092
1093 DBG2(DBG_MGR, "checkout IKE_SA");
1094
1095 if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
1096 {
1097 if (wait_for_entry(this, entry, segment))
1098 {
1099 entry->checked_out = TRUE;
1100 ike_sa = entry->ike_sa;
1101 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1102 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1103 }
1104 unlock_single_segment(this, segment);
1105 }
1106 charon->bus->set_sa(charon->bus, ike_sa);
1107 return ike_sa;
1108 }
1109
1110 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1111 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1112 {
1113 ike_sa_id_t *ike_sa_id;
1114 ike_sa_t *ike_sa;
1115 u_int8_t ike_version;
1116 u_int64_t spi;
1117
1118 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1119
1120 spi = get_spi(this);
1121 if (!spi)
1122 {
1123 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1124 return NULL;
1125 }
1126
1127 if (initiator)
1128 {
1129 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1130 }
1131 else
1132 {
1133 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1134 }
1135 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1136 ike_sa_id->destroy(ike_sa_id);
1137
1138 if (ike_sa)
1139 {
1140 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1141 ike_sa->get_unique_id(ike_sa));
1142 }
1143 return ike_sa;
1144 }
1145
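/* checkout_by_message() below resolves a message to an IKE_SA: initial
 * messages (IKE_SA_INIT requests, first Main/Aggressive Mode messages) are
 * hashed and matched against the init-hashes table, so retransmits map to the
 * already created IKE_SA; all other messages are looked up by their SPIs. */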
1146 METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
1147 private_ike_sa_manager_t* this, message_t *message)
1148 {
1149 u_int segment;
1150 entry_t *entry;
1151 ike_sa_t *ike_sa = NULL;
1152 ike_sa_id_t *id;
1153 ike_version_t ike_version;
1154 bool is_init = FALSE;
1155
1156 id = message->get_ike_sa_id(message);
1157 /* clone the IKE_SA ID so we can modify the initiator flag */
1158 id = id->clone(id);
1159 id->switch_initiator(id);
1160
1161 DBG2(DBG_MGR, "checkout IKE_SA by message");
1162
1163 if (id->get_responder_spi(id) == 0)
1164 {
1165 if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
1166 {
1167 if (message->get_exchange_type(message) == IKE_SA_INIT &&
1168 message->get_request(message))
1169 {
1170 ike_version = IKEV2;
1171 is_init = TRUE;
1172 }
1173 }
1174 else
1175 {
1176 if (message->get_exchange_type(message) == ID_PROT ||
1177 message->get_exchange_type(message) == AGGRESSIVE)
1178 {
1179 ike_version = IKEV1;
1180 is_init = TRUE;
1181 if (id->is_initiator(id))
1182 { /* not set in IKEv1, switch back before applying to new SA */
1183 id->switch_initiator(id);
1184 }
1185 }
1186 }
1187 }
1188
1189 if (is_init)
1190 {
1191 u_int64_t our_spi;
1192 chunk_t hash;
1193
1194 if (!get_init_hash(this, message, &hash))
1195 {
1196 DBG1(DBG_MGR, "ignoring message, failed to hash message");
1197 id->destroy(id);
1198 return NULL;
1199 }
1200
1201 /* ensure this is not a retransmit of an already handled init message */
1202 switch (check_and_put_init_hash(this, hash, &our_spi))
1203 {
1204 case NOT_FOUND:
1205 { /* we've not seen this packet yet, create a new IKE_SA */
1206 id->set_responder_spi(id, our_spi);
1207 ike_sa = ike_sa_create(id, FALSE, ike_version);
1208 if (ike_sa)
1209 {
1210 entry = entry_create();
1211 entry->ike_sa = ike_sa;
1212 entry->ike_sa_id = id->clone(id);
1213
1214 segment = put_entry(this, entry);
1215 entry->checked_out = TRUE;
1216 unlock_single_segment(this, segment);
1217
1218 entry->message_id = message->get_message_id(message);
1219 entry->init_hash = hash;
1220
1221 DBG2(DBG_MGR, "created IKE_SA %s[%u]",
1222 ike_sa->get_name(ike_sa),
1223 ike_sa->get_unique_id(ike_sa));
1224 }
1225 else
1226 {
1227 remove_init_hash(this, hash);
1228 chunk_free(&hash);
1229 DBG1(DBG_MGR, "ignoring message, no such IKE_SA");
1230 }
1231 id->destroy(id);
1232 charon->bus->set_sa(charon->bus, ike_sa);
1233 return ike_sa;
1234 }
1235 case FAILED:
1236 { /* we failed to allocate an SPI */
1237 chunk_free(&hash);
1238 id->destroy(id);
1239 DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
1240 return NULL;
1241 }
1242 case ALREADY_DONE:
1243 default:
1244 break;
1245 }
1246 /* it looks like we already handled this init message to some degree */
1247 id->set_responder_spi(id, our_spi);
1248 chunk_free(&hash);
1249 }
1250
1251 if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
1252 {
1253 /* only check out in IKEv2 if we are not already processing it */
1254 if (message->get_request(message) &&
1255 message->get_message_id(message) == entry->message_id)
1256 {
1257 DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
1258 entry->message_id);
1259 }
1260 else if (wait_for_entry(this, entry, segment))
1261 {
1262 ike_sa_id_t *ike_id;
1263
1264 ike_id = entry->ike_sa->get_id(entry->ike_sa);
1265 entry->checked_out = TRUE;
1266 entry->message_id = message->get_message_id(message);
1267 if (ike_id->get_responder_spi(ike_id) == 0)
1268 {
1269 ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
1270 }
1271 ike_sa = entry->ike_sa;
1272 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1273 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1274 }
1275 unlock_single_segment(this, segment);
1276 }
1277 else
1278 {
1279 charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
1280 }
1281 id->destroy(id);
1282 charon->bus->set_sa(charon->bus, ike_sa);
1283 return ike_sa;
1284 }
1285
1286 METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
1287 private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
1288 {
1289 enumerator_t *enumerator;
1290 entry_t *entry;
1291 ike_sa_t *ike_sa = NULL;
1292 peer_cfg_t *current_peer;
1293 ike_cfg_t *current_ike;
1294 u_int segment;
1295
1296 DBG2(DBG_MGR, "checkout IKE_SA by config");
1297
1298 if (!this->reuse_ikesa)
1299 { /* IKE_SA reuse disabled by config */
1300 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1301 charon->bus->set_sa(charon->bus, ike_sa);
1302 return ike_sa;
1303 }
1304
1305 enumerator = create_table_enumerator(this);
1306 while (enumerator->enumerate(enumerator, &entry, &segment))
1307 {
1308 if (!wait_for_entry(this, entry, segment))
1309 {
1310 continue;
1311 }
1312 if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
1313 { /* skip IKE_SAs which are not usable */
1314 continue;
1315 }
1316
1317 current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
1318 if (current_peer && current_peer->equals(current_peer, peer_cfg))
1319 {
1320 current_ike = current_peer->get_ike_cfg(current_peer);
1321 if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
1322 {
1323 entry->checked_out = TRUE;
1324 ike_sa = entry->ike_sa;
1325 DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
1326 ike_sa->get_unique_id(ike_sa),
1327 current_peer->get_name(current_peer));
1328 break;
1329 }
1330 }
1331 }
1332 enumerator->destroy(enumerator);
1333
1334 if (!ike_sa)
1335 { /* no IKE_SA using such a config, hand out a new */
1336 ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
1337 }
1338 charon->bus->set_sa(charon->bus, ike_sa);
1339 return ike_sa;
1340 }
1341
1342 METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
1343 private_ike_sa_manager_t *this, u_int32_t id, bool child)
1344 {
1345 enumerator_t *enumerator, *children;
1346 entry_t *entry;
1347 ike_sa_t *ike_sa = NULL;
1348 child_sa_t *child_sa;
1349 u_int segment;
1350
1351 DBG2(DBG_MGR, "checkout IKE_SA by ID");
1352
1353 enumerator = create_table_enumerator(this);
1354 while (enumerator->enumerate(enumerator, &entry, &segment))
1355 {
1356 if (wait_for_entry(this, entry, segment))
1357 {
1358 /* look for a child with such a reqid ... */
1359 if (child)
1360 {
1361 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1362 while (children->enumerate(children, (void**)&child_sa))
1363 {
1364 if (child_sa->get_reqid(child_sa) == id)
1365 {
1366 ike_sa = entry->ike_sa;
1367 break;
1368 }
1369 }
1370 children->destroy(children);
1371 }
1372 else /* ... or for an IKE_SA with such a unique id */
1373 {
1374 if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
1375 {
1376 ike_sa = entry->ike_sa;
1377 }
1378 }
1379 /* got one, return */
1380 if (ike_sa)
1381 {
1382 entry->checked_out = TRUE;
1383 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1384 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1385 break;
1386 }
1387 }
1388 }
1389 enumerator->destroy(enumerator);
1390
1391 charon->bus->set_sa(charon->bus, ike_sa);
1392 return ike_sa;
1393 }
1394
1395 METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
1396 private_ike_sa_manager_t *this, char *name, bool child)
1397 {
1398 enumerator_t *enumerator, *children;
1399 entry_t *entry;
1400 ike_sa_t *ike_sa = NULL;
1401 child_sa_t *child_sa;
1402 u_int segment;
1403
1404 enumerator = create_table_enumerator(this);
1405 while (enumerator->enumerate(enumerator, &entry, &segment))
1406 {
1407 if (wait_for_entry(this, entry, segment))
1408 {
1409 /* look for a child with such a policy name ... */
1410 if (child)
1411 {
1412 children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
1413 while (children->enumerate(children, (void**)&child_sa))
1414 {
1415 if (streq(child_sa->get_name(child_sa), name))
1416 {
1417 ike_sa = entry->ike_sa;
1418 break;
1419 }
1420 }
1421 children->destroy(children);
1422 }
1423 else /* ... or for an IKE_SA with such a connection name */
1424 {
1425 if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
1426 {
1427 ike_sa = entry->ike_sa;
1428 }
1429 }
1430 /* got one, return */
1431 if (ike_sa)
1432 {
1433 entry->checked_out = TRUE;
1434 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1435 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1436 break;
1437 }
1438 }
1439 }
1440 enumerator->destroy(enumerator);
1441
1442 charon->bus->set_sa(charon->bus, ike_sa);
1443 return ike_sa;
1444 }
1445
1446 /**
1447 * enumerator filter function, waiting variant
1448 */
1449 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1450 entry_t **in, ike_sa_t **out, u_int *segment)
1451 {
1452 if (wait_for_entry(this, *in, *segment))
1453 {
1454 *out = (*in)->ike_sa;
1455 charon->bus->set_sa(charon->bus, *out);
1456 return TRUE;
1457 }
1458 return FALSE;
1459 }
1460
1461 /**
1462 * enumerator filter function, skipping variant
1463 */
1464 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1465 entry_t **in, ike_sa_t **out, u_int *segment)
1466 {
1467 if (!(*in)->driveout_new_threads &&
1468 !(*in)->driveout_waiting_threads &&
1469 !(*in)->checked_out)
1470 {
1471 *out = (*in)->ike_sa;
1472 charon->bus->set_sa(charon->bus, *out);
1473 return TRUE;
1474 }
1475 return FALSE;
1476 }
1477
1478 /**
1479 * Reset the thread's current IKE_SA after enumeration
1480 */
1481 static void reset_sa(void *data)
1482 {
1483 charon->bus->set_sa(charon->bus, NULL);
1484 }
1485
1486 METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
1487 private_ike_sa_manager_t* this, bool wait)
1488 {
1489 return enumerator_create_filter(create_table_enumerator(this),
1490 wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
1491 this, reset_sa);
1492 }
1493
1494 METHOD(ike_sa_manager_t, checkin, void,
1495 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1496 {
1497 /* to check the SA back in, we look for the pointer of the ike_sa
1498 * in all entries.
1499 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
1500 * on reception of an IKE_SA_INIT response) the lookup will work but
1501 * updating of the SPI MAY be necessary...
1502 */
1503 entry_t *entry;
1504 ike_sa_id_t *ike_sa_id;
1505 host_t *other;
1506 identification_t *my_id, *other_id;
1507 u_int segment;
1508
1509 ike_sa_id = ike_sa->get_id(ike_sa);
1510 my_id = ike_sa->get_my_id(ike_sa);
1511 other_id = ike_sa->get_other_eap_id(ike_sa);
1512 other = ike_sa->get_other_host(ike_sa);
1513
1514 DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1515 ike_sa->get_unique_id(ike_sa));
1516
1517 /* look for the entry */
1518 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1519 {
1520 /* ike_sa_id must be updated */
1521 entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
1522 /* signal waiting threads */
1523 entry->checked_out = FALSE;
1524 entry->message_id = -1;
1525 /* check if this SA is half-open */
1526 if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
1527 {
1528 /* not half open anymore */
1529 entry->half_open = FALSE;
1530 remove_half_open(this, entry);
1531 }
1532 else if (entry->half_open && !other->ip_equals(other, entry->other))
1533 {
1534 /* the other host's IP has changed, we must update the hash table */
1535 remove_half_open(this, entry);
1536 DESTROY_IF(entry->other);
1537 entry->other = other->clone(other);
1538 put_half_open(this, entry);
1539 }
1540 else if (!entry->half_open &&
1541 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
1542 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
1543 {
1544 /* this is a new half-open SA */
1545 entry->half_open = TRUE;
1546 entry->other = other->clone(other);
1547 put_half_open(this, entry);
1548 }
1549 DBG2(DBG_MGR, "check-in of IKE_SA successful.");
1550 entry->condvar->signal(entry->condvar);
1551 }
1552 else
1553 {
1554 entry = entry_create();
1555 entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
1556 entry->ike_sa = ike_sa;
1557 segment = put_entry(this, entry);
1558 }
1559
1560 /* apply identities for duplicate test */
1561 if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
1562 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
1563 entry->my_id == NULL && entry->other_id == NULL)
1564 {
1565 if (ike_sa->get_version(ike_sa) == IKEV1)
1566 {
1567 /* If authenticated and received INITIAL_CONTACT,
1568 * delete any existing IKE_SAs with that peer. */
1569 if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
1570 {
1571 this->public.check_uniqueness(&this->public, ike_sa, TRUE);
1572 ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
1573 }
1574 }
1575
1576 entry->my_id = my_id->clone(my_id);
1577 entry->other_id = other_id->clone(other_id);
1578 if (!entry->other)
1579 {
1580 entry->other = other->clone(other);
1581 }
1582 put_connected_peers(this, entry);
1583 }
1584
1585 unlock_single_segment(this, segment);
1586
1587 charon->bus->set_sa(charon->bus, NULL);
1588 }
1589
1590 METHOD(ike_sa_manager_t, checkin_and_destroy, void,
1591 private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
1592 {
1593 /* deletion is a bit complex, we must ensure that no thread is waiting for
1594 * this SA.
1595 * We set the driveout flags and keep signaling the condvar until all waiting
1596 * threads have left, then remove the SA from the table.
1597 */
1598 entry_t *entry;
1599 ike_sa_id_t *ike_sa_id;
1600 u_int segment;
1601
1602 ike_sa_id = ike_sa->get_id(ike_sa);
1603
1604 DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1605 ike_sa->get_unique_id(ike_sa));
1606
1607 if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
1608 {
1609 if (entry->driveout_waiting_threads && entry->driveout_new_threads)
1610 { /* it looks like flush() has been called and the SA is being deleted
1611 * anyway, just check it in */
1612 DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
1613 entry->checked_out = FALSE;
1614 entry->condvar->broadcast(entry->condvar);
1615 unlock_single_segment(this, segment);
1616 return;
1617 }
1618
1619 /* drive out waiting threads, as we are in a hurry */
1620 entry->driveout_waiting_threads = TRUE;
1621 /* mark it, so no new threads can get this entry */
1622 entry->driveout_new_threads = TRUE;
1623 /* wait until all workers have done their work */
1624 while (entry->waiting_threads)
1625 {
1626 /* wake up all */
1627 entry->condvar->broadcast(entry->condvar);
1628 /* they will wake us again when their work is done */
1629 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1630 }
1631 remove_entry(this, entry);
1632 unlock_single_segment(this, segment);
1633
1634 if (entry->half_open)
1635 {
1636 remove_half_open(this, entry);
1637 }
1638 if (entry->my_id && entry->other_id)
1639 {
1640 remove_connected_peers(this, entry);
1641 }
1642 if (entry->init_hash.ptr)
1643 {
1644 remove_init_hash(this, entry->init_hash);
1645 }
1646
1647 entry_destroy(entry);
1648
1649 DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
1650 }
1651 else
1652 {
1653 DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
1654 ike_sa->destroy(ike_sa);
1655 }
1656 charon->bus->set_sa(charon->bus, NULL);
1657 }
1658
1659 /**
1660 * Cleanup function for create_id_enumerator
1661 */
1662 static void id_enumerator_cleanup(linked_list_t *ids)
1663 {
1664 ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
1665 }
1666
1667 METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
1668 private_ike_sa_manager_t *this, identification_t *me,
1669 identification_t *other, int family)
1670 {
1671 table_item_t *item;
1672 u_int row, segment;
1673 rwlock_t *lock;
1674 linked_list_t *ids = NULL;
1675
1676 row = chunk_hash_inc(other->get_encoding(other),
1677 chunk_hash(me->get_encoding(me))) & this->table_mask;
1678 segment = row & this->segment_mask;
1679
1680 lock = this->connected_peers_segments[segment].lock;
1681 lock->read_lock(lock);
1682 item = this->connected_peers_table[row];
1683 while (item)
1684 {
1685 connected_peers_t *current = item->value;
1686
1687 if (connected_peers_match(current, me, other, family))
1688 {
1689 ids = current->sas->clone_offset(current->sas,
1690 offsetof(ike_sa_id_t, clone));
1691 break;
1692 }
1693 item = item->next;
1694 }
1695 lock->unlock(lock);
1696
1697 if (!ids)
1698 {
1699 return enumerator_create_empty();
1700 }
1701 return enumerator_create_cleaner(ids->create_enumerator(ids),
1702 (void*)id_enumerator_cleanup, ids);
1703 }
1704
1705 METHOD(ike_sa_manager_t, check_uniqueness, bool,
1706 private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
1707 {
1708 bool cancel = FALSE;
1709 peer_cfg_t *peer_cfg;
1710 unique_policy_t policy;
1711 enumerator_t *enumerator;
1712 ike_sa_id_t *id = NULL;
1713 identification_t *me, *other;
1714 host_t *other_host;
1715
1716 peer_cfg = ike_sa->get_peer_cfg(ike_sa);
1717 policy = peer_cfg->get_unique_policy(peer_cfg);
1718 if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
1719 {
1720 return FALSE;
1721 }
1722 me = ike_sa->get_my_id(ike_sa);
1723 other = ike_sa->get_other_eap_id(ike_sa);
1724 other_host = ike_sa->get_other_host(ike_sa);
1725
1726 enumerator = create_id_enumerator(this, me, other,
1727 other_host->get_family(other_host));
1728 while (enumerator->enumerate(enumerator, &id))
1729 {
1730 status_t status = SUCCESS;
1731 ike_sa_t *duplicate;
1732
1733 duplicate = checkout(this, id);
1734 if (!duplicate)
1735 {
1736 continue;
1737 }
1738 if (force_replace)
1739 {
1740 DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
1741 "received INITIAL_CONTACT", other);
1742 checkin_and_destroy(this, duplicate);
1743 continue;
1744 }
1745 peer_cfg = duplicate->get_peer_cfg(duplicate);
1746 if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
1747 {
1748 switch (duplicate->get_state(duplicate))
1749 {
1750 case IKE_ESTABLISHED:
1751 case IKE_REKEYING:
1752 switch (policy)
1753 {
1754 case UNIQUE_REPLACE:
1755 DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer "
1756 "'%Y' due to uniqueness policy", other);
1757 status = duplicate->delete(duplicate);
1758 break;
1759 case UNIQUE_KEEP:
1760 cancel = TRUE;
1761 /* we keep the first IKE_SA and delete all
1762 * other duplicates that might exist */
1763 policy = UNIQUE_REPLACE;
1764 break;
1765 default:
1766 break;
1767 }
1768 break;
1769 default:
1770 break;
1771 }
1772 }
1773 if (status == DESTROY_ME)
1774 {
1775 checkin_and_destroy(this, duplicate);
1776 }
1777 else
1778 {
1779 checkin(this, duplicate);
1780 }
1781 }
1782 enumerator->destroy(enumerator);
1783 /* reset thread's current IKE_SA after checkin */
1784 charon->bus->set_sa(charon->bus, ike_sa);
1785 return cancel;
1786 }
1787
1788 METHOD(ike_sa_manager_t, has_contact, bool,
1789 private_ike_sa_manager_t *this, identification_t *me,
1790 identification_t *other, int family)
1791 {
1792 table_item_t *item;
1793 u_int row, segment;
1794 rwlock_t *lock;
1795 bool found = FALSE;
1796
1797 row = chunk_hash_inc(other->get_encoding(other),
1798 chunk_hash(me->get_encoding(me))) & this->table_mask;
1799 segment = row & this->segment_mask;
1800 lock = this->connected_peers_segments[segment].lock;
1801 lock->read_lock(lock);
1802 item = this->connected_peers_table[row];
1803 while (item)
1804 {
1805 if (connected_peers_match(item->value, me, other, family))
1806 {
1807 found = TRUE;
1808 break;
1809 }
1810 item = item->next;
1811 }
1812 lock->unlock(lock);
1813
1814 return found;
1815 }
1816
1817 METHOD(ike_sa_manager_t, get_count, u_int,
1818 private_ike_sa_manager_t *this)
1819 {
1820 u_int segment, count = 0;
1821 mutex_t *mutex;
1822
1823 for (segment = 0; segment < this->segment_count; segment++)
1824 {
1825 mutex = this->segments[segment & this->segment_mask].mutex;
1826 mutex->lock(mutex);
1827 count += this->segments[segment].count;
1828 mutex->unlock(mutex);
1829 }
1830 return count;
1831 }
1832
1833 METHOD(ike_sa_manager_t, get_half_open_count, u_int,
1834 private_ike_sa_manager_t *this, host_t *ip)
1835 {
1836 table_item_t *item;
1837 u_int row, segment;
1838 rwlock_t *lock;
1839 chunk_t addr;
1840 u_int count = 0;
1841
1842 if (ip)
1843 {
1844 addr = ip->get_address(ip);
1845 row = chunk_hash(addr) & this->table_mask;
1846 segment = row & this->segment_mask;
1847 lock = this->half_open_segments[segment].lock;
1848 lock->read_lock(lock);
1849 item = this->half_open_table[row];
1850 while (item)
1851 {
1852 half_open_t *half_open = item->value;
1853
1854 if (chunk_equals(addr, half_open->other))
1855 {
1856 count = half_open->count;
1857 break;
1858 }
1859 item = item->next;
1860 }
1861 lock->unlock(lock);
1862 }
1863 else
1864 {
1865 for (segment = 0; segment < this->segment_count; segment++)
1866 {
1867 lock = this->half_open_segments[segment].lock;
1868 lock->read_lock(lock);
1869 count += this->half_open_segments[segment].count;
1870 lock->unlock(lock);
1871 }
1872 }
1873 return count;
1874 }
1875
1876 METHOD(ike_sa_manager_t, flush, void,
1877 private_ike_sa_manager_t *this)
1878 {
1879 /* destroy all list entries */
1880 enumerator_t *enumerator;
1881 entry_t *entry;
1882 u_int segment;
1883
1884 lock_all_segments(this);
1885 DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
1886 /* Step 1: drive out all waiting threads */
1887 DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
1888 enumerator = create_table_enumerator(this);
1889 while (enumerator->enumerate(enumerator, &entry, &segment))
1890 {
1891 /* do not accept new threads, drive out waiting threads */
1892 entry->driveout_new_threads = TRUE;
1893 entry->driveout_waiting_threads = TRUE;
1894 }
1895 enumerator->destroy(enumerator);
1896 DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
1897 /* Step 2: wait until all are gone */
1898 enumerator = create_table_enumerator(this);
1899 while (enumerator->enumerate(enumerator, &entry, &segment))
1900 {
1901 while (entry->waiting_threads || entry->checked_out)
1902 {
1903 /* wake up all */
1904 entry->condvar->broadcast(entry->condvar);
1905 /* go sleeping until they are gone */
1906 entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
1907 }
1908 }
1909 enumerator->destroy(enumerator);
1910 DBG2(DBG_MGR, "delete all IKE_SA's");
1911 /* Step 3: initiate deletion of all IKE_SAs */
1912 enumerator = create_table_enumerator(this);
1913 while (enumerator->enumerate(enumerator, &entry, &segment))
1914 {
1915 charon->bus->set_sa(charon->bus, entry->ike_sa);
1916 if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
1917 { /* as the delete never gets processed, fire down events */
1918 switch (entry->ike_sa->get_state(entry->ike_sa))
1919 {
1920 case IKE_ESTABLISHED:
1921 case IKE_REKEYING:
1922 case IKE_DELETING:
1923 charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
1924 break;
1925 default:
1926 break;
1927 }
1928 }
1929 entry->ike_sa->delete(entry->ike_sa);
1930 }
1931 enumerator->destroy(enumerator);
1932
1933 DBG2(DBG_MGR, "destroy all entries");
1934 /* Step 4: destroy all entries */
1935 enumerator = create_table_enumerator(this);
1936 while (enumerator->enumerate(enumerator, &entry, &segment))
1937 {
1938 charon->bus->set_sa(charon->bus, entry->ike_sa);
1939 if (entry->half_open)
1940 {
1941 remove_half_open(this, entry);
1942 }
1943 if (entry->my_id && entry->other_id)
1944 {
1945 remove_connected_peers(this, entry);
1946 }
1947 if (entry->init_hash.ptr)
1948 {
1949 remove_init_hash(this, entry->init_hash);
1950 }
1951 remove_entry_at((private_enumerator_t*)enumerator);
1952 entry_destroy(entry);
1953 }
1954 enumerator->destroy(enumerator);
1955 charon->bus->set_sa(charon->bus, NULL);
1956 unlock_all_segments(this);
1957
1958 this->rng->destroy(this->rng);
1959 this->rng = NULL;
1960 this->hasher->destroy(this->hasher);
1961 this->hasher = NULL;
1962 }
1963
1964 METHOD(ike_sa_manager_t, destroy, void,
1965 private_ike_sa_manager_t *this)
1966 {
1967 u_int i;
1968
1969 /* these are already cleared in flush() above */
1970 free(this->ike_sa_table);
1971 free(this->half_open_table);
1972 free(this->connected_peers_table);
1973 free(this->init_hashes_table);
1974 for (i = 0; i < this->segment_count; i++)
1975 {
1976 this->segments[i].mutex->destroy(this->segments[i].mutex);
1977 this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
1978 this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
1979 this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
1980 }
1981 free(this->segments);
1982 free(this->half_open_segments);
1983 free(this->connected_peers_segments);
1984 free(this->init_hashes_segments);
1985
1986 free(this);
1987 }
1988
1989 /**
1990 * This function returns the smallest power of two not less than the given
1991 * number. The algorithm works by setting all bits on the right-hand side of
1992 * the most significant 1 to 1 and then incrementing the whole number so it
1993 * rolls over to the nearest power of two. Note: returns 0 for n == 0
1994 */
1995 static u_int get_nearest_powerof2(u_int n)
1996 {
1997 u_int i;
1998
1999 --n;
2000 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2001 {
2002 n |= n >> i;
2003 }
2004 return ++n;
2005 }
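/* For example, get_nearest_powerof2(5) yields 8 (5-1 = 0b100 is folded to
 * 0b111 and incremented), while get_nearest_powerof2(8) stays 8. */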
2006
2007 /*
2008 * Described in header.
2009 */
2010 ike_sa_manager_t *ike_sa_manager_create()
2011 {
2012 private_ike_sa_manager_t *this;
2013 u_int i;
2014
2015 INIT(this,
2016 .public = {
2017 .checkout = _checkout,
2018 .checkout_new = _checkout_new,
2019 .checkout_by_message = _checkout_by_message,
2020 .checkout_by_config = _checkout_by_config,
2021 .checkout_by_id = _checkout_by_id,
2022 .checkout_by_name = _checkout_by_name,
2023 .check_uniqueness = _check_uniqueness,
2024 .has_contact = _has_contact,
2025 .create_enumerator = _create_enumerator,
2026 .create_id_enumerator = _create_id_enumerator,
2027 .checkin = _checkin,
2028 .checkin_and_destroy = _checkin_and_destroy,
2029 .get_count = _get_count,
2030 .get_half_open_count = _get_half_open_count,
2031 .flush = _flush,
2032 .destroy = _destroy,
2033 },
2034 );
2035
2036 this->hasher = lib->crypto->create_hasher(lib->crypto, HASH_PREFERRED);
2037 if (this->hasher == NULL)
2038 {
2039 DBG1(DBG_MGR, "manager initialization failed, no hasher supported");
2040 free(this);
2041 return NULL;
2042 }
2043 this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
2044 if (this->rng == NULL)
2045 {
2046 DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
2047 this->hasher->destroy(this->hasher);
2048 free(this);
2049 return NULL;
2050 }
2051
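/* Illustrative only: both table parameters can be tuned in strongswan.conf;
 * assuming the default daemon name "charon", a configuration could look like:
 *
 *   charon {
 *       ikesa_table_size = 4096
 *       ikesa_table_segments = 16
 *   }
 *
 * Values are rounded up to the next power of two and capped as shown below. */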
2052 this->table_size = get_nearest_powerof2(lib->settings->get_int(
2053 lib->settings, "%s.ikesa_table_size",
2054 DEFAULT_HASHTABLE_SIZE, charon->name));
2055 this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
2056 this->table_mask = this->table_size - 1;
2057
2058 this->segment_count = get_nearest_powerof2(lib->settings->get_int(
2059 lib->settings, "%s.ikesa_table_segments",
2060 DEFAULT_SEGMENT_COUNT, charon->name));
2061 this->segment_count = max(1, min(this->segment_count, this->table_size));
2062 this->segment_mask = this->segment_count - 1;
2063
2064 this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
2065 this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
2066 for (i = 0; i < this->segment_count; i++)
2067 {
2068 this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2069 this->segments[i].count = 0;
2070 }
2071
2072 /* we use the same table parameters for the table to track half-open SAs */
2073 this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
2074 this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2075 for (i = 0; i < this->segment_count; i++)
2076 {
2077 this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2078 this->half_open_segments[i].count = 0;
2079 }
2080
2081 /* also for the hash table used for duplicate tests */
2082 this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
2083 this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
2084 for (i = 0; i < this->segment_count; i++)
2085 {
2086 this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
2087 this->connected_peers_segments[i].count = 0;
2088 }
2089
2090 /* and again for the table of hashes of seen initial IKE messages */
2091 this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
2092 this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
2093 for (i = 0; i < this->segment_count; i++)
2094 {
2095 this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
2096 this->init_hashes_segments[i].count = 0;
2097 }
2098
2099 this->reuse_ikesa = lib->settings->get_bool(lib->settings,
2100 "%s.reuse_ikesa", TRUE, charon->name);
2101 return &this->public;
2102 }