ikev1: Accept reauthentication attempts with a keep unique policy from same host
[strongswan.git] / src / libcharon / sa / ike_sa_manager.c
1 /*
2 * Copyright (C) 2005-2011 Martin Willi
3 * Copyright (C) 2011 revosec AG
4 * Copyright (C) 2008-2012 Tobias Brunner
5 * Copyright (C) 2005 Jan Hutter
6 * Hochschule fuer Technik Rapperswil
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 * for more details.
17 */
18
19 #include <string.h>
20
21 #include "ike_sa_manager.h"
22
23 #include <daemon.h>
24 #include <sa/ike_sa_id.h>
25 #include <bus/bus.h>
26 #include <threading/condvar.h>
27 #include <threading/mutex.h>
28 #include <threading/rwlock.h>
29 #include <collections/linked_list.h>
30 #include <crypto/hashers/hasher.h>
31 #include <processing/jobs/delete_ike_sa_job.h>
32
33 /* the default size of the hash table (MUST be a power of 2) */
34 #define DEFAULT_HASHTABLE_SIZE 1
35
36 /* the maximum size of the hash table (MUST be a power of 2) */
37 #define MAX_HASHTABLE_SIZE (1 << 30)
38
39 /* the default number of segments (MUST be a power of 2) */
40 #define DEFAULT_SEGMENT_COUNT 1
41
typedef struct entry_t entry_t;

/**
 * An entry in the linked list, contains IKE_SA, locking and lookup data.
 */
struct entry_t {

	/**
	 * Number of threads waiting for this ike_sa_t object.
	 */
	int waiting_threads;

	/**
	 * Condvar where threads can wait until the ike_sa_t object is free
	 * for use again.
	 */
	condvar_t *condvar;

	/**
	 * Is this ike_sa currently checked out (exclusively used by a thread)?
	 */
	bool checked_out;

	/**
	 * Does this SA drive out new threads? (no new thread may check it out)
	 */
	bool driveout_new_threads;

	/**
	 * Does this SA drive out waiting threads?
	 */
	bool driveout_waiting_threads;

	/**
	 * Identification of an IKE_SA (SPIs).
	 */
	ike_sa_id_t *ike_sa_id;

	/**
	 * The contained ike_sa_t object.
	 */
	ike_sa_t *ike_sa;

	/**
	 * Hash of the IKE_SA_INIT message, used to detect retransmissions.
	 */
	chunk_t init_hash;

	/**
	 * Remote host address, required for DoS detection and duplicate
	 * checking (a host with same my_id and other_id is *not* considered
	 * a duplicate if the address family differs).
	 */
	host_t *other;

	/**
	 * As responder: Is this SA half-open?
	 */
	bool half_open;

	/**
	 * Own identity, required for duplicate checking.
	 */
	identification_t *my_id;

	/**
	 * Remote identity, required for duplicate checking.
	 */
	identification_t *other_id;

	/**
	 * Message ID or hash of the currently processed message, -1 if none.
	 */
	u_int32_t processing;
};
116
117 /**
118 * Implementation of entry_t.destroy.
119 */
120 static status_t entry_destroy(entry_t *this)
121 {
122 /* also destroy IKE SA */
123 this->ike_sa->destroy(this->ike_sa);
124 this->ike_sa_id->destroy(this->ike_sa_id);
125 chunk_free(&this->init_hash);
126 DESTROY_IF(this->other);
127 DESTROY_IF(this->my_id);
128 DESTROY_IF(this->other_id);
129 this->condvar->destroy(this->condvar);
130 free(this);
131 return SUCCESS;
132 }
133
134 /**
135 * Creates a new entry for the ike_sa_t list.
136 */
137 static entry_t *entry_create()
138 {
139 entry_t *this;
140
141 INIT(this,
142 .condvar = condvar_create(CONDVAR_TYPE_DEFAULT),
143 .processing = -1,
144 );
145
146 return this;
147 }
148
149 /**
150 * Function that matches entry_t objects by ike_sa_id_t.
151 */
152 static bool entry_match_by_id(entry_t *entry, ike_sa_id_t *id)
153 {
154 if (id->equals(id, entry->ike_sa_id))
155 {
156 return TRUE;
157 }
158 if ((id->get_responder_spi(id) == 0 ||
159 entry->ike_sa_id->get_responder_spi(entry->ike_sa_id) == 0) &&
160 id->get_initiator_spi(id) == entry->ike_sa_id->get_initiator_spi(entry->ike_sa_id))
161 {
162 /* this is TRUE for IKE_SAs that we initiated but have not yet received a response */
163 return TRUE;
164 }
165 return FALSE;
166 }
167
168 /**
169 * Function that matches entry_t objects by ike_sa_t pointers.
170 */
171 static bool entry_match_by_sa(entry_t *entry, ike_sa_t *ike_sa)
172 {
173 return entry->ike_sa == ike_sa;
174 }
175
176 /**
177 * Hash function for ike_sa_id_t objects.
178 */
179 static u_int ike_sa_id_hash(ike_sa_id_t *ike_sa_id)
180 {
181 /* IKEv2 does not mandate random SPIs (RFC 5996, 2.6), they just have to be
182 * locally unique, so we use our randomly allocated SPI whether we are
183 * initiator or responder to ensure a good distribution. The latter is not
184 * possible for IKEv1 as we don't know whether we are original initiator or
185 * not (based on the IKE header). But as RFC 2408, section 2.5.3 proposes
186 * SPIs (Cookies) to be allocated near random (we allocate them randomly
187 * anyway) it seems safe to always use the initiator SPI. */
188 if (ike_sa_id->get_ike_version(ike_sa_id) == IKEV1_MAJOR_VERSION ||
189 ike_sa_id->is_initiator(ike_sa_id))
190 {
191 return ike_sa_id->get_initiator_spi(ike_sa_id);
192 }
193 return ike_sa_id->get_responder_spi(ike_sa_id);
194 }
195
typedef struct half_open_t half_open_t;

/**
 * Struct to manage half-open IKE_SAs per peer (for DoS detection).
 */
struct half_open_t {
	/** chunk of the remote host address (cloned, owned by this struct) */
	chunk_t other;

	/** the number of half-open IKE_SAs with that host */
	u_int count;
};
208
209 /**
210 * Destroys a half_open_t object.
211 */
212 static void half_open_destroy(half_open_t *this)
213 {
214 chunk_free(&this->other);
215 free(this);
216 }
217
typedef struct connected_peers_t connected_peers_t;

/**
 * Struct to track the established IKE_SAs between a pair of identities,
 * used for duplicate/uniqueness checking.
 */
struct connected_peers_t {
	/** own identity */
	identification_t *my_id;

	/** remote identity */
	identification_t *other_id;

	/** ip address family of peer */
	int family;

	/** list of ike_sa_id_t objects of IKE_SAs between the two identities */
	linked_list_t *sas;
};
233
234 static void connected_peers_destroy(connected_peers_t *this)
235 {
236 this->my_id->destroy(this->my_id);
237 this->other_id->destroy(this->other_id);
238 this->sas->destroy(this->sas);
239 free(this);
240 }
241
242 /**
243 * Function that matches connected_peers_t objects by the given ids.
244 */
245 static inline bool connected_peers_match(connected_peers_t *connected_peers,
246 identification_t *my_id, identification_t *other_id,
247 int family)
248 {
249 return my_id->equals(my_id, connected_peers->my_id) &&
250 other_id->equals(other_id, connected_peers->other_id) &&
251 (!family || family == connected_peers->family);
252 }
253
typedef struct init_hash_t init_hash_t;

/**
 * Struct to cache the hash of an initial IKE message together with the SPI
 * we allocated for it, to detect retransmissions.
 */
struct init_hash_t {
	/** hash of IKE_SA_INIT or initial phase1 message (data is not cloned) */
	chunk_t hash;

	/** our SPI allocated for the IKE_SA based on this message */
	u_int64_t our_spi;
};
263
typedef struct segment_t segment_t;

/**
 * Struct to manage segments of the hash table (one mutex protects all rows
 * mapped to a segment).
 */
struct segment_t {
	/** mutex to access a segment exclusively */
	mutex_t *mutex;

	/** the number of entries in this segment */
	u_int count;
};
276
typedef struct shareable_segment_t shareable_segment_t;

/**
 * Struct to manage segments of the "half-open" and "connected peers" hash
 * tables, guarded by a rwlock to allow concurrent readers.
 */
struct shareable_segment_t {
	/** rwlock to access a segment non-/exclusively */
	rwlock_t *lock;

	/** the number of entries in this segment - in case of the "half-open
	 * table" it's the sum of all half_open_t.count in a segment. */
	u_int count;
};
290
typedef struct table_item_t table_item_t;

/**
 * Instead of using linked_list_t for each bucket we store the data in our
 * own singly-linked overflow list to save memory.
 */
struct table_item_t {
	/** data of this item */
	void *value;

	/** next item in the overflow list, NULL for the last item */
	table_item_t *next;
};
304
typedef struct private_ike_sa_manager_t private_ike_sa_manager_t;

/**
 * Additional private members of ike_sa_manager_t.
 */
struct private_ike_sa_manager_t {
	/**
	 * Public interface of ike_sa_manager_t.
	 */
	ike_sa_manager_t public;

	/**
	 * Hash table with entries for the ike_sa_t objects.
	 */
	table_item_t **ike_sa_table;

	/**
	 * The size of the hash table (a power of 2).
	 */
	u_int table_size;

	/**
	 * Mask to map the hashes to table rows (table_size - 1).
	 */
	u_int table_mask;

	/**
	 * Segments of the hash table.
	 */
	segment_t *segments;

	/**
	 * The number of segments (a power of 2).
	 */
	u_int segment_count;

	/**
	 * Mask to map a table row to a segment (segment_count - 1).
	 */
	u_int segment_mask;

	/**
	 * Hash table with half_open_t objects, keyed by remote address.
	 */
	table_item_t **half_open_table;

	/**
	 * Segments of the "half-open" hash table.
	 */
	shareable_segment_t *half_open_segments;

	/**
	 * Hash table with connected_peers_t objects, keyed by identity pair.
	 */
	table_item_t **connected_peers_table;

	/**
	 * Segments of the "connected peers" hash table.
	 */
	shareable_segment_t *connected_peers_segments;

	/**
	 * Hash table with init_hash_t objects, for retransmission detection.
	 */
	table_item_t **init_hashes_table;

	/**
	 * Segments of the "hashes" hash table.
	 */
	segment_t *init_hashes_segments;

	/**
	 * RNG to get random SPIs for our side.
	 */
	rng_t *rng;

	/**
	 * SHA1 hasher for IKE_SA_INIT retransmit detection.
	 */
	hasher_t *hasher;

	/**
	 * Reuse existing IKE_SAs in checkout_by_config.
	 */
	bool reuse_ikesa;

	/**
	 * Configured IKE_SA limit, 0 if unlimited.
	 */
	u_int ikesa_limit;
};
396
397 /**
398 * Acquire a lock to access the segment of the table row with the given index.
399 * It also works with the segment index directly.
400 */
401 static inline void lock_single_segment(private_ike_sa_manager_t *this,
402 u_int index)
403 {
404 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
405 lock->lock(lock);
406 }
407
408 /**
409 * Release the lock required to access the segment of the table row with the given index.
410 * It also works with the segment index directly.
411 */
412 static inline void unlock_single_segment(private_ike_sa_manager_t *this,
413 u_int index)
414 {
415 mutex_t *lock = this->segments[index & this->segment_mask].mutex;
416 lock->unlock(lock);
417 }
418
419 /**
420 * Lock all segments
421 */
422 static void lock_all_segments(private_ike_sa_manager_t *this)
423 {
424 u_int i;
425
426 for (i = 0; i < this->segment_count; i++)
427 {
428 this->segments[i].mutex->lock(this->segments[i].mutex);
429 }
430 }
431
432 /**
433 * Unlock all segments
434 */
435 static void unlock_all_segments(private_ike_sa_manager_t *this)
436 {
437 u_int i;
438
439 for (i = 0; i < this->segment_count; i++)
440 {
441 this->segments[i].mutex->unlock(this->segments[i].mutex);
442 }
443 }
444
typedef struct private_enumerator_t private_enumerator_t;

/**
 * Hash table enumerator implementation.  While positioned on an item the
 * enumerator holds the lock of that item's segment.
 */
struct private_enumerator_t {

	/**
	 * Implements enumerator interface.
	 */
	enumerator_t enumerator;

	/**
	 * Associated ike_sa_manager_t.
	 */
	private_ike_sa_manager_t *manager;

	/**
	 * Current segment index.
	 */
	u_int segment;

	/**
	 * Currently enumerated entry, NULL between enumerate() calls.
	 */
	entry_t *entry;

	/**
	 * Current table row index.
	 */
	u_int row;

	/**
	 * Current table item.
	 */
	table_item_t *current;

	/**
	 * Previous table item, needed to unlink items in remove_entry_at().
	 */
	table_item_t *prev;
};
487
/**
 * Enumerate entries segment by segment.  Rows are interleaved: row i belongs
 * to segment (i & segment_mask), so for each segment we step through rows
 * segment, segment + segment_count, segment + 2 * segment_count, ...
 * The segment lock is held while positioned on an item of that segment.
 */
METHOD(enumerator_t, enumerate, bool,
	private_enumerator_t *this, entry_t **entry, u_int *segment)
{
	if (this->entry)
	{
		/* wake threads waiting for the previously enumerated entry */
		this->entry->condvar->signal(this->entry->condvar);
		this->entry = NULL;
	}
	while (this->segment < this->manager->segment_count)
	{
		while (this->row < this->manager->table_size)
		{
			this->prev = this->current;
			if (this->current)
			{
				this->current = this->current->next;
			}
			else
			{
				/* entering a new row: acquire the segment lock, it stays
				 * held while we enumerate items of this segment */
				lock_single_segment(this->manager, this->segment);
				this->current = this->manager->ike_sa_table[this->row];
			}
			if (this->current)
			{
				*entry = this->entry = this->current->value;
				*segment = this->segment;
				return TRUE;
			}
			/* row exhausted: release the lock, advance to the next row
			 * belonging to the same segment */
			unlock_single_segment(this->manager, this->segment);
			this->row += this->manager->segment_count;
		}
		this->segment++;
		this->row = this->segment;
	}
	return FALSE;
}
524
525 METHOD(enumerator_t, enumerator_destroy, void,
526 private_enumerator_t *this)
527 {
528 if (this->entry)
529 {
530 this->entry->condvar->signal(this->entry->condvar);
531 }
532 if (this->current)
533 {
534 unlock_single_segment(this->manager, this->segment);
535 }
536 free(this);
537 }
538
539 /**
540 * Creates an enumerator to enumerate the entries in the hash table.
541 */
542 static enumerator_t* create_table_enumerator(private_ike_sa_manager_t *this)
543 {
544 private_enumerator_t *enumerator;
545
546 INIT(enumerator,
547 .enumerator = {
548 .enumerate = (void*)_enumerate,
549 .destroy = _enumerator_destroy,
550 },
551 .manager = this,
552 );
553 return &enumerator->enumerator;
554 }
555
556 /**
557 * Put an entry into the hash table.
558 * Note: The caller has to unlock the returned segment.
559 */
560 static u_int put_entry(private_ike_sa_manager_t *this, entry_t *entry)
561 {
562 table_item_t *current, *item;
563 u_int row, segment;
564
565 INIT(item,
566 .value = entry,
567 );
568
569 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
570 segment = row & this->segment_mask;
571
572 lock_single_segment(this, segment);
573 current = this->ike_sa_table[row];
574 if (current)
575 { /* insert at the front of current bucket */
576 item->next = current;
577 }
578 this->ike_sa_table[row] = item;
579 this->segments[segment].count++;
580 return segment;
581 }
582
583 /**
584 * Remove an entry from the hash table.
585 * Note: The caller MUST have a lock on the segment of this entry.
586 */
587 static void remove_entry(private_ike_sa_manager_t *this, entry_t *entry)
588 {
589 table_item_t *item, *prev = NULL;
590 u_int row, segment;
591
592 row = ike_sa_id_hash(entry->ike_sa_id) & this->table_mask;
593 segment = row & this->segment_mask;
594 item = this->ike_sa_table[row];
595 while (item)
596 {
597 if (item->value == entry)
598 {
599 if (prev)
600 {
601 prev->next = item->next;
602 }
603 else
604 {
605 this->ike_sa_table[row] = item->next;
606 }
607 this->segments[segment].count--;
608 free(item);
609 break;
610 }
611 prev = item;
612 item = item->next;
613 }
614 }
615
/**
 * Remove the entry at the current enumerator position.
 */
static void remove_entry_at(private_enumerator_t *this)
{
	this->entry = NULL;
	if (this->current)
	{
		table_item_t *current = this->current;

		this->manager->segments[this->segment].count--;
		/* step back so the next enumerate() call continues with the item
		 * following the removed one */
		this->current = this->prev;

		if (this->prev)
		{
			this->prev->next = current->next;
		}
		else
		{
			this->manager->ike_sa_table[this->row] = current->next;
			/* with no previous item the enumerator no longer holds a
			 * position in this row; enumerate() will re-lock the segment,
			 * so release the lock we currently hold */
			unlock_single_segment(this->manager, this->segment);
		}
		free(current);
	}
}
641
642 /**
643 * Find an entry using the provided match function to compare the entries for
644 * equality.
645 */
646 static status_t get_entry_by_match_function(private_ike_sa_manager_t *this,
647 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment,
648 linked_list_match_t match, void *param)
649 {
650 table_item_t *item;
651 u_int row, seg;
652
653 row = ike_sa_id_hash(ike_sa_id) & this->table_mask;
654 seg = row & this->segment_mask;
655
656 lock_single_segment(this, seg);
657 item = this->ike_sa_table[row];
658 while (item)
659 {
660 if (match(item->value, param))
661 {
662 *entry = item->value;
663 *segment = seg;
664 /* the locked segment has to be unlocked by the caller */
665 return SUCCESS;
666 }
667 item = item->next;
668 }
669 unlock_single_segment(this, seg);
670 return NOT_FOUND;
671 }
672
673 /**
674 * Find an entry by ike_sa_id_t.
675 * Note: On SUCCESS, the caller has to unlock the segment.
676 */
677 static status_t get_entry_by_id(private_ike_sa_manager_t *this,
678 ike_sa_id_t *ike_sa_id, entry_t **entry, u_int *segment)
679 {
680 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
681 (linked_list_match_t)entry_match_by_id, ike_sa_id);
682 }
683
684 /**
685 * Find an entry by IKE_SA pointer.
686 * Note: On SUCCESS, the caller has to unlock the segment.
687 */
688 static status_t get_entry_by_sa(private_ike_sa_manager_t *this,
689 ike_sa_id_t *ike_sa_id, ike_sa_t *ike_sa, entry_t **entry, u_int *segment)
690 {
691 return get_entry_by_match_function(this, ike_sa_id, entry, segment,
692 (linked_list_match_t)entry_match_by_sa, ike_sa);
693 }
694
/**
 * Wait until no other thread is using an IKE_SA, return FALSE if entry not
 * acquirable.  Called with the entry's segment lock held; the condvar wait
 * releases and re-acquires that mutex.
 */
static bool wait_for_entry(private_ike_sa_manager_t *this, entry_t *entry,
						   u_int segment)
{
	if (entry->driveout_new_threads)
	{
		/* we are not allowed to get this */
		return FALSE;
	}
	while (entry->checked_out && !entry->driveout_waiting_threads)
	{
		/* so wait until we can get it for us.
		 * we register us as waiting. */
		entry->waiting_threads++;
		entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		entry->waiting_threads--;
	}
	/* hm, a deletion request forbids us to get this SA, get next one */
	if (entry->driveout_waiting_threads)
	{
		/* we must signal here, others may be waiting on it, too */
		entry->condvar->signal(entry->condvar);
		return FALSE;
	}
	return TRUE;
}
724
725 /**
726 * Put a half-open SA into the hash table.
727 */
728 static void put_half_open(private_ike_sa_manager_t *this, entry_t *entry)
729 {
730 table_item_t *item;
731 u_int row, segment;
732 rwlock_t *lock;
733 half_open_t *half_open;
734 chunk_t addr;
735
736 addr = entry->other->get_address(entry->other);
737 row = chunk_hash(addr) & this->table_mask;
738 segment = row & this->segment_mask;
739 lock = this->half_open_segments[segment].lock;
740 lock->write_lock(lock);
741 item = this->half_open_table[row];
742 while (item)
743 {
744 half_open = item->value;
745
746 if (chunk_equals(addr, half_open->other))
747 {
748 half_open->count++;
749 break;
750 }
751 item = item->next;
752 }
753
754 if (!item)
755 {
756 INIT(half_open,
757 .other = chunk_clone(addr),
758 .count = 1,
759 );
760 INIT(item,
761 .value = half_open,
762 .next = this->half_open_table[row],
763 );
764 this->half_open_table[row] = item;
765 }
766 this->half_open_segments[segment].count++;
767 lock->unlock(lock);
768 }
769
770 /**
771 * Remove a half-open SA from the hash table.
772 */
773 static void remove_half_open(private_ike_sa_manager_t *this, entry_t *entry)
774 {
775 table_item_t *item, *prev = NULL;
776 u_int row, segment;
777 rwlock_t *lock;
778 chunk_t addr;
779
780 addr = entry->other->get_address(entry->other);
781 row = chunk_hash(addr) & this->table_mask;
782 segment = row & this->segment_mask;
783 lock = this->half_open_segments[segment].lock;
784 lock->write_lock(lock);
785 item = this->half_open_table[row];
786 while (item)
787 {
788 half_open_t *half_open = item->value;
789
790 if (chunk_equals(addr, half_open->other))
791 {
792 if (--half_open->count == 0)
793 {
794 if (prev)
795 {
796 prev->next = item->next;
797 }
798 else
799 {
800 this->half_open_table[row] = item->next;
801 }
802 half_open_destroy(half_open);
803 free(item);
804 }
805 this->half_open_segments[segment].count--;
806 break;
807 }
808 prev = item;
809 item = item->next;
810 }
811 lock->unlock(lock);
812 }
813
/**
 * Put an SA between two peers into the hash table, keyed by the identity
 * pair.  Does nothing if the ike_sa_id is already registered for the pair.
 */
static void put_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	connected_peers_t *connected_peers;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);
	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;
	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers = item->value;

		if (connected_peers_match(connected_peers, entry->my_id,
								  entry->other_id, family))
		{
			if (connected_peers->sas->find_first(connected_peers->sas,
					(linked_list_match_t)entry->ike_sa_id->equals,
					NULL, entry->ike_sa_id) == SUCCESS)
			{	/* this SA is already registered for the pair, nothing to do */
				lock->unlock(lock);
				return;
			}
			break;
		}
		item = item->next;
	}

	if (!item)
	{	/* first SA between these two identities, create a tracking object */
		INIT(connected_peers,
			.my_id = entry->my_id->clone(entry->my_id),
			.other_id = entry->other_id->clone(entry->other_id),
			.family = family,
			.sas = linked_list_create(),
		);
		INIT(item,
			.value = connected_peers,
			.next = this->connected_peers_table[row],
		);
		this->connected_peers_table[row] = item;
	}
	connected_peers->sas->insert_last(connected_peers->sas,
									  entry->ike_sa_id->clone(entry->ike_sa_id));
	this->connected_peers_segments[segment].count++;
	lock->unlock(lock);
}
872
/**
 * Remove an SA between two peers from the hash table.  Destroys the
 * connected_peers_t object once its last SA is removed.
 */
static void remove_connected_peers(private_ike_sa_manager_t *this, entry_t *entry)
{
	table_item_t *item, *prev = NULL;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t my_id, other_id;
	int family;

	my_id = entry->my_id->get_encoding(entry->my_id);
	other_id = entry->other_id->get_encoding(entry->other_id);
	family = entry->other->get_family(entry->other);

	row = chunk_hash_inc(other_id, chunk_hash(my_id)) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->write_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, entry->my_id, entry->other_id,
								  family))
		{
			enumerator_t *enumerator;
			ike_sa_id_t *ike_sa_id;

			/* find and remove this entry's SA ID from the pair's list */
			enumerator = current->sas->create_enumerator(current->sas);
			while (enumerator->enumerate(enumerator, &ike_sa_id))
			{
				if (ike_sa_id->equals(ike_sa_id, entry->ike_sa_id))
				{
					current->sas->remove_at(current->sas, enumerator);
					ike_sa_id->destroy(ike_sa_id);
					this->connected_peers_segments[segment].count--;
					break;
				}
			}
			enumerator->destroy(enumerator);
			if (current->sas->get_count(current->sas) == 0)
			{	/* no more SAs between these identities, drop the object */
				if (prev)
				{
					prev->next = item->next;
				}
				else
				{
					this->connected_peers_table[row] = item->next;
				}
				connected_peers_destroy(current);
				free(item);
			}
			break;
		}
		prev = item;
		item = item->next;
	}
	lock->unlock(lock);
}
936
937 /**
938 * Get a random SPI for new IKE_SAs
939 */
940 static u_int64_t get_spi(private_ike_sa_manager_t *this)
941 {
942 u_int64_t spi;
943
944 if (this->rng &&
945 this->rng->get_bytes(this->rng, sizeof(spi), (u_int8_t*)&spi))
946 {
947 return spi;
948 }
949 return 0;
950 }
951
/**
 * Calculate the hash of the initial IKE message. Memory for the hash is
 * allocated on success.
 *
 * NOTE(review): the shared hasher is used without a lock here — presumably
 * serialized by the callers; confirm before changing call sites.
 *
 * @returns TRUE on success
 */
static bool get_init_hash(private_ike_sa_manager_t *this, message_t *message,
						  chunk_t *hash)
{
	host_t *src;

	if (!this->hasher)
	{	/* this might be the case when flush() has been called */
		return FALSE;
	}
	if (message->get_first_payload_type(message) == FRAGMENT_V1)
	{	/* only hash the source IP, port and SPI for fragmented init messages,
		 * as the payload differs between fragments */
		u_int16_t port;
		u_int64_t spi;

		src = message->get_source(message);
		/* the first two allocate_hash() calls only feed data into the
		 * hasher (NULL result), the last one produces the final digest */
		if (!this->hasher->allocate_hash(this->hasher,
										 src->get_address(src), NULL))
		{
			return FALSE;
		}
		port = src->get_port(src);
		if (!this->hasher->allocate_hash(this->hasher,
										 chunk_from_thing(port), NULL))
		{
			return FALSE;
		}
		spi = message->get_initiator_spi(message);
		return this->hasher->allocate_hash(this->hasher,
										   chunk_from_thing(spi), hash);
	}
	if (message->get_exchange_type(message) == ID_PROT)
	{	/* include the source for Main Mode as the hash will be the same if
		 * SPIs are reused by two initiators that use the same proposal */
		src = message->get_source(message);

		if (!this->hasher->allocate_hash(this->hasher,
										 src->get_address(src), NULL))
		{
			return FALSE;
		}
	}
	return this->hasher->allocate_hash(this->hasher,
									   message->get_packet_data(message), hash);
}
1002
1003 /**
1004 * Check if we already have created an IKE_SA based on the initial IKE message
1005 * with the given hash.
1006 * If not the hash is stored, the hash data is not(!) cloned.
1007 *
1008 * Also, the local SPI is returned. In case of a retransmit this is already
1009 * stored together with the hash, otherwise it is newly allocated and should
1010 * be used to create the IKE_SA.
1011 *
1012 * @returns ALREADY_DONE if the message with the given hash has been seen before
1013 * NOT_FOUND if the message hash was not found
1014 * FAILED if the SPI allocation failed
1015 */
1016 static status_t check_and_put_init_hash(private_ike_sa_manager_t *this,
1017 chunk_t init_hash, u_int64_t *our_spi)
1018 {
1019 table_item_t *item;
1020 u_int row, segment;
1021 mutex_t *mutex;
1022 init_hash_t *init;
1023 u_int64_t spi;
1024
1025 row = chunk_hash(init_hash) & this->table_mask;
1026 segment = row & this->segment_mask;
1027 mutex = this->init_hashes_segments[segment].mutex;
1028 mutex->lock(mutex);
1029 item = this->init_hashes_table[row];
1030 while (item)
1031 {
1032 init_hash_t *current = item->value;
1033
1034 if (chunk_equals(init_hash, current->hash))
1035 {
1036 *our_spi = current->our_spi;
1037 mutex->unlock(mutex);
1038 return ALREADY_DONE;
1039 }
1040 item = item->next;
1041 }
1042
1043 spi = get_spi(this);
1044 if (!spi)
1045 {
1046 return FAILED;
1047 }
1048
1049 INIT(init,
1050 .hash = {
1051 .len = init_hash.len,
1052 .ptr = init_hash.ptr,
1053 },
1054 .our_spi = spi,
1055 );
1056 INIT(item,
1057 .value = init,
1058 .next = this->init_hashes_table[row],
1059 );
1060 this->init_hashes_table[row] = item;
1061 *our_spi = init->our_spi;
1062 mutex->unlock(mutex);
1063 return NOT_FOUND;
1064 }
1065
1066 /**
1067 * Remove the hash of an initial IKE message from the cache.
1068 */
1069 static void remove_init_hash(private_ike_sa_manager_t *this, chunk_t init_hash)
1070 {
1071 table_item_t *item, *prev = NULL;
1072 u_int row, segment;
1073 mutex_t *mutex;
1074
1075 row = chunk_hash(init_hash) & this->table_mask;
1076 segment = row & this->segment_mask;
1077 mutex = this->init_hashes_segments[segment].mutex;
1078 mutex->lock(mutex);
1079 item = this->init_hashes_table[row];
1080 while (item)
1081 {
1082 init_hash_t *current = item->value;
1083
1084 if (chunk_equals(init_hash, current->hash))
1085 {
1086 if (prev)
1087 {
1088 prev->next = item->next;
1089 }
1090 else
1091 {
1092 this->init_hashes_table[row] = item->next;
1093 }
1094 free(current);
1095 free(item);
1096 break;
1097 }
1098 prev = item;
1099 item = item->next;
1100 }
1101 mutex->unlock(mutex);
1102 }
1103
1104 METHOD(ike_sa_manager_t, checkout, ike_sa_t*,
1105 private_ike_sa_manager_t *this, ike_sa_id_t *ike_sa_id)
1106 {
1107 ike_sa_t *ike_sa = NULL;
1108 entry_t *entry;
1109 u_int segment;
1110
1111 DBG2(DBG_MGR, "checkout IKE_SA");
1112
1113 if (get_entry_by_id(this, ike_sa_id, &entry, &segment) == SUCCESS)
1114 {
1115 if (wait_for_entry(this, entry, segment))
1116 {
1117 entry->checked_out = TRUE;
1118 ike_sa = entry->ike_sa;
1119 DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
1120 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
1121 }
1122 unlock_single_segment(this, segment);
1123 }
1124 charon->bus->set_sa(charon->bus, ike_sa);
1125 return ike_sa;
1126 }
1127
1128 METHOD(ike_sa_manager_t, checkout_new, ike_sa_t*,
1129 private_ike_sa_manager_t* this, ike_version_t version, bool initiator)
1130 {
1131 ike_sa_id_t *ike_sa_id;
1132 ike_sa_t *ike_sa;
1133 u_int8_t ike_version;
1134 u_int64_t spi;
1135
1136 ike_version = version == IKEV1 ? IKEV1_MAJOR_VERSION : IKEV2_MAJOR_VERSION;
1137
1138 spi = get_spi(this);
1139 if (!spi)
1140 {
1141 DBG1(DBG_MGR, "failed to allocate SPI for new IKE_SA");
1142 return NULL;
1143 }
1144
1145 if (initiator)
1146 {
1147 ike_sa_id = ike_sa_id_create(ike_version, spi, 0, TRUE);
1148 }
1149 else
1150 {
1151 ike_sa_id = ike_sa_id_create(ike_version, 0, spi, FALSE);
1152 }
1153 ike_sa = ike_sa_create(ike_sa_id, initiator, version);
1154 ike_sa_id->destroy(ike_sa_id);
1155
1156 if (ike_sa)
1157 {
1158 DBG2(DBG_MGR, "created IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
1159 ike_sa->get_unique_id(ike_sa));
1160 }
1161 return ike_sa;
1162 }
1163
1164 /**
1165 * Get the message ID or message hash to detect early retransmissions
1166 */
1167 static u_int32_t get_message_id_or_hash(message_t *message)
1168 {
1169 /* Use the message ID, or the message hash in IKEv1 Main/Aggressive mode */
1170 if (message->get_major_version(message) == IKEV1_MAJOR_VERSION &&
1171 message->get_message_id(message) == 0)
1172 {
1173 return chunk_hash(message->get_packet_data(message));
1174 }
1175 return message->get_message_id(message);
1176 }
1177
/**
 * Check out the IKE_SA a received message belongs to.  For initial requests
 * (IKEv2 IKE_SA_INIT, IKEv1 ID_PROT/AGGRESSIVE without responder SPI) a new
 * IKE_SA is created; the init-hash table is used to detect retransmits of
 * such messages.  Returns NULL if the message is ignored.
 */
METHOD(ike_sa_manager_t, checkout_by_message, ike_sa_t*,
	private_ike_sa_manager_t* this, message_t *message)
{
	u_int segment;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	ike_sa_id_t *id;
	ike_version_t ike_version;
	bool is_init = FALSE;

	id = message->get_ike_sa_id(message);
	/* clone the IKE_SA ID so we can modify the initiator flag */
	id = id->clone(id);
	id->switch_initiator(id);

	DBG2(DBG_MGR, "checkout IKE_SA by message");

	if (id->get_responder_spi(id) == 0)
	{	/* no responder SPI yet, this might be an initial request */
		if (message->get_major_version(message) == IKEV2_MAJOR_VERSION)
		{
			if (message->get_exchange_type(message) == IKE_SA_INIT &&
				message->get_request(message))
			{
				ike_version = IKEV2;
				is_init = TRUE;
			}
		}
		else
		{
			if (message->get_exchange_type(message) == ID_PROT ||
				message->get_exchange_type(message) == AGGRESSIVE)
			{
				ike_version = IKEV1;
				is_init = TRUE;
				if (id->is_initiator(id))
				{	/* not set in IKEv1, switch back before applying to new SA */
					id->switch_initiator(id);
				}
			}
		}
	}

	if (is_init)
	{
		u_int64_t our_spi;
		chunk_t hash;

		/* hash the initial message so retransmits can be recognized */
		if (!get_init_hash(this, message, &hash))
		{
			DBG1(DBG_MGR, "ignoring message, failed to hash message");
			id->destroy(id);
			return NULL;
		}

		/* ensure this is not a retransmit of an already handled init message */
		switch (check_and_put_init_hash(this, hash, &our_spi))
		{
			case NOT_FOUND:
			{	/* we've not seen this packet yet, create a new IKE_SA */
				if (!this->ikesa_limit ||
					this->public.get_count(&this->public) < this->ikesa_limit)
				{
					id->set_responder_spi(id, our_spi);
					ike_sa = ike_sa_create(id, FALSE, ike_version);
					if (ike_sa)
					{
						/* entry takes ownership of ike_sa and the cloned id */
						entry = entry_create();
						entry->ike_sa = ike_sa;
						entry->ike_sa_id = id;

						segment = put_entry(this, entry);
						entry->checked_out = TRUE;
						unlock_single_segment(this, segment);

						entry->processing = get_message_id_or_hash(message);
						entry->init_hash = hash;

						DBG2(DBG_MGR, "created IKE_SA %s[%u]",
							 ike_sa->get_name(ike_sa),
							 ike_sa->get_unique_id(ike_sa));

						charon->bus->set_sa(charon->bus, ike_sa);
						return ike_sa;
					}
					else
					{
						DBG1(DBG_MGR, "creating IKE_SA failed, ignoring message");
					}
				}
				else
				{
					DBG1(DBG_MGR, "ignoring %N, hitting IKE_SA limit (%u)",
						 exchange_type_names, message->get_exchange_type(message),
						 this->ikesa_limit);
				}
				/* creation failed or limit hit: undo hash registration */
				remove_init_hash(this, hash);
				chunk_free(&hash);
				id->destroy(id);
				return NULL;
			}
			case FAILED:
			{	/* we failed to allocate an SPI */
				chunk_free(&hash);
				id->destroy(id);
				DBG1(DBG_MGR, "ignoring message, failed to allocate SPI");
				return NULL;
			}
			case ALREADY_DONE:
			default:
				break;
		}
		/* it looks like we already handled this init message to some degree */
		id->set_responder_spi(id, our_spi);
		chunk_free(&hash);
	}

	if (get_entry_by_id(this, id, &entry, &segment) == SUCCESS)
	{
		/* only check out if we are not already processing it. */
		if (entry->processing == get_message_id_or_hash(message))
		{
			DBG1(DBG_MGR, "ignoring request with ID %u, already processing",
				 entry->processing);
		}
		else if (wait_for_entry(this, entry, segment))
		{
			ike_sa_id_t *ike_id;

			ike_id = entry->ike_sa->get_id(entry->ike_sa);
			entry->checked_out = TRUE;
			if (message->get_first_payload_type(message) != FRAGMENT_V1)
			{	/* only track the message for fully reassembled messages */
				entry->processing = get_message_id_or_hash(message);
			}
			if (ike_id->get_responder_spi(ike_id) == 0)
			{	/* apply the responder SPI the peer used to address us */
				ike_id->set_responder_spi(ike_id, id->get_responder_spi(id));
			}
			ike_sa = entry->ike_sa;
			DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
				 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
		}
		unlock_single_segment(this, segment);
	}
	else
	{
		charon->bus->alert(charon->bus, ALERT_INVALID_IKE_SPI, message);
	}
	id->destroy(id);
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1331
/**
 * Check out an existing usable IKE_SA whose peer and IKE config equal the
 * given peer_cfg, or hand out a fresh one if none matches (or if IKE_SA
 * reuse is disabled via the reuse_ikesa setting).
 */
METHOD(ike_sa_manager_t, checkout_by_config, ike_sa_t*,
	private_ike_sa_manager_t *this, peer_cfg_t *peer_cfg)
{
	enumerator_t *enumerator;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	peer_cfg_t *current_peer;
	ike_cfg_t *current_ike;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by config");

	if (!this->reuse_ikesa)
	{	/* IKE_SA reuse disabled by config */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
		charon->bus->set_sa(charon->bus, ike_sa);
		return ike_sa;
	}

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (!wait_for_entry(this, entry, segment))
		{
			continue;
		}
		if (entry->ike_sa->get_state(entry->ike_sa) == IKE_DELETING)
		{	/* skip IKE_SAs which are not usable */
			continue;
		}

		current_peer = entry->ike_sa->get_peer_cfg(entry->ike_sa);
		if (current_peer && current_peer->equals(current_peer, peer_cfg))
		{
			current_ike = current_peer->get_ike_cfg(current_peer);
			if (current_ike->equals(current_ike, peer_cfg->get_ike_cfg(peer_cfg)))
			{
				entry->checked_out = TRUE;
				ike_sa = entry->ike_sa;
				DBG2(DBG_MGR, "found existing IKE_SA %u with a '%s' config",
					 ike_sa->get_unique_id(ike_sa),
					 current_peer->get_name(current_peer));
				break;
			}
		}
	}
	enumerator->destroy(enumerator);

	if (!ike_sa)
	{	/* no IKE_SA using such a config, hand out a new */
		ike_sa = checkout_new(this, peer_cfg->get_ike_version(peer_cfg), TRUE);
	}
	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1387
/**
 * Check out an IKE_SA either by the reqid of one of its CHILD_SAs
 * (child = TRUE) or by its own unique ID (child = FALSE).
 */
METHOD(ike_sa_manager_t, checkout_by_id, ike_sa_t*,
	private_ike_sa_manager_t *this, u_int32_t id, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	DBG2(DBG_MGR, "checkout IKE_SA by ID");

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a reqid ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (child_sa->get_reqid(child_sa) == id)
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a unique id */
			{
				if (entry->ike_sa->get_unique_id(entry->ike_sa) == id)
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = TRUE;
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1440
/**
 * Check out an IKE_SA either by the policy name of one of its CHILD_SAs
 * (child = TRUE) or by its connection name (child = FALSE).
 */
METHOD(ike_sa_manager_t, checkout_by_name, ike_sa_t*,
	private_ike_sa_manager_t *this, char *name, bool child)
{
	enumerator_t *enumerator, *children;
	entry_t *entry;
	ike_sa_t *ike_sa = NULL;
	child_sa_t *child_sa;
	u_int segment;

	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		if (wait_for_entry(this, entry, segment))
		{
			/* look for a child with such a policy name ... */
			if (child)
			{
				children = entry->ike_sa->create_child_sa_enumerator(entry->ike_sa);
				while (children->enumerate(children, (void**)&child_sa))
				{
					if (streq(child_sa->get_name(child_sa), name))
					{
						ike_sa = entry->ike_sa;
						break;
					}
				}
				children->destroy(children);
			}
			else /* ... or for a IKE_SA with such a connection name */
			{
				if (streq(entry->ike_sa->get_name(entry->ike_sa), name))
				{
					ike_sa = entry->ike_sa;
				}
			}
			/* got one, return */
			if (ike_sa)
			{
				entry->checked_out = TRUE;
				DBG2(DBG_MGR, "IKE_SA %s[%u] successfully checked out",
					 ike_sa->get_name(ike_sa), ike_sa->get_unique_id(ike_sa));
				break;
			}
		}
	}
	enumerator->destroy(enumerator);

	charon->bus->set_sa(charon->bus, ike_sa);
	return ike_sa;
}
1491
1492 /**
1493 * enumerator filter function, waiting variant
1494 */
1495 static bool enumerator_filter_wait(private_ike_sa_manager_t *this,
1496 entry_t **in, ike_sa_t **out, u_int *segment)
1497 {
1498 if (wait_for_entry(this, *in, *segment))
1499 {
1500 *out = (*in)->ike_sa;
1501 charon->bus->set_sa(charon->bus, *out);
1502 return TRUE;
1503 }
1504 return FALSE;
1505 }
1506
1507 /**
1508 * enumerator filter function, skipping variant
1509 */
1510 static bool enumerator_filter_skip(private_ike_sa_manager_t *this,
1511 entry_t **in, ike_sa_t **out, u_int *segment)
1512 {
1513 if (!(*in)->driveout_new_threads &&
1514 !(*in)->driveout_waiting_threads &&
1515 !(*in)->checked_out)
1516 {
1517 *out = (*in)->ike_sa;
1518 charon->bus->set_sa(charon->bus, *out);
1519 return TRUE;
1520 }
1521 return FALSE;
1522 }
1523
/**
 * Reset threads SA after enumeration.  Used as enumerator cleanup callback;
 * the data argument is unused.
 */
static void reset_sa(void *data)
{
	charon->bus->set_sa(charon->bus, NULL);
}
1531
METHOD(ike_sa_manager_t, create_enumerator, enumerator_t*,
	private_ike_sa_manager_t* this, bool wait)
{
	/* wrap the table enumerator: with wait = TRUE the filter blocks on busy
	 * entries, otherwise it skips them; reset_sa clears the thread's bus SA
	 * once enumeration ends */
	return enumerator_create_filter(create_table_enumerator(this),
		wait ? (void*)enumerator_filter_wait : (void*)enumerator_filter_skip,
		this, reset_sa);
}
1539
METHOD(ike_sa_manager_t, checkin, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* to check the SA back in, we look for the pointer of the ike_sa
	 * in all entries.
	 * The lookup is done by initiator SPI, so even if the SPI has changed (e.g.
	 * on reception of a IKE_SA_INIT response) the lookup will work but
	 * updating of the SPI MAY be necessary...
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	host_t *other;
	identification_t *my_id, *other_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);
	my_id = ike_sa->get_my_id(ike_sa);
	other_id = ike_sa->get_other_eap_id(ike_sa);
	other = ike_sa->get_other_host(ike_sa);

	DBG2(DBG_MGR, "checkin IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	/* look for the entry */
	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		/* ike_sa_id must be updated */
		entry->ike_sa_id->replace_values(entry->ike_sa_id, ike_sa->get_id(ike_sa));
		/* signal waiting threads */
		entry->checked_out = FALSE;
		entry->processing = -1;
		/* check if this SA is half-open */
		if (entry->half_open && ike_sa->get_state(ike_sa) != IKE_CONNECTING)
		{
			/* not half open anymore */
			entry->half_open = FALSE;
			remove_half_open(this, entry);
		}
		else if (entry->half_open && !other->ip_equals(other, entry->other))
		{
			/* the other host's IP has changed, we must update the hash table */
			remove_half_open(this, entry);
			DESTROY_IF(entry->other);
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		else if (!entry->half_open &&
				 !entry->ike_sa_id->is_initiator(entry->ike_sa_id) &&
				 ike_sa->get_state(ike_sa) == IKE_CONNECTING)
		{
			/* this is a new half-open SA */
			entry->half_open = TRUE;
			entry->other = other->clone(other);
			put_half_open(this, entry);
		}
		DBG2(DBG_MGR, "check-in of IKE_SA successful.");
		entry->condvar->signal(entry->condvar);
	}
	else
	{
		/* the SA is not yet in the manager (e.g. rekeyed or newly created),
		 * register it with a fresh entry */
		entry = entry_create();
		entry->ike_sa_id = ike_sa_id->clone(ike_sa_id);
		entry->ike_sa = ike_sa;
		segment = put_entry(this, entry);
	}

	/* apply identities for duplicate test */
	if ((ike_sa->get_state(ike_sa) == IKE_ESTABLISHED ||
		 ike_sa->get_state(ike_sa) == IKE_PASSIVE) &&
		entry->my_id == NULL && entry->other_id == NULL)
	{
		if (ike_sa->get_version(ike_sa) == IKEV1)
		{
			/* If authenticated and received INITIAL_CONTACT,
			 * delete any existing IKE_SAs with that peer. */
			if (ike_sa->has_condition(ike_sa, COND_INIT_CONTACT_SEEN))
			{
				this->public.check_uniqueness(&this->public, ike_sa, TRUE);
				ike_sa->set_condition(ike_sa, COND_INIT_CONTACT_SEEN, FALSE);
			}
		}

		entry->my_id = my_id->clone(my_id);
		entry->other_id = other_id->clone(other_id);
		if (!entry->other)
		{
			entry->other = other->clone(other);
		}
		put_connected_peers(this, entry);
	}

	unlock_single_segment(this, segment);

	charon->bus->set_sa(charon->bus, NULL);
}
1635
METHOD(ike_sa_manager_t, checkin_and_destroy, void,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa)
{
	/* deletion is a bit complex, we must ensure that no thread is waiting for
	 * this SA.
	 * We take this SA from the table, and start signaling while threads
	 * are in the condvar.
	 */
	entry_t *entry;
	ike_sa_id_t *ike_sa_id;
	u_int segment;

	ike_sa_id = ike_sa->get_id(ike_sa);

	DBG2(DBG_MGR, "checkin and destroy IKE_SA %s[%u]", ike_sa->get_name(ike_sa),
		 ike_sa->get_unique_id(ike_sa));

	if (get_entry_by_sa(this, ike_sa_id, ike_sa, &entry, &segment) == SUCCESS)
	{
		if (entry->driveout_waiting_threads && entry->driveout_new_threads)
		{	/* it looks like flush() has been called and the SA is being deleted
			 * anyway, just check it in */
			DBG2(DBG_MGR, "ignored check-in and destroy of IKE_SA during shutdown");
			entry->checked_out = FALSE;
			entry->condvar->broadcast(entry->condvar);
			unlock_single_segment(this, segment);
			return;
		}

		/* drive out waiting threads, as we are in hurry */
		entry->driveout_waiting_threads = TRUE;
		/* mark it, so no new threads can get this entry */
		entry->driveout_new_threads = TRUE;
		/* wait until all workers have done their work */
		while (entry->waiting_threads)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* they will wake us again when their work is done */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
		remove_entry(this, entry);
		unlock_single_segment(this, segment);

		/* clean up auxiliary hash table registrations before destroying */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}

		entry_destroy(entry);

		DBG2(DBG_MGR, "check-in and destroy of IKE_SA successful");
	}
	else
	{
		DBG1(DBG_MGR, "tried to check-in and delete nonexisting IKE_SA");
		ike_sa->destroy(ike_sa);
	}
	charon->bus->set_sa(charon->bus, NULL);
}
1704
/**
 * Cleanup function for create_id_enumerator: destroys the cloned ID list
 * including all contained ike_sa_id_t objects.
 */
static void id_enumerator_cleanup(linked_list_t *ids)
{
	ids->destroy_offset(ids, offsetof(ike_sa_id_t, destroy));
}
1712
METHOD(ike_sa_manager_t, create_id_enumerator, enumerator_t*,
	private_ike_sa_manager_t *this, identification_t *me,
	identification_t *other, int family)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	linked_list_t *ids = NULL;

	/* hash both identities into the connected_peers table */
	row = chunk_hash_inc(other->get_encoding(other),
						 chunk_hash(me->get_encoding(me))) & this->table_mask;
	segment = row & this->segment_mask;

	lock = this->connected_peers_segments[segment].lock;
	lock->read_lock(lock);
	item = this->connected_peers_table[row];
	while (item)
	{
		connected_peers_t *current = item->value;

		if (connected_peers_match(current, me, other, family))
		{
			/* clone the ID list so we can enumerate it without the lock */
			ids = current->sas->clone_offset(current->sas,
											offsetof(ike_sa_id_t, clone));
			break;
		}
		item = item->next;
	}
	lock->unlock(lock);

	if (!ids)
	{
		return enumerator_create_empty();
	}
	return enumerator_create_cleaner(ids->create_enumerator(ids),
									 (void*)id_enumerator_cleanup, ids);
}
1750
/**
 * Move all CHILD_SAs from old to new
 */
static void adopt_children(ike_sa_t *old, ike_sa_t *new)
{
	enumerator_t *enumerator;
	child_sa_t *child_sa;

	enumerator = old->create_child_sa_enumerator(old);
	while (enumerator->enumerate(enumerator, &child_sa))
	{
		/* remove via the enumerator to keep iteration valid, then hand the
		 * CHILD_SA over to the new IKE_SA without destroying it */
		old->remove_child_sa(old, enumerator);
		new->add_child_sa(new, child_sa);
	}
	enumerator->destroy(enumerator);
}
1767
1768 /**
1769 * Check if the replaced IKE_SA might get reauthenticated from host
1770 */
1771 static bool is_ikev1_reauth(ike_sa_t *duplicate, host_t *host)
1772 {
1773 return duplicate->get_version(duplicate) == IKEV1 &&
1774 host->equals(host, duplicate->get_other_host(duplicate));
1775 }
1776
/**
 * Delete an existing IKE_SA due to a unique replace policy
 */
static status_t enforce_replace(private_ike_sa_manager_t *this,
								ike_sa_t *duplicate, ike_sa_t *new,
								identification_t *other, host_t *host)
{
	charon->bus->alert(charon->bus, ALERT_UNIQUE_REPLACE);

	if (is_ikev1_reauth(duplicate, host))
	{
		/* looks like a reauthentication attempt */
		adopt_children(duplicate, new);
		/* For IKEv1 we have to delay the delete for the old IKE_SA. Some
		 * peers need to complete the new SA first, otherwise the quick modes
		 * might get lost. */
		lib->scheduler->schedule_job(lib->scheduler, (job_t*)
			delete_ike_sa_job_create(duplicate->get_id(duplicate), TRUE), 10);
		return SUCCESS;
	}
	DBG1(DBG_IKE, "deleting duplicate IKE_SA for peer '%Y' due to "
		 "uniqueness policy", other);
	return duplicate->delete(duplicate);
}
1801
/**
 * Enforce the configured uniqueness policy against existing IKE_SAs with the
 * same identities.  Returns TRUE if the new ike_sa should be cancelled
 * (UNIQUE_KEEP and an established duplicate exists that is not an IKEv1
 * reauthentication from the same host).
 */
METHOD(ike_sa_manager_t, check_uniqueness, bool,
	private_ike_sa_manager_t *this, ike_sa_t *ike_sa, bool force_replace)
{
	bool cancel = FALSE;
	peer_cfg_t *peer_cfg;
	unique_policy_t policy;
	enumerator_t *enumerator;
	ike_sa_id_t *id = NULL;
	identification_t *me, *other;
	host_t *other_host;

	peer_cfg = ike_sa->get_peer_cfg(ike_sa);
	policy = peer_cfg->get_unique_policy(peer_cfg);
	if (policy == UNIQUE_NEVER || (policy == UNIQUE_NO && !force_replace))
	{	/* uniqueness not enforced for this config */
		return FALSE;
	}
	me = ike_sa->get_my_id(ike_sa);
	other = ike_sa->get_other_eap_id(ike_sa);
	other_host = ike_sa->get_other_host(ike_sa);

	enumerator = create_id_enumerator(this, me, other,
									  other_host->get_family(other_host));
	while (enumerator->enumerate(enumerator, &id))
	{
		status_t status = SUCCESS;
		ike_sa_t *duplicate;

		duplicate = checkout(this, id);
		if (!duplicate)
		{
			continue;
		}
		if (force_replace)
		{	/* INITIAL_CONTACT received: destroy the duplicate unconditionally */
			DBG1(DBG_IKE, "destroying duplicate IKE_SA for peer '%Y', "
				 "received INITIAL_CONTACT", other);
			charon->bus->ike_updown(charon->bus, duplicate, FALSE);
			checkin_and_destroy(this, duplicate);
			continue;
		}
		peer_cfg = duplicate->get_peer_cfg(duplicate);
		if (peer_cfg && peer_cfg->equals(peer_cfg, ike_sa->get_peer_cfg(ike_sa)))
		{
			switch (duplicate->get_state(duplicate))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
					switch (policy)
					{
						case UNIQUE_REPLACE:
							status = enforce_replace(this, duplicate, ike_sa,
													 other, other_host);
							break;
						case UNIQUE_KEEP:
							/* an IKEv1 reauth from the same host is accepted,
							 * even with a keep policy */
							if (!is_ikev1_reauth(duplicate, other_host))
							{
								cancel = TRUE;
								/* we keep the first IKE_SA and delete all
								 * other duplicates that might exist */
								policy = UNIQUE_REPLACE;
							}
							break;
						default:
							break;
					}
					break;
				default:
					break;
			}
		}
		if (status == DESTROY_ME)
		{
			checkin_and_destroy(this, duplicate);
		}
		else
		{
			checkin(this, duplicate);
		}
	}
	enumerator->destroy(enumerator);
	/* reset thread's current IKE_SA after checkin */
	charon->bus->set_sa(charon->bus, ike_sa);
	return cancel;
}
1887
1888 METHOD(ike_sa_manager_t, has_contact, bool,
1889 private_ike_sa_manager_t *this, identification_t *me,
1890 identification_t *other, int family)
1891 {
1892 table_item_t *item;
1893 u_int row, segment;
1894 rwlock_t *lock;
1895 bool found = FALSE;
1896
1897 row = chunk_hash_inc(other->get_encoding(other),
1898 chunk_hash(me->get_encoding(me))) & this->table_mask;
1899 segment = row & this->segment_mask;
1900 lock = this->connected_peers_segments[segment].lock;
1901 lock->read_lock(lock);
1902 item = this->connected_peers_table[row];
1903 while (item)
1904 {
1905 if (connected_peers_match(item->value, me, other, family))
1906 {
1907 found = TRUE;
1908 break;
1909 }
1910 item = item->next;
1911 }
1912 lock->unlock(lock);
1913
1914 return found;
1915 }
1916
1917 METHOD(ike_sa_manager_t, get_count, u_int,
1918 private_ike_sa_manager_t *this)
1919 {
1920 u_int segment, count = 0;
1921 mutex_t *mutex;
1922
1923 for (segment = 0; segment < this->segment_count; segment++)
1924 {
1925 mutex = this->segments[segment & this->segment_mask].mutex;
1926 mutex->lock(mutex);
1927 count += this->segments[segment].count;
1928 mutex->unlock(mutex);
1929 }
1930 return count;
1931 }
1932
METHOD(ike_sa_manager_t, get_half_open_count, u_int,
	private_ike_sa_manager_t *this, host_t *ip)
{
	table_item_t *item;
	u_int row, segment;
	rwlock_t *lock;
	chunk_t addr;
	u_int count = 0;

	if (ip)
	{	/* count half-open SAs initiated from this specific address */
		addr = ip->get_address(ip);
		row = chunk_hash(addr) & this->table_mask;
		segment = row & this->segment_mask;
		lock = this->half_open_segments[segment].lock;
		lock->read_lock(lock);
		item = this->half_open_table[row];
		while (item)
		{
			half_open_t *half_open = item->value;

			if (chunk_equals(addr, half_open->other))
			{
				count = half_open->count;
				break;
			}
			item = item->next;
		}
		lock->unlock(lock);
	}
	else
	{	/* global count: sum the per-segment counters */
		for (segment = 0; segment < this->segment_count; segment++)
		{
			lock = this->half_open_segments[segment].lock;
			lock->read_lock(lock);
			count += this->half_open_segments[segment].count;
			lock->unlock(lock);
		}
	}
	return count;
}
1975
METHOD(ike_sa_manager_t, flush, void,
	private_ike_sa_manager_t *this)
{
	/* destroy all list entries */
	enumerator_t *enumerator;
	entry_t *entry;
	u_int segment;

	lock_all_segments(this);
	DBG2(DBG_MGR, "going to destroy IKE_SA manager and all managed IKE_SA's");
	/* Step 1: drive out all waiting threads */
	DBG2(DBG_MGR, "set driveout flags for all stored IKE_SA's");
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		/* do not accept new threads, drive out waiting threads */
		entry->driveout_new_threads = TRUE;
		entry->driveout_waiting_threads = TRUE;
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "wait for all threads to leave IKE_SA's");
	/* Step 2: wait until all are gone */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		while (entry->waiting_threads || entry->checked_out)
		{
			/* wake up all */
			entry->condvar->broadcast(entry->condvar);
			/* go sleeping until they are gone */
			entry->condvar->wait(entry->condvar, this->segments[segment].mutex);
		}
	}
	enumerator->destroy(enumerator);
	DBG2(DBG_MGR, "delete all IKE_SA's");
	/* Step 3: initiate deletion of all IKE_SAs */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		if (entry->ike_sa->get_version(entry->ike_sa) == IKEV2)
		{	/* as the delete never gets processed, fire down events */
			switch (entry->ike_sa->get_state(entry->ike_sa))
			{
				case IKE_ESTABLISHED:
				case IKE_REKEYING:
				case IKE_DELETING:
					charon->bus->ike_updown(charon->bus, entry->ike_sa, FALSE);
					break;
				default:
					break;
			}
		}
		entry->ike_sa->delete(entry->ike_sa);
	}
	enumerator->destroy(enumerator);

	DBG2(DBG_MGR, "destroy all entries");
	/* Step 4: destroy all entries */
	enumerator = create_table_enumerator(this);
	while (enumerator->enumerate(enumerator, &entry, &segment))
	{
		charon->bus->set_sa(charon->bus, entry->ike_sa);
		/* unregister from the auxiliary hash tables before destroying */
		if (entry->half_open)
		{
			remove_half_open(this, entry);
		}
		if (entry->my_id && entry->other_id)
		{
			remove_connected_peers(this, entry);
		}
		if (entry->init_hash.ptr)
		{
			remove_init_hash(this, entry->init_hash);
		}
		remove_entry_at((private_enumerator_t*)enumerator);
		entry_destroy(entry);
	}
	enumerator->destroy(enumerator);
	charon->bus->set_sa(charon->bus, NULL);
	unlock_all_segments(this);

	/* release crypto helpers; destroy() below relies on this having happened */
	this->rng->destroy(this->rng);
	this->rng = NULL;
	this->hasher->destroy(this->hasher);
	this->hasher = NULL;
}
2063
METHOD(ike_sa_manager_t, destroy, void,
	private_ike_sa_manager_t *this)
{
	u_int i;

	/* these are already cleared in flush() above */
	free(this->ike_sa_table);
	free(this->half_open_table);
	free(this->connected_peers_table);
	free(this->init_hashes_table);
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex->destroy(this->segments[i].mutex);
		this->half_open_segments[i].lock->destroy(this->half_open_segments[i].lock);
		this->connected_peers_segments[i].lock->destroy(this->connected_peers_segments[i].lock);
		this->init_hashes_segments[i].mutex->destroy(this->init_hashes_segments[i].mutex);
	}
	free(this->segments);
	free(this->half_open_segments);
	free(this->connected_peers_segments);
	free(this->init_hashes_segments);

	free(this);
}
2088
2089 /**
2090 * This function returns the next-highest power of two for the given number.
2091 * The algorithm works by setting all bits on the right-hand side of the most
2092 * significant 1 to 1 and then increments the whole number so it rolls over
2093 * to the nearest power of two. Note: returns 0 for n == 0
2094 */
2095 static u_int get_nearest_powerof2(u_int n)
2096 {
2097 u_int i;
2098
2099 --n;
2100 for (i = 1; i < sizeof(u_int) * 8; i <<= 1)
2101 {
2102 n |= n >> i;
2103 }
2104 return ++n;
2105 }
2106
/*
 * Described in header.
 */
ike_sa_manager_t *ike_sa_manager_create()
{
	private_ike_sa_manager_t *this;
	u_int i;

	INIT(this,
		.public = {
			.checkout = _checkout,
			.checkout_new = _checkout_new,
			.checkout_by_message = _checkout_by_message,
			.checkout_by_config = _checkout_by_config,
			.checkout_by_id = _checkout_by_id,
			.checkout_by_name = _checkout_by_name,
			.check_uniqueness = _check_uniqueness,
			.has_contact = _has_contact,
			.create_enumerator = _create_enumerator,
			.create_id_enumerator = _create_id_enumerator,
			.checkin = _checkin,
			.checkin_and_destroy = _checkin_and_destroy,
			.get_count = _get_count,
			.get_half_open_count = _get_half_open_count,
			.flush = _flush,
			.destroy = _destroy,
		},
	);

	/* hasher is used for init-message hashing, RNG for SPI allocation */
	this->hasher = lib->crypto->create_hasher(lib->crypto, HASH_PREFERRED);
	if (this->hasher == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no hasher supported");
		free(this);
		return NULL;
	}
	this->rng = lib->crypto->create_rng(lib->crypto, RNG_WEAK);
	if (this->rng == NULL)
	{
		DBG1(DBG_MGR, "manager initialization failed, no RNG supported");
		this->hasher->destroy(this->hasher);
		free(this);
		return NULL;
	}

	/* 0 means no limit on the number of concurrent IKE_SAs */
	this->ikesa_limit = lib->settings->get_int(lib->settings,
											   "%s.ikesa_limit", 0, charon->name);

	/* table size and segment count are rounded up to powers of two so that
	 * masking can be used instead of modulo */
	this->table_size = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_size",
											DEFAULT_HASHTABLE_SIZE, charon->name));
	this->table_size = max(1, min(this->table_size, MAX_HASHTABLE_SIZE));
	this->table_mask = this->table_size - 1;

	this->segment_count = get_nearest_powerof2(lib->settings->get_int(
											lib->settings, "%s.ikesa_table_segments",
											DEFAULT_SEGMENT_COUNT, charon->name));
	this->segment_count = max(1, min(this->segment_count, this->table_size));
	this->segment_mask = this->segment_count - 1;

	this->ike_sa_table = calloc(this->table_size, sizeof(table_item_t*));
	this->segments = (segment_t*)calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->segments[i].count = 0;
	}

	/* we use the same table parameters for the table to track half-open SAs */
	this->half_open_table = calloc(this->table_size, sizeof(table_item_t*));
	this->half_open_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->half_open_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->half_open_segments[i].count = 0;
	}

	/* also for the hash table used for duplicate tests */
	this->connected_peers_table = calloc(this->table_size, sizeof(table_item_t*));
	this->connected_peers_segments = calloc(this->segment_count, sizeof(shareable_segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->connected_peers_segments[i].lock = rwlock_create(RWLOCK_TYPE_DEFAULT);
		this->connected_peers_segments[i].count = 0;
	}

	/* and again for the table of hashes of seen initial IKE messages */
	this->init_hashes_table = calloc(this->table_size, sizeof(table_item_t*));
	this->init_hashes_segments = calloc(this->segment_count, sizeof(segment_t));
	for (i = 0; i < this->segment_count; i++)
	{
		this->init_hashes_segments[i].mutex = mutex_create(MUTEX_TYPE_RECURSIVE);
		this->init_hashes_segments[i].count = 0;
	}

	this->reuse_ikesa = lib->settings->get_bool(lib->settings,
												"%s.reuse_ikesa", TRUE, charon->name);
	return &this->public;
}