1 /*
2 * Copyright (C) 2006-2011 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <utils/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** Required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** From linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /** Missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /** Default priority of installed policies */
62 #define PRIO_BASE 512
63
64 /** Default replay window size, if not set using charon.replay_window */
65 #define DEFAULT_REPLAY_WINDOW 32
66
67 /**
68 * Map the limit for bytes and packets to XFRM_INF by default
69 */
70 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
71
72 /**
73 * Create ORable bitfield of XFRM NL groups
74 */
75 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
76
77 /**
78 * Returns a pointer to the first rtattr following the nlmsghdr *nlh and the
79 * 'usual' netlink data x like 'struct xfrm_usersa_info'
80 */
81 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + \
82 NLMSG_ALIGN(sizeof(x))))
83 /**
84 * Returns a pointer to the next rtattr following rta.
85 * !!! Do not use this to parse messages. Use RTA_NEXT and RTA_OK instead !!!
86 */
87 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + \
88 RTA_ALIGN((rta)->rta_len)))
89 /**
90 * Returns the total size of attached rta data
91 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
92 */
93 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
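/*
 * Illustrative layout of the messages these macros walk (not to scale):
 *
 *   +----------------+---------------------------+----------+----------+
 *   | struct nlmsghdr| 'usual' data x, e.g.      | rtattr 1 | rtattr 2 |
 *   |                | struct xfrm_usersa_info   | + data   | + data   |
 *   +----------------+---------------------------+----------+----------+
 *   ^nlh             ^NLMSG_DATA(nlh)            ^XFRM_RTA(nlh, x)
 *
 * XFRM_PAYLOAD() is the total size of the rtattr part only.
 */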
94
95 typedef struct kernel_algorithm_t kernel_algorithm_t;
96
97 /**
98 * Mapping of IKEv2 kernel identifier to linux crypto API names
99 */
100 struct kernel_algorithm_t {
101 /**
102 * Identifier specified in IKEv2
103 */
104 int ikev2;
105
106 /**
107 * Name of the algorithm in linux crypto API
108 */
109 char *name;
110 };
111
112 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
113 "XFRM_MSG_NEWSA",
114 "XFRM_MSG_DELSA",
115 "XFRM_MSG_GETSA",
116 "XFRM_MSG_NEWPOLICY",
117 "XFRM_MSG_DELPOLICY",
118 "XFRM_MSG_GETPOLICY",
119 "XFRM_MSG_ALLOCSPI",
120 "XFRM_MSG_ACQUIRE",
121 "XFRM_MSG_EXPIRE",
122 "XFRM_MSG_UPDPOLICY",
123 "XFRM_MSG_UPDSA",
124 "XFRM_MSG_POLEXPIRE",
125 "XFRM_MSG_FLUSHSA",
126 "XFRM_MSG_FLUSHPOLICY",
127 "XFRM_MSG_NEWAE",
128 "XFRM_MSG_GETAE",
129 "XFRM_MSG_REPORT",
130 "XFRM_MSG_MIGRATE",
131 "XFRM_MSG_NEWSADINFO",
132 "XFRM_MSG_GETSADINFO",
133 "XFRM_MSG_NEWSPDINFO",
134 "XFRM_MSG_GETSPDINFO",
135 "XFRM_MSG_MAPPING"
136 );
137
138 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_KMADDRESS,
139 "XFRMA_UNSPEC",
140 "XFRMA_ALG_AUTH",
141 "XFRMA_ALG_CRYPT",
142 "XFRMA_ALG_COMP",
143 "XFRMA_ENCAP",
144 "XFRMA_TMPL",
145 "XFRMA_SA",
146 "XFRMA_POLICY",
147 "XFRMA_SEC_CTX",
148 "XFRMA_LTIME_VAL",
149 "XFRMA_REPLAY_VAL",
150 "XFRMA_REPLAY_THRESH",
151 "XFRMA_ETIMER_THRESH",
152 "XFRMA_SRCADDR",
153 "XFRMA_COADDR",
154 "XFRMA_LASTUSED",
155 "XFRMA_POLICY_TYPE",
156 "XFRMA_MIGRATE",
157 "XFRMA_ALG_AEAD",
158 "XFRMA_KMADDRESS"
159 );
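/*
 * Note: the two name tables above are used for logging only and map the
 * value ranges XFRM_MSG_NEWSA..XFRM_MSG_MAPPING and
 * XFRMA_UNSPEC..XFRMA_KMADDRESS one-to-one, so their order has to match
 * the corresponding enums in linux/xfrm.h exactly.
 */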
160
161 #define END_OF_LIST -1
162
163 /**
164 * Algorithms for encryption
165 */
166 static kernel_algorithm_t encryption_algs[] = {
167 /* {ENCR_DES_IV64, "***" }, */
168 {ENCR_DES, "des" },
169 {ENCR_3DES, "des3_ede" },
170 /* {ENCR_RC5, "***" }, */
171 /* {ENCR_IDEA, "***" }, */
172 {ENCR_CAST, "cast128" },
173 {ENCR_BLOWFISH, "blowfish" },
174 /* {ENCR_3IDEA, "***" }, */
175 /* {ENCR_DES_IV32, "***" }, */
176 {ENCR_NULL, "cipher_null" },
177 {ENCR_AES_CBC, "aes" },
178 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
179 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
180 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
181 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
182 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
183 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
184 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
185 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
186 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
187 /* {ENCR_CAMELLIA_CTR, "***" }, */
188 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
189 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
190 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
191 {ENCR_SERPENT_CBC, "serpent" },
192 {ENCR_TWOFISH_CBC, "twofish" },
193 {END_OF_LIST, NULL }
194 };
195
196 /**
197 * Algorithms for integrity protection
198 */
199 static kernel_algorithm_t integrity_algs[] = {
200 {AUTH_HMAC_MD5_96, "md5" },
201 {AUTH_HMAC_SHA1_96, "sha1" },
202 {AUTH_HMAC_SHA2_256_96, "sha256" },
203 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
204 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
205 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
206 /* {AUTH_DES_MAC, "***" }, */
207 /* {AUTH_KPDK_MD5, "***" }, */
208 {AUTH_AES_XCBC_96, "xcbc(aes)" },
209 {END_OF_LIST, NULL }
210 };
211
212 /**
213 * Algorithms for IPComp
214 */
215 static kernel_algorithm_t compression_algs[] = {
216 /* {IPCOMP_OUI, "***" }, */
217 {IPCOMP_DEFLATE, "deflate" },
218 {IPCOMP_LZS, "lzs" },
219 {IPCOMP_LZJH, "lzjh" },
220 {END_OF_LIST, NULL }
221 };
222
223 /**
224 * Look up a kernel algorithm name and its key size
225 */
226 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
227 {
228 while (list->ikev2 != END_OF_LIST)
229 {
230 if (list->ikev2 == ikev2)
231 {
232 return list->name;
233 }
234 list++;
235 }
236 return NULL;
237 }
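/*
 * Example (illustrative): lookup_algorithm(encryption_algs, ENCR_AES_CBC)
 * yields "aes", the name handed to the kernel in struct xfrm_algo; an
 * identifier without a mapping yields NULL and the caller treats the
 * algorithm as unsupported by the kernel.
 */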
238
239 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
240
241 /**
242 * Private variables and functions of kernel_netlink class.
243 */
244 struct private_kernel_netlink_ipsec_t {
245 /**
246 * Public part of the kernel_netlink_t object
247 */
248 kernel_netlink_ipsec_t public;
249
250 /**
251 * Mutex to lock access to installed policies
252 */
253 mutex_t *mutex;
254
255 /**
256 * Hash table of installed policies (policy_entry_t)
257 */
258 hashtable_t *policies;
259
260 /**
261 * Hash table of IPsec SAs using policies (ipsec_sa_t)
262 */
263 hashtable_t *sas;
264
265 /**
266 * Job receiving netlink events
267 */
268 callback_job_t *job;
269
270 /**
271 * Netlink xfrm socket (IPsec)
272 */
273 netlink_socket_t *socket_xfrm;
274
275 /**
276 * Netlink xfrm socket to receive acquire and expire events
277 */
278 int socket_xfrm_events;
279
280 /**
281 * Whether to install routes along policies
282 */
283 bool install_routes;
284
285 /**
286 * Size of the replay window, in packets
287 */
288 u_int32_t replay_window;
289
290 /**
291 * Size of the replay window bitmap, in bytes
292 */
293 u_int32_t replay_bmp;
294 };
295
296 typedef struct route_entry_t route_entry_t;
297
298 /**
299 * Installed routing entry
300 */
301 struct route_entry_t {
302 /** Name of the interface the route is bound to */
303 char *if_name;
304
305 /** Source ip of the route */
306 host_t *src_ip;
307
308 /** Gateway for this route */
309 host_t *gateway;
310
311 /** Destination net */
312 chunk_t dst_net;
313
314 /** Destination net prefixlen */
315 u_int8_t prefixlen;
316 };
317
318 /**
319 * Destroy a route_entry_t object
320 */
321 static void route_entry_destroy(route_entry_t *this)
322 {
323 free(this->if_name);
324 this->src_ip->destroy(this->src_ip);
325 DESTROY_IF(this->gateway);
326 chunk_free(&this->dst_net);
327 free(this);
328 }
329
330 /**
331 * Compare two route_entry_t objects
332 */
333 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
334 {
335 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
336 a->src_ip->equals(a->src_ip, b->src_ip) &&
337 a->gateway->equals(a->gateway, b->gateway) &&
338 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
339 }
340
341 typedef struct ipsec_sa_t ipsec_sa_t;
342
343 /**
344 * IPsec SA assigned to a policy.
345 */
346 struct ipsec_sa_t {
347 /** Source address of this SA */
348 host_t *src;
349
350 /** Destination address of this SA */
351 host_t *dst;
352
353 /** Optional mark */
354 mark_t mark;
355
356 /** Description of this SA */
357 ipsec_sa_cfg_t cfg;
358
359 /** Reference count for this SA */
360 refcount_t refcount;
361 };
362
363 /**
364 * Hash function for ipsec_sa_t objects
365 */
366 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
367 {
368 return chunk_hash_inc(sa->src->get_address(sa->src),
369 chunk_hash_inc(sa->dst->get_address(sa->dst),
370 chunk_hash_inc(chunk_from_thing(sa->mark),
371 chunk_hash(chunk_from_thing(sa->cfg)))));
372 }
373
374 /**
375 * Equality function for ipsec_sa_t objects
376 */
377 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
378 {
379 return sa->src->ip_equals(sa->src, other_sa->src) &&
380 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
381 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
382 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
383 }
384
385 /**
386 * Allocate or reference an IPsec SA object
387 */
388 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
389 host_t *src, host_t *dst, mark_t mark,
390 ipsec_sa_cfg_t *cfg)
391 {
392 ipsec_sa_t *sa, *found;
393 INIT(sa,
394 .src = src,
395 .dst = dst,
396 .mark = mark,
397 .cfg = *cfg,
398 );
399 found = this->sas->get(this->sas, sa);
400 if (!found)
401 {
402 sa->src = src->clone(src);
403 sa->dst = dst->clone(dst);
404 this->sas->put(this->sas, sa, sa);
405 }
406 else
407 {
408 free(sa);
409 sa = found;
410 }
411 ref_get(&sa->refcount);
412 return sa;
413 }
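/* Note: entries are shared, so policies using the same src/dst, mark and
 * SA config reference a single ipsec_sa_t; the refcount tracks how many
 * policy_sa_t mappings still use it. */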
414
415 /**
416 * Release and destroy an IPsec SA object
417 */
418 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
419 ipsec_sa_t *sa)
420 {
421 if (ref_put(&sa->refcount))
422 {
423 this->sas->remove(this->sas, sa);
424 DESTROY_IF(sa->src);
425 DESTROY_IF(sa->dst);
426 free(sa);
427 }
428 }
429
430 typedef struct policy_sa_t policy_sa_t;
431 typedef struct policy_sa_fwd_t policy_sa_fwd_t;
432
433 /**
434 * Mapping between a policy and an IPsec SA.
435 */
436 struct policy_sa_t {
437 /** Priority assigned to the policy when installed with this SA */
438 u_int32_t priority;
439
440 /** Type of the policy */
441 policy_type_t type;
442
443 /** Assigned SA */
444 ipsec_sa_t *sa;
445 };
446
447 /**
448 * For forward policies we also cache the traffic selectors in order to install
449 * the route.
450 */
451 struct policy_sa_fwd_t {
452 /** Generic interface */
453 policy_sa_t generic;
454
455 /** Source traffic selector of this policy */
456 traffic_selector_t *src_ts;
457
458 /** Destination traffic selector of this policy */
459 traffic_selector_t *dst_ts;
460 };
461
462 /**
463 * Create a policy_sa(_fwd)_t object
464 */
465 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
466 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
467 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
468 ipsec_sa_cfg_t *cfg)
469 {
470 policy_sa_t *policy;
471
472 if (dir == POLICY_FWD)
473 {
474 policy_sa_fwd_t *fwd;
475 INIT(fwd,
476 .src_ts = src_ts->clone(src_ts),
477 .dst_ts = dst_ts->clone(dst_ts),
478 );
479 policy = &fwd->generic;
480 }
481 else
482 {
483 INIT(policy);
484 }
485 policy->type = type;
486 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
487 return policy;
488 }
489
490 /**
491 * Destroy a policy_sa(_fwd)_t object
492 */
493 static void policy_sa_destroy(policy_sa_t *policy, policy_dir_t *dir,
494 private_kernel_netlink_ipsec_t *this)
495 {
496 if (*dir == POLICY_FWD)
497 {
498 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
499 fwd->src_ts->destroy(fwd->src_ts);
500 fwd->dst_ts->destroy(fwd->dst_ts);
501 }
502 ipsec_sa_destroy(this, policy->sa);
503 free(policy);
504 }
505
506 typedef struct policy_entry_t policy_entry_t;
507
508 /**
509 * Installed kernel policy.
510 */
511 struct policy_entry_t {
512
513 /** Direction of this policy: in, out, forward */
514 u_int8_t direction;
515
516 /** Parameters of installed policy */
517 struct xfrm_selector sel;
518
519 /** Optional mark */
520 u_int32_t mark;
521
522 /** Associated route installed for this policy */
523 route_entry_t *route;
524
525 /** List of SAs this policy is used by, ordered by priority */
526 linked_list_t *used_by;
527 };
528
529 /**
530 * Destroy a policy_entry_t object
531 */
532 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
533 policy_entry_t *policy)
534 {
535 if (policy->route)
536 {
537 route_entry_destroy(policy->route);
538 }
539 if (policy->used_by)
540 {
541 policy->used_by->invoke_function(policy->used_by,
542 (linked_list_invoke_t)policy_sa_destroy,
543 &policy->direction, this);
544 policy->used_by->destroy(policy->used_by);
545 }
546 free(policy);
547 }
548
549 /**
550 * Hash function for policy_entry_t objects
551 */
552 static u_int policy_hash(policy_entry_t *key)
553 {
554 chunk_t chunk = chunk_create((void*)&key->sel,
555 sizeof(struct xfrm_selector) + sizeof(u_int32_t));
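/* the hashed chunk covers the selector plus the 32-bit mark member that
 * immediately follows it in policy_entry_t; the direction is only
 * compared in policy_equals() below */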
556 return chunk_hash(chunk);
557 }
558
559 /**
560 * Equality function for policy_entry_t objects
561 */
562 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
563 {
564 return memeq(&key->sel, &other_key->sel,
565 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
566 key->direction == other_key->direction;
567 }
568
569 /**
570 * Calculate the priority of a policy
571 */
572 static inline u_int32_t get_priority(policy_entry_t *policy,
573 policy_priority_t prio)
574 {
575 u_int32_t priority = PRIO_BASE;
576 switch (prio)
577 {
578 case POLICY_PRIORITY_ROUTED:
579 priority <<= 1;
580 /* fall-through */
581 case POLICY_PRIORITY_DEFAULT:
582 break;
583 }
584 /* calculate priority based on selector size, small size = high prio */
585 priority -= policy->sel.prefixlen_s;
586 priority -= policy->sel.prefixlen_d;
587 priority <<= 2; /* make some room for the two flags */
588 priority += policy->sel.sport_mask || policy->sel.dport_mask ? 0 : 2;
589 priority += policy->sel.proto ? 0 : 1;
590 return priority;
591 }
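/*
 * Worked example (illustrative): a default-priority policy with a 32-bit
 * source prefix, a 0-bit destination prefix, port masks and a protocol set
 * gets (512 - 32 - 0) << 2 = 1920; a routed (trap) policy with the same
 * selector starts from a doubled base, (1024 - 32 - 0) << 2 = 3968. Lower
 * values are preferred by the kernel, so more specific selectors win.
 */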
592
593 /**
594 * Convert the general ipsec mode to the one defined in xfrm.h
595 */
596 static u_int8_t mode2kernel(ipsec_mode_t mode)
597 {
598 switch (mode)
599 {
600 case MODE_TRANSPORT:
601 return XFRM_MODE_TRANSPORT;
602 case MODE_TUNNEL:
603 return XFRM_MODE_TUNNEL;
604 case MODE_BEET:
605 return XFRM_MODE_BEET;
606 default:
607 return mode;
608 }
609 }
610
611 /**
612 * Convert a host_t to a struct xfrm_address
613 */
614 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
615 {
616 chunk_t chunk = host->get_address(host);
617 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
618 }
619
620 /**
621 * Convert a struct xfrm_address to a host_t
622 */
623 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
624 {
625 chunk_t chunk;
626
627 switch (family)
628 {
629 case AF_INET:
630 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
631 break;
632 case AF_INET6:
633 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
634 break;
635 default:
636 return NULL;
637 }
638 return host_create_from_chunk(family, chunk, ntohs(port));
639 }
640
641 /**
642 * Convert a traffic selector address range to subnet and its mask.
643 */
644 static void ts2subnet(traffic_selector_t* ts,
645 xfrm_address_t *net, u_int8_t *mask)
646 {
647 host_t *net_host;
648 chunk_t net_chunk;
649
650 ts->to_subnet(ts, &net_host, mask);
651 net_chunk = net_host->get_address(net_host);
652 memcpy(net, net_chunk.ptr, net_chunk.len);
653 net_host->destroy(net_host);
654 }
655
656 /**
657 * Convert a traffic selector port range to port/portmask
658 */
659 static void ts2ports(traffic_selector_t* ts,
660 u_int16_t *port, u_int16_t *mask)
661 {
662 	/* Linux does not seem to accept complex port masks. Only
663 	 * "any" or a single specific port is allowed. We therefore use
664 	 * "any" for a port range and the specific port otherwise.
665 	 */
666 u_int16_t from, to;
667
668 from = ts->get_from_port(ts);
669 to = ts->get_to_port(ts);
670
671 if (from == to)
672 {
673 *port = htons(from);
674 *mask = ~0;
675 }
676 else
677 {
678 *port = 0;
679 *mask = 0;
680 }
681 }
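/* Example (illustrative): a selector covering only port 500 yields
 * port = htons(500), mask = 0xffff, while a range like 1024..65535 is
 * installed as any port (0/0). */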
682
683 /**
684 * Convert a pair of traffic_selectors to an xfrm_selector
685 */
686 static struct xfrm_selector ts2selector(traffic_selector_t *src,
687 traffic_selector_t *dst)
688 {
689 struct xfrm_selector sel;
690
691 memset(&sel, 0, sizeof(sel));
692 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
693 /* src or dest proto may be "any" (0), use more restrictive one */
694 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
695 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
696 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
697 ts2ports(dst, &sel.dport, &sel.dport_mask);
698 ts2ports(src, &sel.sport, &sel.sport_mask);
699 sel.ifindex = 0;
700 sel.user = 0;
701
702 return sel;
703 }
704
705 /**
706 * Convert an xfrm_selector to a src|dst traffic_selector
707 */
708 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
709 {
710 u_char *addr;
711 u_int8_t prefixlen;
712 u_int16_t port = 0;
713 host_t *host = NULL;
714
715 if (src)
716 {
717 addr = (u_char*)&sel->saddr;
718 prefixlen = sel->prefixlen_s;
719 if (sel->sport_mask)
720 {
721 port = htons(sel->sport);
722 }
723 }
724 else
725 {
726 addr = (u_char*)&sel->daddr;
727 prefixlen = sel->prefixlen_d;
728 if (sel->dport_mask)
729 {
730 port = htons(sel->dport);
731 }
732 }
733
734 /* The Linux 2.6 kernel does not set the selector's family field,
735 * so as a kludge we additionally test the prefix length.
736 */
737 if (sel->family == AF_INET || sel->prefixlen_s == 32)
738 {
739 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
740 }
741 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
742 {
743 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
744 }
745
746 if (host)
747 {
748 return traffic_selector_create_from_subnet(host, prefixlen,
749 sel->proto, port);
750 }
751 return NULL;
752 }
753
754 /**
755 * Process a XFRM_MSG_ACQUIRE from kernel
756 */
757 static void process_acquire(private_kernel_netlink_ipsec_t *this,
758 struct nlmsghdr *hdr)
759 {
760 struct xfrm_user_acquire *acquire;
761 struct rtattr *rta;
762 size_t rtasize;
763 traffic_selector_t *src_ts, *dst_ts;
764 u_int32_t reqid = 0;
765 int proto = 0;
766
767 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
768 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
769 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
770
771 DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");
772
773 while (RTA_OK(rta, rtasize))
774 {
775 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
776
777 if (rta->rta_type == XFRMA_TMPL)
778 {
779 struct xfrm_user_tmpl* tmpl;
780 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
781 reqid = tmpl->reqid;
782 proto = tmpl->id.proto;
783 }
784 rta = RTA_NEXT(rta, rtasize);
785 }
786 switch (proto)
787 {
788 case 0:
789 case IPPROTO_ESP:
790 case IPPROTO_AH:
791 break;
792 default:
793 /* acquire for AH/ESP only, not for IPCOMP */
794 return;
795 }
796 src_ts = selector2ts(&acquire->sel, TRUE);
797 dst_ts = selector2ts(&acquire->sel, FALSE);
798
799 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
800 dst_ts);
801 }
802
803 /**
804 * Process a XFRM_MSG_EXPIRE from kernel
805 */
806 static void process_expire(private_kernel_netlink_ipsec_t *this,
807 struct nlmsghdr *hdr)
808 {
809 struct xfrm_user_expire *expire;
810 u_int32_t spi, reqid;
811 u_int8_t protocol;
812
813 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
814 protocol = expire->state.id.proto;
815 spi = expire->state.id.spi;
816 reqid = expire->state.reqid;
817
818 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
819
820 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
821 {
822 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
823 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
824 return;
825 }
826
827 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
828 spi, expire->hard != 0);
829 }
830
831 /**
832 * Process a XFRM_MSG_MIGRATE from kernel
833 */
834 static void process_migrate(private_kernel_netlink_ipsec_t *this,
835 struct nlmsghdr *hdr)
836 {
837 struct xfrm_userpolicy_id *policy_id;
838 struct rtattr *rta;
839 size_t rtasize;
840 traffic_selector_t *src_ts, *dst_ts;
841 host_t *local = NULL, *remote = NULL;
842 host_t *old_src = NULL, *old_dst = NULL;
843 host_t *new_src = NULL, *new_dst = NULL;
844 u_int32_t reqid = 0;
845 policy_dir_t dir;
846
847 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
848 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
849 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
850
851 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
852
853 src_ts = selector2ts(&policy_id->sel, TRUE);
854 dst_ts = selector2ts(&policy_id->sel, FALSE);
855 dir = (policy_dir_t)policy_id->dir;
856
857 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
858
859 while (RTA_OK(rta, rtasize))
860 {
861 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
862 if (rta->rta_type == XFRMA_KMADDRESS)
863 {
864 struct xfrm_user_kmaddress *kmaddress;
865
866 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
867 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
868 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
869 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
870 }
871 else if (rta->rta_type == XFRMA_MIGRATE)
872 {
873 struct xfrm_user_migrate *migrate;
874
875 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
876 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
877 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
878 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
879 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
880 reqid = migrate->reqid;
881 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
882 old_src, old_dst, new_src, new_dst, reqid);
883 DESTROY_IF(old_src);
884 DESTROY_IF(old_dst);
885 DESTROY_IF(new_src);
886 DESTROY_IF(new_dst);
887 }
888 rta = RTA_NEXT(rta, rtasize);
889 }
890
891 if (src_ts && dst_ts && local && remote)
892 {
893 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
894 src_ts, dst_ts, dir, local, remote);
895 }
896 else
897 {
898 DESTROY_IF(src_ts);
899 DESTROY_IF(dst_ts);
900 DESTROY_IF(local);
901 DESTROY_IF(remote);
902 }
903 }
904
905 /**
906 * Process a XFRM_MSG_MAPPING from kernel
907 */
908 static void process_mapping(private_kernel_netlink_ipsec_t *this,
909 struct nlmsghdr *hdr)
910 {
911 struct xfrm_user_mapping *mapping;
912 u_int32_t spi, reqid;
913
914 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
915 spi = mapping->id.spi;
916 reqid = mapping->reqid;
917
918 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
919
920 if (mapping->id.proto == IPPROTO_ESP)
921 {
922 host_t *host;
923 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
924 mapping->new_sport);
925 if (host)
926 {
927 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
928 spi, host);
929 }
930 }
931 }
932
933 /**
934 * Receives events from kernel
935 */
936 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
937 {
938 char response[1024];
939 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
940 struct sockaddr_nl addr;
941 socklen_t addr_len = sizeof(addr);
942 int len;
943 bool oldstate;
944
945 oldstate = thread_cancelability(TRUE);
946 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
947 (struct sockaddr*)&addr, &addr_len);
948 thread_cancelability(oldstate);
949
950 if (len < 0)
951 {
952 switch (errno)
953 {
954 case EINTR:
955 /* interrupted, try again */
956 return JOB_REQUEUE_DIRECT;
957 case EAGAIN:
958 /* no data ready, select again */
959 return JOB_REQUEUE_DIRECT;
960 default:
961 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
962 sleep(1);
963 return JOB_REQUEUE_FAIR;
964 }
965 }
966
967 if (addr.nl_pid != 0)
968 { /* not from kernel. not interested, try another one */
969 return JOB_REQUEUE_DIRECT;
970 }
971
972 while (NLMSG_OK(hdr, len))
973 {
974 switch (hdr->nlmsg_type)
975 {
976 case XFRM_MSG_ACQUIRE:
977 process_acquire(this, hdr);
978 break;
979 case XFRM_MSG_EXPIRE:
980 process_expire(this, hdr);
981 break;
982 case XFRM_MSG_MIGRATE:
983 process_migrate(this, hdr);
984 break;
985 case XFRM_MSG_MAPPING:
986 process_mapping(this, hdr);
987 break;
988 default:
989 DBG1(DBG_KNL, "received unknown event from xfrm event "
990 "socket: %d", hdr->nlmsg_type);
991 break;
992 }
993 hdr = NLMSG_NEXT(hdr, len);
994 }
995 return JOB_REQUEUE_DIRECT;
996 }
997
998 /**
999 * Get an SPI for a specific protocol from the kernel.
1000 */
1001 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
1002 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
1003 u_int32_t reqid, u_int32_t *spi)
1004 {
1005 netlink_buf_t request;
1006 struct nlmsghdr *hdr, *out;
1007 struct xfrm_userspi_info *userspi;
1008 u_int32_t received_spi = 0;
1009 size_t len;
1010
1011 memset(&request, 0, sizeof(request));
1012
1013 hdr = (struct nlmsghdr*)request;
1014 hdr->nlmsg_flags = NLM_F_REQUEST;
1015 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
1016 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
1017
1018 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
1019 host2xfrm(src, &userspi->info.saddr);
1020 host2xfrm(dst, &userspi->info.id.daddr);
1021 userspi->info.id.proto = proto;
1022 userspi->info.mode = XFRM_MODE_TUNNEL;
1023 userspi->info.reqid = reqid;
1024 userspi->info.family = src->get_family(src);
1025 userspi->min = min;
1026 userspi->max = max;
1027
1028 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1029 {
1030 hdr = out;
1031 while (NLMSG_OK(hdr, len))
1032 {
1033 switch (hdr->nlmsg_type)
1034 {
1035 case XFRM_MSG_NEWSA:
1036 {
1037 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
1038 received_spi = usersa->id.spi;
1039 break;
1040 }
1041 case NLMSG_ERROR:
1042 {
1043 struct nlmsgerr *err = NLMSG_DATA(hdr);
1044 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
1045 strerror(-err->error), -err->error);
1046 break;
1047 }
1048 default:
1049 hdr = NLMSG_NEXT(hdr, len);
1050 continue;
1051 case NLMSG_DONE:
1052 break;
1053 }
1054 break;
1055 }
1056 free(out);
1057 }
1058
1059 if (received_spi == 0)
1060 {
1061 return FAILED;
1062 }
1063
1064 *spi = received_spi;
1065 return SUCCESS;
1066 }
1067
1068 METHOD(kernel_ipsec_t, get_spi, status_t,
1069 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1070 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1071 {
1072 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1073
1074 if (get_spi_internal(this, src, dst, protocol,
1075 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1076 {
1077 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1078 return FAILED;
1079 }
1080
1081 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1082 return SUCCESS;
1083 }
1084
1085 METHOD(kernel_ipsec_t, get_cpi, status_t,
1086 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1087 u_int32_t reqid, u_int16_t *cpi)
1088 {
1089 u_int32_t received_spi = 0;
1090
1091 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1092
1093 if (get_spi_internal(this, src, dst, IPPROTO_COMP,
1094 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1095 {
1096 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1097 return FAILED;
1098 }
1099
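/* the kernel allocates a 32-bit SPI even for IPComp; only the lower
 * 16 bits are used as the CPI, which the 0x100-0xEFFF range requested
 * above already guarantees to fit */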
1100 *cpi = htons((u_int16_t)ntohl(received_spi));
1101
1102 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1103 return SUCCESS;
1104 }
1105
1106 METHOD(kernel_ipsec_t, add_sa, status_t,
1107 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1108 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
1109 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
1110 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
1111 u_int16_t cpi, bool encap, bool esn, bool inbound,
1112 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
1113 {
1114 netlink_buf_t request;
1115 char *alg_name;
1116 struct nlmsghdr *hdr;
1117 struct xfrm_usersa_info *sa;
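/* ICV length in bits; the AEAD fall-through cases below add 32 bits per
 * step, i.e. *_ICV8 = 64, *_ICV12 = 96 and *_ICV16 = 128 */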
1118 u_int16_t icv_size = 64;
1119 status_t status = FAILED;
1120
1121 /* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
1122 * we are in the recursive call below */
1123 if (ipcomp != IPCOMP_NONE && cpi != 0)
1124 {
1125 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
1126 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
1127 tfc, &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED,
1128 chunk_empty, mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
1129 ipcomp = IPCOMP_NONE;
1130 /* use transport mode ESP SA, IPComp uses tunnel mode */
1131 mode = MODE_TRANSPORT;
1132 }
1133
1134 memset(&request, 0, sizeof(request));
1135
1136 if (mark.value)
1137 {
1138 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} (mark "
1139 "%u/0x%8x)", ntohl(spi), reqid, mark.value, mark.mask);
1140 }
1141 else
1142 {
1143 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u}",
1144 ntohl(spi), reqid);
1145 }
1146 hdr = (struct nlmsghdr*)request;
1147 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1148 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
1149 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1150
1151 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1152 host2xfrm(src, &sa->saddr);
1153 host2xfrm(dst, &sa->id.daddr);
1154 sa->id.spi = spi;
1155 sa->id.proto = protocol;
1156 sa->family = src->get_family(src);
1157 sa->mode = mode2kernel(mode);
1158 switch (mode)
1159 {
1160 case MODE_TUNNEL:
1161 sa->flags |= XFRM_STATE_AF_UNSPEC;
1162 break;
1163 case MODE_BEET:
1164 case MODE_TRANSPORT:
1165 if(src_ts && dst_ts)
1166 {
1167 sa->sel = ts2selector(src_ts, dst_ts);
1168 }
1169 break;
1170 default:
1171 break;
1172 }
1173
1174 sa->reqid = reqid;
1175 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1176 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1177 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1178 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1179 /* we use lifetimes since added, not since used */
1180 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1181 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1182 sa->lft.soft_use_expires_seconds = 0;
1183 sa->lft.hard_use_expires_seconds = 0;
1184
1185 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
1186
1187 switch (enc_alg)
1188 {
1189 case ENCR_UNDEFINED:
1190 /* no encryption */
1191 break;
1192 case ENCR_AES_CCM_ICV16:
1193 case ENCR_AES_GCM_ICV16:
1194 case ENCR_NULL_AUTH_AES_GMAC:
1195 case ENCR_CAMELLIA_CCM_ICV16:
1196 icv_size += 32;
1197 /* FALL */
1198 case ENCR_AES_CCM_ICV12:
1199 case ENCR_AES_GCM_ICV12:
1200 case ENCR_CAMELLIA_CCM_ICV12:
1201 icv_size += 32;
1202 /* FALL */
1203 case ENCR_AES_CCM_ICV8:
1204 case ENCR_AES_GCM_ICV8:
1205 case ENCR_CAMELLIA_CCM_ICV8:
1206 {
1207 struct xfrm_algo_aead *algo;
1208
1209 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1210 if (alg_name == NULL)
1211 {
1212 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1213 encryption_algorithm_names, enc_alg);
1214 goto failed;
1215 }
1216 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1217 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1218
1219 rthdr->rta_type = XFRMA_ALG_AEAD;
1220 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) +
1221 enc_key.len);
1222 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1223 if (hdr->nlmsg_len > sizeof(request))
1224 {
1225 goto failed;
1226 }
1227
1228 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1229 algo->alg_key_len = enc_key.len * 8;
1230 algo->alg_icv_len = icv_size;
1231 strcpy(algo->alg_name, alg_name);
1232 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1233
1234 rthdr = XFRM_RTA_NEXT(rthdr);
1235 break;
1236 }
1237 default:
1238 {
1239 struct xfrm_algo *algo;
1240
1241 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1242 if (alg_name == NULL)
1243 {
1244 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1245 encryption_algorithm_names, enc_alg);
1246 goto failed;
1247 }
1248 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1249 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1250
1251 rthdr->rta_type = XFRMA_ALG_CRYPT;
1252 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1253 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1254 if (hdr->nlmsg_len > sizeof(request))
1255 {
1256 goto failed;
1257 }
1258
1259 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1260 algo->alg_key_len = enc_key.len * 8;
1261 strcpy(algo->alg_name, alg_name);
1262 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1263
1264 rthdr = XFRM_RTA_NEXT(rthdr);
1265 }
1266 }
1267
1268 if (int_alg != AUTH_UNDEFINED)
1269 {
1270 alg_name = lookup_algorithm(integrity_algs, int_alg);
1271 if (alg_name == NULL)
1272 {
1273 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1274 integrity_algorithm_names, int_alg);
1275 goto failed;
1276 }
1277 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1278 integrity_algorithm_names, int_alg, int_key.len * 8);
1279
1280 if (int_alg == AUTH_HMAC_SHA2_256_128)
1281 {
1282 struct xfrm_algo_auth* algo;
1283
1284 /* the kernel uses SHA256 with 96 bit truncation by default,
1285 * use specified truncation size supported by newer kernels */
1286 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1287 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) +
1288 int_key.len);
1289
1290 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1291 if (hdr->nlmsg_len > sizeof(request))
1292 {
1293 goto failed;
1294 }
1295
1296 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1297 algo->alg_key_len = int_key.len * 8;
1298 algo->alg_trunc_len = 128;
1299 strcpy(algo->alg_name, alg_name);
1300 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1301 }
1302 else
1303 {
1304 struct xfrm_algo* algo;
1305
1306 rthdr->rta_type = XFRMA_ALG_AUTH;
1307 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1308
1309 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1310 if (hdr->nlmsg_len > sizeof(request))
1311 {
1312 goto failed;
1313 }
1314
1315 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1316 algo->alg_key_len = int_key.len * 8;
1317 strcpy(algo->alg_name, alg_name);
1318 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1319 }
1320 rthdr = XFRM_RTA_NEXT(rthdr);
1321 }
1322
1323 if (ipcomp != IPCOMP_NONE)
1324 {
1325 rthdr->rta_type = XFRMA_ALG_COMP;
1326 alg_name = lookup_algorithm(compression_algs, ipcomp);
1327 if (alg_name == NULL)
1328 {
1329 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1330 ipcomp_transform_names, ipcomp);
1331 goto failed;
1332 }
1333 DBG2(DBG_KNL, " using compression algorithm %N",
1334 ipcomp_transform_names, ipcomp);
1335
1336 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1337 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1338 if (hdr->nlmsg_len > sizeof(request))
1339 {
1340 goto failed;
1341 }
1342
1343 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1344 algo->alg_key_len = 0;
1345 strcpy(algo->alg_name, alg_name);
1346
1347 rthdr = XFRM_RTA_NEXT(rthdr);
1348 }
1349
1350 if (encap)
1351 {
1352 struct xfrm_encap_tmpl *tmpl;
1353
1354 rthdr->rta_type = XFRMA_ENCAP;
1355 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1356
1357 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1358 if (hdr->nlmsg_len > sizeof(request))
1359 {
1360 goto failed;
1361 }
1362
1363 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
1364 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1365 tmpl->encap_sport = htons(src->get_port(src));
1366 tmpl->encap_dport = htons(dst->get_port(dst));
1367 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1368 /* encap_oa could probably be derived from the
1369 * traffic selectors [rfc4306, p39]. In the netlink kernel
1370 * implementation pluto does the same as we do here but it uses
1371 * encap_oa in the pfkey implementation.
1372 * BUT as /usr/src/linux/net/key/af_key.c indicates the kernel ignores
1373 * it anyway
1374 * -> does that mean that NAT-T encap doesn't work in transport mode?
1375 * No. The reason the kernel ignores NAT-OA is that it recomputes
1376 * (or, rather, just ignores) the checksum. If packets pass the IPsec
1377 * checks it marks them "checksum ok" so OA isn't needed. */
1378 rthdr = XFRM_RTA_NEXT(rthdr);
1379 }
1380
1381 if (mark.value)
1382 {
1383 struct xfrm_mark *mrk;
1384
1385 rthdr->rta_type = XFRMA_MARK;
1386 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1387
1388 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1389 if (hdr->nlmsg_len > sizeof(request))
1390 {
1391 goto failed;
1392 }
1393
1394 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1395 mrk->v = mark.value;
1396 mrk->m = mark.mask;
1397 rthdr = XFRM_RTA_NEXT(rthdr);
1398 }
1399
1400 if (tfc)
1401 {
1402 u_int32_t *tfcpad;
1403
1404 rthdr->rta_type = XFRMA_TFCPAD;
1405 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1406
1407 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1408 if (hdr->nlmsg_len > sizeof(request))
1409 {
1410 goto failed;
1411 }
1412
1413 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1414 *tfcpad = tfc;
1415 rthdr = XFRM_RTA_NEXT(rthdr);
1416 }
1417
1418 if (protocol != IPPROTO_COMP)
1419 {
1420 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1421 {
1422 /* for ESN or larger replay windows we need the new
1423 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1424 struct xfrm_replay_state_esn *replay;
1425
1426 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1427 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1428 (this->replay_window + 7) / 8);
1429
1430 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1431 if (hdr->nlmsg_len > sizeof(request))
1432 {
1433 goto failed;
1434 }
1435
1436 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1437 			/* bmp_len contains the number of __u32's */
1438 replay->bmp_len = this->replay_bmp;
1439 replay->replay_window = this->replay_window;
1440 			DBG2(DBG_KNL, "  using replay window of %u packets",
1441 				 this->replay_window);
1442
1443 rthdr = XFRM_RTA_NEXT(rthdr);
1444 if (esn)
1445 {
1446 DBG2(DBG_KNL, " using extended sequence numbers (ESN)");
1447 sa->flags |= XFRM_STATE_ESN;
1448 }
1449 }
1450 else
1451 {
1452 sa->replay_window = DEFAULT_REPLAY_WINDOW;
1453 }
1454 }
1455
1456 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1457 {
1458 if (mark.value)
1459 {
1460 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1461 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1462 }
1463 else
1464 {
1465 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1466 }
1467 goto failed;
1468 }
1469
1470 status = SUCCESS;
1471
1472 failed:
1473 memwipe(request, sizeof(request));
1474 return status;
1475 }
1476
1477 /**
1478 * Get the ESN replay state (i.e. sequence numbers) of an SA.
1479 *
1480  * Allocates the replay state structure received from the kernel into one
1480  * of the two output parameters.
1481 */
1482 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1483 u_int32_t spi, u_int8_t protocol, host_t *dst,
1484 struct xfrm_replay_state_esn **replay_esn,
1485 struct xfrm_replay_state **replay)
1486 {
1487 netlink_buf_t request;
1488 struct nlmsghdr *hdr, *out = NULL;
1489 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1490 size_t len;
1491 struct rtattr *rta;
1492 size_t rtasize;
1493
1494 memset(&request, 0, sizeof(request));
1495
1496 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1497 ntohl(spi));
1498
1499 hdr = (struct nlmsghdr*)request;
1500 hdr->nlmsg_flags = NLM_F_REQUEST;
1501 hdr->nlmsg_type = XFRM_MSG_GETAE;
1502 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1503
1504 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1505 aevent_id->flags = XFRM_AE_RVAL;
1506
1507 host2xfrm(dst, &aevent_id->sa_id.daddr);
1508 aevent_id->sa_id.spi = spi;
1509 aevent_id->sa_id.proto = protocol;
1510 aevent_id->sa_id.family = dst->get_family(dst);
1511
1512 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1513 {
1514 hdr = out;
1515 while (NLMSG_OK(hdr, len))
1516 {
1517 switch (hdr->nlmsg_type)
1518 {
1519 case XFRM_MSG_NEWAE:
1520 {
1521 out_aevent = NLMSG_DATA(hdr);
1522 break;
1523 }
1524 case NLMSG_ERROR:
1525 {
1526 struct nlmsgerr *err = NLMSG_DATA(hdr);
1527 DBG1(DBG_KNL, "querying replay state from SAD entry "
1528 "failed: %s (%d)", strerror(-err->error),
1529 -err->error);
1530 break;
1531 }
1532 default:
1533 hdr = NLMSG_NEXT(hdr, len);
1534 continue;
1535 case NLMSG_DONE:
1536 break;
1537 }
1538 break;
1539 }
1540 }
1541
1542 if (out_aevent)
1543 {
1544 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1545 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1546 while (RTA_OK(rta, rtasize))
1547 {
1548 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1549 RTA_PAYLOAD(rta) == sizeof(**replay))
1550 {
1551 *replay = malloc(RTA_PAYLOAD(rta));
1552 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1553 break;
1554 }
1555 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1556 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1557 {
1558 *replay_esn = malloc(RTA_PAYLOAD(rta));
1559 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1560 break;
1561 }
1562 rta = RTA_NEXT(rta, rtasize);
1563 }
1564 }
1565 free(out);
1566 }
1567
1568 METHOD(kernel_ipsec_t, query_sa, status_t,
1569 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1570 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1571 {
1572 netlink_buf_t request;
1573 struct nlmsghdr *out = NULL, *hdr;
1574 struct xfrm_usersa_id *sa_id;
1575 struct xfrm_usersa_info *sa = NULL;
1576 status_t status = FAILED;
1577 size_t len;
1578
1579 memset(&request, 0, sizeof(request));
1580
1581 if (mark.value)
1582 {
1583 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%8x)",
1584 ntohl(spi), mark.value, mark.mask);
1585 }
1586 else
1587 {
1588 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x", ntohl(spi));
1589 }
1590 hdr = (struct nlmsghdr*)request;
1591 hdr->nlmsg_flags = NLM_F_REQUEST;
1592 hdr->nlmsg_type = XFRM_MSG_GETSA;
1593 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1594
1595 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1596 host2xfrm(dst, &sa_id->daddr);
1597 sa_id->spi = spi;
1598 sa_id->proto = protocol;
1599 sa_id->family = dst->get_family(dst);
1600
1601 if (mark.value)
1602 {
1603 struct xfrm_mark *mrk;
1604 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1605
1606 rthdr->rta_type = XFRMA_MARK;
1607 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1608 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1609 if (hdr->nlmsg_len > sizeof(request))
1610 {
1611 return FAILED;
1612 }
1613
1614 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1615 mrk->v = mark.value;
1616 mrk->m = mark.mask;
1617 }
1618
1619 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1620 {
1621 hdr = out;
1622 while (NLMSG_OK(hdr, len))
1623 {
1624 switch (hdr->nlmsg_type)
1625 {
1626 case XFRM_MSG_NEWSA:
1627 {
1628 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1629 break;
1630 }
1631 case NLMSG_ERROR:
1632 {
1633 struct nlmsgerr *err = NLMSG_DATA(hdr);
1634
1635 if (mark.value)
1636 {
1637 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1638 "(mark %u/0x%8x) failed: %s (%d)",
1639 ntohl(spi), mark.value, mark.mask,
1640 strerror(-err->error), -err->error);
1641 }
1642 else
1643 {
1644 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1645 "failed: %s (%d)", ntohl(spi),
1646 strerror(-err->error), -err->error);
1647 }
1648 break;
1649 }
1650 default:
1651 hdr = NLMSG_NEXT(hdr, len);
1652 continue;
1653 case NLMSG_DONE:
1654 break;
1655 }
1656 break;
1657 }
1658 }
1659
1660 if (sa == NULL)
1661 {
1662 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1663 }
1664 else
1665 {
1666 *bytes = sa->curlft.bytes;
1667 status = SUCCESS;
1668 }
1669 memwipe(out, len);
1670 free(out);
1671 return status;
1672 }
1673
1674 METHOD(kernel_ipsec_t, del_sa, status_t,
1675 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1676 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1677 {
1678 netlink_buf_t request;
1679 struct nlmsghdr *hdr;
1680 struct xfrm_usersa_id *sa_id;
1681
1682 /* if IPComp was used, we first delete the additional IPComp SA */
1683 if (cpi)
1684 {
1685 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1686 }
1687
1688 memset(&request, 0, sizeof(request));
1689
1690 if (mark.value)
1691 {
1692 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%8x)",
1693 ntohl(spi), mark.value, mark.mask);
1694 }
1695 else
1696 {
1697 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x", ntohl(spi));
1698 }
1699 hdr = (struct nlmsghdr*)request;
1700 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1701 hdr->nlmsg_type = XFRM_MSG_DELSA;
1702 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1703
1704 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1705 host2xfrm(dst, &sa_id->daddr);
1706 sa_id->spi = spi;
1707 sa_id->proto = protocol;
1708 sa_id->family = dst->get_family(dst);
1709
1710 if (mark.value)
1711 {
1712 struct xfrm_mark *mrk;
1713 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1714
1715 rthdr->rta_type = XFRMA_MARK;
1716 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1717 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1718 if (hdr->nlmsg_len > sizeof(request))
1719 {
1720 return FAILED;
1721 }
1722
1723 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1724 mrk->v = mark.value;
1725 mrk->m = mark.mask;
1726 }
1727
1728 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1729 {
1730 if (mark.value)
1731 {
1732 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1733 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1734 }
1735 else
1736 {
1737 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x",
1738 ntohl(spi));
1739 }
1740 return FAILED;
1741 }
1742 if (mark.value)
1743 {
1744 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%8x)",
1745 ntohl(spi), mark.value, mark.mask);
1746 }
1747 else
1748 {
1749 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x", ntohl(spi));
1750 }
1751 return SUCCESS;
1752 }
1753
1754 METHOD(kernel_ipsec_t, update_sa, status_t,
1755 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1756 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1757 bool old_encap, bool new_encap, mark_t mark)
1758 {
1759 netlink_buf_t request;
1760 u_char *pos;
1761 struct nlmsghdr *hdr, *out = NULL;
1762 struct xfrm_usersa_id *sa_id;
1763 struct xfrm_usersa_info *out_sa = NULL, *sa;
1764 size_t len;
1765 struct rtattr *rta;
1766 size_t rtasize;
1767 struct xfrm_encap_tmpl* tmpl = NULL;
1768 struct xfrm_replay_state *replay = NULL;
1769 struct xfrm_replay_state_esn *replay_esn = NULL;
1770 status_t status = FAILED;
1771
1772 /* if IPComp is used, we first update the IPComp SA */
1773 if (cpi)
1774 {
1775 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1776 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1777 }
1778
1779 memset(&request, 0, sizeof(request));
1780
1781 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1782
1783 /* query the existing SA first */
1784 hdr = (struct nlmsghdr*)request;
1785 hdr->nlmsg_flags = NLM_F_REQUEST;
1786 hdr->nlmsg_type = XFRM_MSG_GETSA;
1787 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1788
1789 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1790 host2xfrm(dst, &sa_id->daddr);
1791 sa_id->spi = spi;
1792 sa_id->proto = protocol;
1793 sa_id->family = dst->get_family(dst);
1794
1795 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1796 {
1797 hdr = out;
1798 while (NLMSG_OK(hdr, len))
1799 {
1800 switch (hdr->nlmsg_type)
1801 {
1802 case XFRM_MSG_NEWSA:
1803 {
1804 out_sa = NLMSG_DATA(hdr);
1805 break;
1806 }
1807 case NLMSG_ERROR:
1808 {
1809 struct nlmsgerr *err = NLMSG_DATA(hdr);
1810 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1811 strerror(-err->error), -err->error);
1812 break;
1813 }
1814 default:
1815 hdr = NLMSG_NEXT(hdr, len);
1816 continue;
1817 case NLMSG_DONE:
1818 break;
1819 }
1820 break;
1821 }
1822 }
1823 if (out_sa == NULL)
1824 {
1825 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1826 goto failed;
1827 }
1828
1829 get_replay_state(this, spi, protocol, dst, &replay_esn, &replay);
1830
1831 /* delete the old SA (without affecting the IPComp SA) */
1832 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1833 {
1834 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x",
1835 ntohl(spi));
1836 goto failed;
1837 }
1838
1839 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1840 ntohl(spi), src, dst, new_src, new_dst);
1841 /* copy over the SA from out to request */
1842 hdr = (struct nlmsghdr*)request;
1843 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1844 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1845 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1846 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1847 sa = NLMSG_DATA(hdr);
1848 sa->family = new_dst->get_family(new_dst);
1849
1850 if (!src->ip_equals(src, new_src))
1851 {
1852 host2xfrm(new_src, &sa->saddr);
1853 }
1854 if (!dst->ip_equals(dst, new_dst))
1855 {
1856 host2xfrm(new_dst, &sa->id.daddr);
1857 }
1858
1859 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1860 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1861 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1862 while(RTA_OK(rta, rtasize))
1863 {
1864 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1865 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1866 {
1867 if (rta->rta_type == XFRMA_ENCAP)
1868 { /* update encap tmpl */
1869 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1870 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1871 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1872 }
1873 memcpy(pos, rta, rta->rta_len);
1874 pos += RTA_ALIGN(rta->rta_len);
1875 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1876 }
1877 rta = RTA_NEXT(rta, rtasize);
1878 }
1879
1880 rta = (struct rtattr*)pos;
1881 if (tmpl == NULL && new_encap)
1882 { /* add tmpl if we are enabling it */
1883 rta->rta_type = XFRMA_ENCAP;
1884 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1885
1886 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1887 if (hdr->nlmsg_len > sizeof(request))
1888 {
1889 goto failed;
1890 }
1891
1892 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1893 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1894 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1895 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1896 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1897
1898 rta = XFRM_RTA_NEXT(rta);
1899 }
1900
1901 if (replay_esn)
1902 {
1903 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1904 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1905 this->replay_bmp);
1906
1907 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1908 if (hdr->nlmsg_len > sizeof(request))
1909 {
1910 goto failed;
1911 }
1912 memcpy(RTA_DATA(rta), replay_esn,
1913 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1914
1915 rta = XFRM_RTA_NEXT(rta);
1916 }
1917 else if (replay)
1918 {
1919 rta->rta_type = XFRMA_REPLAY_VAL;
1920 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1921
1922 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1923 if (hdr->nlmsg_len > sizeof(request))
1924 {
1925 goto failed;
1926 }
1927 		memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
1928
1929 rta = XFRM_RTA_NEXT(rta);
1930 }
1931 else
1932 {
1933 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1934 "with SPI %.8x", ntohl(spi));
1935 }
1936
1937 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1938 {
1939 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1940 goto failed;
1941 }
1942
1943 status = SUCCESS;
1944 failed:
1945 free(replay);
1946 free(replay_esn);
1947 memwipe(out, len);
1948 free(out);
1949
1950 return status;
1951 }
1952
1953 /**
1954 * Add or update a policy in the kernel.
1955 *
1956 * Note: The mutex has to be locked when entering this function.
1957 */
1958 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
1959 policy_entry_t *policy, policy_sa_t *mapping, bool update)
1960 {
1961 netlink_buf_t request;
1962 policy_entry_t clone;
1963 ipsec_sa_t *ipsec = mapping->sa;
1964 struct xfrm_userpolicy_info *policy_info;
1965 struct nlmsghdr *hdr;
1966 int i;
1967
1968 /* clone the policy so we are able to check it out again later */
1969 memcpy(&clone, policy, sizeof(policy_entry_t));
1970
1971 memset(&request, 0, sizeof(request));
1972 hdr = (struct nlmsghdr*)request;
1973 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1974 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
1975 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
1976
1977 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
1978 policy_info->sel = policy->sel;
1979 policy_info->dir = policy->direction;
1980
1981 	/* the priority was already calculated in get_priority() when the mapping was created */
1982 policy_info->priority = mapping->priority;
1983 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
1984 : XFRM_POLICY_BLOCK;
1985 policy_info->share = XFRM_SHARE_ANY;
1986
1987 /* policies don't expire */
1988 policy_info->lft.soft_byte_limit = XFRM_INF;
1989 policy_info->lft.soft_packet_limit = XFRM_INF;
1990 policy_info->lft.hard_byte_limit = XFRM_INF;
1991 policy_info->lft.hard_packet_limit = XFRM_INF;
1992 policy_info->lft.soft_add_expires_seconds = 0;
1993 policy_info->lft.hard_add_expires_seconds = 0;
1994 policy_info->lft.soft_use_expires_seconds = 0;
1995 policy_info->lft.hard_use_expires_seconds = 0;
1996
1997 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
1998
1999 if (mapping->type == POLICY_IPSEC)
2000 {
2001 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
2002 struct {
2003 u_int8_t proto;
2004 bool use;
2005 } protos[] = {
2006 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
2007 { IPPROTO_ESP, ipsec->cfg.esp.use },
2008 { IPPROTO_AH, ipsec->cfg.ah.use },
2009 };
2010 ipsec_mode_t proto_mode = ipsec->cfg.mode;
2011
2012 rthdr->rta_type = XFRMA_TMPL;
2013 rthdr->rta_len = 0; /* actual length is set below */
2014
2015 for (i = 0; i < countof(protos); i++)
2016 {
2017 if (!protos[i].use)
2018 {
2019 continue;
2020 }
2021
2022 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
2023 hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
2024 if (hdr->nlmsg_len > sizeof(request))
2025 {
2026 return FAILED;
2027 }
2028
2029 tmpl->reqid = ipsec->cfg.reqid;
2030 tmpl->id.proto = protos[i].proto;
2031 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
2032 tmpl->mode = mode2kernel(proto_mode);
2033 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2034 policy->direction != POLICY_OUT;
2035 tmpl->family = ipsec->src->get_family(ipsec->src);
2036
2037 if (proto_mode == MODE_TUNNEL)
2038 { /* only for tunnel mode */
2039 host2xfrm(ipsec->src, &tmpl->saddr);
2040 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2041 }
2042
2043 tmpl++;
2044
2045 /* use transport mode for other SAs */
2046 proto_mode = MODE_TRANSPORT;
2047 }
2048
2049 rthdr = XFRM_RTA_NEXT(rthdr);
2050 }
2051
2052 if (ipsec->mark.value)
2053 {
2054 struct xfrm_mark *mrk;
2055
2056 rthdr->rta_type = XFRMA_MARK;
2057 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2058
2059 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2060 if (hdr->nlmsg_len > sizeof(request))
2061 {
			this->mutex->unlock(this->mutex);
2062 			return FAILED;
2063 }
2064
2065 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2066 mrk->v = ipsec->mark.value;
2067 mrk->m = ipsec->mark.mask;
2068 }
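	/* release the mutex during the blocking netlink exchange, the policy is
	 * looked up again afterwards using the clone */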
2069 this->mutex->unlock(this->mutex);
2070
2071 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2072 {
2073 return FAILED;
2074 }
2075
2076 /* find the policy again */
2077 this->mutex->lock(this->mutex);
2078 policy = this->policies->get(this->policies, &clone);
2079 if (!policy ||
2080 policy->used_by->find_first(policy->used_by,
2081 NULL, (void**)&mapping) != SUCCESS)
2082 { /* policy or mapping is already gone, ignore */
2083 this->mutex->unlock(this->mutex);
2084 return SUCCESS;
2085 }
2086
2087 /* install a route, if:
2088 * - this is a forward policy (to just get one for each child)
2089 * - we are in tunnel/BEET mode
2090 * - routing is not disabled via strongswan.conf
2091 */
2092 if (policy->direction == POLICY_FWD &&
2093 ipsec->cfg.mode != MODE_TRANSPORT && this->install_routes)
2094 {
2095 route_entry_t *route = malloc_thing(route_entry_t);
2096 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2097
2098 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2099 fwd->dst_ts, &route->src_ip) == SUCCESS)
2100 {
2101 /* get the nexthop to src (src as we are in POLICY_FWD) */
2102 route->gateway = hydra->kernel_interface->get_nexthop(
2103 hydra->kernel_interface, ipsec->src);
2104 /* install route via outgoing interface */
2105 route->if_name = hydra->kernel_interface->get_interface(
2106 hydra->kernel_interface, ipsec->dst);
2107 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2108 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2109 route->prefixlen = policy->sel.prefixlen_s;
2110
2111 if (!route->if_name)
2112 {
2113 this->mutex->unlock(this->mutex);
2114 route_entry_destroy(route);
2115 return SUCCESS;
2116 }
2117
2118 if (policy->route)
2119 {
2120 route_entry_t *old = policy->route;
2121 if (route_entry_equals(old, route))
2122 { /* keep previously installed route */
2123 this->mutex->unlock(this->mutex);
2124 route_entry_destroy(route);
2125 return SUCCESS;
2126 }
2127 /* uninstall previously installed route */
2128 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2129 old->dst_net, old->prefixlen, old->gateway,
2130 old->src_ip, old->if_name) != SUCCESS)
2131 {
2132 DBG1(DBG_KNL, "error uninstalling route installed with "
2133 "policy %R === %R %N", fwd->src_ts,
2134 fwd->dst_ts, policy_dir_names,
2135 policy->direction);
2136 }
2137 route_entry_destroy(old);
2138 policy->route = NULL;
2139 }
2140
2141 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2142 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2143 switch (hydra->kernel_interface->add_route(
2144 hydra->kernel_interface, route->dst_net,
2145 route->prefixlen, route->gateway,
2146 route->src_ip, route->if_name))
2147 {
2148 default:
2149 DBG1(DBG_KNL, "unable to install source route for %H",
2150 route->src_ip);
2151 /* FALL */
2152 case ALREADY_DONE:
2153 /* route exists, do not uninstall */
2154 route_entry_destroy(route);
2155 break;
2156 case SUCCESS:
2157 /* cache the installed route */
2158 policy->route = route;
2159 break;
2160 }
2161 }
2162 else
2163 {
2164 free(route);
2165 }
2166 }
2167 this->mutex->unlock(this->mutex);
2168 return SUCCESS;
2169 }
2170
2171 METHOD(kernel_ipsec_t, add_policy, status_t,
2172 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
2173 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
2174 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
2175 mark_t mark, policy_priority_t priority)
2176 {
2177 policy_entry_t *policy, *current;
2178 policy_sa_t *assigned_sa, *current_sa;
2179 enumerator_t *enumerator;
2180 bool found = FALSE, update = TRUE;
2181
2182 /* create a policy */
2183 INIT(policy,
2184 .sel = ts2selector(src_ts, dst_ts),
2185 .mark = mark.value & mark.mask,
2186 .direction = direction,
2187 );
2188
2189 /* find the policy, which matches EXACTLY */
2190 this->mutex->lock(this->mutex);
2191 current = this->policies->get(this->policies, policy);
2192 if (current)
2193 {
2194 /* use existing policy */
2195 if (mark.value)
2196 {
2197 			DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%08x) "
2198 "already exists, increasing refcount",
2199 src_ts, dst_ts, policy_dir_names, direction,
2200 mark.value, mark.mask);
2201 }
2202 else
2203 {
2204 DBG2(DBG_KNL, "policy %R === %R %N "
2205 "already exists, increasing refcount",
2206 src_ts, dst_ts, policy_dir_names, direction);
2207 }
2208 policy_entry_destroy(this, policy);
2209 policy = current;
2210 found = TRUE;
2211 }
2212 else
2213 { /* use the new one, if we have no such policy */
2214 policy->used_by = linked_list_create();
2215 this->policies->put(this->policies, policy, policy);
2216 }
2217
2218 /* cache the assigned IPsec SA */
2219 assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
2220 dst_ts, mark, sa);
2221 assigned_sa->priority = get_priority(policy, priority);
2222
2223 /* insert the SA according to its priority */
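	/* a lower numeric priority means higher precedence, so the kernel policy
	 * only needs to be (re)installed if the new SA ends up first in the list */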
2224 enumerator = policy->used_by->create_enumerator(policy->used_by);
2225 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2226 {
2227 if (current_sa->priority >= assigned_sa->priority)
2228 {
2229 break;
2230 }
2231 update = FALSE;
2232 }
2233 policy->used_by->insert_before(policy->used_by, enumerator, assigned_sa);
2234 enumerator->destroy(enumerator);
2235
2236 if (!update)
2237 { /* we don't update the policy if the priority is lower than that of the
2238 * currently installed one */
2239 this->mutex->unlock(this->mutex);
2240 return SUCCESS;
2241 }
2242
2243 if (mark.value)
2244 {
2245 		DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%08x)",
2246 found ? "updating" : "adding", src_ts, dst_ts,
2247 policy_dir_names, direction, mark.value, mark.mask);
2248 }
2249 else
2250 {
2251 DBG2(DBG_KNL, "%s policy %R === %R %N",
2252 found ? "updating" : "adding", src_ts, dst_ts,
2253 policy_dir_names, direction);
2254 }
2255
2256 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2257 {
2258 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2259 found ? "update" : "add", src_ts, dst_ts,
2260 policy_dir_names, direction);
2261 return FAILED;
2262 }
2263 return SUCCESS;
2264 }
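/*
 * Usage sketch (illustration only, not part of this file): a caller typically
 * installs an outbound, an inbound and a forward policy through this
 * interface. The hosts, traffic selectors and sa_cfg below are placeholders,
 * and POLICY_PRIORITY_DEFAULT is assumed to be the default policy_priority_t
 * value.
 *
 *   ipsec->add_policy(ipsec, me, other, my_ts, other_ts, POLICY_OUT,
 *                     POLICY_IPSEC, &sa_cfg, mark, POLICY_PRIORITY_DEFAULT);
 *   ipsec->add_policy(ipsec, other, me, other_ts, my_ts, POLICY_IN,
 *                     POLICY_IPSEC, &sa_cfg, mark, POLICY_PRIORITY_DEFAULT);
 *   ipsec->add_policy(ipsec, other, me, other_ts, my_ts, POLICY_FWD,
 *                     POLICY_IPSEC, &sa_cfg, mark, POLICY_PRIORITY_DEFAULT);
 */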
2265
2266 METHOD(kernel_ipsec_t, query_policy, status_t,
2267 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2268 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2269 u_int32_t *use_time)
2270 {
2271 netlink_buf_t request;
2272 struct nlmsghdr *out = NULL, *hdr;
2273 struct xfrm_userpolicy_id *policy_id;
2274 struct xfrm_userpolicy_info *policy = NULL;
2275 size_t len;
2276
2277 memset(&request, 0, sizeof(request));
2278
2279 if (mark.value)
2280 {
2281 		DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%08x)",
2282 src_ts, dst_ts, policy_dir_names, direction,
2283 mark.value, mark.mask);
2284 }
2285 else
2286 {
2287 DBG2(DBG_KNL, "querying policy %R === %R %N", src_ts, dst_ts,
2288 policy_dir_names, direction);
2289 }
2290 hdr = (struct nlmsghdr*)request;
2291 hdr->nlmsg_flags = NLM_F_REQUEST;
2292 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2293 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2294
2295 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2296 policy_id->sel = ts2selector(src_ts, dst_ts);
2297 policy_id->dir = direction;
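	/* the kernel looks the policy up by selector, direction and (optionally)
	 * mark and answers with an XFRM_MSG_NEWPOLICY message */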
2298
2299 if (mark.value)
2300 {
2301 struct xfrm_mark *mrk;
2302 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2303
2304 rthdr->rta_type = XFRMA_MARK;
2305 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2306
2307 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2308 if (hdr->nlmsg_len > sizeof(request))
2309 {
2310 return FAILED;
2311 }
2312
2313 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2314 mrk->v = mark.value;
2315 mrk->m = mark.mask;
2316 }
2317
2318 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2319 {
2320 hdr = out;
2321 while (NLMSG_OK(hdr, len))
2322 {
2323 switch (hdr->nlmsg_type)
2324 {
2325 case XFRM_MSG_NEWPOLICY:
2326 {
2327 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2328 break;
2329 }
2330 case NLMSG_ERROR:
2331 {
2332 struct nlmsgerr *err = NLMSG_DATA(hdr);
2333 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2334 strerror(-err->error), -err->error);
2335 break;
2336 }
2337 default:
2338 hdr = NLMSG_NEXT(hdr, len);
2339 continue;
2340 case NLMSG_DONE:
2341 break;
2342 }
2343 break;
2344 }
2345 }
2346
2347 if (policy == NULL)
2348 {
2349 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2350 policy_dir_names, direction);
2351 free(out);
2352 return FAILED;
2353 }
2354
2355 if (policy->curlft.use_time)
2356 {
2357 /* we need the monotonic time, but the kernel returns system time. */
2358 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2359 }
2360 else
2361 {
2362 *use_time = 0;
2363 }
2364
2365 free(out);
2366 return SUCCESS;
2367 }
2368
2369 METHOD(kernel_ipsec_t, del_policy, status_t,
2370 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2371 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2372 mark_t mark, policy_priority_t prio)
2373 {
2374 policy_entry_t *current, policy;
2375 enumerator_t *enumerator;
2376 policy_sa_t *mapping;
2377 netlink_buf_t request;
2378 struct nlmsghdr *hdr;
2379 struct xfrm_userpolicy_id *policy_id;
2380 bool is_installed = TRUE;
2381 u_int32_t priority;
2382
2383 if (mark.value)
2384 {
2385 		DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x)",
2386 src_ts, dst_ts, policy_dir_names, direction,
2387 mark.value, mark.mask);
2388 }
2389 else
2390 {
2391 DBG2(DBG_KNL, "deleting policy %R === %R %N",
2392 src_ts, dst_ts, policy_dir_names, direction);
2393 }
2394
2395 /* create a policy */
2396 memset(&policy, 0, sizeof(policy_entry_t));
2397 policy.sel = ts2selector(src_ts, dst_ts);
2398 policy.mark = mark.value & mark.mask;
2399 policy.direction = direction;
2400
2401 /* find the policy */
2402 this->mutex->lock(this->mutex);
2403 current = this->policies->get(this->policies, &policy);
2404 if (!current)
2405 {
2406 if (mark.value)
2407 {
2408 			DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x) "
2409 "failed, not found", src_ts, dst_ts, policy_dir_names,
2410 direction, mark.value, mark.mask);
2411 }
2412 else
2413 {
2414 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2415 src_ts, dst_ts, policy_dir_names, direction);
2416 }
2417 this->mutex->unlock(this->mutex);
2418 return NOT_FOUND;
2419 }
2420
2421 /* remove mapping to SA by reqid and priority */
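	/* is_installed stays TRUE only if the removed mapping was the first one
	 * in the list, i.e. the one whose priority is currently installed */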
2422 priority = get_priority(current, prio);
2423 enumerator = current->used_by->create_enumerator(current->used_by);
2424 while (enumerator->enumerate(enumerator, (void**)&mapping))
2425 {
2426 if (reqid == mapping->sa->cfg.reqid && priority == mapping->priority)
2427 {
2428 current->used_by->remove_at(current->used_by, enumerator);
2429 policy_sa_destroy(mapping, &direction, this);
2430 break;
2431 }
2432 is_installed = FALSE;
2433 }
2434 enumerator->destroy(enumerator);
2435
2436 if (current->used_by->get_count(current->used_by) > 0)
2437 { /* policy is used by more SAs, keep in kernel */
2438 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2439 if (!is_installed)
2440 { /* no need to update as the policy was not installed for this SA */
2441 this->mutex->unlock(this->mutex);
2442 return SUCCESS;
2443 }
2444
2445 if (mark.value)
2446 {
2447 			DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%08x)",
2448 src_ts, dst_ts, policy_dir_names, direction,
2449 mark.value, mark.mask);
2450 }
2451 else
2452 {
2453 DBG2(DBG_KNL, "updating policy %R === %R %N",
2454 src_ts, dst_ts, policy_dir_names, direction);
2455 }
2456
2457 current->used_by->get_first(current->used_by, (void**)&mapping);
2458 if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
2459 {
2460 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2461 src_ts, dst_ts, policy_dir_names, direction);
2462 return FAILED;
2463 }
2464 return SUCCESS;
2465 }
2466
2467 memset(&request, 0, sizeof(request));
2468
2469 hdr = (struct nlmsghdr*)request;
2470 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2471 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2472 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2473
2474 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2475 policy_id->sel = current->sel;
2476 policy_id->dir = direction;
2477
2478 if (mark.value)
2479 {
2480 struct xfrm_mark *mrk;
2481 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2482
2483 rthdr->rta_type = XFRMA_MARK;
2484 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2485 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2486 if (hdr->nlmsg_len > sizeof(request))
2487 {
			this->mutex->unlock(this->mutex);
2488 			return FAILED;
2489 }
2490
2491 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2492 mrk->v = mark.value;
2493 mrk->m = mark.mask;
2494 }
2495
2496 if (current->route)
2497 {
2498 route_entry_t *route = current->route;
2499 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2500 route->dst_net, route->prefixlen, route->gateway,
2501 route->src_ip, route->if_name) != SUCCESS)
2502 {
2503 DBG1(DBG_KNL, "error uninstalling route installed with "
2504 "policy %R === %R %N", src_ts, dst_ts,
2505 policy_dir_names, direction);
2506 }
2507 }
2508
2509 this->policies->remove(this->policies, current);
2510 policy_entry_destroy(this, current);
2511 this->mutex->unlock(this->mutex);
2512
2513 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2514 {
2515 if (mark.value)
2516 {
2517 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2518 				"(mark %u/0x%08x)", src_ts, dst_ts, policy_dir_names,
2519 direction, mark.value, mark.mask);
2520 }
2521 else
2522 {
2523 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2524 src_ts, dst_ts, policy_dir_names, direction);
2525 }
2526 return FAILED;
2527 }
2528 return SUCCESS;
2529 }
2530
2531 METHOD(kernel_ipsec_t, bypass_socket, bool,
2532 private_kernel_netlink_ipsec_t *this, int fd, int family)
2533 {
2534 struct xfrm_userpolicy_info policy;
2535 u_int sol, ipsec_policy;
2536
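	/* install a pair of per-socket policies (one per direction) so that the
	 * socket's traffic, e.g. IKE packets, bypasses the IPsec policies */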
2537 switch (family)
2538 {
2539 case AF_INET:
2540 sol = SOL_IP;
2541 ipsec_policy = IP_XFRM_POLICY;
2542 break;
2543 case AF_INET6:
2544 sol = SOL_IPV6;
2545 ipsec_policy = IPV6_XFRM_POLICY;
2546 break;
2547 default:
2548 return FALSE;
2549 }
2550
2551 memset(&policy, 0, sizeof(policy));
2552 policy.action = XFRM_POLICY_ALLOW;
2553 policy.sel.family = family;
2554
2555 policy.dir = XFRM_POLICY_OUT;
2556 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2557 {
2558 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2559 strerror(errno));
2560 return FALSE;
2561 }
2562 policy.dir = XFRM_POLICY_IN;
2563 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2564 {
2565 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2566 strerror(errno));
2567 return FALSE;
2568 }
2569 return TRUE;
2570 }
2571
2572 METHOD(kernel_ipsec_t, destroy, void,
2573 private_kernel_netlink_ipsec_t *this)
2574 {
2575 enumerator_t *enumerator;
2576 policy_entry_t *policy;
2577
2578 if (this->job)
2579 {
2580 this->job->cancel(this->job);
2581 }
2582 if (this->socket_xfrm_events > 0)
2583 {
2584 close(this->socket_xfrm_events);
2585 }
2586 DESTROY_IF(this->socket_xfrm);
2587 enumerator = this->policies->create_enumerator(this->policies);
2588 while (enumerator->enumerate(enumerator, &policy, &policy))
2589 {
2590 policy_entry_destroy(this, policy);
2591 }
2592 enumerator->destroy(enumerator);
2593 this->policies->destroy(this->policies);
2594 this->sas->destroy(this->sas);
2595 this->mutex->destroy(this->mutex);
2596 free(this);
2597 }
2598
2599 /*
2600 * Described in header.
2601 */
2602 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2603 {
2604 private_kernel_netlink_ipsec_t *this;
2605 struct sockaddr_nl addr;
2606 int fd;
2607
2608 INIT(this,
2609 .public = {
2610 .interface = {
2611 .get_spi = _get_spi,
2612 .get_cpi = _get_cpi,
2613 .add_sa = _add_sa,
2614 .update_sa = _update_sa,
2615 .query_sa = _query_sa,
2616 .del_sa = _del_sa,
2617 .add_policy = _add_policy,
2618 .query_policy = _query_policy,
2619 .del_policy = _del_policy,
2620 .bypass_socket = _bypass_socket,
2621 .destroy = _destroy,
2622 },
2623 },
2624 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2625 (hashtable_equals_t)policy_equals, 32),
2626 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2627 (hashtable_equals_t)ipsec_sa_equals, 32),
2628 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2629 .install_routes = lib->settings->get_bool(lib->settings,
2630 "%s.install_routes", TRUE, hydra->daemon),
2631 .replay_window = lib->settings->get_int(lib->settings,
2632 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2633 );
2634
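	/* number of 32-bit words required for the replay window bitmap */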
2635 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2636 (sizeof(u_int32_t) * 8);
2637
2638 if (streq(hydra->daemon, "pluto"))
2639 { /* no routes for pluto, they are installed via updown script */
2640 this->install_routes = FALSE;
2641 }
2642
2643 	/* extend the lifetime of allocated SPIs (acquire state) in the kernel
	 * to about the IKEv2 retransmission timeout (~165s) */
2644 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2645 	if (fd >= 0)
2646 {
2647 ignore_result(write(fd, "165", 3));
2648 close(fd);
2649 }
2650
2651 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2652 if (!this->socket_xfrm)
2653 {
2654 destroy(this);
2655 return NULL;
2656 }
2657
2658 memset(&addr, 0, sizeof(addr));
2659 addr.nl_family = AF_NETLINK;
2660
2661 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2662 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2663 if (this->socket_xfrm_events <= 0)
2664 {
2665 DBG1(DBG_KNL, "unable to create XFRM event socket");
2666 destroy(this);
2667 return NULL;
2668 }
2669 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2670 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2671 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2672 {
2673 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2674 destroy(this);
2675 return NULL;
2676 }
2677 this->job = callback_job_create_with_prio((callback_job_cb_t)receive_events,
2678 this, NULL, NULL, JOB_PRIO_CRITICAL);
2679 lib->processor->queue_job(lib->processor, (job_t*)this->job);
2680
2681 return &this->public;
2682 }
2683