src/libhydra/plugins/kernel_netlink/kernel_netlink_ipsec.c
1 /*
2 * Copyright (C) 2006-2011 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <utils/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** from linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /* missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /** default priority of installed policies */
62 #define PRIO_LOW 1024
63 #define PRIO_HIGH 512
64
65 /** default replay window size, if not set using charon.replay_window */
66 #define DEFAULT_REPLAY_WINDOW 32
67
68 /**
69 * map the limit for bytes and packets to XFRM_INF per default
70 */
71 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
72
73 /**
74 * Create ORable bitfield of XFRM NL groups
75 */
76 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
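/* For example, XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) builds the nl_groups
 * bitmask an event socket subscribes with; the exact group set bound to
 * socket_xfrm_events is established in the constructor, not shown in this
 * excerpt. */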
77
78 /**
79 * returns a pointer to the first rtattr following the nlmsghdr *nlh and the
80 * 'usual' netlink data x like 'struct xfrm_usersa_info'
81 */
82 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(x))))
83 /**
84 * returns a pointer to the next rtattr following rta.
85 * !!! do not use this to parse messages. use RTA_NEXT and RTA_OK instead !!!
86 */
87 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len)))
88 /**
89 * returns the total size of attached rta data
90 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
91 */
92 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
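/*
 * Typical attribute walk over an xfrm message using these macros, as done in
 * the event handlers further down (sketch mirroring process_acquire()):
 *
 *   struct rtattr *rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
 *   size_t rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
 *   while (RTA_OK(rta, rtasize))
 *   {
 *       ...inspect rta->rta_type and RTA_DATA(rta)...
 *       rta = RTA_NEXT(rta, rtasize);
 *   }
 *
 * XFRM_RTA_NEXT() is only used when building messages, never for parsing.
 */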
93
94 typedef struct kernel_algorithm_t kernel_algorithm_t;
95
96 /**
97 * Mapping of IKEv2 kernel identifier to linux crypto API names
98 */
99 struct kernel_algorithm_t {
100 /**
101 * Identifier specified in IKEv2
102 */
103 int ikev2;
104
105 /**
106 * Name of the algorithm in linux crypto API
107 */
108 char *name;
109 };
110
111 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
112 "XFRM_MSG_NEWSA",
113 "XFRM_MSG_DELSA",
114 "XFRM_MSG_GETSA",
115 "XFRM_MSG_NEWPOLICY",
116 "XFRM_MSG_DELPOLICY",
117 "XFRM_MSG_GETPOLICY",
118 "XFRM_MSG_ALLOCSPI",
119 "XFRM_MSG_ACQUIRE",
120 "XFRM_MSG_EXPIRE",
121 "XFRM_MSG_UPDPOLICY",
122 "XFRM_MSG_UPDSA",
123 "XFRM_MSG_POLEXPIRE",
124 "XFRM_MSG_FLUSHSA",
125 "XFRM_MSG_FLUSHPOLICY",
126 "XFRM_MSG_NEWAE",
127 "XFRM_MSG_GETAE",
128 "XFRM_MSG_REPORT",
129 "XFRM_MSG_MIGRATE",
130 "XFRM_MSG_NEWSADINFO",
131 "XFRM_MSG_GETSADINFO",
132 "XFRM_MSG_NEWSPDINFO",
133 "XFRM_MSG_GETSPDINFO",
134 "XFRM_MSG_MAPPING"
135 );
136
137 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_KMADDRESS,
138 "XFRMA_UNSPEC",
139 "XFRMA_ALG_AUTH",
140 "XFRMA_ALG_CRYPT",
141 "XFRMA_ALG_COMP",
142 "XFRMA_ENCAP",
143 "XFRMA_TMPL",
144 "XFRMA_SA",
145 "XFRMA_POLICY",
146 "XFRMA_SEC_CTX",
147 "XFRMA_LTIME_VAL",
148 "XFRMA_REPLAY_VAL",
149 "XFRMA_REPLAY_THRESH",
150 "XFRMA_ETIMER_THRESH",
151 "XFRMA_SRCADDR",
152 "XFRMA_COADDR",
153 "XFRMA_LASTUSED",
154 "XFRMA_POLICY_TYPE",
155 "XFRMA_MIGRATE",
156 "XFRMA_ALG_AEAD",
157 "XFRMA_KMADDRESS"
158 );
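/* Both name tables are consumed via the %N printf hook specifier, e.g.
 * DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type), as used in the
 * event handlers below. */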
159
160 #define END_OF_LIST -1
161
162 /**
163 * Algorithms for encryption
164 */
165 static kernel_algorithm_t encryption_algs[] = {
166 /* {ENCR_DES_IV64, "***" }, */
167 {ENCR_DES, "des" },
168 {ENCR_3DES, "des3_ede" },
169 /* {ENCR_RC5, "***" }, */
170 /* {ENCR_IDEA, "***" }, */
171 {ENCR_CAST, "cast128" },
172 {ENCR_BLOWFISH, "blowfish" },
173 /* {ENCR_3IDEA, "***" }, */
174 /* {ENCR_DES_IV32, "***" }, */
175 {ENCR_NULL, "cipher_null" },
176 {ENCR_AES_CBC, "aes" },
177 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
178 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
179 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
180 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
181 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
182 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
183 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
184 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
185 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
186 /* {ENCR_CAMELLIA_CTR, "***" }, */
187 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
188 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
189 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
190 {ENCR_SERPENT_CBC, "serpent" },
191 {ENCR_TWOFISH_CBC, "twofish" },
192 {END_OF_LIST, NULL }
193 };
194
195 /**
196 * Algorithms for integrity protection
197 */
198 static kernel_algorithm_t integrity_algs[] = {
199 {AUTH_HMAC_MD5_96, "md5" },
200 {AUTH_HMAC_SHA1_96, "sha1" },
201 {AUTH_HMAC_SHA2_256_96, "sha256" },
202 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
203 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
204 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
205 /* {AUTH_DES_MAC, "***" }, */
206 /* {AUTH_KPDK_MD5, "***" }, */
207 {AUTH_AES_XCBC_96, "xcbc(aes)" },
208 {END_OF_LIST, NULL }
209 };
210
211 /**
212 * Algorithms for IPComp
213 */
214 static kernel_algorithm_t compression_algs[] = {
215 /* {IPCOMP_OUI, "***" }, */
216 {IPCOMP_DEFLATE, "deflate" },
217 {IPCOMP_LZS, "lzs" },
218 {IPCOMP_LZJH, "lzjh" },
219 {END_OF_LIST, NULL }
220 };
221
222 /**
223 * Look up a kernel algorithm name
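 *
 * For instance, lookup_algorithm(encryption_algs, ENCR_AES_CBC) returns "aes";
 * an identifier without a kernel mapping returns NULL.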
224 */
225 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
226 {
227 while (list->ikev2 != END_OF_LIST)
228 {
229 if (list->ikev2 == ikev2)
230 {
231 return list->name;
232 }
233 list++;
234 }
235 return NULL;
236 }
237
238 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
239
240 /**
241 * Private variables and functions of kernel_netlink class.
242 */
243 struct private_kernel_netlink_ipsec_t {
244 /**
245 * Public part of the kernel_netlink_t object.
246 */
247 kernel_netlink_ipsec_t public;
248
249 /**
250 * mutex to lock access to installed policies
251 */
252 mutex_t *mutex;
253
254 /**
255 * Hash table of installed policies (policy_entry_t)
256 */
257 hashtable_t *policies;
258
259 /**
260 * Hash table of IPsec SAs using policies (ipsec_sa_t)
261 */
262 hashtable_t *sas;
263
264 /**
265 * job receiving netlink events
266 */
267 callback_job_t *job;
268
269 /**
270 * Netlink xfrm socket (IPsec)
271 */
272 netlink_socket_t *socket_xfrm;
273
274 /**
275 * netlink xfrm socket to receive acquire and expire events
276 */
277 int socket_xfrm_events;
278
279 /**
280 * whether to install routes along policies
281 */
282 bool install_routes;
283
284 /**
285 * Size of the replay window, in packets
286 */
287 u_int32_t replay_window;
288
289 /**
290 * Size of the replay window bitmap, in bytes
291 */
292 u_int32_t replay_bmp;
293 };
294
295 typedef struct route_entry_t route_entry_t;
296
297 /**
298 * installed routing entry
299 */
300 struct route_entry_t {
301 /** name of the interface the route is bound to */
302 char *if_name;
303
304 /** source ip of the route */
305 host_t *src_ip;
306
307 /** gateway for this route */
308 host_t *gateway;
309
310 /** destination net */
311 chunk_t dst_net;
312
313 /** destination net prefixlen */
314 u_int8_t prefixlen;
315 };
316
317 /**
318 * destroy a route_entry_t object
319 */
320 static void route_entry_destroy(route_entry_t *this)
321 {
322 free(this->if_name);
323 this->src_ip->destroy(this->src_ip);
324 DESTROY_IF(this->gateway);
325 chunk_free(&this->dst_net);
326 free(this);
327 }
328
329 /**
330 * compare two route_entry_t objects
331 */
332 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
333 {
334 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
335 a->src_ip->equals(a->src_ip, b->src_ip) &&
336 a->gateway->equals(a->gateway, b->gateway) &&
337 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
338 }
339
340 typedef struct ipsec_sa_t ipsec_sa_t;
341
342 /**
343 * IPsec SA assigned to a policy.
344 */
345 struct ipsec_sa_t {
346 /** source address of this SA */
347 host_t *src;
348
349 /** destination address of this SA */
350 host_t *dst;
351
352 /** optional mark */
353 mark_t mark;
354
355 /** description of this SA */
356 ipsec_sa_cfg_t cfg;
357
358 /** reference count for this SA */
359 refcount_t refcount;
360 };
361
362 /**
363 * Hash function for ipsec_sa_t objects
364 */
365 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
366 {
367 return chunk_hash_inc(sa->src->get_address(sa->src),
368 chunk_hash_inc(sa->dst->get_address(sa->dst),
369 chunk_hash_inc(chunk_from_thing(sa->mark),
370 chunk_hash(chunk_from_thing(sa->cfg)))));
371 }
372
373 /**
374 * Equality function for ipsec_sa_t objects
375 */
376 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
377 {
378 return sa->src->ip_equals(sa->src, other_sa->src) &&
379 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
380 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
381 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
382 }
383
384 /**
385 * allocate or reference an IPsec SA object
386 */
387 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
388 host_t *src, host_t *dst, mark_t mark,
389 ipsec_sa_cfg_t *cfg)
390 {
391 ipsec_sa_t *sa, *found;
392 INIT(sa,
393 .src = src,
394 .dst = dst,
395 .mark = mark,
396 .cfg = *cfg,
397 );
398 found = this->sas->get(this->sas, sa);
399 if (!found)
400 {
401 sa->src = src->clone(src);
402 sa->dst = dst->clone(dst);
403 this->sas->put(this->sas, sa, sa);
404 }
405 else
406 {
407 free(sa);
408 sa = found;
409 }
410 ref_get(&sa->refcount);
411 return sa;
412 }
413
414 /**
415 * release and destroy an IPsec SA object
416 */
417 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
418 ipsec_sa_t *sa)
419 {
420 if (ref_put(&sa->refcount))
421 {
422 this->sas->remove(this->sas, sa);
423 DESTROY_IF(sa->src);
424 DESTROY_IF(sa->dst);
425 free(sa);
426 }
427 }
428
429 typedef struct policy_sa_t policy_sa_t;
430 typedef struct policy_sa_fwd_t policy_sa_fwd_t;
431
432 /**
433 * Mapping between a policy and an IPsec SA.
434 */
435 struct policy_sa_t {
436 /** priority assigned to the policy when installed with this SA */
437 u_int32_t priority;
438
439 /** type of the policy */
440 policy_type_t type;
441
442 /** assigned SA */
443 ipsec_sa_t *sa;
444 };
445
446 /**
447 * For forward policies we cache the traffic selectors in order to install
448 * the route.
449 */
450 struct policy_sa_fwd_t {
451 /** generic interface */
452 policy_sa_t generic;
453
454 /** source traffic selector of this policy */
455 traffic_selector_t *src_ts;
456
457 /** destination traffic selector of this policy */
458 traffic_selector_t *dst_ts;
459 };
460
461 /**
462 * create a policy_sa(_fwd)_t object
463 */
464 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
465 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
466 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
467 ipsec_sa_cfg_t *cfg)
468 {
469 policy_sa_t *policy;
470
471 if (dir == POLICY_FWD)
472 {
473 policy_sa_fwd_t *fwd;
474 INIT(fwd,
475 .src_ts = src_ts->clone(src_ts),
476 .dst_ts = dst_ts->clone(dst_ts),
477 );
478 policy = &fwd->generic;
479 }
480 else
481 {
482 INIT(policy);
483 }
484 policy->type = type;
485 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
486 return policy;
487 }
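/* Note that for POLICY_FWD the returned pointer aliases the generic member of
 * a policy_sa_fwd_t, which is why policy_sa_destroy() casts it back based on
 * the direction. */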
488
489 /**
490 * destroy a policy_sa(_fwd)_t object
491 */
492 static void policy_sa_destroy(private_kernel_netlink_ipsec_t *this,
493 policy_dir_t dir, policy_sa_t *policy)
494 {
495 if (dir == POLICY_FWD)
496 {
497 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
498 fwd->src_ts->destroy(fwd->src_ts);
499 fwd->dst_ts->destroy(fwd->dst_ts);
500 }
501 ipsec_sa_destroy(this, policy->sa);
502 free(policy);
503 }
504
505 typedef struct policy_entry_t policy_entry_t;
506
507 /**
508 * installed kernel policy.
509 */
510 struct policy_entry_t {
511
512 /** direction of this policy: in, out, forward */
513 u_int8_t direction;
514
515 /** parameters of installed policy */
516 struct xfrm_selector sel;
517
518 /** optional mark */
519 u_int32_t mark;
520
521 /** associated route installed for this policy */
522 route_entry_t *route;
523
524 /** list of SAs this policy is used by, ordered by priority */
525 linked_list_t *used_by;
526 };
527
528 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
529 policy_entry_t *policy)
530 {
531 if (policy->route)
532 {
533 route_entry_destroy(policy->route);
534 }
535 if (policy->used_by)
536 {
537 enumerator_t *enumerator;
538 policy_sa_t *sa;
539 enumerator = policy->used_by->create_enumerator(policy->used_by);
540 while (enumerator->enumerate(enumerator, (void**)&sa))
541 {
542 policy_sa_destroy(this, policy->direction, sa);
543 }
544 enumerator->destroy(enumerator);
545 policy->used_by->destroy(policy->used_by);
546 }
547 free(policy);
548 }
549
550 /**
551 * Hash function for policy_entry_t objects
552 */
553 static u_int policy_hash(policy_entry_t *key)
554 {
555 chunk_t chunk = chunk_create((void*)&key->sel,
556 sizeof(struct xfrm_selector) + sizeof(u_int32_t));
557 return chunk_hash(chunk);
558 }
559
560 /**
561 * Equality function for policy_entry_t objects
562 */
563 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
564 {
565 return memeq(&key->sel, &other_key->sel,
566 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
567 key->direction == other_key->direction;
568 }
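/* Both functions above intentionally read sizeof(struct xfrm_selector) plus a
 * trailing u_int32_t starting at &key->sel: this also covers the mark member
 * declared directly after sel in policy_entry_t, so a policy is keyed on
 * selector and mark (plus the direction checked in policy_equals), assuming
 * the compiler inserts no padding between the two members. */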
569
570 /**
571 * convert the general ipsec mode to the one defined in xfrm.h
572 */
573 static u_int8_t mode2kernel(ipsec_mode_t mode)
574 {
575 switch (mode)
576 {
577 case MODE_TRANSPORT:
578 return XFRM_MODE_TRANSPORT;
579 case MODE_TUNNEL:
580 return XFRM_MODE_TUNNEL;
581 case MODE_BEET:
582 return XFRM_MODE_BEET;
583 default:
584 return mode;
585 }
586 }
587
588 /**
589 * convert a host_t to a struct xfrm_address
590 */
591 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
592 {
593 chunk_t chunk = host->get_address(host);
594 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
595 }
596
597 /**
598 * convert a struct xfrm_address to a host_t
599 */
600 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
601 {
602 chunk_t chunk;
603
604 switch (family)
605 {
606 case AF_INET:
607 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
608 break;
609 case AF_INET6:
610 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
611 break;
612 default:
613 return NULL;
614 }
615 return host_create_from_chunk(family, chunk, ntohs(port));
616 }
617
618 /**
619 * convert a traffic selector address range to subnet and its mask.
620 */
621 static void ts2subnet(traffic_selector_t* ts,
622 xfrm_address_t *net, u_int8_t *mask)
623 {
624 host_t *net_host;
625 chunk_t net_chunk;
626
627 ts->to_subnet(ts, &net_host, mask);
628 net_chunk = net_host->get_address(net_host);
629 memcpy(net, net_chunk.ptr, net_chunk.len);
630 net_host->destroy(net_host);
631 }
632
633 /**
634 * convert a traffic selector port range to port/portmask
635 */
636 static void ts2ports(traffic_selector_t* ts,
637 u_int16_t *port, u_int16_t *mask)
638 {
639 /* Linux does not seem to accept complex port masks. Only "any" or one
640 * specific port is allowed. We therefore match any port if the selector
641 * contains a range, and the exact port if it contains a single port only.
642 */
643 u_int16_t from, to;
644
645 from = ts->get_from_port(ts);
646 to = ts->get_to_port(ts);
647
648 if (from == to)
649 {
650 *port = htons(from);
651 *mask = ~0;
652 }
653 else
654 {
655 *port = 0;
656 *mask = 0;
657 }
658 }
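/* Example: a selector covering port 80 only yields port = htons(80) and
 * mask = 0xffff, while a range such as 1024..65535 degrades to the any-port
 * match port = 0, mask = 0. */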
659
660 /**
661 * convert a pair of traffic_selectors to an xfrm_selector
662 */
663 static struct xfrm_selector ts2selector(traffic_selector_t *src,
664 traffic_selector_t *dst)
665 {
666 struct xfrm_selector sel;
667
668 memset(&sel, 0, sizeof(sel));
669 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
670 /* src or dest proto may be "any" (0), use more restrictive one */
671 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
672 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
673 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
674 ts2ports(dst, &sel.dport, &sel.dport_mask);
675 ts2ports(src, &sel.sport, &sel.sport_mask);
676 sel.ifindex = 0;
677 sel.user = 0;
678
679 return sel;
680 }
681
682 /**
683 * convert an xfrm_selector to a src|dst traffic_selector
684 */
685 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
686 {
687 u_char *addr;
688 u_int8_t prefixlen;
689 u_int16_t port = 0;
690 host_t *host = NULL;
691
692 if (src)
693 {
694 addr = (u_char*)&sel->saddr;
695 prefixlen = sel->prefixlen_s;
696 if (sel->sport_mask)
697 {
698 port = htons(sel->sport);
699 }
700 }
701 else
702 {
703 addr = (u_char*)&sel->daddr;
704 prefixlen = sel->prefixlen_d;
705 if (sel->dport_mask)
706 {
707 port = htons(sel->dport);
708 }
709 }
710
711 /* The Linux 2.6 kernel does not set the selector's family field,
712 * so as a kludge we additionally test the prefix length.
713 */
714 if (sel->family == AF_INET || sel->prefixlen_s == 32)
715 {
716 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
717 }
718 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
719 {
720 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
721 }
722
723 if (host)
724 {
725 return traffic_selector_create_from_subnet(host, prefixlen,
726 sel->proto, port);
727 }
728 return NULL;
729 }
730
731 /**
732 * process an XFRM_MSG_ACQUIRE from the kernel
733 */
734 static void process_acquire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
735 {
736 u_int32_t reqid = 0;
737 int proto = 0;
738 traffic_selector_t *src_ts, *dst_ts;
739 struct xfrm_user_acquire *acquire;
740 struct rtattr *rta;
741 size_t rtasize;
742
743 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
744 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
745 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
746
747 DBG2(DBG_KNL, "received an XFRM_MSG_ACQUIRE");
748
749 while (RTA_OK(rta, rtasize))
750 {
751 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
752
753 if (rta->rta_type == XFRMA_TMPL)
754 {
755 struct xfrm_user_tmpl* tmpl;
756
757 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
758 reqid = tmpl->reqid;
759 proto = tmpl->id.proto;
760 }
761 rta = RTA_NEXT(rta, rtasize);
762 }
763 switch (proto)
764 {
765 case 0:
766 case IPPROTO_ESP:
767 case IPPROTO_AH:
768 break;
769 default:
770 /* acquire for AH/ESP only, not for IPCOMP */
771 return;
772 }
773 src_ts = selector2ts(&acquire->sel, TRUE);
774 dst_ts = selector2ts(&acquire->sel, FALSE);
775
776 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
777 dst_ts);
778 }
779
780 /**
781 * process an XFRM_MSG_EXPIRE from the kernel
782 */
783 static void process_expire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
784 {
785 u_int8_t protocol;
786 u_int32_t spi, reqid;
787 struct xfrm_user_expire *expire;
788
789 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
790 protocol = expire->state.id.proto;
791 spi = expire->state.id.spi;
792 reqid = expire->state.reqid;
793
794 DBG2(DBG_KNL, "received an XFRM_MSG_EXPIRE");
795
796 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
797 {
798 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
799 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
800 return;
801 }
802
803 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
804 spi, expire->hard != 0);
805 }
806
807 /**
808 * process an XFRM_MSG_MIGRATE from the kernel
809 */
810 static void process_migrate(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
811 {
812 traffic_selector_t *src_ts, *dst_ts;
813 host_t *local = NULL, *remote = NULL;
814 host_t *old_src = NULL, *old_dst = NULL;
815 host_t *new_src = NULL, *new_dst = NULL;
816 struct xfrm_userpolicy_id *policy_id;
817 struct rtattr *rta;
818 size_t rtasize;
819 u_int32_t reqid = 0;
820 policy_dir_t dir;
821
822 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
823 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
824 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
825
826 DBG2(DBG_KNL, "received an XFRM_MSG_MIGRATE");
827
828 src_ts = selector2ts(&policy_id->sel, TRUE);
829 dst_ts = selector2ts(&policy_id->sel, FALSE);
830 dir = (policy_dir_t)policy_id->dir;
831
832 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
833
834 while (RTA_OK(rta, rtasize))
835 {
836 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
837 if (rta->rta_type == XFRMA_KMADDRESS)
838 {
839 struct xfrm_user_kmaddress *kmaddress;
840
841 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
842 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
843 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
844 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
845 }
846 else if (rta->rta_type == XFRMA_MIGRATE)
847 {
848 struct xfrm_user_migrate *migrate;
849
850 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
851 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
852 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
853 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
854 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
855 reqid = migrate->reqid;
856 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
857 old_src, old_dst, new_src, new_dst, reqid);
858 DESTROY_IF(old_src);
859 DESTROY_IF(old_dst);
860 DESTROY_IF(new_src);
861 DESTROY_IF(new_dst);
862 }
863 rta = RTA_NEXT(rta, rtasize);
864 }
865
866 if (src_ts && dst_ts && local && remote)
867 {
868 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
869 src_ts, dst_ts, dir, local, remote);
870 }
871 else
872 {
873 DESTROY_IF(src_ts);
874 DESTROY_IF(dst_ts);
875 DESTROY_IF(local);
876 DESTROY_IF(remote);
877 }
878 }
879
880 /**
881 * process an XFRM_MSG_MAPPING from the kernel
882 */
883 static void process_mapping(private_kernel_netlink_ipsec_t *this,
884 struct nlmsghdr *hdr)
885 {
886 u_int32_t spi, reqid;
887 struct xfrm_user_mapping *mapping;
888 host_t *host;
889
890 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
891 spi = mapping->id.spi;
892 reqid = mapping->reqid;
893
894 DBG2(DBG_KNL, "received an XFRM_MSG_MAPPING");
895
896 if (mapping->id.proto == IPPROTO_ESP)
897 {
898 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
899 mapping->new_sport);
900 if (host)
901 {
902 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
903 spi, host);
904 }
905 }
906 }
907
908 /**
909 * Receives events from kernel
910 */
911 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
912 {
913 char response[1024];
914 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
915 struct sockaddr_nl addr;
916 socklen_t addr_len = sizeof(addr);
917 int len;
918 bool oldstate;
919
920 oldstate = thread_cancelability(TRUE);
921 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
922 (struct sockaddr*)&addr, &addr_len);
923 thread_cancelability(oldstate);
924
925 if (len < 0)
926 {
927 switch (errno)
928 {
929 case EINTR:
930 /* interrupted, try again */
931 return JOB_REQUEUE_DIRECT;
932 case EAGAIN:
933 /* no data ready, select again */
934 return JOB_REQUEUE_DIRECT;
935 default:
936 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
937 sleep(1);
938 return JOB_REQUEUE_FAIR;
939 }
940 }
941
942 if (addr.nl_pid != 0)
943 { /* not from kernel. not interested, try another one */
944 return JOB_REQUEUE_DIRECT;
945 }
946
947 while (NLMSG_OK(hdr, len))
948 {
949 switch (hdr->nlmsg_type)
950 {
951 case XFRM_MSG_ACQUIRE:
952 process_acquire(this, hdr);
953 break;
954 case XFRM_MSG_EXPIRE:
955 process_expire(this, hdr);
956 break;
957 case XFRM_MSG_MIGRATE:
958 process_migrate(this, hdr);
959 break;
960 case XFRM_MSG_MAPPING:
961 process_mapping(this, hdr);
962 break;
963 default:
964 DBG1(DBG_KNL, "received unknown event from xfrm event socket: %d", hdr->nlmsg_type);
965 break;
966 }
967 hdr = NLMSG_NEXT(hdr, len);
968 }
969 return JOB_REQUEUE_DIRECT;
970 }
971
972 /**
973 * Get an SPI for a specific protocol from the kernel.
974 */
975 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
976 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
977 u_int32_t reqid, u_int32_t *spi)
978 {
979 netlink_buf_t request;
980 struct nlmsghdr *hdr, *out;
981 struct xfrm_userspi_info *userspi;
982 u_int32_t received_spi = 0;
983 size_t len;
984
985 memset(&request, 0, sizeof(request));
986
987 hdr = (struct nlmsghdr*)request;
988 hdr->nlmsg_flags = NLM_F_REQUEST;
989 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
990 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
991
992 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
993 host2xfrm(src, &userspi->info.saddr);
994 host2xfrm(dst, &userspi->info.id.daddr);
995 userspi->info.id.proto = proto;
996 userspi->info.mode = XFRM_MODE_TUNNEL;
997 userspi->info.reqid = reqid;
998 userspi->info.family = src->get_family(src);
999 userspi->min = min;
1000 userspi->max = max;
1001
1002 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1003 {
1004 hdr = out;
1005 while (NLMSG_OK(hdr, len))
1006 {
1007 switch (hdr->nlmsg_type)
1008 {
1009 case XFRM_MSG_NEWSA:
1010 {
1011 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
1012 received_spi = usersa->id.spi;
1013 break;
1014 }
1015 case NLMSG_ERROR:
1016 {
1017 struct nlmsgerr *err = NLMSG_DATA(hdr);
1018
1019 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
1020 strerror(-err->error), -err->error);
1021 break;
1022 }
1023 default:
1024 hdr = NLMSG_NEXT(hdr, len);
1025 continue;
1026 case NLMSG_DONE:
1027 break;
1028 }
1029 break;
1030 }
1031 free(out);
1032 }
1033
1034 if (received_spi == 0)
1035 {
1036 return FAILED;
1037 }
1038
1039 *spi = received_spi;
1040 return SUCCESS;
1041 }
1042
1043 METHOD(kernel_ipsec_t, get_spi, status_t,
1044 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1045 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1046 {
1047 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1048
1049 if (get_spi_internal(this, src, dst, protocol,
1050 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1051 {
1052 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1053 return FAILED;
1054 }
1055
1056 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1057
1058 return SUCCESS;
1059 }
1060
1061 METHOD(kernel_ipsec_t, get_cpi, status_t,
1062 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1063 u_int32_t reqid, u_int16_t *cpi)
1064 {
1065 u_int32_t received_spi = 0;
1066
1067 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1068
1069 if (get_spi_internal(this, src, dst,
1070 IPPROTO_COMP, 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1071 {
1072 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1073 return FAILED;
1074 }
1075
1076 *cpi = htons((u_int16_t)ntohl(received_spi));
1077
1078 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1079
1080 return SUCCESS;
1081 }
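/* The requested range 0x100..0xEFFF presumably avoids the reserved CPI values
 * below 0x100 and the private-use range 0xF000..0xFFFF from RFC 3173 (the
 * code itself gives no rationale); the 32-bit SPI returned by the kernel is
 * truncated to the 16-bit CPI above. */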
1082
1083 METHOD(kernel_ipsec_t, add_sa, status_t,
1084 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1085 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
1086 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
1087 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
1088 u_int16_t cpi, bool encap, bool esn, bool inbound,
1089 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
1090 {
1091 netlink_buf_t request;
1092 char *alg_name;
1093 struct nlmsghdr *hdr;
1094 struct xfrm_usersa_info *sa;
1095 u_int16_t icv_size = 64;
1096 status_t status = FAILED;
1097
1098 /* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
1099 * we are in the recursive call below */
1100 if (ipcomp != IPCOMP_NONE && cpi != 0)
1101 {
1102 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
1103 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark, tfc,
1104 &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED, chunk_empty,
1105 mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
1106 ipcomp = IPCOMP_NONE;
1107 /* use transport mode ESP SA, IPComp uses tunnel mode */
1108 mode = MODE_TRANSPORT;
1109 }
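/* In the recursive call above the CPI argument is 0, so this block is skipped
 * there and the IPComp SA itself (IPPROTO_COMP, transform added via
 * XFRMA_ALG_COMP below) is installed without recursing further. */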
1110
1111 memset(&request, 0, sizeof(request));
1112
1113 if (mark.value)
1114 {
1115 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} "
1116 "(mark %u/0x%8x)", ntohl(spi), reqid, mark.value, mark.mask);
1117 }
1118 else
1119 {
1120 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u}",
1121 ntohl(spi), reqid);
1122 }
1123 hdr = (struct nlmsghdr*)request;
1124 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1125 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
1126 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1127
1128 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1129 host2xfrm(src, &sa->saddr);
1130 host2xfrm(dst, &sa->id.daddr);
1131 sa->id.spi = spi;
1132 sa->id.proto = protocol;
1133 sa->family = src->get_family(src);
1134 sa->mode = mode2kernel(mode);
1135 switch (mode)
1136 {
1137 case MODE_TUNNEL:
1138 sa->flags |= XFRM_STATE_AF_UNSPEC;
1139 break;
1140 case MODE_BEET:
1141 case MODE_TRANSPORT:
1142 if(src_ts && dst_ts)
1143 {
1144 sa->sel = ts2selector(src_ts, dst_ts);
1145 }
1146 break;
1147 default:
1148 break;
1149 }
1150
1151 sa->reqid = reqid;
1152 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1153 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1154 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1155 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1156 /* we use lifetimes since added, not since used */
1157 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1158 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1159 sa->lft.soft_use_expires_seconds = 0;
1160 sa->lft.hard_use_expires_seconds = 0;
1161
1162 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
1163
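/* The switch below derives the AEAD ICV length by falling through: starting
 * from 64 bits, the ICV16/GMAC cases add 32 twice (128 bits), the ICV12
 * cases add 32 once (96 bits) and the ICV8 cases keep the initial 64 bits. */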
1164 switch (enc_alg)
1165 {
1166 case ENCR_UNDEFINED:
1167 /* no encryption */
1168 break;
1169 case ENCR_AES_CCM_ICV16:
1170 case ENCR_AES_GCM_ICV16:
1171 case ENCR_NULL_AUTH_AES_GMAC:
1172 case ENCR_CAMELLIA_CCM_ICV16:
1173 icv_size += 32;
1174 /* FALL */
1175 case ENCR_AES_CCM_ICV12:
1176 case ENCR_AES_GCM_ICV12:
1177 case ENCR_CAMELLIA_CCM_ICV12:
1178 icv_size += 32;
1179 /* FALL */
1180 case ENCR_AES_CCM_ICV8:
1181 case ENCR_AES_GCM_ICV8:
1182 case ENCR_CAMELLIA_CCM_ICV8:
1183 {
1184 struct xfrm_algo_aead *algo;
1185
1186 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1187 if (alg_name == NULL)
1188 {
1189 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1190 encryption_algorithm_names, enc_alg);
1191 goto failed;
1192 }
1193 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1194 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1195
1196 rthdr->rta_type = XFRMA_ALG_AEAD;
1197 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) + enc_key.len);
1198 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1199 if (hdr->nlmsg_len > sizeof(request))
1200 {
1201 goto failed;
1202 }
1203
1204 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1205 algo->alg_key_len = enc_key.len * 8;
1206 algo->alg_icv_len = icv_size;
1207 strcpy(algo->alg_name, alg_name);
1208 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1209
1210 rthdr = XFRM_RTA_NEXT(rthdr);
1211 break;
1212 }
1213 default:
1214 {
1215 struct xfrm_algo *algo;
1216
1217 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1218 if (alg_name == NULL)
1219 {
1220 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1221 encryption_algorithm_names, enc_alg);
1222 goto failed;
1223 }
1224 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1225 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1226
1227 rthdr->rta_type = XFRMA_ALG_CRYPT;
1228 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1229 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1230 if (hdr->nlmsg_len > sizeof(request))
1231 {
1232 goto failed;
1233 }
1234
1235 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1236 algo->alg_key_len = enc_key.len * 8;
1237 strcpy(algo->alg_name, alg_name);
1238 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1239
1240 rthdr = XFRM_RTA_NEXT(rthdr);
1241 }
1242 }
1243
1244 if (int_alg != AUTH_UNDEFINED)
1245 {
1246 alg_name = lookup_algorithm(integrity_algs, int_alg);
1247 if (alg_name == NULL)
1248 {
1249 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1250 integrity_algorithm_names, int_alg);
1251 goto failed;
1252 }
1253 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1254 integrity_algorithm_names, int_alg, int_key.len * 8);
1255
1256 if (int_alg == AUTH_HMAC_SHA2_256_128)
1257 {
1258 struct xfrm_algo_auth* algo;
1259
1260 /* the kernel uses SHA256 with 96 bit truncation by default,
1261 * use specified truncation size supported by newer kernels */
1262 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1263 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) + int_key.len);
1264
1265 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1266 if (hdr->nlmsg_len > sizeof(request))
1267 {
1268 goto failed;
1269 }
1270
1271 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1272 algo->alg_key_len = int_key.len * 8;
1273 algo->alg_trunc_len = 128;
1274 strcpy(algo->alg_name, alg_name);
1275 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1276 }
1277 else
1278 {
1279 struct xfrm_algo* algo;
1280
1281 rthdr->rta_type = XFRMA_ALG_AUTH;
1282 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1283
1284 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1285 if (hdr->nlmsg_len > sizeof(request))
1286 {
1287 goto failed;
1288 }
1289
1290 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1291 algo->alg_key_len = int_key.len * 8;
1292 strcpy(algo->alg_name, alg_name);
1293 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1294 }
1295 rthdr = XFRM_RTA_NEXT(rthdr);
1296 }
1297
1298 if (ipcomp != IPCOMP_NONE)
1299 {
1300 rthdr->rta_type = XFRMA_ALG_COMP;
1301 alg_name = lookup_algorithm(compression_algs, ipcomp);
1302 if (alg_name == NULL)
1303 {
1304 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1305 ipcomp_transform_names, ipcomp);
1306 goto failed;
1307 }
1308 DBG2(DBG_KNL, " using compression algorithm %N",
1309 ipcomp_transform_names, ipcomp);
1310
1311 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1312 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1313 if (hdr->nlmsg_len > sizeof(request))
1314 {
1315 goto failed;
1316 }
1317
1318 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1319 algo->alg_key_len = 0;
1320 strcpy(algo->alg_name, alg_name);
1321
1322 rthdr = XFRM_RTA_NEXT(rthdr);
1323 }
1324
1325 if (encap)
1326 {
1327 struct xfrm_encap_tmpl *tmpl;
1328
1329 rthdr->rta_type = XFRMA_ENCAP;
1330 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1331
1332 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1333 if (hdr->nlmsg_len > sizeof(request))
1334 {
1335 goto failed;
1336 }
1337
1338 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
1339 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1340 tmpl->encap_sport = htons(src->get_port(src));
1341 tmpl->encap_dport = htons(dst->get_port(dst));
1342 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1343 /* encap_oa could probably be derived from the
1344 * traffic selectors [rfc4306, p39]. In the netlink kernel implementation
1345 * pluto does the same as we do here but it uses encap_oa in the
1346 * pfkey implementation. BUT as /usr/src/linux/net/key/af_key.c indicates
1347 * the kernel ignores it anyway
1348 * -> does that mean that NAT-T encap doesn't work in transport mode?
1349 * No. The reason the kernel ignores NAT-OA is that it recomputes
1350 * (or, rather, just ignores) the checksum. If packets pass
1351 * the IPsec checks it marks them "checksum ok" so OA isn't needed. */
1352 rthdr = XFRM_RTA_NEXT(rthdr);
1353 }
1354
1355 if (mark.value)
1356 {
1357 struct xfrm_mark *mrk;
1358
1359 rthdr->rta_type = XFRMA_MARK;
1360 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1361
1362 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1363 if (hdr->nlmsg_len > sizeof(request))
1364 {
1365 goto failed;
1366 }
1367
1368 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1369 mrk->v = mark.value;
1370 mrk->m = mark.mask;
1371 rthdr = XFRM_RTA_NEXT(rthdr);
1372 }
1373
1374 if (tfc)
1375 {
1376 u_int32_t *tfcpad;
1377
1378 rthdr->rta_type = XFRMA_TFCPAD;
1379 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1380
1381 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1382 if (hdr->nlmsg_len > sizeof(request))
1383 {
1384 goto failed;
1385 }
1386
1387 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1388 *tfcpad = tfc;
1389 rthdr = XFRM_RTA_NEXT(rthdr);
1390 }
1391
1392 if (protocol != IPPROTO_COMP)
1393 {
1394 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1395 {
1396 /* for ESN or larger replay windows we need the new
1397 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1398 struct xfrm_replay_state_esn *replay;
1399
1400 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1401 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1402 (this->replay_window + 7) / 8);
1403
1404 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1405 if (hdr->nlmsg_len > sizeof(request))
1406 {
1407 goto failed;
1408 }
1409
1410 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1411 /* bmp_len contains the number of __u32's */
1412 replay->bmp_len = this->replay_bmp;
1413 replay->replay_window = this->replay_window;
1414
1415 rthdr = XFRM_RTA_NEXT(rthdr);
1416 if (esn)
1417 {
1418 sa->flags |= XFRM_STATE_ESN;
1419 }
1420 }
1421 else
1422 {
1423 sa->replay_window = DEFAULT_REPLAY_WINDOW;
1424 }
1425 }
1426
1427 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1428 {
1429 if (mark.value)
1430 {
1431 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1432 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1433 }
1434 else
1435 {
1436 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1437 }
1438 goto failed;
1439 }
1440
1441 status = SUCCESS;
1442
1443 failed:
1444 memwipe(request, sizeof(request));
1445 return status;
1446 }
1447
1448 /**
1449 * Get the ESN replay state (i.e. sequence numbers) of an SA.
1450 *
1451 * Allocates the replay state structure we get from the kernel into one of the output parameters.
1452 */
1453 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1454 u_int32_t spi, u_int8_t protocol, host_t *dst,
1455 struct xfrm_replay_state_esn **replay_esn,
1456 struct xfrm_replay_state **replay)
1457 {
1458 netlink_buf_t request;
1459 struct nlmsghdr *hdr, *out = NULL;
1460 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1461 size_t len;
1462 struct rtattr *rta;
1463 size_t rtasize;
1464
1465 memset(&request, 0, sizeof(request));
1466
1467 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1468 ntohl(spi));
1469
1470 hdr = (struct nlmsghdr*)request;
1471 hdr->nlmsg_flags = NLM_F_REQUEST;
1472 hdr->nlmsg_type = XFRM_MSG_GETAE;
1473 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1474
1475 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1476 aevent_id->flags = XFRM_AE_RVAL;
1477
1478 host2xfrm(dst, &aevent_id->sa_id.daddr);
1479 aevent_id->sa_id.spi = spi;
1480 aevent_id->sa_id.proto = protocol;
1481 aevent_id->sa_id.family = dst->get_family(dst);
1482
1483 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1484 {
1485 hdr = out;
1486 while (NLMSG_OK(hdr, len))
1487 {
1488 switch (hdr->nlmsg_type)
1489 {
1490 case XFRM_MSG_NEWAE:
1491 {
1492 out_aevent = NLMSG_DATA(hdr);
1493 break;
1494 }
1495 case NLMSG_ERROR:
1496 {
1497 struct nlmsgerr *err = NLMSG_DATA(hdr);
1498 DBG1(DBG_KNL, "querying replay state from SAD entry failed: %s (%d)",
1499 strerror(-err->error), -err->error);
1500 break;
1501 }
1502 default:
1503 hdr = NLMSG_NEXT(hdr, len);
1504 continue;
1505 case NLMSG_DONE:
1506 break;
1507 }
1508 break;
1509 }
1510 }
1511
1512 if (out_aevent)
1513 {
1514 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1515 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1516 while (RTA_OK(rta, rtasize))
1517 {
1518 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1519 RTA_PAYLOAD(rta) == sizeof(**replay))
1520 {
1521 *replay = malloc(RTA_PAYLOAD(rta));
1522 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1523 break;
1524 }
1525 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1526 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1527 {
1528 *replay_esn = malloc(RTA_PAYLOAD(rta));
1529 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1530 break;
1531 }
1532 rta = RTA_NEXT(rta, rtasize);
1533 }
1534 }
1535 free(out);
1536 }
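/* Callers own the structure allocated above and must free() it, as
 * update_sa() does after copying the replay state into the updated SA. */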
1537
1538 METHOD(kernel_ipsec_t, query_sa, status_t,
1539 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1540 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1541 {
1542 netlink_buf_t request;
1543 struct nlmsghdr *out = NULL, *hdr;
1544 struct xfrm_usersa_id *sa_id;
1545 struct xfrm_usersa_info *sa = NULL;
1546 status_t status = FAILED;
1547 size_t len;
1548
1549 memset(&request, 0, sizeof(request));
1550
1551 if (mark.value)
1552 {
1553 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%8x)",
1554 ntohl(spi), mark.value, mark.mask);
1555 }
1556 else
1557 {
1558 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x", ntohl(spi));
1559 }
1560 hdr = (struct nlmsghdr*)request;
1561 hdr->nlmsg_flags = NLM_F_REQUEST;
1562 hdr->nlmsg_type = XFRM_MSG_GETSA;
1563 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1564
1565 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1566 host2xfrm(dst, &sa_id->daddr);
1567 sa_id->spi = spi;
1568 sa_id->proto = protocol;
1569 sa_id->family = dst->get_family(dst);
1570
1571 if (mark.value)
1572 {
1573 struct xfrm_mark *mrk;
1574 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1575
1576 rthdr->rta_type = XFRMA_MARK;
1577 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1578 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1579 if (hdr->nlmsg_len > sizeof(request))
1580 {
1581 return FAILED;
1582 }
1583
1584 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1585 mrk->v = mark.value;
1586 mrk->m = mark.mask;
1587 }
1588
1589 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1590 {
1591 hdr = out;
1592 while (NLMSG_OK(hdr, len))
1593 {
1594 switch (hdr->nlmsg_type)
1595 {
1596 case XFRM_MSG_NEWSA:
1597 {
1598 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1599 break;
1600 }
1601 case NLMSG_ERROR:
1602 {
1603 struct nlmsgerr *err = NLMSG_DATA(hdr);
1604
1605 if (mark.value)
1606 {
1607 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1608 "(mark %u/0x%8x) failed: %s (%d)",
1609 ntohl(spi), mark.value, mark.mask,
1610 strerror(-err->error), -err->error);
1611 }
1612 else
1613 {
1614 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1615 "failed: %s (%d)", ntohl(spi),
1616 strerror(-err->error), -err->error);
1617 }
1618 break;
1619 }
1620 default:
1621 hdr = NLMSG_NEXT(hdr, len);
1622 continue;
1623 case NLMSG_DONE:
1624 break;
1625 }
1626 break;
1627 }
1628 }
1629
1630 if (sa == NULL)
1631 {
1632 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1633 }
1634 else
1635 {
1636 *bytes = sa->curlft.bytes;
1637 status = SUCCESS;
1638 }
1639 memwipe(out, len);
1640 free(out);
1641 return status;
1642 }
1643
1644 METHOD(kernel_ipsec_t, del_sa, status_t,
1645 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1646 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1647 {
1648 netlink_buf_t request;
1649 struct nlmsghdr *hdr;
1650 struct xfrm_usersa_id *sa_id;
1651
1652 /* if IPComp was used, we first delete the additional IPComp SA */
1653 if (cpi)
1654 {
1655 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1656 }
1657
1658 memset(&request, 0, sizeof(request));
1659
1660 if (mark.value)
1661 {
1662 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%8x)",
1663 ntohl(spi), mark.value, mark.mask);
1664 }
1665 else
1666 {
1667 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x", ntohl(spi));
1668 }
1669 hdr = (struct nlmsghdr*)request;
1670 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1671 hdr->nlmsg_type = XFRM_MSG_DELSA;
1672 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1673
1674 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1675 host2xfrm(dst, &sa_id->daddr);
1676 sa_id->spi = spi;
1677 sa_id->proto = protocol;
1678 sa_id->family = dst->get_family(dst);
1679
1680 if (mark.value)
1681 {
1682 struct xfrm_mark *mrk;
1683 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1684
1685 rthdr->rta_type = XFRMA_MARK;
1686 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1687 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1688 if (hdr->nlmsg_len > sizeof(request))
1689 {
1690 return FAILED;
1691 }
1692
1693 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1694 mrk->v = mark.value;
1695 mrk->m = mark.mask;
1696 }
1697
1698 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1699 {
1700 if (mark.value)
1701 {
1702 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1703 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1704 }
1705 else
1706 {
1707 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x", ntohl(spi));
1708 }
1709 return FAILED;
1710 }
1711 if (mark.value)
1712 {
1713 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%8x)",
1714 ntohl(spi), mark.value, mark.mask);
1715 }
1716 else
1717 {
1718 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x", ntohl(spi));
1719 }
1720 return SUCCESS;
1721 }
1722
1723 METHOD(kernel_ipsec_t, update_sa, status_t,
1724 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1725 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1726 bool old_encap, bool new_encap, mark_t mark)
1727 {
1728 netlink_buf_t request;
1729 u_char *pos;
1730 struct nlmsghdr *hdr, *out = NULL;
1731 struct xfrm_usersa_id *sa_id;
1732 struct xfrm_usersa_info *out_sa = NULL, *sa;
1733 size_t len;
1734 struct rtattr *rta;
1735 size_t rtasize;
1736 struct xfrm_encap_tmpl* tmpl = NULL;
1737 struct xfrm_replay_state *replay = NULL;
1738 struct xfrm_replay_state_esn *replay_esn = NULL;
1739 status_t status = FAILED;
1740
1741 /* if IPComp is used, we first update the IPComp SA */
1742 if (cpi)
1743 {
1744 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1745 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1746 }
1747
1748 memset(&request, 0, sizeof(request));
1749
1750 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1751
1752 /* query the existing SA first */
1753 hdr = (struct nlmsghdr*)request;
1754 hdr->nlmsg_flags = NLM_F_REQUEST;
1755 hdr->nlmsg_type = XFRM_MSG_GETSA;
1756 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1757
1758 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1759 host2xfrm(dst, &sa_id->daddr);
1760 sa_id->spi = spi;
1761 sa_id->proto = protocol;
1762 sa_id->family = dst->get_family(dst);
1763
1764 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1765 {
1766 hdr = out;
1767 while (NLMSG_OK(hdr, len))
1768 {
1769 switch (hdr->nlmsg_type)
1770 {
1771 case XFRM_MSG_NEWSA:
1772 {
1773 out_sa = NLMSG_DATA(hdr);
1774 break;
1775 }
1776 case NLMSG_ERROR:
1777 {
1778 struct nlmsgerr *err = NLMSG_DATA(hdr);
1779 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1780 strerror(-err->error), -err->error);
1781 break;
1782 }
1783 default:
1784 hdr = NLMSG_NEXT(hdr, len);
1785 continue;
1786 case NLMSG_DONE:
1787 break;
1788 }
1789 break;
1790 }
1791 }
1792 if (out_sa == NULL)
1793 {
1794 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1795 goto failed;
1796 }
1797
1798 get_replay_state(this, spi, protocol, dst, &replay_esn, &replay);
1799
1800 /* delete the old SA (without affecting the IPComp SA) */
1801 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1802 {
1803 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x", ntohl(spi));
1804 goto failed;
1805 }
1806
1807 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1808 ntohl(spi), src, dst, new_src, new_dst);
1809 /* copy over the SA from out to request */
1810 hdr = (struct nlmsghdr*)request;
1811 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1812 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1813 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1814 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1815 sa = NLMSG_DATA(hdr);
1816 sa->family = new_dst->get_family(new_dst);
1817
1818 if (!src->ip_equals(src, new_src))
1819 {
1820 host2xfrm(new_src, &sa->saddr);
1821 }
1822 if (!dst->ip_equals(dst, new_dst))
1823 {
1824 host2xfrm(new_dst, &sa->id.daddr);
1825 }
1826
1827 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1828 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1829 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1830 while(RTA_OK(rta, rtasize))
1831 {
1832 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1833 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1834 {
1835 if (rta->rta_type == XFRMA_ENCAP)
1836 { /* update encap tmpl */
1837 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1838 tmpl->encap_sport = htons(new_src->get_port(new_src));
1839 tmpl->encap_dport = htons(new_dst->get_port(new_dst));
1840 }
1841 memcpy(pos, rta, rta->rta_len);
1842 pos += RTA_ALIGN(rta->rta_len);
1843 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1844 }
1845 rta = RTA_NEXT(rta, rtasize);
1846 }
1847
1848 rta = (struct rtattr*)pos;
1849 if (tmpl == NULL && new_encap)
1850 { /* add tmpl if we are enabling it */
1851 rta->rta_type = XFRMA_ENCAP;
1852 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1853
1854 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1855 if (hdr->nlmsg_len > sizeof(request))
1856 {
1857 goto failed;
1858 }
1859
1860 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1861 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1862 tmpl->encap_sport = htons(new_src->get_port(new_src));
1863 tmpl->encap_dport = htons(new_dst->get_port(new_dst));
1864 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1865
1866 rta = XFRM_RTA_NEXT(rta);
1867 }
1868
1869 if (replay_esn)
1870 {
1871 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1872 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1873 this->replay_bmp);
1874
1875 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1876 if (hdr->nlmsg_len > sizeof(request))
1877 {
1878 goto failed;
1879 }
1880 memcpy(RTA_DATA(rta), replay_esn,
1881 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1882
1883 rta = XFRM_RTA_NEXT(rta);
1884 }
1885 else if (replay)
1886 {
1887 rta->rta_type = XFRMA_REPLAY_VAL;
1888 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1889
1890 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1891 if (hdr->nlmsg_len > sizeof(request))
1892 {
1893 goto failed;
1894 }
1895 memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
1896
1897 rta = XFRM_RTA_NEXT(rta);
1898 }
1899 else
1900 {
1901 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1902 "with SPI %.8x", ntohl(spi));
1903 }
1904
1905 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1906 {
1907 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1908 goto failed;
1909 }
1910
1911 status = SUCCESS;
1912 failed:
1913 free(replay);
1914 free(replay_esn);
1915 memwipe(out, len);
1916 free(out);
1917
1918 return status;
1919 }
1920
1921 /**
1922 * Add or update a policy in the kernel.
1923 *
1924 * Note: The mutex has to be locked when entering this function.
1925 */
1926 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
1927 policy_entry_t *policy, policy_sa_t *mapping, bool update)
1928 {
1929 netlink_buf_t request;
1930 policy_entry_t clone;
1931 ipsec_sa_t *ipsec = mapping->sa;
1932 struct xfrm_userpolicy_info *policy_info;
1933 struct nlmsghdr *hdr;
1934 int i;
1935
1936 /* clone the policy so we are able to check it out again later */
1937 memcpy(&clone, policy, sizeof(policy_entry_t));
1938
1939 memset(&request, 0, sizeof(request));
1940 hdr = (struct nlmsghdr*)request;
1941 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1942 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
1943 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
1944
1945 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
1946 policy_info->sel = policy->sel;
1947 policy_info->dir = policy->direction;
1948
1949 /* priority was precalculated for this mapping; smaller (more specific) selectors get a higher priority */
1950 policy_info->priority = mapping->priority;
1951 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
1952 : XFRM_POLICY_BLOCK;
1953 policy_info->share = XFRM_SHARE_ANY;
1954
1955 /* policies don't expire */
1956 policy_info->lft.soft_byte_limit = XFRM_INF;
1957 policy_info->lft.soft_packet_limit = XFRM_INF;
1958 policy_info->lft.hard_byte_limit = XFRM_INF;
1959 policy_info->lft.hard_packet_limit = XFRM_INF;
1960 policy_info->lft.soft_add_expires_seconds = 0;
1961 policy_info->lft.hard_add_expires_seconds = 0;
1962 policy_info->lft.soft_use_expires_seconds = 0;
1963 policy_info->lft.hard_use_expires_seconds = 0;
1964
1965 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
1966
1967 if (mapping->type == POLICY_IPSEC)
1968 {
1969 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
1970 struct {
1971 u_int8_t proto;
1972 bool use;
1973 } protos[] = {
1974 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
1975 { IPPROTO_ESP, ipsec->cfg.esp.use },
1976 { IPPROTO_AH, ipsec->cfg.ah.use },
1977 };
1978 ipsec_mode_t proto_mode = ipsec->cfg.mode;
1979
1980 rthdr->rta_type = XFRMA_TMPL;
1981 rthdr->rta_len = 0; /* actual length is set below */
1982
1983 for (i = 0; i < countof(protos); i++)
1984 {
1985 if (!protos[i].use)
1986 {
1987 continue;
1988 }
1989
1990 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
1991 hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
1992 if (hdr->nlmsg_len > sizeof(request))
1993 {
1994 return FAILED;
1995 }
1996
1997 tmpl->reqid = ipsec->cfg.reqid;
1998 tmpl->id.proto = protos[i].proto;
1999 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
2000 tmpl->mode = mode2kernel(proto_mode);
2001 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2002 policy->direction != POLICY_OUT;
2003 tmpl->family = ipsec->src->get_family(ipsec->src);
2004
2005 if (proto_mode == MODE_TUNNEL)
2006 { /* only for tunnel mode */
2007 host2xfrm(ipsec->src, &tmpl->saddr);
2008 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2009 }
2010
2011 tmpl++;
2012
2013 /* use transport mode for other SAs */
2014 proto_mode = MODE_TRANSPORT;
2015 }
2016
2017 rthdr = XFRM_RTA_NEXT(rthdr);
2018 }
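/* Templates for a bundle are emitted in the fixed order IPComp, ESP, AH; only
 * the first one keeps the negotiated mode (and, in tunnel mode, the tunnel
 * endpoints), all following SAs are wrapped in transport mode. */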
2019
2020 if (ipsec->mark.value)
2021 {
2022 struct xfrm_mark *mrk;
2023
2024 rthdr->rta_type = XFRMA_MARK;
2025 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2026
2027 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2028 if (hdr->nlmsg_len > sizeof(request))
2029 {
2030 return FAILED;
2031 }
2032
2033 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2034 mrk->v = ipsec->mark.value;
2035 mrk->m = ipsec->mark.mask;
2036 }
2037 this->mutex->unlock(this->mutex);
2038
2039 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2040 {
2041 return FAILED;
2042 }
2043
2044 /* find the policy again */
2045 this->mutex->lock(this->mutex);
2046 policy = this->policies->get(this->policies, &clone);
2047 if (!policy ||
2048 policy->used_by->find_first(policy->used_by,
2049 NULL, (void**)&mapping) != SUCCESS)
2050 { /* policy or mapping is already gone, ignore */
2051 this->mutex->unlock(this->mutex);
2052 return SUCCESS;
2053 }
2054
2055 /* install a route, if:
2056 * - this is a forward policy (so only one route per CHILD_SA is installed)
2057 * - we are in tunnel/BEET mode
2058 * - routing is not disabled via strongswan.conf
2059 */
2060 if (policy->direction == POLICY_FWD &&
2061 ipsec->cfg.mode != MODE_TRANSPORT && this->install_routes)
2062 {
2063 route_entry_t *route = malloc_thing(route_entry_t);
2064 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2065
2066 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2067 fwd->dst_ts, &route->src_ip) == SUCCESS)
2068 {
2069 /* get the nexthop to src (src as we are in POLICY_FWD) */
2070 route->gateway = hydra->kernel_interface->get_nexthop(
2071 hydra->kernel_interface, ipsec->src);
2072 /* install route via outgoing interface */
2073 route->if_name = hydra->kernel_interface->get_interface(
2074 hydra->kernel_interface, ipsec->dst);
2075 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2076 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2077 route->prefixlen = policy->sel.prefixlen_s;
2078
2079 if (!route->if_name)
2080 {
2081 this->mutex->unlock(this->mutex);
2082 route_entry_destroy(route);
2083 return SUCCESS;
2084 }
2085
2086 if (policy->route)
2087 {
2088 route_entry_t *old = policy->route;
2089 if (route_entry_equals(old, route))
2090 { /* keep previously installed route */
2091 this->mutex->unlock(this->mutex);
2092 route_entry_destroy(route);
2093 return SUCCESS;
2094 }
2095 /* uninstall previously installed route */
2096 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2097 old->dst_net, old->prefixlen, old->gateway,
2098 old->src_ip, old->if_name) != SUCCESS)
2099 {
2100 DBG1(DBG_KNL, "error uninstalling route installed with "
2101 "policy %R === %R %N", fwd->src_ts,
2102 fwd->dst_ts, policy_dir_names,
2103 policy->direction);
2104 }
2105 route_entry_destroy(old);
2106 policy->route = NULL;
2107 }
2108
2109 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2110 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2111 switch (hydra->kernel_interface->add_route(
2112 hydra->kernel_interface, route->dst_net,
2113 route->prefixlen, route->gateway,
2114 route->src_ip, route->if_name))
2115 {
2116 default:
2117 DBG1(DBG_KNL, "unable to install source route for %H",
2118 route->src_ip);
2119 /* FALL */
2120 case ALREADY_DONE:
2121 /* route exists, do not uninstall */
2122 route_entry_destroy(route);
2123 break;
2124 case SUCCESS:
2125 /* cache the installed route */
2126 policy->route = route;
2127 break;
2128 }
2129 }
2130 else
2131 {
2132 free(route);
2133 }
2134 }
2135 this->mutex->unlock(this->mutex);
2136 return SUCCESS;
2137 }
2138
2139 METHOD(kernel_ipsec_t, add_policy, status_t,
2140 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
2141 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
2142 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
2143 mark_t mark, bool routed)
2144 {
2145 policy_entry_t *policy, *current;
2146 policy_sa_t *assigned_sa, *current_sa;
2147 enumerator_t *enumerator;
2148 bool found = FALSE, update = TRUE;
2149
2150 /* create a policy */
2151 INIT(policy,
2152 .sel = ts2selector(src_ts, dst_ts),
2153 .mark = mark.value & mark.mask,
2154 .direction = direction,
2155 );
2156
2157 /* find an existing policy that matches exactly */
2158 this->mutex->lock(this->mutex);
2159 current = this->policies->get(this->policies, policy);
2160 if (current)
2161 {
2162 /* use existing policy */
2163 if (mark.value)
2164 {
2165 DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%8x) "
2166 "already exists, increasing refcount",
2167 src_ts, dst_ts, policy_dir_names, direction,
2168 mark.value, mark.mask);
2169 }
2170 else
2171 {
2172 DBG2(DBG_KNL, "policy %R === %R %N "
2173 "already exists, increasing refcount",
2174 src_ts, dst_ts, policy_dir_names, direction);
2175 }
2176 policy_entry_destroy(this, policy);
2177 policy = current;
2178 found = TRUE;
2179 }
2180 else
2181 { /* use the new one, if we have no such policy */
2182 policy->used_by = linked_list_create();
2183 this->policies->put(this->policies, policy, policy);
2184 }
2185
2186 /* cache the assigned IPsec SA */
2187 assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
2188 dst_ts, mark, sa);
2189
2190 /* calculate priority based on selector size, small size = high prio */
2191 assigned_sa->priority = routed ? PRIO_LOW : PRIO_HIGH;
2192 assigned_sa->priority -= policy->sel.prefixlen_s;
2193 assigned_sa->priority -= policy->sel.prefixlen_d;
2194 assigned_sa->priority <<= 2; /* make some room for the two flags */
2195 assigned_sa->priority += policy->sel.sport_mask ||
2196 policy->sel.dport_mask ? 0 : 2;
2197 assigned_sa->priority += policy->sel.proto ? 0 : 1;
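/* e.g. a policy for an established SA (routed == FALSE) covering
 * 10.1.0.0/16 === 10.2.0.0/24 with any protocol and any port yields
 * ((512 - 16 - 24) << 2) + 2 + 1 = 1891, while trap policies
 * (routed == TRUE) start at 1024 and therefore always end up with a
 * larger value, i.e. a lower precedence in the kernel */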
2198
2199 /* insert the SA according to its priority */
2200 enumerator = policy->used_by->create_enumerator(policy->used_by);
2201 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2202 {
2203 if (current_sa->priority >= assigned_sa->priority)
2204 {
2205 break;
2206 }
2207 update = FALSE;
2208 }
2209 policy->used_by->insert_before(policy->used_by, enumerator, assigned_sa);
2210 enumerator->destroy(enumerator);
2211
2212 if (!update)
2213 { /* we don't update the kernel policy if a mapping with a higher
2214 * precedence (i.e. a lower priority value) is already installed */
2215 this->mutex->unlock(this->mutex);
2216 return SUCCESS;
2217 }
2218
2219 if (mark.value)
2220 {
2221 DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%8x)",
2222 found ? "updating" : "adding", src_ts, dst_ts,
2223 policy_dir_names, direction, mark.value, mark.mask);
2224 }
2225 else
2226 {
2227 DBG2(DBG_KNL, "%s policy %R === %R %N",
2228 found ? "updating" : "adding", src_ts, dst_ts,
2229 policy_dir_names, direction);
2230 }
2231
2232 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2233 {
2234 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2235 found ? "update" : "add", src_ts, dst_ts,
2236 policy_dir_names, direction);
2237 return FAILED;
2238 }
2239 return SUCCESS;
2240 }
2241
2242 METHOD(kernel_ipsec_t, query_policy, status_t,
2243 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2244 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2245 u_int32_t *use_time)
2246 {
2247 netlink_buf_t request;
2248 struct nlmsghdr *out = NULL, *hdr;
2249 struct xfrm_userpolicy_id *policy_id;
2250 struct xfrm_userpolicy_info *policy = NULL;
2251 size_t len;
2252
2253 memset(&request, 0, sizeof(request));
2254
2255 if (mark.value)
2256 {
2257 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%8x)",
2258 src_ts, dst_ts, policy_dir_names, direction,
2259 mark.value, mark.mask);
2260 }
2261 else
2262 {
2263 DBG2(DBG_KNL, "querying policy %R === %R %N", src_ts, dst_ts,
2264 policy_dir_names, direction);
2265 }
2266 hdr = (struct nlmsghdr*)request;
2267 hdr->nlmsg_flags = NLM_F_REQUEST;
2268 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2269 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2270
2271 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2272 policy_id->sel = ts2selector(src_ts, dst_ts);
2273 policy_id->dir = direction;
2274
2275 if (mark.value)
2276 {
2277 struct xfrm_mark *mrk;
2278 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2279
2280 rthdr->rta_type = XFRMA_MARK;
2281 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2282
2283 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2284 if (hdr->nlmsg_len > sizeof(request))
2285 {
2286 return FAILED;
2287 }
2288
2289 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2290 mrk->v = mark.value;
2291 mrk->m = mark.mask;
2292 }
2293
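/* on success the kernel answers with a single XFRM_MSG_NEWPOLICY message
 * from which we read the current lifetime statistics */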
2294 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2295 {
2296 hdr = out;
2297 while (NLMSG_OK(hdr, len))
2298 {
2299 switch (hdr->nlmsg_type)
2300 {
2301 case XFRM_MSG_NEWPOLICY:
2302 {
2303 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2304 break;
2305 }
2306 case NLMSG_ERROR:
2307 {
2308 struct nlmsgerr *err = NLMSG_DATA(hdr);
2309 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2310 strerror(-err->error), -err->error);
2311 break;
2312 }
2313 default:
2314 hdr = NLMSG_NEXT(hdr, len);
2315 continue;
2316 case NLMSG_DONE:
2317 break;
2318 }
2319 break;
2320 }
2321 }
2322
2323 if (policy == NULL)
2324 {
2325 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2326 policy_dir_names, direction);
2327 free(out);
2328 return FAILED;
2329 }
2330
2331 if (policy->curlft.use_time)
2332 {
2333 /* we need the monotonic time, but the kernel returns system time. */
2334 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2335 }
2336 else
2337 {
2338 *use_time = 0;
2339 }
2340
2341 free(out);
2342 return SUCCESS;
2343 }
2344
2345 METHOD(kernel_ipsec_t, del_policy, status_t,
2346 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2347 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2348 mark_t mark, bool unrouted)
2349 {
2350 policy_entry_t *current, policy;
2351 enumerator_t *enumerator;
2352 policy_sa_t *mapping;
2353 netlink_buf_t request;
2354 struct nlmsghdr *hdr;
2355 struct xfrm_userpolicy_id *policy_id;
2356 bool is_installed = TRUE;
2357
2358 if (mark.value)
2359 {
2360 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x)",
2361 src_ts, dst_ts, policy_dir_names, direction,
2362 mark.value, mark.mask);
2363 }
2364 else
2365 {
2366 DBG2(DBG_KNL, "deleting policy %R === %R %N",
2367 src_ts, dst_ts, policy_dir_names, direction);
2368 }
2369
2370 /* create a policy */
2371 memset(&policy, 0, sizeof(policy_entry_t));
2372 policy.sel = ts2selector(src_ts, dst_ts);
2373 policy.mark = mark.value & mark.mask;
2374 policy.direction = direction;
2375
2376 /* find the policy */
2377 this->mutex->lock(this->mutex);
2378 current = this->policies->get(this->policies, &policy);
2379 if (!current)
2380 {
2381 if (mark.value)
2382 {
2383 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x) "
2384 "failed, not found", src_ts, dst_ts, policy_dir_names,
2385 direction, mark.value, mark.mask);
2386 }
2387 else
2388 {
2389 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2390 src_ts, dst_ts, policy_dir_names, direction);
2391 }
2392 this->mutex->unlock(this->mutex);
2393 return NOT_FOUND;
2394 }
2395
2396 /* remove mapping to SA by reqid */
2397 enumerator = current->used_by->create_enumerator(current->used_by);
2398 while (enumerator->enumerate(enumerator, (void**)&mapping))
2399 {
2400 if (reqid == mapping->sa->cfg.reqid)
2401 {
2402 current->used_by->remove_at(current->used_by, enumerator);
2403 policy_sa_destroy(this, direction, mapping);
2404 break;
2405 }
2406 is_installed = FALSE;
2407 }
2408 enumerator->destroy(enumerator);
2409
2410 if (current->used_by->get_count(current->used_by) > 0)
2411 { /* policy is used by more SAs, keep in kernel */
2412 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2413 if (!is_installed)
2414 { /* no need to update as the policy was not installed for this SA */
2415 this->mutex->unlock(this->mutex);
2416 return SUCCESS;
2417 }
2418
2419 if (mark.value)
2420 {
2421 DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%8x)",
2422 src_ts, dst_ts, policy_dir_names, direction,
2423 mark.value, mark.mask);
2424 }
2425 else
2426 {
2427 DBG2(DBG_KNL, "updating policy %R === %R %N",
2428 src_ts, dst_ts, policy_dir_names, direction);
2429 }
2430
2431 current->used_by->get_first(current->used_by, (void**)&mapping);
2432 if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
2433 {
2434 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2435 src_ts, dst_ts, policy_dir_names, direction);
2436 return FAILED;
2437 }
2438 return SUCCESS;
2439 }
2440
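/* this was the last mapping, remove the policy from the kernel */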
2441 memset(&request, 0, sizeof(request));
2442
2443 hdr = (struct nlmsghdr*)request;
2444 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2445 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2446 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2447
2448 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2449 policy_id->sel = current->sel;
2450 policy_id->dir = direction;
2451
2452 if (mark.value)
2453 {
2454 struct xfrm_mark *mrk;
2455 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2456
2457 rthdr->rta_type = XFRMA_MARK;
2458 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2459 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2460 if (hdr->nlmsg_len > sizeof(request))
2461 {
this->mutex->unlock(this->mutex);
2462 return FAILED;
2463 }
2464
2465 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2466 mrk->v = mark.value;
2467 mrk->m = mark.mask;
2468 }
2469
2470 if (current->route)
2471 {
2472 route_entry_t *route = current->route;
2473 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2474 route->dst_net, route->prefixlen, route->gateway,
2475 route->src_ip, route->if_name) != SUCCESS)
2476 {
2477 DBG1(DBG_KNL, "error uninstalling route installed with "
2478 "policy %R === %R %N", src_ts, dst_ts,
2479 policy_dir_names, direction);
2480 }
2481 }
2482
2483 this->policies->remove(this->policies, current);
2484 policy_entry_destroy(this, current);
2485 this->mutex->unlock(this->mutex);
2486
2487 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2488 {
2489 if (mark.value)
2490 {
2491 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2492 "(mark %u/0x%8x)", src_ts, dst_ts, policy_dir_names,
2493 direction, mark.value, mark.mask);
2494 }
2495 else
2496 {
2497 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2498 src_ts, dst_ts, policy_dir_names, direction);
2499 }
2500 return FAILED;
2501 }
2502 return SUCCESS;
2503 }
2504
2505 METHOD(kernel_ipsec_t, bypass_socket, bool,
2506 private_kernel_netlink_ipsec_t *this, int fd, int family)
2507 {
2508 struct xfrm_userpolicy_info policy;
2509 u_int sol, ipsec_policy;
2510
2511 switch (family)
2512 {
2513 case AF_INET:
2514 sol = SOL_IP;
2515 ipsec_policy = IP_XFRM_POLICY;
2516 break;
2517 case AF_INET6:
2518 sol = SOL_IPV6;
2519 ipsec_policy = IPV6_XFRM_POLICY;
2520 break;
2521 default:
2522 return FALSE;
2523 }
2524
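/* an ALLOW policy without templates, set directly on the socket, exempts
 * its traffic (e.g. the daemon's own IKE packets) from the global IPsec
 * policies */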
2525 memset(&policy, 0, sizeof(policy));
2526 policy.action = XFRM_POLICY_ALLOW;
2527 policy.sel.family = family;
2528
2529 policy.dir = XFRM_POLICY_OUT;
2530 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2531 {
2532 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2533 strerror(errno));
2534 return FALSE;
2535 }
2536 policy.dir = XFRM_POLICY_IN;
2537 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2538 {
2539 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2540 strerror(errno));
2541 return FALSE;
2542 }
2543 return TRUE;
2544 }
2545
2546 METHOD(kernel_ipsec_t, destroy, void,
2547 private_kernel_netlink_ipsec_t *this)
2548 {
2549 enumerator_t *enumerator;
2550 policy_entry_t *policy;
2551
2552 if (this->job)
2553 {
2554 this->job->cancel(this->job);
2555 }
2556 if (this->socket_xfrm_events > 0)
2557 {
2558 close(this->socket_xfrm_events);
2559 }
2560 DESTROY_IF(this->socket_xfrm);
2561 enumerator = this->policies->create_enumerator(this->policies);
2562 while (enumerator->enumerate(enumerator, &policy, &policy))
2563 {
2564 policy_entry_destroy(this, policy);
2565 }
2566 enumerator->destroy(enumerator);
2567 this->policies->destroy(this->policies);
2568 this->sas->destroy(this->sas);
2569 this->mutex->destroy(this->mutex);
2570 free(this);
2571 }
2572
2573 /*
2574 * Described in header.
2575 */
2576 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2577 {
2578 private_kernel_netlink_ipsec_t *this;
2579 struct sockaddr_nl addr;
2580 int fd;
2581
2582 INIT(this,
2583 .public = {
2584 .interface = {
2585 .get_spi = _get_spi,
2586 .get_cpi = _get_cpi,
2587 .add_sa = _add_sa,
2588 .update_sa = _update_sa,
2589 .query_sa = _query_sa,
2590 .del_sa = _del_sa,
2591 .add_policy = _add_policy,
2592 .query_policy = _query_policy,
2593 .del_policy = _del_policy,
2594 .bypass_socket = _bypass_socket,
2595 .destroy = _destroy,
2596 },
2597 },
2598 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2599 (hashtable_equals_t)policy_equals, 32),
2600 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2601 (hashtable_equals_t)ipsec_sa_equals, 32),
2602 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2603 .install_routes = lib->settings->get_bool(lib->settings,
2604 "%s.install_routes", TRUE, hydra->daemon),
2605 .replay_window = lib->settings->get_int(lib->settings,
2606 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2607 );
2608
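/* number of 32 bit words required to store the replay window bitmap */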
2609 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2610 (sizeof(u_int32_t) * 8);
2611
2612 if (streq(hydra->daemon, "pluto"))
2613 { /* no routes for pluto, they are installed via updown script */
2614 this->install_routes = FALSE;
2615 }
2616
2617 /* extend the lifetime of allocated SPIs (acquire state) in the kernel to
* 165 seconds, roughly the total IKEv2 retransmission timeout */
2618 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2619 if (fd >= 0)
2620 {
2621 ignore_result(write(fd, "165", 3));
2622 close(fd);
2623 }
2624
2625 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2626 if (!this->socket_xfrm)
2627 {
2628 destroy(this);
2629 return NULL;
2630 }
2631
2632 memset(&addr, 0, sizeof(addr));
2633 addr.nl_family = AF_NETLINK;
2634
2635 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2636 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2637 if (this->socket_xfrm_events <= 0)
2638 {
2639 DBG1(DBG_KNL, "unable to create XFRM event socket");
2640 destroy(this);
2641 return NULL;
2642 }
2643 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2644 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2645 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2646 {
2647 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2648 destroy(this);
2649 return NULL;
2650 }
2651 this->job = callback_job_create_with_prio((callback_job_cb_t)receive_events,
2652 this, NULL, NULL, JOB_PRIO_CRITICAL);
2653 lib->processor->queue_job(lib->processor, (job_t*)this->job);
2654
2655 return &this->public;
2656 }
2657