1 /*
2 * Copyright (C) 2006-2011 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <utils/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** Required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** From linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /** Missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /** Default priority of installed policies */
62 #define PRIO_LOW 1024
63 #define PRIO_HIGH 512
64
65 /** Default replay window size, if not set using charon.replay_window */
66 #define DEFAULT_REPLAY_WINDOW 32
67
68 /**
69 * Map the limit for bytes and packets to XFRM_INF by default
70 */
71 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
72
73 /**
74 * Create ORable bitfield of XFRM NL groups
75 */
76 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
77
78 /**
79 * Returns a pointer to the first rtattr following the nlmsghdr *nlh and the
80 * 'usual' netlink data x like 'struct xfrm_usersa_info'
81 */
82 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + \
83 NLMSG_ALIGN(sizeof(x))))
84 /**
85 * Returns a pointer to the next rtattr following rta.
86 * !!! Do not use this to parse messages. Use RTA_NEXT and RTA_OK instead !!!
87 */
88 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + \
89 RTA_ALIGN((rta)->rta_len)))
90 /**
91 * Returns the total size of attached rta data
92 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
93 */
94 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
95
96 typedef struct kernel_algorithm_t kernel_algorithm_t;
97
98 /**
99 * Mapping of IKEv2 kernel identifier to linux crypto API names
100 */
101 struct kernel_algorithm_t {
102 /**
103 * Identifier specified in IKEv2
104 */
105 int ikev2;
106
107 /**
108 * Name of the algorithm in linux crypto API
109 */
110 char *name;
111 };
112
113 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
114 "XFRM_MSG_NEWSA",
115 "XFRM_MSG_DELSA",
116 "XFRM_MSG_GETSA",
117 "XFRM_MSG_NEWPOLICY",
118 "XFRM_MSG_DELPOLICY",
119 "XFRM_MSG_GETPOLICY",
120 "XFRM_MSG_ALLOCSPI",
121 "XFRM_MSG_ACQUIRE",
122 "XFRM_MSG_EXPIRE",
123 "XFRM_MSG_UPDPOLICY",
124 "XFRM_MSG_UPDSA",
125 "XFRM_MSG_POLEXPIRE",
126 "XFRM_MSG_FLUSHSA",
127 "XFRM_MSG_FLUSHPOLICY",
128 "XFRM_MSG_NEWAE",
129 "XFRM_MSG_GETAE",
130 "XFRM_MSG_REPORT",
131 "XFRM_MSG_MIGRATE",
132 "XFRM_MSG_NEWSADINFO",
133 "XFRM_MSG_GETSADINFO",
134 "XFRM_MSG_NEWSPDINFO",
135 "XFRM_MSG_GETSPDINFO",
136 "XFRM_MSG_MAPPING"
137 );
138
139 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_KMADDRESS,
140 "XFRMA_UNSPEC",
141 "XFRMA_ALG_AUTH",
142 "XFRMA_ALG_CRYPT",
143 "XFRMA_ALG_COMP",
144 "XFRMA_ENCAP",
145 "XFRMA_TMPL",
146 "XFRMA_SA",
147 "XFRMA_POLICY",
148 "XFRMA_SEC_CTX",
149 "XFRMA_LTIME_VAL",
150 "XFRMA_REPLAY_VAL",
151 "XFRMA_REPLAY_THRESH",
152 "XFRMA_ETIMER_THRESH",
153 "XFRMA_SRCADDR",
154 "XFRMA_COADDR",
155 "XFRMA_LASTUSED",
156 "XFRMA_POLICY_TYPE",
157 "XFRMA_MIGRATE",
158 "XFRMA_ALG_AEAD",
159 "XFRMA_KMADDRESS"
160 );
161
162 #define END_OF_LIST -1
163
164 /**
165 * Algorithms for encryption
166 */
167 static kernel_algorithm_t encryption_algs[] = {
168 /* {ENCR_DES_IV64, "***" }, */
169 {ENCR_DES, "des" },
170 {ENCR_3DES, "des3_ede" },
171 /* {ENCR_RC5, "***" }, */
172 /* {ENCR_IDEA, "***" }, */
173 {ENCR_CAST, "cast128" },
174 {ENCR_BLOWFISH, "blowfish" },
175 /* {ENCR_3IDEA, "***" }, */
176 /* {ENCR_DES_IV32, "***" }, */
177 {ENCR_NULL, "cipher_null" },
178 {ENCR_AES_CBC, "aes" },
179 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
180 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
181 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
182 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
183 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
184 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
185 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
186 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
187 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
188 /* {ENCR_CAMELLIA_CTR, "***" }, */
189 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
190 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
191 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
192 {ENCR_SERPENT_CBC, "serpent" },
193 {ENCR_TWOFISH_CBC, "twofish" },
194 {END_OF_LIST, NULL }
195 };
196
197 /**
198 * Algorithms for integrity protection
199 */
200 static kernel_algorithm_t integrity_algs[] = {
201 {AUTH_HMAC_MD5_96, "md5" },
202 {AUTH_HMAC_SHA1_96, "sha1" },
203 {AUTH_HMAC_SHA2_256_96, "sha256" },
204 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
205 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
206 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
207 /* {AUTH_DES_MAC, "***" }, */
208 /* {AUTH_KPDK_MD5, "***" }, */
209 {AUTH_AES_XCBC_96, "xcbc(aes)" },
210 {END_OF_LIST, NULL }
211 };
212
213 /**
214 * Algorithms for IPComp
215 */
216 static kernel_algorithm_t compression_algs[] = {
217 /* {IPCOMP_OUI, "***" }, */
218 {IPCOMP_DEFLATE, "deflate" },
219 {IPCOMP_LZS, "lzs" },
220 {IPCOMP_LZJH, "lzjh" },
221 {END_OF_LIST, NULL }
222 };
223
224 /**
225  * Look up the kernel crypto API name for an IKEv2 algorithm identifier
226 */
227 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
228 {
229 while (list->ikev2 != END_OF_LIST)
230 {
231 if (list->ikev2 == ikev2)
232 {
233 return list->name;
234 }
235 list++;
236 }
237 return NULL;
238 }
239
240 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
241
242 /**
243 * Private variables and functions of kernel_netlink class.
244 */
245 struct private_kernel_netlink_ipsec_t {
246 /**
247 * Public part of the kernel_netlink_t object
248 */
249 kernel_netlink_ipsec_t public;
250
251 /**
252 * Mutex to lock access to installed policies
253 */
254 mutex_t *mutex;
255
256 /**
257 * Hash table of installed policies (policy_entry_t)
258 */
259 hashtable_t *policies;
260
261 /**
262 * Hash table of IPsec SAs using policies (ipsec_sa_t)
263 */
264 hashtable_t *sas;
265
266 /**
267 * Job receiving netlink events
268 */
269 callback_job_t *job;
270
271 /**
272 * Netlink xfrm socket (IPsec)
273 */
274 netlink_socket_t *socket_xfrm;
275
276 /**
277 * Netlink xfrm socket to receive acquire and expire events
278 */
279 int socket_xfrm_events;
280
281 /**
282 * Whether to install routes along policies
283 */
284 bool install_routes;
285
286 /**
287 * Size of the replay window, in packets
288 */
289 u_int32_t replay_window;
290
291 /**
292 * Size of the replay window bitmap, in bytes
293 */
294 u_int32_t replay_bmp;
295 };
296
297 typedef struct route_entry_t route_entry_t;
298
299 /**
300 * Installed routing entry
301 */
302 struct route_entry_t {
303 /** Name of the interface the route is bound to */
304 char *if_name;
305
306 /** Source ip of the route */
307 host_t *src_ip;
308
309 /** Gateway for this route */
310 host_t *gateway;
311
312 /** Destination net */
313 chunk_t dst_net;
314
315 /** Destination net prefixlen */
316 u_int8_t prefixlen;
317 };
318
319 /**
320 * Destroy a route_entry_t object
321 */
322 static void route_entry_destroy(route_entry_t *this)
323 {
324 free(this->if_name);
325 this->src_ip->destroy(this->src_ip);
326 DESTROY_IF(this->gateway);
327 chunk_free(&this->dst_net);
328 free(this);
329 }
330
331 /**
332 * Compare two route_entry_t objects
333 */
334 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
335 {
336 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
337 a->src_ip->equals(a->src_ip, b->src_ip) &&
338 a->gateway->equals(a->gateway, b->gateway) &&
339 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
340 }
341
342 typedef struct ipsec_sa_t ipsec_sa_t;
343
344 /**
345 * IPsec SA assigned to a policy.
346 */
347 struct ipsec_sa_t {
348 /** Source address of this SA */
349 host_t *src;
350
351 /** Destination address of this SA */
352 host_t *dst;
353
354 /** Optional mark */
355 mark_t mark;
356
357 /** Description of this SA */
358 ipsec_sa_cfg_t cfg;
359
360 /** Reference count for this SA */
361 refcount_t refcount;
362 };
363
364 /**
365 * Hash function for ipsec_sa_t objects
366 */
367 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
368 {
369 return chunk_hash_inc(sa->src->get_address(sa->src),
370 chunk_hash_inc(sa->dst->get_address(sa->dst),
371 chunk_hash_inc(chunk_from_thing(sa->mark),
372 chunk_hash(chunk_from_thing(sa->cfg)))));
373 }
374
375 /**
376 * Equality function for ipsec_sa_t objects
377 */
378 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
379 {
380 return sa->src->ip_equals(sa->src, other_sa->src) &&
381 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
382 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
383 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
384 }
385
386 /**
387 * Allocate or reference an IPsec SA object
388 */
389 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
390 host_t *src, host_t *dst, mark_t mark,
391 ipsec_sa_cfg_t *cfg)
392 {
393 ipsec_sa_t *sa, *found;
394 INIT(sa,
395 .src = src,
396 .dst = dst,
397 .mark = mark,
398 .cfg = *cfg,
399 );
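	/* reuse an existing entry with the same src/dst/mark/cfg; the hosts of
	 * the temporary entry above are only borrowed and get cloned below if
	 * the entry is actually kept */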
400 found = this->sas->get(this->sas, sa);
401 if (!found)
402 {
403 sa->src = src->clone(src);
404 sa->dst = dst->clone(dst);
405 this->sas->put(this->sas, sa, sa);
406 }
407 else
408 {
409 free(sa);
410 sa = found;
411 }
412 ref_get(&sa->refcount);
413 return sa;
414 }
415
416 /**
417 * Release and destroy an IPsec SA object
418 */
419 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
420 ipsec_sa_t *sa)
421 {
422 if (ref_put(&sa->refcount))
423 {
424 this->sas->remove(this->sas, sa);
425 DESTROY_IF(sa->src);
426 DESTROY_IF(sa->dst);
427 free(sa);
428 }
429 }
430
431 typedef struct policy_sa_t policy_sa_t;
432 typedef struct policy_sa_fwd_t policy_sa_fwd_t;
433
434 /**
435 * Mapping between a policy and an IPsec SA.
436 */
437 struct policy_sa_t {
438 /** Priority assigned to the policy when installed with this SA */
439 u_int32_t priority;
440
441 /** Type of the policy */
442 policy_type_t type;
443
444 /** Assigned SA */
445 ipsec_sa_t *sa;
446 };
447
448 /**
449 * For forward policies we also cache the traffic selectors in order to install
450 * the route.
451 */
452 struct policy_sa_fwd_t {
453 /** Generic interface */
454 policy_sa_t generic;
455
456 /** Source traffic selector of this policy */
457 traffic_selector_t *src_ts;
458
459 /** Destination traffic selector of this policy */
460 traffic_selector_t *dst_ts;
461 };
462
463 /**
464 * Create a policy_sa(_fwd)_t object
465 */
466 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
467 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
468 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
469 ipsec_sa_cfg_t *cfg)
470 {
471 policy_sa_t *policy;
472
473 if (dir == POLICY_FWD)
474 {
475 policy_sa_fwd_t *fwd;
476 INIT(fwd,
477 .src_ts = src_ts->clone(src_ts),
478 .dst_ts = dst_ts->clone(dst_ts),
479 );
480 policy = &fwd->generic;
481 }
482 else
483 {
484 INIT(policy);
485 }
486 policy->type = type;
487 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
488 return policy;
489 }
490
491 /**
492 * Destroy a policy_sa(_fwd)_t object
493 */
494 static void policy_sa_destroy(private_kernel_netlink_ipsec_t *this,
495 policy_dir_t dir, policy_sa_t *policy)
496 {
497 if (dir == POLICY_FWD)
498 {
499 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
500 fwd->src_ts->destroy(fwd->src_ts);
501 fwd->dst_ts->destroy(fwd->dst_ts);
502 }
503 ipsec_sa_destroy(this, policy->sa);
504 free(policy);
505 }
506
507 typedef struct policy_entry_t policy_entry_t;
508
509 /**
510 * Installed kernel policy.
511 */
512 struct policy_entry_t {
513
514 /** Direction of this policy: in, out, forward */
515 u_int8_t direction;
516
517 /** Parameters of installed policy */
518 struct xfrm_selector sel;
519
520 /** Optional mark */
521 u_int32_t mark;
522
523 /** Associated route installed for this policy */
524 route_entry_t *route;
525
526 /** List of SAs this policy is used by, ordered by priority */
527 linked_list_t *used_by;
528 };
529
530 /**
531 * Destroy a policy_entry_t object
532 */
533 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
534 policy_entry_t *policy)
535 {
536 if (policy->route)
537 {
538 route_entry_destroy(policy->route);
539 }
540 if (policy->used_by)
541 {
542 enumerator_t *enumerator;
543 policy_sa_t *sa;
544 enumerator = policy->used_by->create_enumerator(policy->used_by);
545 while (enumerator->enumerate(enumerator, (void**)&sa))
546 {
547 policy_sa_destroy(this, policy->direction, sa);
548 }
549 enumerator->destroy(enumerator);
550 policy->used_by->destroy(policy->used_by);
551 }
552 free(policy);
553 }
554
555 /**
556 * Hash function for policy_entry_t objects
557 */
558 static u_int policy_hash(policy_entry_t *key)
559 {
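	/* the u_int32_t mark member directly follows the selector in
	 * policy_entry_t, so hashing sizeof(sel) + sizeof(u_int32_t) bytes
	 * starting at &key->sel covers both */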
560 chunk_t chunk = chunk_create((void*)&key->sel,
561 sizeof(struct xfrm_selector) + sizeof(u_int32_t));
562 return chunk_hash(chunk);
563 }
564
565 /**
566 * Equality function for policy_entry_t objects
567 */
568 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
569 {
570 return memeq(&key->sel, &other_key->sel,
571 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
572 key->direction == other_key->direction;
573 }
574
575 /**
576 * Convert the general ipsec mode to the one defined in xfrm.h
577 */
578 static u_int8_t mode2kernel(ipsec_mode_t mode)
579 {
580 switch (mode)
581 {
582 case MODE_TRANSPORT:
583 return XFRM_MODE_TRANSPORT;
584 case MODE_TUNNEL:
585 return XFRM_MODE_TUNNEL;
586 case MODE_BEET:
587 return XFRM_MODE_BEET;
588 default:
589 return mode;
590 }
591 }
592
593 /**
594 * Convert a host_t to a struct xfrm_address
595 */
596 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
597 {
598 chunk_t chunk = host->get_address(host);
599 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
600 }
601
602 /**
603 * Convert a struct xfrm_address to a host_t
604 */
605 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
606 {
607 chunk_t chunk;
608
609 switch (family)
610 {
611 case AF_INET:
612 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
613 break;
614 case AF_INET6:
615 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
616 break;
617 default:
618 return NULL;
619 }
620 return host_create_from_chunk(family, chunk, ntohs(port));
621 }
622
623 /**
624 * Convert a traffic selector address range to subnet and its mask.
625 */
626 static void ts2subnet(traffic_selector_t* ts,
627 xfrm_address_t *net, u_int8_t *mask)
628 {
629 host_t *net_host;
630 chunk_t net_chunk;
631
632 ts->to_subnet(ts, &net_host, mask);
633 net_chunk = net_host->get_address(net_host);
634 memcpy(net, net_chunk.ptr, net_chunk.len);
635 net_host->destroy(net_host);
636 }
637
638 /**
639 * Convert a traffic selector port range to port/portmask
640 */
641 static void ts2ports(traffic_selector_t* ts,
642 u_int16_t *port, u_int16_t *mask)
643 {
644 /* Linux does not seem to accept complex portmasks. Only
645 	 * any or a specific port is allowed. We set it to any if we have a
646 	 * port range, or to the specific port if the range contains one port only.
647 */
648 u_int16_t from, to;
649
650 from = ts->get_from_port(ts);
651 to = ts->get_to_port(ts);
652
653 if (from == to)
654 {
655 *port = htons(from);
656 *mask = ~0;
657 }
658 else
659 {
660 *port = 0;
661 *mask = 0;
662 }
663 }
664
665 /**
666 * Convert a pair of traffic_selectors to an xfrm_selector
667 */
668 static struct xfrm_selector ts2selector(traffic_selector_t *src,
669 traffic_selector_t *dst)
670 {
671 struct xfrm_selector sel;
672
673 memset(&sel, 0, sizeof(sel));
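	/* the family is derived from the source TS; src and dst are expected
	 * to be of the same family */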
674 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
675 	/* src or dest proto may be "any" (0), use the more restrictive one */
676 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
677 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
678 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
679 ts2ports(dst, &sel.dport, &sel.dport_mask);
680 ts2ports(src, &sel.sport, &sel.sport_mask);
681 sel.ifindex = 0;
682 sel.user = 0;
683
684 return sel;
685 }
686
687 /**
688 * Convert an xfrm_selector to a src|dst traffic_selector
689 */
690 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
691 {
692 u_char *addr;
693 u_int8_t prefixlen;
694 u_int16_t port = 0;
695 host_t *host = NULL;
696
697 if (src)
698 {
699 addr = (u_char*)&sel->saddr;
700 prefixlen = sel->prefixlen_s;
701 if (sel->sport_mask)
702 {
703 port = htons(sel->sport);
704 }
705 }
706 else
707 {
708 addr = (u_char*)&sel->daddr;
709 prefixlen = sel->prefixlen_d;
710 if (sel->dport_mask)
711 {
712 port = htons(sel->dport);
713 }
714 }
715
716 /* The Linux 2.6 kernel does not set the selector's family field,
717 * so as a kludge we additionally test the prefix length.
718 */
719 if (sel->family == AF_INET || sel->prefixlen_s == 32)
720 {
721 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
722 }
723 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
724 {
725 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
726 }
727
728 if (host)
729 {
730 return traffic_selector_create_from_subnet(host, prefixlen,
731 sel->proto, port);
732 }
733 return NULL;
734 }
735
736 /**
737 * Process a XFRM_MSG_ACQUIRE from kernel
738 */
739 static void process_acquire(private_kernel_netlink_ipsec_t *this,
740 struct nlmsghdr *hdr)
741 {
742 struct xfrm_user_acquire *acquire;
743 struct rtattr *rta;
744 size_t rtasize;
745 traffic_selector_t *src_ts, *dst_ts;
746 u_int32_t reqid = 0;
747 int proto = 0;
748
749 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
750 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
751 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
752
753 DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");
754
755 while (RTA_OK(rta, rtasize))
756 {
757 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
758
759 if (rta->rta_type == XFRMA_TMPL)
760 {
761 struct xfrm_user_tmpl* tmpl;
762 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
763 reqid = tmpl->reqid;
764 proto = tmpl->id.proto;
765 }
766 rta = RTA_NEXT(rta, rtasize);
767 }
768 switch (proto)
769 {
770 case 0:
771 case IPPROTO_ESP:
772 case IPPROTO_AH:
773 break;
774 default:
775 /* acquire for AH/ESP only, not for IPCOMP */
776 return;
777 }
778 src_ts = selector2ts(&acquire->sel, TRUE);
779 dst_ts = selector2ts(&acquire->sel, FALSE);
780
781 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
782 dst_ts);
783 }
784
785 /**
786 * Process a XFRM_MSG_EXPIRE from kernel
787 */
788 static void process_expire(private_kernel_netlink_ipsec_t *this,
789 struct nlmsghdr *hdr)
790 {
791 struct xfrm_user_expire *expire;
792 u_int32_t spi, reqid;
793 u_int8_t protocol;
794
795 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
796 protocol = expire->state.id.proto;
797 spi = expire->state.id.spi;
798 reqid = expire->state.reqid;
799
800 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
801
802 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
803 {
804 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
805 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
806 return;
807 }
808
809 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
810 spi, expire->hard != 0);
811 }
812
813 /**
814 * Process a XFRM_MSG_MIGRATE from kernel
815 */
816 static void process_migrate(private_kernel_netlink_ipsec_t *this,
817 struct nlmsghdr *hdr)
818 {
819 struct xfrm_userpolicy_id *policy_id;
820 struct rtattr *rta;
821 size_t rtasize;
822 traffic_selector_t *src_ts, *dst_ts;
823 host_t *local = NULL, *remote = NULL;
824 host_t *old_src = NULL, *old_dst = NULL;
825 host_t *new_src = NULL, *new_dst = NULL;
826 u_int32_t reqid = 0;
827 policy_dir_t dir;
828
829 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
830 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
831 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
832
833 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
834
835 src_ts = selector2ts(&policy_id->sel, TRUE);
836 dst_ts = selector2ts(&policy_id->sel, FALSE);
837 dir = (policy_dir_t)policy_id->dir;
838
839 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
840
841 while (RTA_OK(rta, rtasize))
842 {
843 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
844 if (rta->rta_type == XFRMA_KMADDRESS)
845 {
846 struct xfrm_user_kmaddress *kmaddress;
847
848 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
849 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
850 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
851 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
852 }
853 else if (rta->rta_type == XFRMA_MIGRATE)
854 {
855 struct xfrm_user_migrate *migrate;
856
857 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
858 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
859 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
860 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
861 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
862 reqid = migrate->reqid;
863 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
864 old_src, old_dst, new_src, new_dst, reqid);
865 DESTROY_IF(old_src);
866 DESTROY_IF(old_dst);
867 DESTROY_IF(new_src);
868 DESTROY_IF(new_dst);
869 }
870 rta = RTA_NEXT(rta, rtasize);
871 }
872
873 if (src_ts && dst_ts && local && remote)
874 {
875 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
876 src_ts, dst_ts, dir, local, remote);
877 }
878 else
879 {
880 DESTROY_IF(src_ts);
881 DESTROY_IF(dst_ts);
882 DESTROY_IF(local);
883 DESTROY_IF(remote);
884 }
885 }
886
887 /**
888 * Process a XFRM_MSG_MAPPING from kernel
889 */
890 static void process_mapping(private_kernel_netlink_ipsec_t *this,
891 struct nlmsghdr *hdr)
892 {
893 struct xfrm_user_mapping *mapping;
894 u_int32_t spi, reqid;
895
896 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
897 spi = mapping->id.spi;
898 reqid = mapping->reqid;
899
900 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
901
902 if (mapping->id.proto == IPPROTO_ESP)
903 {
904 host_t *host;
905 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
906 mapping->new_sport);
907 if (host)
908 {
909 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
910 spi, host);
911 }
912 }
913 }
914
915 /**
916 * Receives events from kernel
917 */
918 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
919 {
920 char response[1024];
921 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
922 struct sockaddr_nl addr;
923 socklen_t addr_len = sizeof(addr);
924 int len;
925 bool oldstate;
926
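	/* enable thread cancellation while blocking in recvfrom() */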
927 oldstate = thread_cancelability(TRUE);
928 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
929 (struct sockaddr*)&addr, &addr_len);
930 thread_cancelability(oldstate);
931
932 if (len < 0)
933 {
934 switch (errno)
935 {
936 case EINTR:
937 /* interrupted, try again */
938 return JOB_REQUEUE_DIRECT;
939 case EAGAIN:
940 /* no data ready, select again */
941 return JOB_REQUEUE_DIRECT;
942 default:
943 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
944 sleep(1);
945 return JOB_REQUEUE_FAIR;
946 }
947 }
948
949 if (addr.nl_pid != 0)
950 	{	/* not from the kernel, not interested, try another one */
951 return JOB_REQUEUE_DIRECT;
952 }
953
954 while (NLMSG_OK(hdr, len))
955 {
956 switch (hdr->nlmsg_type)
957 {
958 case XFRM_MSG_ACQUIRE:
959 process_acquire(this, hdr);
960 break;
961 case XFRM_MSG_EXPIRE:
962 process_expire(this, hdr);
963 break;
964 case XFRM_MSG_MIGRATE:
965 process_migrate(this, hdr);
966 break;
967 case XFRM_MSG_MAPPING:
968 process_mapping(this, hdr);
969 break;
970 default:
971 DBG1(DBG_KNL, "received unknown event from xfrm event "
972 "socket: %d", hdr->nlmsg_type);
973 break;
974 }
975 hdr = NLMSG_NEXT(hdr, len);
976 }
977 return JOB_REQUEUE_DIRECT;
978 }
979
980 /**
981 * Get an SPI for a specific protocol from the kernel.
982 */
983 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
984 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
985 u_int32_t reqid, u_int32_t *spi)
986 {
987 netlink_buf_t request;
988 struct nlmsghdr *hdr, *out;
989 struct xfrm_userspi_info *userspi;
990 u_int32_t received_spi = 0;
991 size_t len;
992
993 memset(&request, 0, sizeof(request));
994
995 hdr = (struct nlmsghdr*)request;
996 hdr->nlmsg_flags = NLM_F_REQUEST;
997 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
998 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
999
1000 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
1001 host2xfrm(src, &userspi->info.saddr);
1002 host2xfrm(dst, &userspi->info.id.daddr);
1003 userspi->info.id.proto = proto;
1004 userspi->info.mode = XFRM_MODE_TUNNEL;
1005 userspi->info.reqid = reqid;
1006 userspi->info.family = src->get_family(src);
1007 userspi->min = min;
1008 userspi->max = max;
1009
1010 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1011 {
1012 hdr = out;
1013 while (NLMSG_OK(hdr, len))
1014 {
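			/* parse the response: take the SPI from XFRM_MSG_NEWSA, log an
			 * NLMSG_ERROR, and skip other messages until NLMSG_DONE */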
1015 switch (hdr->nlmsg_type)
1016 {
1017 case XFRM_MSG_NEWSA:
1018 {
1019 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
1020 received_spi = usersa->id.spi;
1021 break;
1022 }
1023 case NLMSG_ERROR:
1024 {
1025 struct nlmsgerr *err = NLMSG_DATA(hdr);
1026 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
1027 strerror(-err->error), -err->error);
1028 break;
1029 }
1030 default:
1031 hdr = NLMSG_NEXT(hdr, len);
1032 continue;
1033 case NLMSG_DONE:
1034 break;
1035 }
1036 break;
1037 }
1038 free(out);
1039 }
1040
1041 if (received_spi == 0)
1042 {
1043 return FAILED;
1044 }
1045
1046 *spi = received_spi;
1047 return SUCCESS;
1048 }
1049
1050 METHOD(kernel_ipsec_t, get_spi, status_t,
1051 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1052 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1053 {
1054 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1055
1056 if (get_spi_internal(this, src, dst, protocol,
1057 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1058 {
1059 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1060 return FAILED;
1061 }
1062
1063 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1064 return SUCCESS;
1065 }
1066
1067 METHOD(kernel_ipsec_t, get_cpi, status_t,
1068 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1069 u_int32_t reqid, u_int16_t *cpi)
1070 {
1071 u_int32_t received_spi = 0;
1072
1073 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1074
1075 if (get_spi_internal(this, src, dst, IPPROTO_COMP,
1076 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1077 {
1078 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1079 return FAILED;
1080 }
1081
1082 *cpi = htons((u_int16_t)ntohl(received_spi));
1083
1084 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1085 return SUCCESS;
1086 }
1087
1088 METHOD(kernel_ipsec_t, add_sa, status_t,
1089 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1090 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
1091 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
1092 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
1093 u_int16_t cpi, bool encap, bool esn, bool inbound,
1094 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
1095 {
1096 netlink_buf_t request;
1097 char *alg_name;
1098 struct nlmsghdr *hdr;
1099 struct xfrm_usersa_info *sa;
1100 u_int16_t icv_size = 64;
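	/* ICV size in bits; the fall-through cases below add 32 bits per step */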
1101 status_t status = FAILED;
1102
1103 	/* if IPComp is used, we install an additional IPComp SA. if the CPI is 0,
1104 	 * we are already in the recursive call below that installs this SA */
1105 if (ipcomp != IPCOMP_NONE && cpi != 0)
1106 {
1107 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
1108 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
1109 tfc, &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED,
1110 chunk_empty, mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
1111 ipcomp = IPCOMP_NONE;
1112 /* use transport mode ESP SA, IPComp uses tunnel mode */
1113 mode = MODE_TRANSPORT;
1114 }
1115
1116 memset(&request, 0, sizeof(request));
1117
1118 if (mark.value)
1119 {
1120 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} (mark "
1121 "%u/0x%8x)", ntohl(spi), reqid, mark.value, mark.mask);
1122 }
1123 else
1124 {
1125 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u}",
1126 ntohl(spi), reqid);
1127 }
1128 hdr = (struct nlmsghdr*)request;
1129 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1130 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
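	/* an inbound SA has already been allocated as a larval SA by the kernel
	 * via XFRM_MSG_ALLOCSPI (see get_spi_internal), so it is updated instead
	 * of added */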
1131 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1132
1133 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1134 host2xfrm(src, &sa->saddr);
1135 host2xfrm(dst, &sa->id.daddr);
1136 sa->id.spi = spi;
1137 sa->id.proto = protocol;
1138 sa->family = src->get_family(src);
1139 sa->mode = mode2kernel(mode);
1140 switch (mode)
1141 {
1142 case MODE_TUNNEL:
1143 sa->flags |= XFRM_STATE_AF_UNSPEC;
1144 break;
1145 case MODE_BEET:
1146 case MODE_TRANSPORT:
1147 if(src_ts && dst_ts)
1148 {
1149 sa->sel = ts2selector(src_ts, dst_ts);
1150 }
1151 break;
1152 default:
1153 break;
1154 }
1155
1156 sa->reqid = reqid;
1157 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1158 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1159 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1160 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1161 /* we use lifetimes since added, not since used */
1162 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1163 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1164 sa->lft.soft_use_expires_seconds = 0;
1165 sa->lft.hard_use_expires_seconds = 0;
1166
1167 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
1168
1169 switch (enc_alg)
1170 {
1171 case ENCR_UNDEFINED:
1172 /* no encryption */
1173 break;
1174 case ENCR_AES_CCM_ICV16:
1175 case ENCR_AES_GCM_ICV16:
1176 case ENCR_NULL_AUTH_AES_GMAC:
1177 case ENCR_CAMELLIA_CCM_ICV16:
1178 icv_size += 32;
1179 /* FALL */
1180 case ENCR_AES_CCM_ICV12:
1181 case ENCR_AES_GCM_ICV12:
1182 case ENCR_CAMELLIA_CCM_ICV12:
1183 icv_size += 32;
1184 /* FALL */
1185 case ENCR_AES_CCM_ICV8:
1186 case ENCR_AES_GCM_ICV8:
1187 case ENCR_CAMELLIA_CCM_ICV8:
1188 {
1189 struct xfrm_algo_aead *algo;
1190
1191 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1192 if (alg_name == NULL)
1193 {
1194 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1195 encryption_algorithm_names, enc_alg);
1196 goto failed;
1197 }
1198 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1199 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1200
1201 rthdr->rta_type = XFRMA_ALG_AEAD;
1202 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) +
1203 enc_key.len);
1204 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1205 if (hdr->nlmsg_len > sizeof(request))
1206 {
1207 goto failed;
1208 }
1209
1210 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1211 algo->alg_key_len = enc_key.len * 8;
1212 algo->alg_icv_len = icv_size;
1213 strcpy(algo->alg_name, alg_name);
1214 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1215
1216 rthdr = XFRM_RTA_NEXT(rthdr);
1217 break;
1218 }
1219 default:
1220 {
1221 struct xfrm_algo *algo;
1222
1223 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1224 if (alg_name == NULL)
1225 {
1226 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1227 encryption_algorithm_names, enc_alg);
1228 goto failed;
1229 }
1230 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1231 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1232
1233 rthdr->rta_type = XFRMA_ALG_CRYPT;
1234 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1235 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1236 if (hdr->nlmsg_len > sizeof(request))
1237 {
1238 goto failed;
1239 }
1240
1241 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1242 algo->alg_key_len = enc_key.len * 8;
1243 strcpy(algo->alg_name, alg_name);
1244 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1245
1246 rthdr = XFRM_RTA_NEXT(rthdr);
1247 }
1248 }
1249
1250 if (int_alg != AUTH_UNDEFINED)
1251 {
1252 alg_name = lookup_algorithm(integrity_algs, int_alg);
1253 if (alg_name == NULL)
1254 {
1255 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1256 integrity_algorithm_names, int_alg);
1257 goto failed;
1258 }
1259 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1260 integrity_algorithm_names, int_alg, int_key.len * 8);
1261
1262 if (int_alg == AUTH_HMAC_SHA2_256_128)
1263 {
1264 struct xfrm_algo_auth* algo;
1265
1266 /* the kernel uses SHA256 with 96 bit truncation by default,
1267 			 * use the specified truncation size supported by newer kernels */
1268 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1269 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) +
1270 int_key.len);
1271
1272 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1273 if (hdr->nlmsg_len > sizeof(request))
1274 {
1275 goto failed;
1276 }
1277
1278 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1279 algo->alg_key_len = int_key.len * 8;
1280 algo->alg_trunc_len = 128;
1281 strcpy(algo->alg_name, alg_name);
1282 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1283 }
1284 else
1285 {
1286 struct xfrm_algo* algo;
1287
1288 rthdr->rta_type = XFRMA_ALG_AUTH;
1289 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1290
1291 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1292 if (hdr->nlmsg_len > sizeof(request))
1293 {
1294 goto failed;
1295 }
1296
1297 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1298 algo->alg_key_len = int_key.len * 8;
1299 strcpy(algo->alg_name, alg_name);
1300 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1301 }
1302 rthdr = XFRM_RTA_NEXT(rthdr);
1303 }
1304
1305 if (ipcomp != IPCOMP_NONE)
1306 {
1307 rthdr->rta_type = XFRMA_ALG_COMP;
1308 alg_name = lookup_algorithm(compression_algs, ipcomp);
1309 if (alg_name == NULL)
1310 {
1311 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1312 ipcomp_transform_names, ipcomp);
1313 goto failed;
1314 }
1315 DBG2(DBG_KNL, " using compression algorithm %N",
1316 ipcomp_transform_names, ipcomp);
1317
1318 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1319 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1320 if (hdr->nlmsg_len > sizeof(request))
1321 {
1322 goto failed;
1323 }
1324
1325 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1326 algo->alg_key_len = 0;
1327 strcpy(algo->alg_name, alg_name);
1328
1329 rthdr = XFRM_RTA_NEXT(rthdr);
1330 }
1331
1332 if (encap)
1333 {
1334 struct xfrm_encap_tmpl *tmpl;
1335
1336 rthdr->rta_type = XFRMA_ENCAP;
1337 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1338
1339 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1340 if (hdr->nlmsg_len > sizeof(request))
1341 {
1342 goto failed;
1343 }
1344
1345 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
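		/* UDP-encapsulate ESP (RFC 3948) between the given source and
		 * destination ports */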
1346 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1347 tmpl->encap_sport = htons(src->get_port(src));
1348 tmpl->encap_dport = htons(dst->get_port(dst));
1349 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1350 /* encap_oa could probably be derived from the
1351 * traffic selectors [rfc4306, p39]. In the netlink kernel
1352 * implementation pluto does the same as we do here but it uses
1353 * encap_oa in the pfkey implementation.
1354 * BUT as /usr/src/linux/net/key/af_key.c indicates the kernel ignores
1355 * it anyway
1356 * -> does that mean that NAT-T encap doesn't work in transport mode?
1357 * No. The reason the kernel ignores NAT-OA is that it recomputes
1358 * (or, rather, just ignores) the checksum. If packets pass the IPsec
1359 * checks it marks them "checksum ok" so OA isn't needed. */
1360 rthdr = XFRM_RTA_NEXT(rthdr);
1361 }
1362
1363 if (mark.value)
1364 {
1365 struct xfrm_mark *mrk;
1366
1367 rthdr->rta_type = XFRMA_MARK;
1368 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1369
1370 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1371 if (hdr->nlmsg_len > sizeof(request))
1372 {
1373 goto failed;
1374 }
1375
1376 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1377 mrk->v = mark.value;
1378 mrk->m = mark.mask;
1379 rthdr = XFRM_RTA_NEXT(rthdr);
1380 }
1381
1382 if (tfc)
1383 {
1384 u_int32_t *tfcpad;
1385
1386 rthdr->rta_type = XFRMA_TFCPAD;
1387 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1388
1389 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1390 if (hdr->nlmsg_len > sizeof(request))
1391 {
1392 goto failed;
1393 }
1394
1395 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1396 *tfcpad = tfc;
1397 rthdr = XFRM_RTA_NEXT(rthdr);
1398 }
1399
1400 if (protocol != IPPROTO_COMP)
1401 {
1402 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1403 {
1404 /* for ESN or larger replay windows we need the new
1405 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1406 struct xfrm_replay_state_esn *replay;
1407
1408 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1409 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1410 (this->replay_window + 7) / 8);
1411
1412 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1413 if (hdr->nlmsg_len > sizeof(request))
1414 {
1415 goto failed;
1416 }
1417
1418 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1419 			/* bmp_len contains the number of __u32's */
1420 replay->bmp_len = this->replay_bmp;
1421 replay->replay_window = this->replay_window;
1422
1423 rthdr = XFRM_RTA_NEXT(rthdr);
1424 if (esn)
1425 {
1426 sa->flags |= XFRM_STATE_ESN;
1427 }
1428 }
1429 else
1430 {
1431 sa->replay_window = DEFAULT_REPLAY_WINDOW;
1432 }
1433 }
1434
1435 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1436 {
1437 if (mark.value)
1438 {
1439 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1440 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1441 }
1442 else
1443 {
1444 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1445 }
1446 goto failed;
1447 }
1448
1449 status = SUCCESS;
1450
1451 failed:
1452 memwipe(request, sizeof(request));
1453 return status;
1454 }
1455
1456 /**
1457 * Get the ESN replay state (i.e. sequence numbers) of an SA.
1458 *
1459  * Allocates the replay state structure received from the kernel into one of the output parameters.
1460 */
1461 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1462 u_int32_t spi, u_int8_t protocol, host_t *dst,
1463 struct xfrm_replay_state_esn **replay_esn,
1464 struct xfrm_replay_state **replay)
1465 {
1466 netlink_buf_t request;
1467 struct nlmsghdr *hdr, *out = NULL;
1468 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1469 size_t len;
1470 struct rtattr *rta;
1471 size_t rtasize;
1472
1473 memset(&request, 0, sizeof(request));
1474
1475 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1476 ntohl(spi));
1477
1478 hdr = (struct nlmsghdr*)request;
1479 hdr->nlmsg_flags = NLM_F_REQUEST;
1480 hdr->nlmsg_type = XFRM_MSG_GETAE;
1481 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1482
1483 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
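	/* only request the current replay state */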
1484 aevent_id->flags = XFRM_AE_RVAL;
1485
1486 host2xfrm(dst, &aevent_id->sa_id.daddr);
1487 aevent_id->sa_id.spi = spi;
1488 aevent_id->sa_id.proto = protocol;
1489 aevent_id->sa_id.family = dst->get_family(dst);
1490
1491 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1492 {
1493 hdr = out;
1494 while (NLMSG_OK(hdr, len))
1495 {
1496 switch (hdr->nlmsg_type)
1497 {
1498 case XFRM_MSG_NEWAE:
1499 {
1500 out_aevent = NLMSG_DATA(hdr);
1501 break;
1502 }
1503 case NLMSG_ERROR:
1504 {
1505 struct nlmsgerr *err = NLMSG_DATA(hdr);
1506 DBG1(DBG_KNL, "querying replay state from SAD entry "
1507 "failed: %s (%d)", strerror(-err->error),
1508 -err->error);
1509 break;
1510 }
1511 default:
1512 hdr = NLMSG_NEXT(hdr, len);
1513 continue;
1514 case NLMSG_DONE:
1515 break;
1516 }
1517 break;
1518 }
1519 }
1520
1521 if (out_aevent)
1522 {
1523 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1524 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1525 while (RTA_OK(rta, rtasize))
1526 {
1527 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1528 RTA_PAYLOAD(rta) == sizeof(**replay))
1529 {
1530 *replay = malloc(RTA_PAYLOAD(rta));
1531 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1532 break;
1533 }
1534 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1535 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1536 {
1537 *replay_esn = malloc(RTA_PAYLOAD(rta));
1538 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1539 break;
1540 }
1541 rta = RTA_NEXT(rta, rtasize);
1542 }
1543 }
1544 free(out);
1545 }
1546
1547 METHOD(kernel_ipsec_t, query_sa, status_t,
1548 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1549 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1550 {
1551 netlink_buf_t request;
1552 struct nlmsghdr *out = NULL, *hdr;
1553 struct xfrm_usersa_id *sa_id;
1554 struct xfrm_usersa_info *sa = NULL;
1555 status_t status = FAILED;
1556 size_t len;
1557
1558 memset(&request, 0, sizeof(request));
1559
1560 if (mark.value)
1561 {
1562 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%8x)",
1563 ntohl(spi), mark.value, mark.mask);
1564 }
1565 else
1566 {
1567 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x", ntohl(spi));
1568 }
1569 hdr = (struct nlmsghdr*)request;
1570 hdr->nlmsg_flags = NLM_F_REQUEST;
1571 hdr->nlmsg_type = XFRM_MSG_GETSA;
1572 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1573
1574 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1575 host2xfrm(dst, &sa_id->daddr);
1576 sa_id->spi = spi;
1577 sa_id->proto = protocol;
1578 sa_id->family = dst->get_family(dst);
1579
1580 if (mark.value)
1581 {
1582 struct xfrm_mark *mrk;
1583 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1584
1585 rthdr->rta_type = XFRMA_MARK;
1586 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1587 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1588 if (hdr->nlmsg_len > sizeof(request))
1589 {
1590 return FAILED;
1591 }
1592
1593 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1594 mrk->v = mark.value;
1595 mrk->m = mark.mask;
1596 }
1597
1598 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1599 {
1600 hdr = out;
1601 while (NLMSG_OK(hdr, len))
1602 {
1603 switch (hdr->nlmsg_type)
1604 {
1605 case XFRM_MSG_NEWSA:
1606 {
1607 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1608 break;
1609 }
1610 case NLMSG_ERROR:
1611 {
1612 struct nlmsgerr *err = NLMSG_DATA(hdr);
1613
1614 if (mark.value)
1615 {
1616 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1617 "(mark %u/0x%8x) failed: %s (%d)",
1618 ntohl(spi), mark.value, mark.mask,
1619 strerror(-err->error), -err->error);
1620 }
1621 else
1622 {
1623 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1624 "failed: %s (%d)", ntohl(spi),
1625 strerror(-err->error), -err->error);
1626 }
1627 break;
1628 }
1629 default:
1630 hdr = NLMSG_NEXT(hdr, len);
1631 continue;
1632 case NLMSG_DONE:
1633 break;
1634 }
1635 break;
1636 }
1637 }
1638
1639 if (sa == NULL)
1640 {
1641 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1642 }
1643 else
1644 {
1645 *bytes = sa->curlft.bytes;
1646 status = SUCCESS;
1647 }
1648 memwipe(out, len);
1649 free(out);
1650 return status;
1651 }
1652
1653 METHOD(kernel_ipsec_t, del_sa, status_t,
1654 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1655 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1656 {
1657 netlink_buf_t request;
1658 struct nlmsghdr *hdr;
1659 struct xfrm_usersa_id *sa_id;
1660
1661 /* if IPComp was used, we first delete the additional IPComp SA */
1662 if (cpi)
1663 {
1664 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1665 }
1666
1667 memset(&request, 0, sizeof(request));
1668
1669 if (mark.value)
1670 {
1671 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%8x)",
1672 ntohl(spi), mark.value, mark.mask);
1673 }
1674 else
1675 {
1676 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x", ntohl(spi));
1677 }
1678 hdr = (struct nlmsghdr*)request;
1679 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1680 hdr->nlmsg_type = XFRM_MSG_DELSA;
1681 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1682
1683 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1684 host2xfrm(dst, &sa_id->daddr);
1685 sa_id->spi = spi;
1686 sa_id->proto = protocol;
1687 sa_id->family = dst->get_family(dst);
1688
1689 if (mark.value)
1690 {
1691 struct xfrm_mark *mrk;
1692 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1693
1694 rthdr->rta_type = XFRMA_MARK;
1695 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1696 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1697 if (hdr->nlmsg_len > sizeof(request))
1698 {
1699 return FAILED;
1700 }
1701
1702 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1703 mrk->v = mark.value;
1704 mrk->m = mark.mask;
1705 }
1706
1707 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1708 {
1709 if (mark.value)
1710 {
1711 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1712 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1713 }
1714 else
1715 {
1716 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x",
1717 ntohl(spi));
1718 }
1719 return FAILED;
1720 }
1721 if (mark.value)
1722 {
1723 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%8x)",
1724 ntohl(spi), mark.value, mark.mask);
1725 }
1726 else
1727 {
1728 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x", ntohl(spi));
1729 }
1730 return SUCCESS;
1731 }
1732
1733 METHOD(kernel_ipsec_t, update_sa, status_t,
1734 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1735 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1736 bool old_encap, bool new_encap, mark_t mark)
1737 {
1738 netlink_buf_t request;
1739 u_char *pos;
1740 struct nlmsghdr *hdr, *out = NULL;
1741 struct xfrm_usersa_id *sa_id;
1742 struct xfrm_usersa_info *out_sa = NULL, *sa;
1743 size_t len;
1744 struct rtattr *rta;
1745 size_t rtasize;
1746 struct xfrm_encap_tmpl* tmpl = NULL;
1747 struct xfrm_replay_state *replay = NULL;
1748 struct xfrm_replay_state_esn *replay_esn = NULL;
1749 status_t status = FAILED;
1750
1751 /* if IPComp is used, we first update the IPComp SA */
1752 if (cpi)
1753 {
1754 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1755 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1756 }
1757
1758 memset(&request, 0, sizeof(request));
1759
1760 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1761
1762 /* query the existing SA first */
1763 hdr = (struct nlmsghdr*)request;
1764 hdr->nlmsg_flags = NLM_F_REQUEST;
1765 hdr->nlmsg_type = XFRM_MSG_GETSA;
1766 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1767
1768 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1769 host2xfrm(dst, &sa_id->daddr);
1770 sa_id->spi = spi;
1771 sa_id->proto = protocol;
1772 sa_id->family = dst->get_family(dst);
1773
1774 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1775 {
1776 hdr = out;
1777 while (NLMSG_OK(hdr, len))
1778 {
1779 switch (hdr->nlmsg_type)
1780 {
1781 case XFRM_MSG_NEWSA:
1782 {
1783 out_sa = NLMSG_DATA(hdr);
1784 break;
1785 }
1786 case NLMSG_ERROR:
1787 {
1788 struct nlmsgerr *err = NLMSG_DATA(hdr);
1789 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1790 strerror(-err->error), -err->error);
1791 break;
1792 }
1793 default:
1794 hdr = NLMSG_NEXT(hdr, len);
1795 continue;
1796 case NLMSG_DONE:
1797 break;
1798 }
1799 break;
1800 }
1801 }
1802 if (out_sa == NULL)
1803 {
1804 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1805 goto failed;
1806 }
1807
1808 get_replay_state(this, spi, protocol, dst, &replay_esn, &replay);
1809
1810 /* delete the old SA (without affecting the IPComp SA) */
1811 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1812 {
1813 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x",
1814 ntohl(spi));
1815 goto failed;
1816 }
1817
1818 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1819 ntohl(spi), src, dst, new_src, new_dst);
1820 /* copy over the SA from out to request */
1821 hdr = (struct nlmsghdr*)request;
1822 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1823 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1824 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1825 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1826 sa = NLMSG_DATA(hdr);
1827 sa->family = new_dst->get_family(new_dst);
1828
1829 if (!src->ip_equals(src, new_src))
1830 {
1831 host2xfrm(new_src, &sa->saddr);
1832 }
1833 if (!dst->ip_equals(dst, new_dst))
1834 {
1835 host2xfrm(new_dst, &sa->id.daddr);
1836 }
1837
1838 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1839 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1840 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1841 while(RTA_OK(rta, rtasize))
1842 {
1843 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1844 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1845 {
1846 if (rta->rta_type == XFRMA_ENCAP)
1847 { /* update encap tmpl */
1848 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1849 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1850 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1851 }
1852 memcpy(pos, rta, rta->rta_len);
1853 pos += RTA_ALIGN(rta->rta_len);
1854 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1855 }
1856 rta = RTA_NEXT(rta, rtasize);
1857 }
1858
1859 rta = (struct rtattr*)pos;
1860 if (tmpl == NULL && new_encap)
1861 { /* add tmpl if we are enabling it */
1862 rta->rta_type = XFRMA_ENCAP;
1863 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1864
1865 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1866 if (hdr->nlmsg_len > sizeof(request))
1867 {
1868 goto failed;
1869 }
1870
1871 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1872 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1873 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1874 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1875 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1876
1877 rta = XFRM_RTA_NEXT(rta);
1878 }
1879
1880 if (replay_esn)
1881 {
1882 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1883 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1884 this->replay_bmp);
1885
1886 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1887 if (hdr->nlmsg_len > sizeof(request))
1888 {
1889 goto failed;
1890 }
1891 memcpy(RTA_DATA(rta), replay_esn,
1892 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1893
1894 rta = XFRM_RTA_NEXT(rta);
1895 }
1896 else if (replay)
1897 {
1898 rta->rta_type = XFRMA_REPLAY_VAL;
1899 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1900
1901 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1902 if (hdr->nlmsg_len > sizeof(request))
1903 {
1904 goto failed;
1905 }
1906 		memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
1907
1908 rta = XFRM_RTA_NEXT(rta);
1909 }
1910 else
1911 {
1912 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1913 "with SPI %.8x", ntohl(spi));
1914 }
1915
1916 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1917 {
1918 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1919 goto failed;
1920 }
1921
1922 status = SUCCESS;
1923 failed:
1924 free(replay);
1925 free(replay_esn);
1926 memwipe(out, len);
1927 free(out);
1928
1929 return status;
1930 }
1931
1932 /**
1933 * Add or update a policy in the kernel.
1934 *
1935 * Note: The mutex has to be locked when entering this function.
1936 */
1937 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
1938 policy_entry_t *policy, policy_sa_t *mapping, bool update)
1939 {
1940 netlink_buf_t request;
1941 policy_entry_t clone;
1942 ipsec_sa_t *ipsec = mapping->sa;
1943 struct xfrm_userpolicy_info *policy_info;
1944 struct nlmsghdr *hdr;
1945 int i;
1946
1947 	/* clone the policy so we are able to look it up again later */
1948 memcpy(&clone, policy, sizeof(policy_entry_t));
1949
1950 memset(&request, 0, sizeof(request));
1951 hdr = (struct nlmsghdr*)request;
1952 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1953 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
1954 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
1955
1956 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
1957 policy_info->sel = policy->sel;
1958 policy_info->dir = policy->direction;
1959
1960 	/* use the priority assigned to the policy when it was mapped to this SA */
1961 policy_info->priority = mapping->priority;
1962 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
1963 : XFRM_POLICY_BLOCK;
1964 policy_info->share = XFRM_SHARE_ANY;
1965
1966 /* policies don't expire */
1967 policy_info->lft.soft_byte_limit = XFRM_INF;
1968 policy_info->lft.soft_packet_limit = XFRM_INF;
1969 policy_info->lft.hard_byte_limit = XFRM_INF;
1970 policy_info->lft.hard_packet_limit = XFRM_INF;
1971 policy_info->lft.soft_add_expires_seconds = 0;
1972 policy_info->lft.hard_add_expires_seconds = 0;
1973 policy_info->lft.soft_use_expires_seconds = 0;
1974 policy_info->lft.hard_use_expires_seconds = 0;
1975
1976 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
1977
1978 if (mapping->type == POLICY_IPSEC)
1979 {
1980 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
1981 struct {
1982 u_int8_t proto;
1983 bool use;
1984 } protos[] = {
1985 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
1986 { IPPROTO_ESP, ipsec->cfg.esp.use },
1987 { IPPROTO_AH, ipsec->cfg.ah.use },
1988 };
1989 ipsec_mode_t proto_mode = ipsec->cfg.mode;
1990
1991 rthdr->rta_type = XFRMA_TMPL;
1992 rthdr->rta_len = 0; /* actual length is set below */
1993
1994 for (i = 0; i < countof(protos); i++)
1995 {
1996 if (!protos[i].use)
1997 {
1998 continue;
1999 }
2000
2001 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
2002 hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
2003 if (hdr->nlmsg_len > sizeof(request))
2004 {
2005 return FAILED;
2006 }
2007
2008 tmpl->reqid = ipsec->cfg.reqid;
2009 tmpl->id.proto = protos[i].proto;
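			/* do not restrict the kernel's algorithm choice for this
			 * template */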
2010 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
2011 tmpl->mode = mode2kernel(proto_mode);
2012 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2013 policy->direction != POLICY_OUT;
2014 tmpl->family = ipsec->src->get_family(ipsec->src);
2015
2016 if (proto_mode == MODE_TUNNEL)
2017 { /* only for tunnel mode */
2018 host2xfrm(ipsec->src, &tmpl->saddr);
2019 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2020 }
2021
2022 tmpl++;
2023
2024 /* use transport mode for other SAs */
2025 proto_mode = MODE_TRANSPORT;
2026 }
2027
2028 rthdr = XFRM_RTA_NEXT(rthdr);
2029 }
2030
2031 if (ipsec->mark.value)
2032 {
2033 struct xfrm_mark *mrk;
2034
2035 rthdr->rta_type = XFRMA_MARK;
2036 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2037
2038 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2039 if (hdr->nlmsg_len > sizeof(request))
2040 {
2041 return FAILED;
2042 }
2043
2044 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2045 mrk->v = ipsec->mark.value;
2046 mrk->m = ipsec->mark.mask;
2047 }
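	/* release the mutex while waiting for the kernel to ack; the policy is
	 * looked up again below */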
2048 this->mutex->unlock(this->mutex);
2049
2050 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2051 {
2052 return FAILED;
2053 }
2054
2055 /* find the policy again */
2056 this->mutex->lock(this->mutex);
2057 policy = this->policies->get(this->policies, &clone);
2058 if (!policy ||
2059 policy->used_by->find_first(policy->used_by,
2060 NULL, (void**)&mapping) != SUCCESS)
2061 { /* policy or mapping is already gone, ignore */
2062 this->mutex->unlock(this->mutex);
2063 return SUCCESS;
2064 }
2065
2066 /* install a route, if:
2067 * - this is a forward policy (to just get one for each child)
2068 * - we are in tunnel/BEET mode
2069 * - routing is not disabled via strongswan.conf
2070 */
2071 if (policy->direction == POLICY_FWD &&
2072 ipsec->cfg.mode != MODE_TRANSPORT && this->install_routes)
2073 {
2074 route_entry_t *route = malloc_thing(route_entry_t);
2075 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2076
2077 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2078 fwd->dst_ts, &route->src_ip) == SUCCESS)
2079 {
2080 /* get the nexthop to src (src as we are in POLICY_FWD) */
2081 route->gateway = hydra->kernel_interface->get_nexthop(
2082 hydra->kernel_interface, ipsec->src);
2083 /* install route via outgoing interface */
2084 route->if_name = hydra->kernel_interface->get_interface(
2085 hydra->kernel_interface, ipsec->dst);
2086 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2087 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2088 route->prefixlen = policy->sel.prefixlen_s;
2089
2090 if (!route->if_name)
2091 {
2092 this->mutex->unlock(this->mutex);
2093 route_entry_destroy(route);
2094 return SUCCESS;
2095 }
2096
2097 if (policy->route)
2098 {
2099 route_entry_t *old = policy->route;
2100 if (route_entry_equals(old, route))
2101 { /* keep previously installed route */
2102 this->mutex->unlock(this->mutex);
2103 route_entry_destroy(route);
2104 return SUCCESS;
2105 }
2106 /* uninstall previously installed route */
2107 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2108 old->dst_net, old->prefixlen, old->gateway,
2109 old->src_ip, old->if_name) != SUCCESS)
2110 {
2111 DBG1(DBG_KNL, "error uninstalling route installed with "
2112 "policy %R === %R %N", fwd->src_ts,
2113 fwd->dst_ts, policy_dir_names,
2114 policy->direction);
2115 }
2116 route_entry_destroy(old);
2117 policy->route = NULL;
2118 }
2119
2120 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2121 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2122 switch (hydra->kernel_interface->add_route(
2123 hydra->kernel_interface, route->dst_net,
2124 route->prefixlen, route->gateway,
2125 route->src_ip, route->if_name))
2126 {
2127 default:
2128 DBG1(DBG_KNL, "unable to install source route for %H",
2129 route->src_ip);
2130 /* FALL */
2131 case ALREADY_DONE:
2132 /* route exists, do not uninstall */
2133 route_entry_destroy(route);
2134 break;
2135 case SUCCESS:
2136 /* cache the installed route */
2137 policy->route = route;
2138 break;
2139 }
2140 }
2141 else
2142 {
2143 free(route);
2144 }
2145 }
2146 this->mutex->unlock(this->mutex);
2147 return SUCCESS;
2148 }
2149
2150 METHOD(kernel_ipsec_t, add_policy, status_t,
2151 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
2152 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
2153 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
2154 mark_t mark, bool routed)
2155 {
2156 policy_entry_t *policy, *current;
2157 policy_sa_t *assigned_sa, *current_sa;
2158 enumerator_t *enumerator;
2159 bool found = FALSE, update = TRUE;
2160
2161 /* create a policy */
2162 INIT(policy,
2163 .sel = ts2selector(src_ts, dst_ts),
2164 .mark = mark.value & mark.mask,
2165 .direction = direction,
2166 );
2167
2168 /* find the policy, which matches EXACTLY */
2169 this->mutex->lock(this->mutex);
2170 current = this->policies->get(this->policies, policy);
2171 if (current)
2172 {
2173 /* use existing policy */
2174 if (mark.value)
2175 {
2176 DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%8x) "
2177 "already exists, increasing refcount",
2178 src_ts, dst_ts, policy_dir_names, direction,
2179 mark.value, mark.mask);
2180 }
2181 else
2182 {
2183 DBG2(DBG_KNL, "policy %R === %R %N "
2184 "already exists, increasing refcount",
2185 src_ts, dst_ts, policy_dir_names, direction);
2186 }
2187 policy_entry_destroy(this, policy);
2188 policy = current;
2189 found = TRUE;
2190 }
2191 else
2192 { /* use the new one, if we have no such policy */
2193 policy->used_by = linked_list_create();
2194 this->policies->put(this->policies, policy, policy);
2195 }
2196
2197 /* cache the assigned IPsec SA */
2198 assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
2199 dst_ts, mark, sa);
2200
2201 /* calculate priority based on selector size, small size = high prio */
2202 assigned_sa->priority = routed ? PRIO_LOW : PRIO_HIGH;
2203 assigned_sa->priority -= policy->sel.prefixlen_s;
2204 assigned_sa->priority -= policy->sel.prefixlen_d;
2205 assigned_sa->priority <<= 2; /* make some room for the two flags */
2206 assigned_sa->priority += policy->sel.sport_mask ||
2207 policy->sel.dport_mask ? 0 : 2;
2208 assigned_sa->priority += policy->sel.proto ? 0 : 1;
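/* for example, a non-routed policy for 10.0.1.0/24 === 10.0.2.0/24 with
 * wildcard protocol and ports gets ((512 - 24 - 24) << 2) + 2 + 1 = 1859,
 * while a host-to-host policy with protocol and ports set gets
 * ((512 - 32 - 32) << 2) = 1792 and thus takes precedence */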
2209
2210 /* insert the SA according to its priority */
2211 enumerator = policy->used_by->create_enumerator(policy->used_by);
2212 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2213 {
2214 if (current_sa->priority >= assigned_sa->priority)
2215 {
2216 break;
2217 }
2218 update = FALSE;
2219 }
2220 policy->used_by->insert_before(policy->used_by, enumerator, assigned_sa);
2221 enumerator->destroy(enumerator);
2222
2223 if (!update)
2224 { /* we don't update the policy if the priority is lower than that of the
2225 * currently installed one */
2226 this->mutex->unlock(this->mutex);
2227 return SUCCESS;
2228 }
2229
2230 if (mark.value)
2231 {
2232 DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%8x)",
2233 found ? "updating" : "adding", src_ts, dst_ts,
2234 policy_dir_names, direction, mark.value, mark.mask);
2235 }
2236 else
2237 {
2238 DBG2(DBG_KNL, "%s policy %R === %R %N",
2239 found ? "updating" : "adding", src_ts, dst_ts,
2240 policy_dir_names, direction);
2241 }
2242
2243 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2244 {
2245 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2246 found ? "update" : "add", src_ts, dst_ts,
2247 policy_dir_names, direction);
2248 return FAILED;
2249 }
2250 return SUCCESS;
2251 }
2252
2253 METHOD(kernel_ipsec_t, query_policy, status_t,
2254 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2255 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2256 u_int32_t *use_time)
2257 {
2258 netlink_buf_t request;
2259 struct nlmsghdr *out = NULL, *hdr;
2260 struct xfrm_userpolicy_id *policy_id;
2261 struct xfrm_userpolicy_info *policy = NULL;
2262 size_t len;
2263
2264 memset(&request, 0, sizeof(request));
2265
2266 if (mark.value)
2267 {
2268 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%8x)",
2269 src_ts, dst_ts, policy_dir_names, direction,
2270 mark.value, mark.mask);
2271 }
2272 else
2273 {
2274 DBG2(DBG_KNL, "querying policy %R === %R %N", src_ts, dst_ts,
2275 policy_dir_names, direction);
2276 }
2277 hdr = (struct nlmsghdr*)request;
2278 hdr->nlmsg_flags = NLM_F_REQUEST;
2279 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2280 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2281
2282 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2283 policy_id->sel = ts2selector(src_ts, dst_ts);
2284 policy_id->dir = direction;
2285
2286 if (mark.value)
2287 {
2288 struct xfrm_mark *mrk;
2289 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2290
2291 rthdr->rta_type = XFRMA_MARK;
2292 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2293
2294 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2295 if (hdr->nlmsg_len > sizeof(request))
2296 {
2297 return FAILED;
2298 }
2299
2300 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2301 mrk->v = mark.value;
2302 mrk->m = mark.mask;
2303 }
2304
2305 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2306 {
2307 hdr = out;
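/* look for an XFRM_MSG_NEWPOLICY answer; unrelated messages are skipped,
 * errors and NLMSG_DONE terminate the loop */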
2308 while (NLMSG_OK(hdr, len))
2309 {
2310 switch (hdr->nlmsg_type)
2311 {
2312 case XFRM_MSG_NEWPOLICY:
2313 {
2314 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2315 break;
2316 }
2317 case NLMSG_ERROR:
2318 {
2319 struct nlmsgerr *err = NLMSG_DATA(hdr);
2320 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2321 strerror(-err->error), -err->error);
2322 break;
2323 }
2324 default:
2325 hdr = NLMSG_NEXT(hdr, len);
2326 continue;
2327 case NLMSG_DONE:
2328 break;
2329 }
2330 break;
2331 }
2332 }
2333
2334 if (policy == NULL)
2335 {
2336 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2337 policy_dir_names, direction);
2338 free(out);
2339 return FAILED;
2340 }
2341
2342 if (policy->curlft.use_time)
2343 {
2344 /* we need the monotonic time, but the kernel returns system time. */
2345 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2346 }
2347 else
2348 {
2349 *use_time = 0;
2350 }
2351
2352 free(out);
2353 return SUCCESS;
2354 }
2355
2356 METHOD(kernel_ipsec_t, del_policy, status_t,
2357 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2358 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2359 mark_t mark, bool unrouted)
2360 {
2361 policy_entry_t *current, policy;
2362 enumerator_t *enumerator;
2363 policy_sa_t *mapping;
2364 netlink_buf_t request;
2365 struct nlmsghdr *hdr;
2366 struct xfrm_userpolicy_id *policy_id;
2367 bool is_installed = TRUE;
2368
2369 if (mark.value)
2370 {
2371 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x)",
2372 src_ts, dst_ts, policy_dir_names, direction,
2373 mark.value, mark.mask);
2374 }
2375 else
2376 {
2377 DBG2(DBG_KNL, "deleting policy %R === %R %N",
2378 src_ts, dst_ts, policy_dir_names, direction);
2379 }
2380
2381 /* create a policy */
2382 memset(&policy, 0, sizeof(policy_entry_t));
2383 policy.sel = ts2selector(src_ts, dst_ts);
2384 policy.mark = mark.value & mark.mask;
2385 policy.direction = direction;
2386
2387 /* find the policy */
2388 this->mutex->lock(this->mutex);
2389 current = this->policies->get(this->policies, &policy);
2390 if (!current)
2391 {
2392 if (mark.value)
2393 {
2394 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x) "
2395 "failed, not found", src_ts, dst_ts, policy_dir_names,
2396 direction, mark.value, mark.mask);
2397 }
2398 else
2399 {
2400 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2401 src_ts, dst_ts, policy_dir_names, direction);
2402 }
2403 this->mutex->unlock(this->mutex);
2404 return NOT_FOUND;
2405 }
2406
2407 /* remove mapping to SA by reqid */
2408 enumerator = current->used_by->create_enumerator(current->used_by);
2409 while (enumerator->enumerate(enumerator, (void**)&mapping))
2410 {
2411 if (reqid == mapping->sa->cfg.reqid)
2412 {
2413 current->used_by->remove_at(current->used_by, enumerator);
2414 policy_sa_destroy(this, direction, mapping);
2415 break;
2416 }
2417 is_installed = FALSE;
2418 }
2419 enumerator->destroy(enumerator);
2420
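/* is_installed is TRUE only if the removed mapping was the first in the
 * list, i.e. the one whose priority is currently installed in the kernel */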
2421 if (current->used_by->get_count(current->used_by) > 0)
2422 { /* policy is used by more SAs, keep in kernel */
2423 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2424 if (!is_installed)
2425 { /* no need to update as the policy was not installed for this SA */
2426 this->mutex->unlock(this->mutex);
2427 return SUCCESS;
2428 }
2429
2430 if (mark.value)
2431 {
2432 DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%8x)",
2433 src_ts, dst_ts, policy_dir_names, direction,
2434 mark.value, mark.mask);
2435 }
2436 else
2437 {
2438 DBG2(DBG_KNL, "updating policy %R === %R %N",
2439 src_ts, dst_ts, policy_dir_names, direction);
2440 }
2441
2442 current->used_by->get_first(current->used_by, (void**)&mapping);
2443 if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
2444 {
2445 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2446 src_ts, dst_ts, policy_dir_names, direction);
2447 return FAILED;
2448 }
2449 return SUCCESS;
2450 }
2451
2452 memset(&request, 0, sizeof(request));
2453
2454 hdr = (struct nlmsghdr*)request;
2455 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2456 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2457 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2458
2459 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2460 policy_id->sel = current->sel;
2461 policy_id->dir = direction;
2462
2463 if (mark.value)
2464 {
2465 struct xfrm_mark *mrk;
2466 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2467
2468 rthdr->rta_type = XFRMA_MARK;
2469 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2470 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2471 if (hdr->nlmsg_len > sizeof(request))
2472 {
this->mutex->unlock(this->mutex);
2473 return FAILED;
2474 }
2475
2476 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2477 mrk->v = mark.value;
2478 mrk->m = mark.mask;
2479 }
2480
2481 if (current->route)
2482 {
2483 route_entry_t *route = current->route;
2484 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2485 route->dst_net, route->prefixlen, route->gateway,
2486 route->src_ip, route->if_name) != SUCCESS)
2487 {
2488 DBG1(DBG_KNL, "error uninstalling route installed with "
2489 "policy %R === %R %N", src_ts, dst_ts,
2490 policy_dir_names, direction);
2491 }
2492 }
2493
2494 this->policies->remove(this->policies, current);
2495 policy_entry_destroy(this, current);
2496 this->mutex->unlock(this->mutex);
2497
2498 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2499 {
2500 if (mark.value)
2501 {
2502 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2503 "(mark %u/0x%8x)", src_ts, dst_ts, policy_dir_names,
2504 direction, mark.value, mark.mask);
2505 }
2506 else
2507 {
2508 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2509 src_ts, dst_ts, policy_dir_names, direction);
2510 }
2511 return FAILED;
2512 }
2513 return SUCCESS;
2514 }
2515
2516 METHOD(kernel_ipsec_t, bypass_socket, bool,
2517 private_kernel_netlink_ipsec_t *this, int fd, int family)
2518 {
2519 struct xfrm_userpolicy_info policy;
2520 u_int sol, ipsec_policy;
2521
2522 switch (family)
2523 {
2524 case AF_INET:
2525 sol = SOL_IP;
2526 ipsec_policy = IP_XFRM_POLICY;
2527 break;
2528 case AF_INET6:
2529 sol = SOL_IPV6;
2530 ipsec_policy = IPV6_XFRM_POLICY;
2531 break;
2532 default:
2533 return FALSE;
2534 }
2535
2536 memset(&policy, 0, sizeof(policy));
2537 policy.action = XFRM_POLICY_ALLOW;
2538 policy.sel.family = family;
2539
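/* attach an ALLOW policy without templates to the socket in both
 * directions, so its traffic bypasses the IPsec policies installed above */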
2540 policy.dir = XFRM_POLICY_OUT;
2541 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2542 {
2543 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2544 strerror(errno));
2545 return FALSE;
2546 }
2547 policy.dir = XFRM_POLICY_IN;
2548 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2549 {
2550 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2551 strerror(errno));
2552 return FALSE;
2553 }
2554 return TRUE;
2555 }
2556
2557 METHOD(kernel_ipsec_t, destroy, void,
2558 private_kernel_netlink_ipsec_t *this)
2559 {
2560 enumerator_t *enumerator;
2561 policy_entry_t *policy;
2562
2563 if (this->job)
2564 {
2565 this->job->cancel(this->job);
2566 }
2567 if (this->socket_xfrm_events > 0)
2568 {
2569 close(this->socket_xfrm_events);
2570 }
2571 DESTROY_IF(this->socket_xfrm);
2572 enumerator = this->policies->create_enumerator(this->policies);
2573 while (enumerator->enumerate(enumerator, &policy, &policy))
2574 {
2575 policy_entry_destroy(this, policy);
2576 }
2577 enumerator->destroy(enumerator);
2578 this->policies->destroy(this->policies);
2579 this->sas->destroy(this->sas);
2580 this->mutex->destroy(this->mutex);
2581 free(this);
2582 }
2583
2584 /*
2585 * Described in header.
2586 */
2587 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2588 {
2589 private_kernel_netlink_ipsec_t *this;
2590 struct sockaddr_nl addr;
2591 int fd;
2592
2593 INIT(this,
2594 .public = {
2595 .interface = {
2596 .get_spi = _get_spi,
2597 .get_cpi = _get_cpi,
2598 .add_sa = _add_sa,
2599 .update_sa = _update_sa,
2600 .query_sa = _query_sa,
2601 .del_sa = _del_sa,
2602 .add_policy = _add_policy,
2603 .query_policy = _query_policy,
2604 .del_policy = _del_policy,
2605 .bypass_socket = _bypass_socket,
2606 .destroy = _destroy,
2607 },
2608 },
2609 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2610 (hashtable_equals_t)policy_equals, 32),
2611 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2612 (hashtable_equals_t)ipsec_sa_equals, 32),
2613 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2614 .install_routes = lib->settings->get_bool(lib->settings,
2615 "%s.install_routes", TRUE, hydra->daemon),
2616 .replay_window = lib->settings->get_int(lib->settings,
2617 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2618 );
2619
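/* number of u_int32_t words required for the replay window bitmap, e.g.
 * one word for the default 32 bit window, four for a 128 bit window */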
2620 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2621 (sizeof(u_int32_t) * 8);
2622
2623 if (streq(hydra->daemon, "pluto"))
2624 { /* no routes for pluto, they are installed via updown script */
2625 this->install_routes = FALSE;
2626 }
2627
2628 /* extend the lifetime of allocated SPIs (acquire state) to 165s so they survive IKE retransmission */
2629 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2630 if (fd >= 0)
2631 {
2632 ignore_result(write(fd, "165", 3));
2633 close(fd);
2634 }
2635
2636 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2637 if (!this->socket_xfrm)
2638 {
2639 destroy(this);
2640 return NULL;
2641 }
2642
2643 memset(&addr, 0, sizeof(addr));
2644 addr.nl_family = AF_NETLINK;
2645
2646 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2647 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2648 if (this->socket_xfrm_events <= 0)
2649 {
2650 DBG1(DBG_KNL, "unable to create XFRM event socket");
2651 destroy(this);
2652 return NULL;
2653 }
2654 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2655 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2656 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2657 {
2658 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2659 destroy(this);
2660 return NULL;
2661 }
2662 this->job = callback_job_create_with_prio((callback_job_cb_t)receive_events,
2663 this, NULL, NULL, JOB_PRIO_CRITICAL);
2664 lib->processor->queue_job(lib->processor, (job_t*)this->job);
2665
2666 return &this->public;
2667 }
2668