strongswan.git: src/libhydra/plugins/kernel_netlink/kernel_netlink_ipsec.c (commit d0076b59214c06287497df8e4d44cf2e1c543926)
1 /*
2 * Copyright (C) 2006-2012 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <utils/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** XFRM_STATE_AF_UNSPEC flag, introduced with Linux 2.6.26; define it for older headers */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** From linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /** Missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /** Default priority of installed policies */
62 #define PRIO_BASE 512
63
64 /** Default replay window size, if not set using charon.replay_window */
65 #define DEFAULT_REPLAY_WINDOW 32
66
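/*
 * Illustrative note (not part of the original source): the value above is only
 * a fallback; the effective window can be changed via the strongswan.conf
 * option mentioned in the comment, e.g. (value chosen arbitrarily here):
 *
 *   charon {
 *       replay_window = 128
 *   }
 */
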
67 /**
68 * Map the limit for bytes and packets to XFRM_INF by default
69 */
70 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
71
72 /**
73 * Create ORable bitfield of XFRM NL groups
74 */
75 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
76
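/*
 * Illustrative sketch (not part of the original source): this macro is meant
 * for building the nl_groups bitmask when binding the event socket to the
 * XFRM multicast groups handled in receive_events() further down. The actual
 * bind happens in the plugin constructor, which is outside this excerpt; it
 * would look roughly like this:
 *
 *   struct sockaddr_nl addr = {
 *       .nl_family = AF_NETLINK,
 *       .nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
 *                    XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING),
 *   };
 *   bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr));
 */
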
77 /**
78 * Returns a pointer to the first rtattr following the nlmsghdr *nlh and the
79 * 'usual' netlink data x like 'struct xfrm_usersa_info'
80 */
81 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + \
82 NLMSG_ALIGN(sizeof(x))))
83 /**
84 * Returns a pointer to the next rtattr following rta.
85 * !!! Do not use this to parse messages. Use RTA_NEXT and RTA_OK instead !!!
86 */
87 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + \
88 RTA_ALIGN((rta)->rta_len)))
89 /**
90 * Returns the total size of attached rta data
91 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
92 */
93 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
94
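/*
 * Illustrative sketch (not part of the original source) of how the three
 * macros above are used when composing a message: XFRM_RTA() yields the
 * position of the first attribute behind the fixed payload, XFRM_RTA_NEXT()
 * advances to where the following attribute is written, and nlmsg_len grows
 * accordingly. Received messages are parsed with RTA_OK()/RTA_NEXT() using
 * XFRM_PAYLOAD() as the remaining length, as the comment above warns.
 *
 *   struct rtattr *rta = XFRM_RTA(hdr, struct xfrm_usersa_info);
 *   rta->rta_type = XFRMA_MARK;
 *   rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
 *   hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
 *   ((struct xfrm_mark*)RTA_DATA(rta))->v = 42;
 *   rta = XFRM_RTA_NEXT(rta);
 */
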
95 typedef struct kernel_algorithm_t kernel_algorithm_t;
96
97 /**
98 * Mapping of an IKEv2 algorithm identifier to the Linux Crypto API name
99 */
100 struct kernel_algorithm_t {
101 /**
102 * Identifier specified in IKEv2
103 */
104 int ikev2;
105
106 /**
107 * Name of the algorithm in linux crypto API
108 */
109 char *name;
110 };
111
112 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
113 "XFRM_MSG_NEWSA",
114 "XFRM_MSG_DELSA",
115 "XFRM_MSG_GETSA",
116 "XFRM_MSG_NEWPOLICY",
117 "XFRM_MSG_DELPOLICY",
118 "XFRM_MSG_GETPOLICY",
119 "XFRM_MSG_ALLOCSPI",
120 "XFRM_MSG_ACQUIRE",
121 "XFRM_MSG_EXPIRE",
122 "XFRM_MSG_UPDPOLICY",
123 "XFRM_MSG_UPDSA",
124 "XFRM_MSG_POLEXPIRE",
125 "XFRM_MSG_FLUSHSA",
126 "XFRM_MSG_FLUSHPOLICY",
127 "XFRM_MSG_NEWAE",
128 "XFRM_MSG_GETAE",
129 "XFRM_MSG_REPORT",
130 "XFRM_MSG_MIGRATE",
131 "XFRM_MSG_NEWSADINFO",
132 "XFRM_MSG_GETSADINFO",
133 "XFRM_MSG_NEWSPDINFO",
134 "XFRM_MSG_GETSPDINFO",
135 "XFRM_MSG_MAPPING"
136 );
137
138 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_REPLAY_ESN_VAL,
139 "XFRMA_UNSPEC",
140 "XFRMA_ALG_AUTH",
141 "XFRMA_ALG_CRYPT",
142 "XFRMA_ALG_COMP",
143 "XFRMA_ENCAP",
144 "XFRMA_TMPL",
145 "XFRMA_SA",
146 "XFRMA_POLICY",
147 "XFRMA_SEC_CTX",
148 "XFRMA_LTIME_VAL",
149 "XFRMA_REPLAY_VAL",
150 "XFRMA_REPLAY_THRESH",
151 "XFRMA_ETIMER_THRESH",
152 "XFRMA_SRCADDR",
153 "XFRMA_COADDR",
154 "XFRMA_LASTUSED",
155 "XFRMA_POLICY_TYPE",
156 "XFRMA_MIGRATE",
157 "XFRMA_ALG_AEAD",
158 "XFRMA_KMADDRESS",
159 "XFRMA_ALG_AUTH_TRUNC",
160 "XFRMA_MARK",
161 "XFRMA_TFCPAD",
162 "XFRMA_REPLAY_ESN_VAL",
163 );
164
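/*
 * Illustrative note (not part of the original source): these enum_name_t
 * tables are consumed by the %N printf hook, which takes the table and the
 * value as a pair, e.g. as used further down in this file:
 *
 *   DBG2(DBG_KNL, "  %N", xfrm_attr_type_names, rta->rta_type);
 *
 * and xfrm_msg_names could be used the same way with hdr->nlmsg_type.
 */
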
165 #define END_OF_LIST -1
166
167 /**
168 * Algorithms for encryption
169 */
170 static kernel_algorithm_t encryption_algs[] = {
171 /* {ENCR_DES_IV64, "***" }, */
172 {ENCR_DES, "des" },
173 {ENCR_3DES, "des3_ede" },
174 /* {ENCR_RC5, "***" }, */
175 /* {ENCR_IDEA, "***" }, */
176 {ENCR_CAST, "cast128" },
177 {ENCR_BLOWFISH, "blowfish" },
178 /* {ENCR_3IDEA, "***" }, */
179 /* {ENCR_DES_IV32, "***" }, */
180 {ENCR_NULL, "cipher_null" },
181 {ENCR_AES_CBC, "aes" },
182 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
183 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
184 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
185 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
186 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
187 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
188 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
189 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
190 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
191 /* {ENCR_CAMELLIA_CTR, "***" }, */
192 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
193 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
194 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
195 {ENCR_SERPENT_CBC, "serpent" },
196 {ENCR_TWOFISH_CBC, "twofish" },
197 {END_OF_LIST, NULL }
198 };
199
200 /**
201 * Algorithms for integrity protection
202 */
203 static kernel_algorithm_t integrity_algs[] = {
204 {AUTH_HMAC_MD5_96, "md5" },
205 {AUTH_HMAC_MD5_128, "hmac(md5)" },
206 {AUTH_HMAC_SHA1_96, "sha1" },
207 {AUTH_HMAC_SHA1_160, "hmac(sha1)" },
208 {AUTH_HMAC_SHA2_256_96, "sha256" },
209 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
210 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
211 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
212 /* {AUTH_DES_MAC, "***" }, */
213 /* {AUTH_KPDK_MD5, "***" }, */
214 {AUTH_AES_XCBC_96, "xcbc(aes)" },
215 {END_OF_LIST, NULL }
216 };
217
218 /**
219 * Algorithms for IPComp
220 */
221 static kernel_algorithm_t compression_algs[] = {
222 /* {IPCOMP_OUI, "***" }, */
223 {IPCOMP_DEFLATE, "deflate" },
224 {IPCOMP_LZS, "lzs" },
225 {IPCOMP_LZJH, "lzjh" },
226 {END_OF_LIST, NULL }
227 };
228
229 /**
230 * Look up the kernel algorithm name for an IKEv2 algorithm identifier
231 */
232 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
233 {
234 while (list->ikev2 != END_OF_LIST)
235 {
236 if (list->ikev2 == ikev2)
237 {
238 return list->name;
239 }
240 list++;
241 }
242 return NULL;
243 }
244
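/*
 * Usage example (not part of the original source), derived purely from the
 * tables above: lookup_algorithm(encryption_algs, ENCR_AES_CBC) returns
 * "aes", lookup_algorithm(integrity_algs, AUTH_AES_XCBC_96) returns
 * "xcbc(aes)", and an unmapped identifier returns NULL, which the callers
 * report as an algorithm not supported by the kernel.
 */
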
245 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
246
247 /**
248 * Private variables and functions of kernel_netlink class.
249 */
250 struct private_kernel_netlink_ipsec_t {
251 /**
252 * Public part of the kernel_netlink_t object
253 */
254 kernel_netlink_ipsec_t public;
255
256 /**
257 * Mutex to lock access to installed policies
258 */
259 mutex_t *mutex;
260
261 /**
262 * Hash table of installed policies (policy_entry_t)
263 */
264 hashtable_t *policies;
265
266 /**
267 * Hash table of IPsec SAs using policies (ipsec_sa_t)
268 */
269 hashtable_t *sas;
270
271 /**
272 * Job receiving netlink events
273 */
274 callback_job_t *job;
275
276 /**
277 * Netlink xfrm socket (IPsec)
278 */
279 netlink_socket_t *socket_xfrm;
280
281 /**
282 * Netlink xfrm socket to receive acquire and expire events
283 */
284 int socket_xfrm_events;
285
286 /**
287 * Whether to install routes along policies
288 */
289 bool install_routes;
290
291 /**
292 * Whether to track the history of a policy
293 */
294 bool policy_history;
295
296 /**
297 * Size of the replay window, in packets
298 */
299 u_int32_t replay_window;
300
301 /**
302 * Size of the replay window bitmap, in bytes
303 */
304 u_int32_t replay_bmp;
305 };
306
307 typedef struct route_entry_t route_entry_t;
308
309 /**
310 * Installed routing entry
311 */
312 struct route_entry_t {
313 /** Name of the interface the route is bound to */
314 char *if_name;
315
316 /** Source ip of the route */
317 host_t *src_ip;
318
319 /** Gateway for this route */
320 host_t *gateway;
321
322 /** Destination net */
323 chunk_t dst_net;
324
325 /** Destination net prefixlen */
326 u_int8_t prefixlen;
327 };
328
329 /**
330 * Destroy a route_entry_t object
331 */
332 static void route_entry_destroy(route_entry_t *this)
333 {
334 free(this->if_name);
335 this->src_ip->destroy(this->src_ip);
336 DESTROY_IF(this->gateway);
337 chunk_free(&this->dst_net);
338 free(this);
339 }
340
341 /**
342 * Compare two route_entry_t objects
343 */
344 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
345 {
346 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
347 a->src_ip->equals(a->src_ip, b->src_ip) &&
348 a->gateway->equals(a->gateway, b->gateway) &&
349 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
350 }
351
352 typedef struct ipsec_sa_t ipsec_sa_t;
353
354 /**
355 * IPsec SA assigned to a policy.
356 */
357 struct ipsec_sa_t {
358 /** Source address of this SA */
359 host_t *src;
360
361 /** Destination address of this SA */
362 host_t *dst;
363
364 /** Optional mark */
365 mark_t mark;
366
367 /** Description of this SA */
368 ipsec_sa_cfg_t cfg;
369
370 /** Reference count for this SA */
371 refcount_t refcount;
372 };
373
374 /**
375 * Hash function for ipsec_sa_t objects
376 */
377 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
378 {
379 return chunk_hash_inc(sa->src->get_address(sa->src),
380 chunk_hash_inc(sa->dst->get_address(sa->dst),
381 chunk_hash_inc(chunk_from_thing(sa->mark),
382 chunk_hash(chunk_from_thing(sa->cfg)))));
383 }
384
385 /**
386 * Equality function for ipsec_sa_t objects
387 */
388 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
389 {
390 return sa->src->ip_equals(sa->src, other_sa->src) &&
391 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
392 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
393 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
394 }
395
396 /**
397 * Allocate or reference an IPsec SA object
398 */
399 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
400 host_t *src, host_t *dst, mark_t mark,
401 ipsec_sa_cfg_t *cfg)
402 {
403 ipsec_sa_t *sa, *found;
404 INIT(sa,
405 .src = src,
406 .dst = dst,
407 .mark = mark,
408 .cfg = *cfg,
409 );
410 found = this->sas->get(this->sas, sa);
411 if (!found)
412 {
413 sa->src = src->clone(src);
414 sa->dst = dst->clone(dst);
415 this->sas->put(this->sas, sa, sa);
416 }
417 else
418 {
419 free(sa);
420 sa = found;
421 }
422 ref_get(&sa->refcount);
423 return sa;
424 }
425
426 /**
427 * Release and destroy an IPsec SA object
428 */
429 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
430 ipsec_sa_t *sa)
431 {
432 if (ref_put(&sa->refcount))
433 {
434 this->sas->remove(this->sas, sa);
435 DESTROY_IF(sa->src);
436 DESTROY_IF(sa->dst);
437 free(sa);
438 }
439 }
440
441 typedef struct policy_sa_t policy_sa_t;
442 typedef struct policy_sa_fwd_t policy_sa_fwd_t;
443
444 /**
445 * Mapping between a policy and an IPsec SA.
446 */
447 struct policy_sa_t {
448 /** Priority assigned to the policy when installed with this SA */
449 u_int32_t priority;
450
451 /** Type of the policy */
452 policy_type_t type;
453
454 /** Assigned SA */
455 ipsec_sa_t *sa;
456 };
457
458 /**
459 * For forward policies we also cache the traffic selectors in order to install
460 * the route.
461 */
462 struct policy_sa_fwd_t {
463 /** Generic interface */
464 policy_sa_t generic;
465
466 /** Source traffic selector of this policy */
467 traffic_selector_t *src_ts;
468
469 /** Destination traffic selector of this policy */
470 traffic_selector_t *dst_ts;
471 };
472
473 /**
474 * Create a policy_sa(_fwd)_t object
475 */
476 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
477 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
478 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
479 ipsec_sa_cfg_t *cfg)
480 {
481 policy_sa_t *policy;
482
483 if (dir == POLICY_FWD)
484 {
485 policy_sa_fwd_t *fwd;
486 INIT(fwd,
487 .src_ts = src_ts->clone(src_ts),
488 .dst_ts = dst_ts->clone(dst_ts),
489 );
490 policy = &fwd->generic;
491 }
492 else
493 {
494 INIT(policy, .priority = 0);
495 }
496 policy->type = type;
497 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
498 return policy;
499 }
500
501 /**
502 * Destroy a policy_sa(_fwd)_t object
503 */
504 static void policy_sa_destroy(policy_sa_t *policy, policy_dir_t *dir,
505 private_kernel_netlink_ipsec_t *this)
506 {
507 if (*dir == POLICY_FWD)
508 {
509 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
510 fwd->src_ts->destroy(fwd->src_ts);
511 fwd->dst_ts->destroy(fwd->dst_ts);
512 }
513 ipsec_sa_destroy(this, policy->sa);
514 free(policy);
515 }
516
517 typedef struct policy_entry_t policy_entry_t;
518
519 /**
520 * Installed kernel policy.
521 */
522 struct policy_entry_t {
523
524 /** Direction of this policy: in, out, forward */
525 u_int8_t direction;
526
527 /** Parameters of installed policy */
528 struct xfrm_selector sel;
529
530 /** Optional mark */
531 u_int32_t mark;
532
533 /** Associated route installed for this policy */
534 route_entry_t *route;
535
536 /** List of SAs this policy is used by, ordered by priority */
537 linked_list_t *used_by;
538 };
539
540 /**
541 * Destroy a policy_entry_t object
542 */
543 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
544 policy_entry_t *policy)
545 {
546 if (policy->route)
547 {
548 route_entry_destroy(policy->route);
549 }
550 if (policy->used_by)
551 {
552 policy->used_by->invoke_function(policy->used_by,
553 (linked_list_invoke_t)policy_sa_destroy,
554 &policy->direction, this);
555 policy->used_by->destroy(policy->used_by);
556 }
557 free(policy);
558 }
559
560 /**
561 * Hash function for policy_entry_t objects
562 */
563 static u_int policy_hash(policy_entry_t *key)
564 {
565 chunk_t chunk = chunk_create((void*)&key->sel,
566 sizeof(struct xfrm_selector) + sizeof(u_int32_t));
567 return chunk_hash(chunk);
568 }
569
570 /**
571 * Equality function for policy_entry_t objects
572 */
573 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
574 {
575 return memeq(&key->sel, &other_key->sel,
576 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
577 key->direction == other_key->direction;
578 }
579
580 /**
581 * Calculate the priority of a policy
582 */
583 static inline u_int32_t get_priority(policy_entry_t *policy,
584 policy_priority_t prio)
585 {
586 u_int32_t priority = PRIO_BASE;
587 switch (prio)
588 {
589 case POLICY_PRIORITY_FALLBACK:
590 priority <<= 1;
591 /* fall-through */
592 case POLICY_PRIORITY_ROUTED:
593 priority <<= 1;
594 /* fall-through */
595 case POLICY_PRIORITY_DEFAULT:
596 break;
597 }
598 /* calculate priority based on selector size, small size = high prio */
599 priority -= policy->sel.prefixlen_s;
600 priority -= policy->sel.prefixlen_d;
601 priority <<= 2; /* make some room for the two flags */
602 priority += policy->sel.sport_mask || policy->sel.dport_mask ? 0 : 2;
603 priority += policy->sel.proto ? 0 : 1;
604 return priority;
605 }
606
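/*
 * Worked example (not part of the original source): a POLICY_PRIORITY_DEFAULT
 * policy between two /24 subnets, protocol TCP, no port selectors:
 *
 *   priority  = 512          PRIO_BASE, no shift for the default priority
 *   priority -= 24 + 24      source and destination prefix lengths  -> 464
 *   priority <<= 2           make room for the two flags            -> 1856
 *   priority += 2            no port selectors                      -> 1858
 *   priority += 0            protocol is set
 *
 * Smaller values win in the kernel, so more specific selectors take
 * precedence over broader ones.
 */
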
607 /**
608 * Convert the general ipsec mode to the one defined in xfrm.h
609 */
610 static u_int8_t mode2kernel(ipsec_mode_t mode)
611 {
612 switch (mode)
613 {
614 case MODE_TRANSPORT:
615 return XFRM_MODE_TRANSPORT;
616 case MODE_TUNNEL:
617 return XFRM_MODE_TUNNEL;
618 case MODE_BEET:
619 return XFRM_MODE_BEET;
620 default:
621 return mode;
622 }
623 }
624
625 /**
626 * Convert a host_t to a struct xfrm_address
627 */
628 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
629 {
630 chunk_t chunk = host->get_address(host);
631 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
632 }
633
634 /**
635 * Convert a struct xfrm_address to a host_t
636 */
637 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
638 {
639 chunk_t chunk;
640
641 switch (family)
642 {
643 case AF_INET:
644 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
645 break;
646 case AF_INET6:
647 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
648 break;
649 default:
650 return NULL;
651 }
652 return host_create_from_chunk(family, chunk, ntohs(port));
653 }
654
655 /**
656 * Convert a traffic selector address range to subnet and its mask.
657 */
658 static void ts2subnet(traffic_selector_t* ts,
659 xfrm_address_t *net, u_int8_t *mask)
660 {
661 host_t *net_host;
662 chunk_t net_chunk;
663
664 ts->to_subnet(ts, &net_host, mask);
665 net_chunk = net_host->get_address(net_host);
666 memcpy(net, net_chunk.ptr, net_chunk.len);
667 net_host->destroy(net_host);
668 }
669
670 /**
671 * Convert a traffic selector port range to port/portmask
672 */
673 static void ts2ports(traffic_selector_t* ts,
674 u_int16_t *port, u_int16_t *mask)
675 {
676 /* Linux does not accept complex port masks: only "any" or one specific
677 * port can be expressed. We therefore match any port if the selector
678 * covers a range, and the exact port if it covers a single port.
679 */
680 u_int16_t from, to;
681
682 from = ts->get_from_port(ts);
683 to = ts->get_to_port(ts);
684
685 if (from == to)
686 {
687 *port = htons(from);
688 *mask = ~0;
689 }
690 else
691 {
692 *port = 0;
693 *mask = 0;
694 }
695 }
696
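/*
 * Examples (not part of the original source):
 *
 *   selector covering port 80 only      -> *port = htons(80), *mask = 0xffff
 *   selector covering ports 1024-65535  -> *port = 0,          *mask = 0
 */
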
697 /**
698 * Convert a pair of traffic_selectors to an xfrm_selector
699 */
700 static struct xfrm_selector ts2selector(traffic_selector_t *src,
701 traffic_selector_t *dst)
702 {
703 struct xfrm_selector sel;
704
705 memset(&sel, 0, sizeof(sel));
706 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
707 /* src or dest proto may be "any" (0), use more restrictive one */
708 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
709 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
710 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
711 ts2ports(dst, &sel.dport, &sel.dport_mask);
712 ts2ports(src, &sel.sport, &sel.sport_mask);
713 sel.ifindex = 0;
714 sel.user = 0;
715
716 return sel;
717 }
718
719 /**
720 * Convert an xfrm_selector to a src|dst traffic_selector
721 */
722 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
723 {
724 u_char *addr;
725 u_int8_t prefixlen;
726 u_int16_t port = 0;
727 host_t *host = NULL;
728
729 if (src)
730 {
731 addr = (u_char*)&sel->saddr;
732 prefixlen = sel->prefixlen_s;
733 if (sel->sport_mask)
734 {
735 port = htons(sel->sport);
736 }
737 }
738 else
739 {
740 addr = (u_char*)&sel->daddr;
741 prefixlen = sel->prefixlen_d;
742 if (sel->dport_mask)
743 {
744 port = htons(sel->dport);
745 }
746 }
747
748 /* The Linux 2.6 kernel does not set the selector's family field,
749 * so as a kludge we additionally test the prefix length.
750 */
751 if (sel->family == AF_INET || sel->prefixlen_s == 32)
752 {
753 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
754 }
755 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
756 {
757 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
758 }
759
760 if (host)
761 {
762 return traffic_selector_create_from_subnet(host, prefixlen,
763 sel->proto, port);
764 }
765 return NULL;
766 }
767
768 /**
769 * Process a XFRM_MSG_ACQUIRE from kernel
770 */
771 static void process_acquire(private_kernel_netlink_ipsec_t *this,
772 struct nlmsghdr *hdr)
773 {
774 struct xfrm_user_acquire *acquire;
775 struct rtattr *rta;
776 size_t rtasize;
777 traffic_selector_t *src_ts, *dst_ts;
778 u_int32_t reqid = 0;
779 int proto = 0;
780
781 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
782 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
783 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
784
785 DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");
786
787 while (RTA_OK(rta, rtasize))
788 {
789 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
790
791 if (rta->rta_type == XFRMA_TMPL)
792 {
793 struct xfrm_user_tmpl* tmpl;
794 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
795 reqid = tmpl->reqid;
796 proto = tmpl->id.proto;
797 }
798 rta = RTA_NEXT(rta, rtasize);
799 }
800 switch (proto)
801 {
802 case 0:
803 case IPPROTO_ESP:
804 case IPPROTO_AH:
805 break;
806 default:
807 /* acquire for AH/ESP only, not for IPCOMP */
808 return;
809 }
810 src_ts = selector2ts(&acquire->sel, TRUE);
811 dst_ts = selector2ts(&acquire->sel, FALSE);
812
813 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
814 dst_ts);
815 }
816
817 /**
818 * Process a XFRM_MSG_EXPIRE from kernel
819 */
820 static void process_expire(private_kernel_netlink_ipsec_t *this,
821 struct nlmsghdr *hdr)
822 {
823 struct xfrm_user_expire *expire;
824 u_int32_t spi, reqid;
825 u_int8_t protocol;
826
827 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
828 protocol = expire->state.id.proto;
829 spi = expire->state.id.spi;
830 reqid = expire->state.reqid;
831
832 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
833
834 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
835 {
836 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
837 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
838 return;
839 }
840
841 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
842 spi, expire->hard != 0);
843 }
844
845 /**
846 * Process a XFRM_MSG_MIGRATE from kernel
847 */
848 static void process_migrate(private_kernel_netlink_ipsec_t *this,
849 struct nlmsghdr *hdr)
850 {
851 struct xfrm_userpolicy_id *policy_id;
852 struct rtattr *rta;
853 size_t rtasize;
854 traffic_selector_t *src_ts, *dst_ts;
855 host_t *local = NULL, *remote = NULL;
856 host_t *old_src = NULL, *old_dst = NULL;
857 host_t *new_src = NULL, *new_dst = NULL;
858 u_int32_t reqid = 0;
859 policy_dir_t dir;
860
861 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
862 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
863 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
864
865 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
866
867 src_ts = selector2ts(&policy_id->sel, TRUE);
868 dst_ts = selector2ts(&policy_id->sel, FALSE);
869 dir = (policy_dir_t)policy_id->dir;
870
871 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names, dir);
872
873 while (RTA_OK(rta, rtasize))
874 {
875 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
876 if (rta->rta_type == XFRMA_KMADDRESS)
877 {
878 struct xfrm_user_kmaddress *kmaddress;
879
880 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
881 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
882 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
883 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
884 }
885 else if (rta->rta_type == XFRMA_MIGRATE)
886 {
887 struct xfrm_user_migrate *migrate;
888
889 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
890 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
891 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
892 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
893 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
894 reqid = migrate->reqid;
895 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
896 old_src, old_dst, new_src, new_dst, reqid);
897 DESTROY_IF(old_src);
898 DESTROY_IF(old_dst);
899 DESTROY_IF(new_src);
900 DESTROY_IF(new_dst);
901 }
902 rta = RTA_NEXT(rta, rtasize);
903 }
904
905 if (src_ts && dst_ts && local && remote)
906 {
907 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
908 src_ts, dst_ts, dir, local, remote);
909 }
910 else
911 {
912 DESTROY_IF(src_ts);
913 DESTROY_IF(dst_ts);
914 DESTROY_IF(local);
915 DESTROY_IF(remote);
916 }
917 }
918
919 /**
920 * Process a XFRM_MSG_MAPPING from kernel
921 */
922 static void process_mapping(private_kernel_netlink_ipsec_t *this,
923 struct nlmsghdr *hdr)
924 {
925 struct xfrm_user_mapping *mapping;
926 u_int32_t spi, reqid;
927
928 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
929 spi = mapping->id.spi;
930 reqid = mapping->reqid;
931
932 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
933
934 if (mapping->id.proto == IPPROTO_ESP)
935 {
936 host_t *host;
937 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
938 mapping->new_sport);
939 if (host)
940 {
941 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
942 spi, host);
943 }
944 }
945 }
946
947 /**
948 * Receives events from kernel
949 */
950 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
951 {
952 char response[1024];
953 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
954 struct sockaddr_nl addr;
955 socklen_t addr_len = sizeof(addr);
956 int len;
957 bool oldstate;
958
959 oldstate = thread_cancelability(TRUE);
960 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
961 (struct sockaddr*)&addr, &addr_len);
962 thread_cancelability(oldstate);
963
964 if (len < 0)
965 {
966 switch (errno)
967 {
968 case EINTR:
969 /* interrupted, try again */
970 return JOB_REQUEUE_DIRECT;
971 case EAGAIN:
972 /* no data ready, select again */
973 return JOB_REQUEUE_DIRECT;
974 default:
975 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
976 sleep(1);
977 return JOB_REQUEUE_FAIR;
978 }
979 }
980
981 if (addr.nl_pid != 0)
982 { /* not from kernel. not interested, try another one */
983 return JOB_REQUEUE_DIRECT;
984 }
985
986 while (NLMSG_OK(hdr, len))
987 {
988 switch (hdr->nlmsg_type)
989 {
990 case XFRM_MSG_ACQUIRE:
991 process_acquire(this, hdr);
992 break;
993 case XFRM_MSG_EXPIRE:
994 process_expire(this, hdr);
995 break;
996 case XFRM_MSG_MIGRATE:
997 process_migrate(this, hdr);
998 break;
999 case XFRM_MSG_MAPPING:
1000 process_mapping(this, hdr);
1001 break;
1002 default:
1003 DBG1(DBG_KNL, "received unknown event from xfrm event "
1004 "socket: %d", hdr->nlmsg_type);
1005 break;
1006 }
1007 hdr = NLMSG_NEXT(hdr, len);
1008 }
1009 return JOB_REQUEUE_DIRECT;
1010 }
1011
1012 /**
1013 * Get an SPI for a specific protocol from the kernel.
1014 */
1015 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
1016 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
1017 u_int32_t reqid, u_int32_t *spi)
1018 {
1019 netlink_buf_t request;
1020 struct nlmsghdr *hdr, *out;
1021 struct xfrm_userspi_info *userspi;
1022 u_int32_t received_spi = 0;
1023 size_t len;
1024
1025 memset(&request, 0, sizeof(request));
1026
1027 hdr = (struct nlmsghdr*)request;
1028 hdr->nlmsg_flags = NLM_F_REQUEST;
1029 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
1030 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
1031
1032 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
1033 host2xfrm(src, &userspi->info.saddr);
1034 host2xfrm(dst, &userspi->info.id.daddr);
1035 userspi->info.id.proto = proto;
1036 userspi->info.mode = XFRM_MODE_TUNNEL;
1037 userspi->info.reqid = reqid;
1038 userspi->info.family = src->get_family(src);
1039 userspi->min = min;
1040 userspi->max = max;
1041
1042 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1043 {
1044 hdr = out;
1045 while (NLMSG_OK(hdr, len))
1046 {
1047 switch (hdr->nlmsg_type)
1048 {
1049 case XFRM_MSG_NEWSA:
1050 {
1051 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
1052 received_spi = usersa->id.spi;
1053 break;
1054 }
1055 case NLMSG_ERROR:
1056 {
1057 struct nlmsgerr *err = NLMSG_DATA(hdr);
1058 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
1059 strerror(-err->error), -err->error);
1060 break;
1061 }
1062 default:
1063 hdr = NLMSG_NEXT(hdr, len);
1064 continue;
1065 case NLMSG_DONE:
1066 break;
1067 }
1068 break;
1069 }
1070 free(out);
1071 }
1072
1073 if (received_spi == 0)
1074 {
1075 return FAILED;
1076 }
1077
1078 *spi = received_spi;
1079 return SUCCESS;
1080 }
1081
1082 METHOD(kernel_ipsec_t, get_spi, status_t,
1083 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1084 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1085 {
1086 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1087
1088 if (get_spi_internal(this, src, dst, protocol,
1089 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1090 {
1091 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1092 return FAILED;
1093 }
1094
1095 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1096 return SUCCESS;
1097 }
1098
1099 METHOD(kernel_ipsec_t, get_cpi, status_t,
1100 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1101 u_int32_t reqid, u_int16_t *cpi)
1102 {
1103 u_int32_t received_spi = 0;
1104
1105 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1106
1107 if (get_spi_internal(this, src, dst, IPPROTO_COMP,
1108 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1109 {
1110 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1111 return FAILED;
1112 }
1113
1114 *cpi = htons((u_int16_t)ntohl(received_spi));
1115
1116 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1117 return SUCCESS;
1118 }
1119
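/*
 * Illustrative note (not part of the original source): IPComp CPIs are only
 * 16 bits wide, but the kernel allocates them through the same 32-bit SPI
 * interface, so the value is narrowed here and widened again by the callers:
 *
 *   cpi = htons((u_int16_t)ntohl(spi));    extract the CPI, as done above
 *   spi = htonl(ntohs(cpi));               as used by add_sa()/del_sa() below
 */
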
1120 METHOD(kernel_ipsec_t, add_sa, status_t,
1121 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1122 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
1123 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
1124 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
1125 u_int16_t cpi, bool encap, bool esn, bool inbound,
1126 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
1127 {
1128 netlink_buf_t request;
1129 char *alg_name;
1130 struct nlmsghdr *hdr;
1131 struct xfrm_usersa_info *sa;
1132 u_int16_t icv_size = 64;
1133 status_t status = FAILED;
1134
1135 /* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
1136 * we are in the recursive call below */
1137 if (ipcomp != IPCOMP_NONE && cpi != 0)
1138 {
1139 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
1140 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
1141 tfc, &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED,
1142 chunk_empty, mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
1143 ipcomp = IPCOMP_NONE;
1144 /* use transport mode ESP SA, IPComp uses tunnel mode */
1145 mode = MODE_TRANSPORT;
1146 }
1147
1148 memset(&request, 0, sizeof(request));
1149
1150 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} (mark "
1151 "%u/0x%8x)", ntohl(spi), reqid, mark.value, mark.mask);
1152
1153 hdr = (struct nlmsghdr*)request;
1154 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1155 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
1156 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1157
1158 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1159 host2xfrm(src, &sa->saddr);
1160 host2xfrm(dst, &sa->id.daddr);
1161 sa->id.spi = spi;
1162 sa->id.proto = protocol;
1163 sa->family = src->get_family(src);
1164 sa->mode = mode2kernel(mode);
1165 switch (mode)
1166 {
1167 case MODE_TUNNEL:
1168 sa->flags |= XFRM_STATE_AF_UNSPEC;
1169 break;
1170 case MODE_BEET:
1171 case MODE_TRANSPORT:
1172 if(src_ts && dst_ts)
1173 {
1174 sa->sel = ts2selector(src_ts, dst_ts);
1175 }
1176 break;
1177 default:
1178 break;
1179 }
1180
1181 sa->reqid = reqid;
1182 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1183 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1184 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1185 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1186 /* we use lifetimes since added, not since used */
1187 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1188 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1189 sa->lft.soft_use_expires_seconds = 0;
1190 sa->lft.hard_use_expires_seconds = 0;
1191
1192 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
1193
1194 switch (enc_alg)
1195 {
1196 case ENCR_UNDEFINED:
1197 /* no encryption */
1198 break;
1199 case ENCR_AES_CCM_ICV16:
1200 case ENCR_AES_GCM_ICV16:
1201 case ENCR_NULL_AUTH_AES_GMAC:
1202 case ENCR_CAMELLIA_CCM_ICV16:
1203 icv_size += 32;
1204 /* FALL */
1205 case ENCR_AES_CCM_ICV12:
1206 case ENCR_AES_GCM_ICV12:
1207 case ENCR_CAMELLIA_CCM_ICV12:
1208 icv_size += 32;
1209 /* FALL */
1210 case ENCR_AES_CCM_ICV8:
1211 case ENCR_AES_GCM_ICV8:
1212 case ENCR_CAMELLIA_CCM_ICV8:
1213 {
1214 struct xfrm_algo_aead *algo;
1215
1216 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1217 if (alg_name == NULL)
1218 {
1219 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1220 encryption_algorithm_names, enc_alg);
1221 goto failed;
1222 }
1223 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1224 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1225
1226 rthdr->rta_type = XFRMA_ALG_AEAD;
1227 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) +
1228 enc_key.len);
1229 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1230 if (hdr->nlmsg_len > sizeof(request))
1231 {
1232 goto failed;
1233 }
1234
1235 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1236 algo->alg_key_len = enc_key.len * 8;
1237 algo->alg_icv_len = icv_size;
1238 strcpy(algo->alg_name, alg_name);
1239 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1240
1241 rthdr = XFRM_RTA_NEXT(rthdr);
1242 break;
1243 }
1244 default:
1245 {
1246 struct xfrm_algo *algo;
1247
1248 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1249 if (alg_name == NULL)
1250 {
1251 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1252 encryption_algorithm_names, enc_alg);
1253 goto failed;
1254 }
1255 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1256 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1257
1258 rthdr->rta_type = XFRMA_ALG_CRYPT;
1259 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1260 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1261 if (hdr->nlmsg_len > sizeof(request))
1262 {
1263 goto failed;
1264 }
1265
1266 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1267 algo->alg_key_len = enc_key.len * 8;
1268 strcpy(algo->alg_name, alg_name);
1269 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1270
1271 rthdr = XFRM_RTA_NEXT(rthdr);
1272 }
1273 }
1274
1275 if (int_alg != AUTH_UNDEFINED)
1276 {
1277 u_int trunc_len = 0;
1278
1279 alg_name = lookup_algorithm(integrity_algs, int_alg);
1280 if (alg_name == NULL)
1281 {
1282 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1283 integrity_algorithm_names, int_alg);
1284 goto failed;
1285 }
1286 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1287 integrity_algorithm_names, int_alg, int_key.len * 8);
1288
1289 switch (int_alg)
1290 {
1291 case AUTH_HMAC_MD5_128:
1292 case AUTH_HMAC_SHA2_256_128:
1293 trunc_len = 128;
1294 break;
1295 case AUTH_HMAC_SHA1_160:
1296 trunc_len = 160;
1297 break;
1298 default:
1299 break;
1300 }
1301
1302 if (trunc_len)
1303 {
1304 struct xfrm_algo_auth* algo;
1305
1306 /* by default the kernel truncates SHA256 to 96 bits; newer kernels
1307 * accept an explicit truncation length via XFRMA_ALG_AUTH_TRUNC.
1308 * we also use it for the full-length (untruncated) MD5 and SHA1. */
1309 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1310 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) +
1311 int_key.len);
1312
1313 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1314 if (hdr->nlmsg_len > sizeof(request))
1315 {
1316 goto failed;
1317 }
1318
1319 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1320 algo->alg_key_len = int_key.len * 8;
1321 algo->alg_trunc_len = trunc_len;
1322 strcpy(algo->alg_name, alg_name);
1323 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1324 }
1325 else
1326 {
1327 struct xfrm_algo* algo;
1328
1329 rthdr->rta_type = XFRMA_ALG_AUTH;
1330 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1331
1332 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1333 if (hdr->nlmsg_len > sizeof(request))
1334 {
1335 goto failed;
1336 }
1337
1338 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1339 algo->alg_key_len = int_key.len * 8;
1340 strcpy(algo->alg_name, alg_name);
1341 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1342 }
1343 rthdr = XFRM_RTA_NEXT(rthdr);
1344 }
1345
1346 if (ipcomp != IPCOMP_NONE)
1347 {
1348 rthdr->rta_type = XFRMA_ALG_COMP;
1349 alg_name = lookup_algorithm(compression_algs, ipcomp);
1350 if (alg_name == NULL)
1351 {
1352 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1353 ipcomp_transform_names, ipcomp);
1354 goto failed;
1355 }
1356 DBG2(DBG_KNL, " using compression algorithm %N",
1357 ipcomp_transform_names, ipcomp);
1358
1359 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1360 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1361 if (hdr->nlmsg_len > sizeof(request))
1362 {
1363 goto failed;
1364 }
1365
1366 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1367 algo->alg_key_len = 0;
1368 strcpy(algo->alg_name, alg_name);
1369
1370 rthdr = XFRM_RTA_NEXT(rthdr);
1371 }
1372
1373 if (encap)
1374 {
1375 struct xfrm_encap_tmpl *tmpl;
1376
1377 rthdr->rta_type = XFRMA_ENCAP;
1378 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1379
1380 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1381 if (hdr->nlmsg_len > sizeof(request))
1382 {
1383 goto failed;
1384 }
1385
1386 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
1387 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1388 tmpl->encap_sport = htons(src->get_port(src));
1389 tmpl->encap_dport = htons(dst->get_port(dst));
1390 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1391 /* encap_oa could probably be derived from the
1392 * traffic selectors [RFC 4306, p. 39]. In its netlink backend pluto
1393 * does the same as we do here, but it does set encap_oa in its
1394 * PF_KEY backend.
1395 * However, as /usr/src/linux/net/key/af_key.c indicates, the kernel
1396 * ignores it anyway.
1397 * -> does that mean NAT-T encapsulation doesn't work in transport mode?
1398 * No. The kernel ignores NAT-OA because it recomputes (or, rather,
1399 * just ignores) the checksum. If packets pass the IPsec checks it
1400 * marks them "checksum ok", so OA isn't needed. */
1401 rthdr = XFRM_RTA_NEXT(rthdr);
1402 }
1403
1404 if (mark.value)
1405 {
1406 struct xfrm_mark *mrk;
1407
1408 rthdr->rta_type = XFRMA_MARK;
1409 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1410
1411 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1412 if (hdr->nlmsg_len > sizeof(request))
1413 {
1414 goto failed;
1415 }
1416
1417 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1418 mrk->v = mark.value;
1419 mrk->m = mark.mask;
1420 rthdr = XFRM_RTA_NEXT(rthdr);
1421 }
1422
1423 if (tfc)
1424 {
1425 u_int32_t *tfcpad;
1426
1427 rthdr->rta_type = XFRMA_TFCPAD;
1428 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1429
1430 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1431 if (hdr->nlmsg_len > sizeof(request))
1432 {
1433 goto failed;
1434 }
1435
1436 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1437 *tfcpad = tfc;
1438 rthdr = XFRM_RTA_NEXT(rthdr);
1439 }
1440
1441 if (protocol != IPPROTO_COMP)
1442 {
1443 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1444 {
1445 /* for ESN or larger replay windows we need the new
1446 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1447 struct xfrm_replay_state_esn *replay;
1448
1449 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1450 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1451 (this->replay_window + 7) / 8);
1452
1453 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1454 if (hdr->nlmsg_len > sizeof(request))
1455 {
1456 goto failed;
1457 }
1458
1459 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1460 /* bmp_len contains the number of __u32's */
1461 replay->bmp_len = this->replay_bmp;
1462 replay->replay_window = this->replay_window;
1463 DBG2(DBG_KNL, " using replay window of %u packets",
1464 this->replay_window);
1465
1466 rthdr = XFRM_RTA_NEXT(rthdr);
1467 if (esn)
1468 {
1469 DBG2(DBG_KNL, " using extended sequence numbers (ESN)");
1470 sa->flags |= XFRM_STATE_ESN;
1471 }
1472 }
1473 else
1474 {
1475 sa->replay_window = DEFAULT_REPLAY_WINDOW;
1476 }
1477 }
1478
1479 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1480 {
1481 if (mark.value)
1482 {
1483 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1484 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1485 }
1486 else
1487 {
1488 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1489 }
1490 goto failed;
1491 }
1492
1493 status = SUCCESS;
1494
1495 failed:
1496 memwipe(request, sizeof(request));
1497 return status;
1498 }
1499
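/*
 * Illustrative sketch (not part of the original source) of the message built
 * by add_sa() for a typical ESP SA using an AEAD cipher, UDP encapsulation
 * and a mark:
 *
 *   struct nlmsghdr               XFRM_MSG_NEWSA (or XFRM_MSG_UPDSA inbound)
 *   struct xfrm_usersa_info       addresses, SPI, mode, reqid, lifetimes
 *   rtattr XFRMA_ALG_AEAD         struct xfrm_algo_aead + key material
 *   rtattr XFRMA_ENCAP            struct xfrm_encap_tmpl (UDP ports)
 *   rtattr XFRMA_MARK             struct xfrm_mark
 *   rtattr XFRMA_REPLAY_ESN_VAL   only for ESN or enlarged replay windows
 *
 * Each attribute is appended with the XFRM_RTA*() macros from the top of this
 * file, and nlmsg_len grows by RTA_ALIGN(rta_len) for every attribute added.
 */
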
1500 /**
1501 * Get the replay state (i.e. sequence numbers) of an SA.
1502 *
1503 * Allocates a copy of the replay state received from the kernel into one of the two out parameters (ESN or legacy).
1504 */
1505 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1506 u_int32_t spi, u_int8_t protocol, host_t *dst,
1507 struct xfrm_replay_state_esn **replay_esn,
1508 struct xfrm_replay_state **replay)
1509 {
1510 netlink_buf_t request;
1511 struct nlmsghdr *hdr, *out = NULL;
1512 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1513 size_t len;
1514 struct rtattr *rta;
1515 size_t rtasize;
1516
1517 memset(&request, 0, sizeof(request));
1518
1519 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1520 ntohl(spi));
1521
1522 hdr = (struct nlmsghdr*)request;
1523 hdr->nlmsg_flags = NLM_F_REQUEST;
1524 hdr->nlmsg_type = XFRM_MSG_GETAE;
1525 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1526
1527 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1528 aevent_id->flags = XFRM_AE_RVAL;
1529
1530 host2xfrm(dst, &aevent_id->sa_id.daddr);
1531 aevent_id->sa_id.spi = spi;
1532 aevent_id->sa_id.proto = protocol;
1533 aevent_id->sa_id.family = dst->get_family(dst);
1534
1535 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1536 {
1537 hdr = out;
1538 while (NLMSG_OK(hdr, len))
1539 {
1540 switch (hdr->nlmsg_type)
1541 {
1542 case XFRM_MSG_NEWAE:
1543 {
1544 out_aevent = NLMSG_DATA(hdr);
1545 break;
1546 }
1547 case NLMSG_ERROR:
1548 {
1549 struct nlmsgerr *err = NLMSG_DATA(hdr);
1550 DBG1(DBG_KNL, "querying replay state from SAD entry "
1551 "failed: %s (%d)", strerror(-err->error),
1552 -err->error);
1553 break;
1554 }
1555 default:
1556 hdr = NLMSG_NEXT(hdr, len);
1557 continue;
1558 case NLMSG_DONE:
1559 break;
1560 }
1561 break;
1562 }
1563 }
1564
1565 if (out_aevent)
1566 {
1567 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1568 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1569 while (RTA_OK(rta, rtasize))
1570 {
1571 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1572 RTA_PAYLOAD(rta) == sizeof(**replay))
1573 {
1574 *replay = malloc(RTA_PAYLOAD(rta));
1575 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1576 break;
1577 }
1578 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1579 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1580 {
1581 *replay_esn = malloc(RTA_PAYLOAD(rta));
1582 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1583 break;
1584 }
1585 rta = RTA_NEXT(rta, rtasize);
1586 }
1587 }
1588 free(out);
1589 }
1590
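/*
 * For reference (not part of the original source), the two structures fetched
 * above are defined in linux/xfrm.h roughly as follows; check the kernel
 * headers in use for the authoritative definition:
 *
 *   struct xfrm_replay_state {
 *       __u32 oseq, seq, bitmap;
 *   };
 *
 *   struct xfrm_replay_state_esn {
 *       unsigned int bmp_len;               number of __u32 blocks in bmp
 *       __u32 oseq, seq, oseq_hi, seq_hi;
 *       __u32 replay_window;
 *       __u32 bmp[0];                       variable-length replay bitmap
 *   };
 */
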
1591 METHOD(kernel_ipsec_t, query_sa, status_t,
1592 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1593 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1594 {
1595 netlink_buf_t request;
1596 struct nlmsghdr *out = NULL, *hdr;
1597 struct xfrm_usersa_id *sa_id;
1598 struct xfrm_usersa_info *sa = NULL;
1599 status_t status = FAILED;
1600 size_t len;
1601
1602 memset(&request, 0, sizeof(request));
1603
1604 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%8x)",
1605 ntohl(spi), mark.value, mark.mask);
1606
1607 hdr = (struct nlmsghdr*)request;
1608 hdr->nlmsg_flags = NLM_F_REQUEST;
1609 hdr->nlmsg_type = XFRM_MSG_GETSA;
1610 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1611
1612 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1613 host2xfrm(dst, &sa_id->daddr);
1614 sa_id->spi = spi;
1615 sa_id->proto = protocol;
1616 sa_id->family = dst->get_family(dst);
1617
1618 if (mark.value)
1619 {
1620 struct xfrm_mark *mrk;
1621 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1622
1623 rthdr->rta_type = XFRMA_MARK;
1624 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1625 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1626 if (hdr->nlmsg_len > sizeof(request))
1627 {
1628 return FAILED;
1629 }
1630
1631 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1632 mrk->v = mark.value;
1633 mrk->m = mark.mask;
1634 }
1635
1636 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1637 {
1638 hdr = out;
1639 while (NLMSG_OK(hdr, len))
1640 {
1641 switch (hdr->nlmsg_type)
1642 {
1643 case XFRM_MSG_NEWSA:
1644 {
1645 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1646 break;
1647 }
1648 case NLMSG_ERROR:
1649 {
1650 struct nlmsgerr *err = NLMSG_DATA(hdr);
1651
1652 if (mark.value)
1653 {
1654 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1655 "(mark %u/0x%8x) failed: %s (%d)",
1656 ntohl(spi), mark.value, mark.mask,
1657 strerror(-err->error), -err->error);
1658 }
1659 else
1660 {
1661 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1662 "failed: %s (%d)", ntohl(spi),
1663 strerror(-err->error), -err->error);
1664 }
1665 break;
1666 }
1667 default:
1668 hdr = NLMSG_NEXT(hdr, len);
1669 continue;
1670 case NLMSG_DONE:
1671 break;
1672 }
1673 break;
1674 }
1675 }
1676
1677 if (sa == NULL)
1678 {
1679 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1680 }
1681 else
1682 {
1683 *bytes = sa->curlft.bytes;
1684 status = SUCCESS;
1685 }
1686 memwipe(out, len);
1687 free(out);
1688 return status;
1689 }
1690
1691 METHOD(kernel_ipsec_t, del_sa, status_t,
1692 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1693 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1694 {
1695 netlink_buf_t request;
1696 struct nlmsghdr *hdr;
1697 struct xfrm_usersa_id *sa_id;
1698
1699 /* if IPComp was used, we first delete the additional IPComp SA */
1700 if (cpi)
1701 {
1702 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1703 }
1704
1705 memset(&request, 0, sizeof(request));
1706
1707 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%8x)",
1708 ntohl(spi), mark.value, mark.mask);
1709
1710 hdr = (struct nlmsghdr*)request;
1711 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1712 hdr->nlmsg_type = XFRM_MSG_DELSA;
1713 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1714
1715 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1716 host2xfrm(dst, &sa_id->daddr);
1717 sa_id->spi = spi;
1718 sa_id->proto = protocol;
1719 sa_id->family = dst->get_family(dst);
1720
1721 if (mark.value)
1722 {
1723 struct xfrm_mark *mrk;
1724 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1725
1726 rthdr->rta_type = XFRMA_MARK;
1727 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1728 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1729 if (hdr->nlmsg_len > sizeof(request))
1730 {
1731 return FAILED;
1732 }
1733
1734 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1735 mrk->v = mark.value;
1736 mrk->m = mark.mask;
1737 }
1738
1739 switch (this->socket_xfrm->send_ack(this->socket_xfrm, hdr))
1740 {
1741 case SUCCESS:
1742 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%08x)",
1743 ntohl(spi), mark.value, mark.mask);
1744 return SUCCESS;
1745 case NOT_FOUND:
1746 return NOT_FOUND;
1747 default:
1748 if (mark.value)
1749 {
1750 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1751 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1752 }
1753 else
1754 {
1755 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x",
1756 ntohl(spi));
1757 }
1758 return FAILED;
1759 }
1760 }
1761
1762 METHOD(kernel_ipsec_t, update_sa, status_t,
1763 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1764 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1765 bool old_encap, bool new_encap, mark_t mark)
1766 {
1767 netlink_buf_t request;
1768 u_char *pos;
1769 struct nlmsghdr *hdr, *out = NULL;
1770 struct xfrm_usersa_id *sa_id;
1771 struct xfrm_usersa_info *out_sa = NULL, *sa;
1772 size_t len;
1773 struct rtattr *rta;
1774 size_t rtasize;
1775 struct xfrm_encap_tmpl* tmpl = NULL;
1776 struct xfrm_replay_state *replay = NULL;
1777 struct xfrm_replay_state_esn *replay_esn = NULL;
1778 status_t status = FAILED;
1779
1780 /* if IPComp is used, we first update the IPComp SA */
1781 if (cpi)
1782 {
1783 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1784 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1785 }
1786
1787 memset(&request, 0, sizeof(request));
1788
1789 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1790
1791 /* query the existing SA first */
1792 hdr = (struct nlmsghdr*)request;
1793 hdr->nlmsg_flags = NLM_F_REQUEST;
1794 hdr->nlmsg_type = XFRM_MSG_GETSA;
1795 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1796
1797 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1798 host2xfrm(dst, &sa_id->daddr);
1799 sa_id->spi = spi;
1800 sa_id->proto = protocol;
1801 sa_id->family = dst->get_family(dst);
1802
1803 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1804 {
1805 hdr = out;
1806 while (NLMSG_OK(hdr, len))
1807 {
1808 switch (hdr->nlmsg_type)
1809 {
1810 case XFRM_MSG_NEWSA:
1811 {
1812 out_sa = NLMSG_DATA(hdr);
1813 break;
1814 }
1815 case NLMSG_ERROR:
1816 {
1817 struct nlmsgerr *err = NLMSG_DATA(hdr);
1818 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1819 strerror(-err->error), -err->error);
1820 break;
1821 }
1822 default:
1823 hdr = NLMSG_NEXT(hdr, len);
1824 continue;
1825 case NLMSG_DONE:
1826 break;
1827 }
1828 break;
1829 }
1830 }
1831 if (out_sa == NULL)
1832 {
1833 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1834 goto failed;
1835 }
1836
1837 get_replay_state(this, spi, protocol, dst, &replay_esn, &replay);
1838
1839 /* delete the old SA (without affecting the IPComp SA) */
1840 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1841 {
1842 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x",
1843 ntohl(spi));
1844 goto failed;
1845 }
1846
1847 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1848 ntohl(spi), src, dst, new_src, new_dst);
1849 /* copy over the SA from out to request */
1850 hdr = (struct nlmsghdr*)request;
1851 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1852 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1853 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1854 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1855 sa = NLMSG_DATA(hdr);
1856 sa->family = new_dst->get_family(new_dst);
1857
1858 if (!src->ip_equals(src, new_src))
1859 {
1860 host2xfrm(new_src, &sa->saddr);
1861 }
1862 if (!dst->ip_equals(dst, new_dst))
1863 {
1864 host2xfrm(new_dst, &sa->id.daddr);
1865 }
1866
1867 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1868 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1869 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1870 while(RTA_OK(rta, rtasize))
1871 {
1872 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1873 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1874 {
1875 if (rta->rta_type == XFRMA_ENCAP)
1876 { /* update encap tmpl */
1877 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1878 tmpl->encap_sport = htons(new_src->get_port(new_src));
1879 tmpl->encap_dport = htons(new_dst->get_port(new_dst));
1880 }
1881 memcpy(pos, rta, rta->rta_len);
1882 pos += RTA_ALIGN(rta->rta_len);
1883 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1884 }
1885 rta = RTA_NEXT(rta, rtasize);
1886 }
1887
1888 rta = (struct rtattr*)pos;
1889 if (tmpl == NULL && new_encap)
1890 { /* add tmpl if we are enabling it */
1891 rta->rta_type = XFRMA_ENCAP;
1892 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1893
1894 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1895 if (hdr->nlmsg_len > sizeof(request))
1896 {
1897 goto failed;
1898 }
1899
1900 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1901 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1902 tmpl->encap_sport = htons(new_src->get_port(new_src));
1903 tmpl->encap_dport = htons(new_dst->get_port(new_dst));
1904 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1905
1906 rta = XFRM_RTA_NEXT(rta);
1907 }
1908
1909 if (replay_esn)
1910 {
1911 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1912 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1913 this->replay_bmp);
1914
1915 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1916 if (hdr->nlmsg_len > sizeof(request))
1917 {
1918 goto failed;
1919 }
1920 memcpy(RTA_DATA(rta), replay_esn,
1921 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1922
1923 rta = XFRM_RTA_NEXT(rta);
1924 }
1925 else if (replay)
1926 {
1927 rta->rta_type = XFRMA_REPLAY_VAL;
1928 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1929
1930 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1931 if (hdr->nlmsg_len > sizeof(request))
1932 {
1933 goto failed;
1934 }
1935 memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
1936
1937 rta = XFRM_RTA_NEXT(rta);
1938 }
1939 else
1940 {
1941 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1942 "with SPI %.8x", ntohl(spi));
1943 }
1944
1945 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1946 {
1947 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1948 goto failed;
1949 }
1950
1951 status = SUCCESS;
1952 failed:
1953 free(replay);
1954 free(replay_esn);
1955 memwipe(out, len);
1956 memwipe(request, sizeof(request));
1957 free(out);
1958
1959 return status;
1960 }
1961
1962 METHOD(kernel_ipsec_t, flush_sas, status_t,
1963 private_kernel_netlink_ipsec_t *this)
1964 {
1965 netlink_buf_t request;
1966 struct nlmsghdr *hdr;
1967 struct xfrm_usersa_flush *flush;
1968
1969 memset(&request, 0, sizeof(request));
1970
1971 DBG2(DBG_KNL, "flushing all SAD entries");
1972
1973 hdr = (struct nlmsghdr*)request;
1974 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1975 hdr->nlmsg_type = XFRM_MSG_FLUSHSA;
1976 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
1977
1978 flush = (struct xfrm_usersa_flush*)NLMSG_DATA(hdr);
1979 flush->proto = IPSEC_PROTO_ANY;
1980
1981 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1982 {
1983 DBG1(DBG_KNL, "unable to flush SAD entries");
1984 return FAILED;
1985 }
1986 return SUCCESS;
1987 }
1988
1989 /**
1990 * Add or update a policy in the kernel.
1991 *
1992 * Note: The mutex has to be locked when entering this function
1993 * and is unlocked here in any case.
1994 */
1995 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
1996 policy_entry_t *policy, policy_sa_t *mapping, bool update)
1997 {
1998 netlink_buf_t request;
1999 policy_entry_t clone;
2000 ipsec_sa_t *ipsec = mapping->sa;
2001 struct xfrm_userpolicy_info *policy_info;
2002 struct nlmsghdr *hdr;
2003 int i;
2004
2005 /* clone the policy so we can look it up again after sending the request */
2006 memcpy(&clone, policy, sizeof(policy_entry_t));
2007
2008 memset(&request, 0, sizeof(request));
2009 hdr = (struct nlmsghdr*)request;
2010 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2011 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
2012 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
2013
2014 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2015 policy_info->sel = policy->sel;
2016 policy_info->dir = policy->direction;
2017
2018 /* use the priority calculated for this mapping (see get_priority()) */
2019 policy_info->priority = mapping->priority;
2020 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
2021 : XFRM_POLICY_BLOCK;
2022 policy_info->share = XFRM_SHARE_ANY;
2023
2024 /* policies don't expire */
2025 policy_info->lft.soft_byte_limit = XFRM_INF;
2026 policy_info->lft.soft_packet_limit = XFRM_INF;
2027 policy_info->lft.hard_byte_limit = XFRM_INF;
2028 policy_info->lft.hard_packet_limit = XFRM_INF;
2029 policy_info->lft.soft_add_expires_seconds = 0;
2030 policy_info->lft.hard_add_expires_seconds = 0;
2031 policy_info->lft.soft_use_expires_seconds = 0;
2032 policy_info->lft.hard_use_expires_seconds = 0;
2033
2034 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
2035
2036 if (mapping->type == POLICY_IPSEC)
2037 {
2038 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
2039 struct {
2040 u_int8_t proto;
2041 bool use;
2042 } protos[] = {
2043 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
2044 { IPPROTO_ESP, ipsec->cfg.esp.use },
2045 { IPPROTO_AH, ipsec->cfg.ah.use },
2046 };
2047 ipsec_mode_t proto_mode = ipsec->cfg.mode;
2048
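/* add one xfrm_user_tmpl per enabled protocol, in the order IPComp, ESP, AH */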
2049 rthdr->rta_type = XFRMA_TMPL;
2050 rthdr->rta_len = 0; /* actual length is set below */
2051
2052 for (i = 0; i < countof(protos); i++)
2053 {
2054 if (!protos[i].use)
2055 {
2056 continue;
2057 }
2058
2059 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
2060 hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
2061 if (hdr->nlmsg_len > sizeof(request))
2062 {
2063 this->mutex->unlock(this->mutex);
2064 return FAILED;
2065 }
2066
2067 tmpl->reqid = ipsec->cfg.reqid;
2068 tmpl->id.proto = protos[i].proto;
2069 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
2070 tmpl->mode = mode2kernel(proto_mode);
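/* peers may send small packets uncompressed, so the IPComp template is only mandatory for outbound policies */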
2071 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2072 policy->direction != POLICY_OUT;
2073 tmpl->family = ipsec->src->get_family(ipsec->src);
2074
2075 if (proto_mode == MODE_TUNNEL)
2076 { /* only for tunnel mode */
2077 host2xfrm(ipsec->src, &tmpl->saddr);
2078 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2079 }
2080
2081 tmpl++;
2082
2083 /* use transport mode for other SAs */
2084 proto_mode = MODE_TRANSPORT;
2085 }
2086
2087 rthdr = XFRM_RTA_NEXT(rthdr);
2088 }
2089
2090 if (ipsec->mark.value)
2091 {
2092 struct xfrm_mark *mrk;
2093
2094 rthdr->rta_type = XFRMA_MARK;
2095 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2096
2097 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2098 if (hdr->nlmsg_len > sizeof(request))
2099 {
2100 this->mutex->unlock(this->mutex);
2101 return FAILED;
2102 }
2103
2104 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2105 mrk->v = ipsec->mark.value;
2106 mrk->m = ipsec->mark.mask;
2107 }
2108 this->mutex->unlock(this->mutex);
2109
2110 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2111 {
2112 return FAILED;
2113 }
2114
2115 /* find the policy again */
2116 this->mutex->lock(this->mutex);
2117 policy = this->policies->get(this->policies, &clone);
2118 if (!policy ||
2119 policy->used_by->find_first(policy->used_by,
2120 NULL, (void**)&mapping) != SUCCESS)
2121 { /* policy or mapping is already gone, ignore */
2122 this->mutex->unlock(this->mutex);
2123 return SUCCESS;
2124 }
2125
2126 /* install a route, if:
2127 * - this is a forward policy (to just get one for each child)
2128 * - we are in tunnel/BEET mode
2129 * - routing is not disabled via strongswan.conf
2130 */
2131 if (policy->direction == POLICY_FWD &&
2132 ipsec->cfg.mode != MODE_TRANSPORT && this->install_routes)
2133 {
2134 route_entry_t *route = malloc_thing(route_entry_t);
2135 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2136
2137 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2138 fwd->dst_ts, &route->src_ip) == SUCCESS)
2139 {
2140 /* get the nexthop to src (src as we are in POLICY_FWD) */
2141 route->gateway = hydra->kernel_interface->get_nexthop(
2142 hydra->kernel_interface, ipsec->src);
2143 /* install route via outgoing interface */
2144 route->if_name = hydra->kernel_interface->get_interface(
2145 hydra->kernel_interface, ipsec->dst);
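/* the route covers the remote network, i.e. the source of the FWD selector */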
2146 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2147 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2148 route->prefixlen = policy->sel.prefixlen_s;
2149
2150 if (!route->if_name)
2151 {
2152 this->mutex->unlock(this->mutex);
2153 route_entry_destroy(route);
2154 return SUCCESS;
2155 }
2156
2157 if (policy->route)
2158 {
2159 route_entry_t *old = policy->route;
2160 if (route_entry_equals(old, route))
2161 {
2162 this->mutex->unlock(this->mutex);
2163 route_entry_destroy(route);
2164 return SUCCESS;
2165 }
2166 /* uninstall previously installed route */
2167 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2168 old->dst_net, old->prefixlen, old->gateway,
2169 old->src_ip, old->if_name) != SUCCESS)
2170 {
2171 DBG1(DBG_KNL, "error uninstalling route installed with "
2172 "policy %R === %R %N", fwd->src_ts,
2173 fwd->dst_ts, policy_dir_names,
2174 policy->direction);
2175 }
2176 route_entry_destroy(old);
2177 policy->route = NULL;
2178 }
2179
2180 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2181 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2182 switch (hydra->kernel_interface->add_route(
2183 hydra->kernel_interface, route->dst_net,
2184 route->prefixlen, route->gateway,
2185 route->src_ip, route->if_name))
2186 {
2187 default:
2188 DBG1(DBG_KNL, "unable to install source route for %H",
2189 route->src_ip);
2190 /* FALL through */
2191 case ALREADY_DONE:
2192 /* route exists, do not uninstall */
2193 route_entry_destroy(route);
2194 break;
2195 case SUCCESS:
2196 /* cache the installed route */
2197 policy->route = route;
2198 break;
2199 }
2200 }
2201 else
2202 {
2203 free(route);
2204 }
2205 }
2206 this->mutex->unlock(this->mutex);
2207 return SUCCESS;
2208 }
2209
2210 METHOD(kernel_ipsec_t, add_policy, status_t,
2211 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
2212 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
2213 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
2214 mark_t mark, policy_priority_t priority)
2215 {
2216 policy_entry_t *policy, *current;
2217 policy_sa_t *assigned_sa, *current_sa;
2218 enumerator_t *enumerator;
2219 bool found = FALSE, update = TRUE;
2220
2221 /* create a policy */
2222 INIT(policy,
2223 .sel = ts2selector(src_ts, dst_ts),
2224 .mark = mark.value & mark.mask,
2225 .direction = direction,
2226 );
2227
2228 /* find the policy that matches EXACTLY */
2229 this->mutex->lock(this->mutex);
2230 current = this->policies->get(this->policies, policy);
2231 if (current)
2232 {
2233 /* use existing policy */
2234 DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%8x) "
2235 "already exists, increasing refcount",
2236 src_ts, dst_ts, policy_dir_names, direction,
2237 mark.value, mark.mask);
2238 policy_entry_destroy(this, policy);
2239 policy = current;
2240 found = TRUE;
2241 }
2242 else
2243 { /* use the new one, if we have no such policy */
2244 policy->used_by = linked_list_create();
2245 this->policies->put(this->policies, policy, policy);
2246 }
2247
2248 /* cache the assigned IPsec SA */
2249 assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
2250 dst_ts, mark, sa);
2251 assigned_sa->priority = get_priority(policy, priority);
2252
2253 if (this->policy_history)
2254 { /* insert the SA according to its priority */
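/* the kernel policy only gets (re)installed if the new mapping ends up at the front of the list, i.e. outranks all existing mappings */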
2255 enumerator = policy->used_by->create_enumerator(policy->used_by);
2256 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2257 {
2258 if (current_sa->priority >= assigned_sa->priority)
2259 {
2260 break;
2261 }
2262 update = FALSE;
2263 }
2264 policy->used_by->insert_before(policy->used_by, enumerator,
2265 assigned_sa);
2266 enumerator->destroy(enumerator);
2267 }
2268 else
2269 { /* simply insert it last and only update if it is not installed yet */
2270 policy->used_by->insert_last(policy->used_by, assigned_sa);
2271 update = !found;
2272 }
2273
2274 if (!update)
2275 { /* we don't update the policy if the priority is lower than that of
2276 * the currently installed one */
2277 this->mutex->unlock(this->mutex);
2278 return SUCCESS;
2279 }
2280
2281 DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%8x)",
2282 found ? "updating" : "adding", src_ts, dst_ts,
2283 policy_dir_names, direction, mark.value, mark.mask);
2284
2285 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2286 {
2287 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2288 found ? "update" : "add", src_ts, dst_ts,
2289 policy_dir_names, direction);
2290 return FAILED;
2291 }
2292 return SUCCESS;
2293 }
2294
2295 METHOD(kernel_ipsec_t, query_policy, status_t,
2296 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2297 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2298 u_int32_t *use_time)
2299 {
2300 netlink_buf_t request;
2301 struct nlmsghdr *out = NULL, *hdr;
2302 struct xfrm_userpolicy_id *policy_id;
2303 struct xfrm_userpolicy_info *policy = NULL;
2304 size_t len;
2305
2306 memset(&request, 0, sizeof(request));
2307
2308 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%8x)",
2309 src_ts, dst_ts, policy_dir_names, direction,
2310 mark.value, mark.mask);
2311
2312 hdr = (struct nlmsghdr*)request;
2313 hdr->nlmsg_flags = NLM_F_REQUEST;
2314 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2315 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2316
2317 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2318 policy_id->sel = ts2selector(src_ts, dst_ts);
2319 policy_id->dir = direction;
2320
2321 if (mark.value)
2322 {
2323 struct xfrm_mark *mrk;
2324 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2325
2326 rthdr->rta_type = XFRMA_MARK;
2327 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2328
2329 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2330 if (hdr->nlmsg_len > sizeof(request))
2331 {
2332 return FAILED;
2333 }
2334
2335 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2336 mrk->v = mark.value;
2337 mrk->m = mark.mask;
2338 }
2339
2340 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2341 {
2342 hdr = out;
2343 while (NLMSG_OK(hdr, len))
2344 {
2345 switch (hdr->nlmsg_type)
2346 {
2347 case XFRM_MSG_NEWPOLICY:
2348 {
2349 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2350 break;
2351 }
2352 case NLMSG_ERROR:
2353 {
2354 struct nlmsgerr *err = NLMSG_DATA(hdr);
2355 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2356 strerror(-err->error), -err->error);
2357 break;
2358 }
2359 default:
2360 hdr = NLMSG_NEXT(hdr, len);
2361 continue;
2362 case NLMSG_DONE:
2363 break;
2364 }
2365 break;
2366 }
2367 }
2368
2369 if (policy == NULL)
2370 {
2371 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2372 policy_dir_names, direction);
2373 free(out);
2374 return FAILED;
2375 }
2376
2377 if (policy->curlft.use_time)
2378 {
2379 /* we need the monotonic time, but the kernel returns system time. */
2380 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2381 }
2382 else
2383 {
2384 *use_time = 0;
2385 }
2386
2387 free(out);
2388 return SUCCESS;
2389 }
2390
2391 METHOD(kernel_ipsec_t, del_policy, status_t,
2392 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2393 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2394 mark_t mark, policy_priority_t prio)
2395 {
2396 policy_entry_t *current, policy;
2397 enumerator_t *enumerator;
2398 policy_sa_t *mapping;
2399 netlink_buf_t request;
2400 struct nlmsghdr *hdr;
2401 struct xfrm_userpolicy_id *policy_id;
2402 bool is_installed = TRUE;
2403 u_int32_t priority;
2404
2405 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x)",
2406 src_ts, dst_ts, policy_dir_names, direction,
2407 mark.value, mark.mask);
2408
2409 /* create a policy */
2410 memset(&policy, 0, sizeof(policy_entry_t));
2411 policy.sel = ts2selector(src_ts, dst_ts);
2412 policy.mark = mark.value & mark.mask;
2413 policy.direction = direction;
2414
2415 /* find the policy */
2416 this->mutex->lock(this->mutex);
2417 current = this->policies->get(this->policies, &policy);
2418 if (!current)
2419 {
2420 if (mark.value)
2421 {
2422 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x) "
2423 "failed, not found", src_ts, dst_ts, policy_dir_names,
2424 direction, mark.value, mark.mask);
2425 }
2426 else
2427 {
2428 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2429 src_ts, dst_ts, policy_dir_names, direction);
2430 }
2431 this->mutex->unlock(this->mutex);
2432 return NOT_FOUND;
2433 }
2434
2435 if (this->policy_history)
2436 { /* remove mapping to SA by reqid and priority */
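/* is_installed remains TRUE only if the removed mapping was the first in the list, i.e. the one currently installed in the kernel */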
2437 priority = get_priority(current, prio);
2438 enumerator = current->used_by->create_enumerator(current->used_by);
2439 while (enumerator->enumerate(enumerator, (void**)&mapping))
2440 {
2441 if (reqid == mapping->sa->cfg.reqid &&
2442 priority == mapping->priority)
2443 {
2444 current->used_by->remove_at(current->used_by, enumerator);
2445 policy_sa_destroy(mapping, &direction, this);
2446 break;
2447 }
2448 is_installed = FALSE;
2449 }
2450 enumerator->destroy(enumerator);
2451 }
2452 else
2453 { /* remove one of the SAs but don't update the policy */
2454 current->used_by->remove_last(current->used_by, (void**)&mapping);
2455 policy_sa_destroy(mapping, &direction, this);
2456 is_installed = FALSE;
2457 }
2458
2459 if (current->used_by->get_count(current->used_by) > 0)
2460 { /* policy is still used by other SAs, keep it in the kernel */
2461 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2462 if (!is_installed)
2463 { /* no need to update as the policy was not installed for this SA */
2464 this->mutex->unlock(this->mutex);
2465 return SUCCESS;
2466 }
2467
2468 DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%8x)",
2469 src_ts, dst_ts, policy_dir_names, direction,
2470 mark.value, mark.mask);
2471
2472 current->used_by->get_first(current->used_by, (void**)&mapping);
2473 if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
2474 {
2475 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2476 src_ts, dst_ts, policy_dir_names, direction);
2477 return FAILED;
2478 }
2479 return SUCCESS;
2480 }
2481
2482 memset(&request, 0, sizeof(request));
2483
2484 hdr = (struct nlmsghdr*)request;
2485 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2486 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2487 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2488
2489 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2490 policy_id->sel = current->sel;
2491 policy_id->dir = direction;
2492
2493 if (mark.value)
2494 {
2495 struct xfrm_mark *mrk;
2496 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2497
2498 rthdr->rta_type = XFRMA_MARK;
2499 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2500 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2501 if (hdr->nlmsg_len > sizeof(request))
2502 {
2503 this->mutex->unlock(this->mutex);
2504 return FAILED;
2505 }
2506
2507 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2508 mrk->v = mark.value;
2509 mrk->m = mark.mask;
2510 }
2511
2512 if (current->route)
2513 {
2514 route_entry_t *route = current->route;
2515 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2516 route->dst_net, route->prefixlen, route->gateway,
2517 route->src_ip, route->if_name) != SUCCESS)
2518 {
2519 DBG1(DBG_KNL, "error uninstalling route installed with "
2520 "policy %R === %R %N", src_ts, dst_ts,
2521 policy_dir_names, direction);
2522 }
2523 }
2524
2525 this->policies->remove(this->policies, current);
2526 policy_entry_destroy(this, current);
2527 this->mutex->unlock(this->mutex);
2528
2529 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2530 {
2531 if (mark.value)
2532 {
2533 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2534 "(mark %u/0x%8x)", src_ts, dst_ts, policy_dir_names,
2535 direction, mark.value, mark.mask);
2536 }
2537 else
2538 {
2539 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2540 src_ts, dst_ts, policy_dir_names, direction);
2541 }
2542 return FAILED;
2543 }
2544 return SUCCESS;
2545 }
2546
2547 METHOD(kernel_ipsec_t, flush_policies, status_t,
2548 private_kernel_netlink_ipsec_t *this)
2549 {
2550 netlink_buf_t request;
2551 struct nlmsghdr *hdr;
2552
2553 memset(&request, 0, sizeof(request));
2554
2555 DBG2(DBG_KNL, "flushing all policies from SPD");
2556
2557 hdr = (struct nlmsghdr*)request;
2558 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2559 hdr->nlmsg_type = XFRM_MSG_FLUSHPOLICY;
2560 hdr->nlmsg_len = NLMSG_LENGTH(0); /* no data associated */
2561
2562 /* by adding an rtattr of type XFRMA_POLICY_TYPE we could restrict this
2563 * to main or sub policies (default is main) */
2564
2565 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2566 {
2567 DBG1(DBG_KNL, "unable to flush SPD entries");
2568 return FAILED;
2569 }
2570 return SUCCESS;
2571 }
2572
2573
2574 METHOD(kernel_ipsec_t, bypass_socket, bool,
2575 private_kernel_netlink_ipsec_t *this, int fd, int family)
2576 {
2577 struct xfrm_userpolicy_info policy;
2578 u_int sol, ipsec_policy;
2579
2580 switch (family)
2581 {
2582 case AF_INET:
2583 sol = SOL_IP;
2584 ipsec_policy = IP_XFRM_POLICY;
2585 break;
2586 case AF_INET6:
2587 sol = SOL_IPV6;
2588 ipsec_policy = IPV6_XFRM_POLICY;
2589 break;
2590 default:
2591 return FALSE;
2592 }
2593
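/* install an allow policy directly on the socket for both directions, exempting IKE traffic from the IPsec policies in the SPD */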
2594 memset(&policy, 0, sizeof(policy));
2595 policy.action = XFRM_POLICY_ALLOW;
2596 policy.sel.family = family;
2597
2598 policy.dir = XFRM_POLICY_OUT;
2599 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2600 {
2601 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2602 strerror(errno));
2603 return FALSE;
2604 }
2605 policy.dir = XFRM_POLICY_IN;
2606 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2607 {
2608 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2609 strerror(errno));
2610 return FALSE;
2611 }
2612 return TRUE;
2613 }
2614
2615 METHOD(kernel_ipsec_t, destroy, void,
2616 private_kernel_netlink_ipsec_t *this)
2617 {
2618 enumerator_t *enumerator;
2619 policy_entry_t *policy;
2620
2621 if (this->job)
2622 {
2623 this->job->cancel(this->job);
2624 }
2625 if (this->socket_xfrm_events > 0)
2626 {
2627 close(this->socket_xfrm_events);
2628 }
2629 DESTROY_IF(this->socket_xfrm);
2630 enumerator = this->policies->create_enumerator(this->policies);
2631 while (enumerator->enumerate(enumerator, &policy, &policy))
2632 {
2633 policy_entry_destroy(this, policy);
2634 }
2635 enumerator->destroy(enumerator);
2636 this->policies->destroy(this->policies);
2637 this->sas->destroy(this->sas);
2638 this->mutex->destroy(this->mutex);
2639 free(this);
2640 }
2641
2642 /*
2643 * Described in header.
2644 */
2645 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2646 {
2647 private_kernel_netlink_ipsec_t *this;
2648 bool register_for_events = TRUE;
2649 int fd;
2650
2651 INIT(this,
2652 .public = {
2653 .interface = {
2654 .get_spi = _get_spi,
2655 .get_cpi = _get_cpi,
2656 .add_sa = _add_sa,
2657 .update_sa = _update_sa,
2658 .query_sa = _query_sa,
2659 .del_sa = _del_sa,
2660 .flush_sas = _flush_sas,
2661 .add_policy = _add_policy,
2662 .query_policy = _query_policy,
2663 .del_policy = _del_policy,
2664 .flush_policies = _flush_policies,
2665 .bypass_socket = _bypass_socket,
2666 .destroy = _destroy,
2667 },
2668 },
2669 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2670 (hashtable_equals_t)policy_equals, 32),
2671 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2672 (hashtable_equals_t)ipsec_sa_equals, 32),
2673 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2674 .policy_history = TRUE,
2675 .install_routes = lib->settings->get_bool(lib->settings,
2676 "%s.install_routes", TRUE, hydra->daemon),
2677 .replay_window = lib->settings->get_int(lib->settings,
2678 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2679 );
2680
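/* number of 32-bit words needed to store the replay window bitmap */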
2681 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2682 (sizeof(u_int32_t) * 8);
2683
2684 if (streq(hydra->daemon, "pluto"))
2685 { /* no routes for pluto; they are installed via the updown script */
2686 this->install_routes = FALSE;
2687 /* no policy history for pluto */
2688 this->policy_history = FALSE;
2689 }
2690 else if (streq(hydra->daemon, "starter"))
2691 { /* starter has no threads, so we do not register for kernel events */
2692 register_for_events = FALSE;
2693 }
2694
2695 /* extend the lifetime of allocated SPIs (acquire state) in the kernel to 165 seconds */
2696 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2697 if (fd != -1)
2698 {
2699 ignore_result(write(fd, "165", 3));
2700 close(fd);
2701 }
2702
2703 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2704 if (!this->socket_xfrm)
2705 {
2706 destroy(this);
2707 return NULL;
2708 }
2709
2710 if (register_for_events)
2711 {
2712 struct sockaddr_nl addr;
2713
2714 memset(&addr, 0, sizeof(addr));
2715 addr.nl_family = AF_NETLINK;
2716
2717 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2718 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2719 if (this->socket_xfrm_events <= 0)
2720 {
2721 DBG1(DBG_KNL, "unable to create XFRM event socket");
2722 destroy(this);
2723 return NULL;
2724 }
2725 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2726 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2727 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2728 {
2729 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2730 destroy(this);
2731 return NULL;
2732 }
2733 this->job = callback_job_create_with_prio((callback_job_cb_t)receive_events,
2734 this, NULL, NULL, JOB_PRIO_CRITICAL);
2735 lib->processor->queue_job(lib->processor, (job_t*)this->job);
2736 }
2737
2738 return &this->public;
2739 }
2740