Consistently log XFRM mark masks with 0 prefix in kernel-netlink plugin
[strongswan.git] src/libhydra/plugins/kernel_netlink/kernel_netlink_ipsec.c
1 /*
2 * Copyright (C) 2006-2012 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <utils/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** Required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** From linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /** Missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /* from linux/udp.h */
62 #ifndef UDP_ENCAP
63 #define UDP_ENCAP 100
64 #endif
65
66 #ifndef UDP_ENCAP_ESPINUDP
67 #define UDP_ENCAP_ESPINUDP 2
68 #endif
69
70 /* this is not defined on some platforms */
71 #ifndef SOL_UDP
72 #define SOL_UDP IPPROTO_UDP
73 #endif
74
75 /** Default priority of installed policies */
76 #define PRIO_BASE 512
77
78 /** Default replay window size, if not set using charon.replay_window */
79 #define DEFAULT_REPLAY_WINDOW 32
80
81 /**
82 * Map the limit for bytes and packets to XFRM_INF by default
83 */
84 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
85
86 /**
87 * Create ORable bitfield of XFRM NL groups
88 */
89 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
90
91 /**
92 * Returns a pointer to the first rtattr following the nlmsghdr *nlh and the
93 * 'usual' netlink data x like 'struct xfrm_usersa_info'
94 */
95 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + \
96 NLMSG_ALIGN(sizeof(x))))
97 /**
98 * Returns a pointer to the next rtattr following rta.
99 * !!! Do not use this to parse messages. Use RTA_NEXT and RTA_OK instead !!!
100 */
101 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + \
102 RTA_ALIGN((rta)->rta_len)))
103 /**
104 * Returns the total size of attached rta data
105 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
106 */
107 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
108
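/*
 * Illustrative sketch (not part of the original source): the macros above are
 * combined with the standard RTA_* macros to walk the attributes attached to
 * an XFRM message, e.g. as done in process_acquire() below.  The payload type
 * is just an example here.
 *
 *   struct nlmsghdr *hdr = ...;  // message received from the kernel
 *   struct rtattr *rta = XFRM_RTA(hdr, struct xfrm_usersa_info);
 *   size_t rtasize = XFRM_PAYLOAD(hdr, struct xfrm_usersa_info);
 *
 *   while (RTA_OK(rta, rtasize))
 *   {
 *       // RTA_DATA(rta) points to the attribute payload, rta->rta_type
 *       // identifies it (see xfrm_attr_type_names below)
 *       rta = RTA_NEXT(rta, rtasize);
 *   }
 *
 * XFRM_RTA_NEXT() is only used when building outgoing messages, as the
 * warning above says; parsing always goes through RTA_OK()/RTA_NEXT().
 */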
109 typedef struct kernel_algorithm_t kernel_algorithm_t;
110
111 /**
112 * Mapping of IKEv2 kernel identifier to linux crypto API names
113 */
114 struct kernel_algorithm_t {
115 /**
116 * Identifier specified in IKEv2
117 */
118 int ikev2;
119
120 /**
121 * Name of the algorithm in linux crypto API
122 */
123 char *name;
124 };
125
126 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
127 "XFRM_MSG_NEWSA",
128 "XFRM_MSG_DELSA",
129 "XFRM_MSG_GETSA",
130 "XFRM_MSG_NEWPOLICY",
131 "XFRM_MSG_DELPOLICY",
132 "XFRM_MSG_GETPOLICY",
133 "XFRM_MSG_ALLOCSPI",
134 "XFRM_MSG_ACQUIRE",
135 "XFRM_MSG_EXPIRE",
136 "XFRM_MSG_UPDPOLICY",
137 "XFRM_MSG_UPDSA",
138 "XFRM_MSG_POLEXPIRE",
139 "XFRM_MSG_FLUSHSA",
140 "XFRM_MSG_FLUSHPOLICY",
141 "XFRM_MSG_NEWAE",
142 "XFRM_MSG_GETAE",
143 "XFRM_MSG_REPORT",
144 "XFRM_MSG_MIGRATE",
145 "XFRM_MSG_NEWSADINFO",
146 "XFRM_MSG_GETSADINFO",
147 "XFRM_MSG_NEWSPDINFO",
148 "XFRM_MSG_GETSPDINFO",
149 "XFRM_MSG_MAPPING"
150 );
151
152 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_REPLAY_ESN_VAL,
153 "XFRMA_UNSPEC",
154 "XFRMA_ALG_AUTH",
155 "XFRMA_ALG_CRYPT",
156 "XFRMA_ALG_COMP",
157 "XFRMA_ENCAP",
158 "XFRMA_TMPL",
159 "XFRMA_SA",
160 "XFRMA_POLICY",
161 "XFRMA_SEC_CTX",
162 "XFRMA_LTIME_VAL",
163 "XFRMA_REPLAY_VAL",
164 "XFRMA_REPLAY_THRESH",
165 "XFRMA_ETIMER_THRESH",
166 "XFRMA_SRCADDR",
167 "XFRMA_COADDR",
168 "XFRMA_LASTUSED",
169 "XFRMA_POLICY_TYPE",
170 "XFRMA_MIGRATE",
171 "XFRMA_ALG_AEAD",
172 "XFRMA_KMADDRESS",
173 "XFRMA_ALG_AUTH_TRUNC",
174 "XFRMA_MARK",
175 "XFRMA_TFCPAD",
176 "XFRMA_REPLAY_ESN_VAL",
177 );
178
179 #define END_OF_LIST -1
180
181 /**
182 * Algorithms for encryption
183 */
184 static kernel_algorithm_t encryption_algs[] = {
185 /* {ENCR_DES_IV64, "***" }, */
186 {ENCR_DES, "des" },
187 {ENCR_3DES, "des3_ede" },
188 /* {ENCR_RC5, "***" }, */
189 /* {ENCR_IDEA, "***" }, */
190 {ENCR_CAST, "cast128" },
191 {ENCR_BLOWFISH, "blowfish" },
192 /* {ENCR_3IDEA, "***" }, */
193 /* {ENCR_DES_IV32, "***" }, */
194 {ENCR_NULL, "cipher_null" },
195 {ENCR_AES_CBC, "aes" },
196 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
197 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
198 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
199 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
200 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
201 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
202 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
203 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
204 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
205 /* {ENCR_CAMELLIA_CTR, "***" }, */
206 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
207 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
208 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
209 {ENCR_SERPENT_CBC, "serpent" },
210 {ENCR_TWOFISH_CBC, "twofish" },
211 {END_OF_LIST, NULL }
212 };
213
214 /**
215 * Algorithms for integrity protection
216 */
217 static kernel_algorithm_t integrity_algs[] = {
218 {AUTH_HMAC_MD5_96, "md5" },
219 {AUTH_HMAC_MD5_128, "hmac(md5)" },
220 {AUTH_HMAC_SHA1_96, "sha1" },
221 {AUTH_HMAC_SHA1_160, "hmac(sha1)" },
222 {AUTH_HMAC_SHA2_256_96, "sha256" },
223 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
224 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
225 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
226 /* {AUTH_DES_MAC, "***" }, */
227 /* {AUTH_KPDK_MD5, "***" }, */
228 {AUTH_AES_XCBC_96, "xcbc(aes)" },
229 {END_OF_LIST, NULL }
230 };
231
232 /**
233 * Algorithms for IPComp
234 */
235 static kernel_algorithm_t compression_algs[] = {
236 /* {IPCOMP_OUI, "***" }, */
237 {IPCOMP_DEFLATE, "deflate" },
238 {IPCOMP_LZS, "lzs" },
239 {IPCOMP_LZJH, "lzjh" },
240 {END_OF_LIST, NULL }
241 };
242
243 /**
244 * Look up the Linux crypto API name for an IKEv2 algorithm identifier
245 */
246 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
247 {
248 while (list->ikev2 != END_OF_LIST)
249 {
250 if (list->ikev2 == ikev2)
251 {
252 return list->name;
253 }
254 list++;
255 }
256 return NULL;
257 }
258
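/*
 * Usage sketch (added for illustration): the table lookup maps an IKEv2
 * transform identifier to the Linux crypto API name before an XFRMA_ALG_*
 * attribute is built, e.g. in add_sa() below.
 *
 *   char *name = lookup_algorithm(encryption_algs, ENCR_AES_CBC);
 *   if (!name)
 *   {
 *       // algorithm not in the table above, the kernel can't handle it
 *       return FAILED;
 *   }
 *   // name is "aes"; the key length is passed separately via alg_key_len
 */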
259 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
260
261 /**
262 * Private variables and functions of kernel_netlink class.
263 */
264 struct private_kernel_netlink_ipsec_t {
265 /**
266 * Public part of the kernel_netlink_t object
267 */
268 kernel_netlink_ipsec_t public;
269
270 /**
271 * Mutex to lock access to installed policies
272 */
273 mutex_t *mutex;
274
275 /**
276 * Hash table of installed policies (policy_entry_t)
277 */
278 hashtable_t *policies;
279
280 /**
281 * Hash table of IPsec SAs using policies (ipsec_sa_t)
282 */
283 hashtable_t *sas;
284
285 /**
286 * Netlink xfrm socket (IPsec)
287 */
288 netlink_socket_t *socket_xfrm;
289
290 /**
291 * Netlink xfrm socket to receive acquire and expire events
292 */
293 int socket_xfrm_events;
294
295 /**
296 * Whether to install routes along policies
297 */
298 bool install_routes;
299
300 /**
301 * Whether to track the history of a policy
302 */
303 bool policy_history;
304
305 /**
306 * Size of the replay window, in packets
307 */
308 u_int32_t replay_window;
309
310 /**
311 * Size of the replay window bitmap, in bytes
312 */
313 u_int32_t replay_bmp;
314 };
315
316 typedef struct route_entry_t route_entry_t;
317
318 /**
319 * Installed routing entry
320 */
321 struct route_entry_t {
322 /** Name of the interface the route is bound to */
323 char *if_name;
324
325 /** Source ip of the route */
326 host_t *src_ip;
327
328 /** Gateway for this route */
329 host_t *gateway;
330
331 /** Destination net */
332 chunk_t dst_net;
333
334 /** Destination net prefixlen */
335 u_int8_t prefixlen;
336 };
337
338 /**
339 * Destroy a route_entry_t object
340 */
341 static void route_entry_destroy(route_entry_t *this)
342 {
343 free(this->if_name);
344 this->src_ip->destroy(this->src_ip);
345 DESTROY_IF(this->gateway);
346 chunk_free(&this->dst_net);
347 free(this);
348 }
349
350 /**
351 * Compare two route_entry_t objects
352 */
353 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
354 {
355 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
356 a->src_ip->ip_equals(a->src_ip, b->src_ip) &&
357 a->gateway->ip_equals(a->gateway, b->gateway) &&
358 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
359 }
360
361 typedef struct ipsec_sa_t ipsec_sa_t;
362
363 /**
364 * IPsec SA assigned to a policy.
365 */
366 struct ipsec_sa_t {
367 /** Source address of this SA */
368 host_t *src;
369
370 /** Destination address of this SA */
371 host_t *dst;
372
373 /** Optional mark */
374 mark_t mark;
375
376 /** Description of this SA */
377 ipsec_sa_cfg_t cfg;
378
379 /** Reference count for this SA */
380 refcount_t refcount;
381 };
382
383 /**
384 * Hash function for ipsec_sa_t objects
385 */
386 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
387 {
388 return chunk_hash_inc(sa->src->get_address(sa->src),
389 chunk_hash_inc(sa->dst->get_address(sa->dst),
390 chunk_hash_inc(chunk_from_thing(sa->mark),
391 chunk_hash(chunk_from_thing(sa->cfg)))));
392 }
393
394 /**
395 * Equality function for ipsec_sa_t objects
396 */
397 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
398 {
399 return sa->src->ip_equals(sa->src, other_sa->src) &&
400 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
401 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
402 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
403 }
404
405 /**
406 * Allocate or reference an IPsec SA object
407 */
408 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
409 host_t *src, host_t *dst, mark_t mark,
410 ipsec_sa_cfg_t *cfg)
411 {
412 ipsec_sa_t *sa, *found;
413 INIT(sa,
414 .src = src,
415 .dst = dst,
416 .mark = mark,
417 .cfg = *cfg,
418 );
419 found = this->sas->get(this->sas, sa);
420 if (!found)
421 {
422 sa->src = src->clone(src);
423 sa->dst = dst->clone(dst);
424 this->sas->put(this->sas, sa, sa);
425 }
426 else
427 {
428 free(sa);
429 sa = found;
430 }
431 ref_get(&sa->refcount);
432 return sa;
433 }
434
435 /**
436 * Release and destroy an IPsec SA object
437 */
438 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
439 ipsec_sa_t *sa)
440 {
441 if (ref_put(&sa->refcount))
442 {
443 this->sas->remove(this->sas, sa);
444 DESTROY_IF(sa->src);
445 DESTROY_IF(sa->dst);
446 free(sa);
447 }
448 }
449
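/*
 * Note (added for clarity): ipsec_sa_t objects are shared between all
 * policies that reference the same src/dst/mark/config tuple.  A second
 * ipsec_sa_create() call with identical parameters returns the existing
 * object with refcount == 2, and the entry is only removed from this->sas
 * once ipsec_sa_destroy() has been called the same number of times.
 */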
450 typedef struct policy_sa_t policy_sa_t;
451 typedef struct policy_sa_fwd_t policy_sa_fwd_t;
452
453 /**
454 * Mapping between a policy and an IPsec SA.
455 */
456 struct policy_sa_t {
457 /** Priority assigned to the policy when installed with this SA */
458 u_int32_t priority;
459
460 /** Type of the policy */
461 policy_type_t type;
462
463 /** Assigned SA */
464 ipsec_sa_t *sa;
465 };
466
467 /**
468 * For forward policies we also cache the traffic selectors in order to install
469 * the route.
470 */
471 struct policy_sa_fwd_t {
472 /** Generic interface */
473 policy_sa_t generic;
474
475 /** Source traffic selector of this policy */
476 traffic_selector_t *src_ts;
477
478 /** Destination traffic selector of this policy */
479 traffic_selector_t *dst_ts;
480 };
481
482 /**
483 * Create a policy_sa(_fwd)_t object
484 */
485 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
486 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
487 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
488 ipsec_sa_cfg_t *cfg)
489 {
490 policy_sa_t *policy;
491
492 if (dir == POLICY_FWD)
493 {
494 policy_sa_fwd_t *fwd;
495 INIT(fwd,
496 .src_ts = src_ts->clone(src_ts),
497 .dst_ts = dst_ts->clone(dst_ts),
498 );
499 policy = &fwd->generic;
500 }
501 else
502 {
503 INIT(policy, .priority = 0);
504 }
505 policy->type = type;
506 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
507 return policy;
508 }
509
510 /**
511 * Destroy a policy_sa(_fwd)_t object
512 */
513 static void policy_sa_destroy(policy_sa_t *policy, policy_dir_t *dir,
514 private_kernel_netlink_ipsec_t *this)
515 {
516 if (*dir == POLICY_FWD)
517 {
518 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
519 fwd->src_ts->destroy(fwd->src_ts);
520 fwd->dst_ts->destroy(fwd->dst_ts);
521 }
522 ipsec_sa_destroy(this, policy->sa);
523 free(policy);
524 }
525
526 typedef struct policy_entry_t policy_entry_t;
527
528 /**
529 * Installed kernel policy.
530 */
531 struct policy_entry_t {
532
533 /** Direction of this policy: in, out, forward */
534 u_int8_t direction;
535
536 /** Parameters of installed policy */
537 struct xfrm_selector sel;
538
539 /** Optional mark */
540 u_int32_t mark;
541
542 /** Associated route installed for this policy */
543 route_entry_t *route;
544
545 /** List of SAs this policy is used by, ordered by priority */
546 linked_list_t *used_by;
547 };
548
549 /**
550 * Destroy a policy_entry_t object
551 */
552 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
553 policy_entry_t *policy)
554 {
555 if (policy->route)
556 {
557 route_entry_destroy(policy->route);
558 }
559 if (policy->used_by)
560 {
561 policy->used_by->invoke_function(policy->used_by,
562 (linked_list_invoke_t)policy_sa_destroy,
563 &policy->direction, this);
564 policy->used_by->destroy(policy->used_by);
565 }
566 free(policy);
567 }
568
569 /**
570 * Hash function for policy_entry_t objects
571 */
572 static u_int policy_hash(policy_entry_t *key)
573 {
574 chunk_t chunk = chunk_create((void*)&key->sel,
575 sizeof(struct xfrm_selector) + sizeof(u_int32_t));
576 return chunk_hash(chunk);
577 }
578
579 /**
580 * Equality function for policy_entry_t objects
581 */
582 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
583 {
584 return memeq(&key->sel, &other_key->sel,
585 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
586 key->direction == other_key->direction;
587 }
588
589 /**
590 * Calculate the priority of a policy
591 */
592 static inline u_int32_t get_priority(policy_entry_t *policy,
593 policy_priority_t prio)
594 {
595 u_int32_t priority = PRIO_BASE;
596 switch (prio)
597 {
598 case POLICY_PRIORITY_FALLBACK:
599 priority <<= 1;
600 /* fall-through */
601 case POLICY_PRIORITY_ROUTED:
602 priority <<= 1;
603 /* fall-through */
604 case POLICY_PRIORITY_DEFAULT:
605 break;
606 }
607 /* calculate priority based on selector size, small size = high prio */
608 priority -= policy->sel.prefixlen_s;
609 priority -= policy->sel.prefixlen_d;
610 priority <<= 2; /* make some room for the two flags */
611 priority += policy->sel.sport_mask || policy->sel.dport_mask ? 0 : 2;
612 priority += policy->sel.proto ? 0 : 1;
613 return priority;
614 }
615
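/*
 * Worked example (added for illustration): with POLICY_PRIORITY_DEFAULT an
 * IPv4 host-to-host policy with a specific protocol and port yields
 * 512 - 32 - 32 = 448, shifted left by two = 1792, with no flag penalty.
 * A catch-all 0.0.0.0/0 policy matching any port and any protocol yields
 * (512 << 2) + 2 + 1 = 2051.  Since the kernel treats lower values as higher
 * precedence, the more specific policy wins.  POLICY_PRIORITY_ROUTED and
 * POLICY_PRIORITY_FALLBACK start from a doubled or quadrupled base, so those
 * classes always rank behind the default ones.
 */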
616 /**
617 * Convert the general ipsec mode to the one defined in xfrm.h
618 */
619 static u_int8_t mode2kernel(ipsec_mode_t mode)
620 {
621 switch (mode)
622 {
623 case MODE_TRANSPORT:
624 return XFRM_MODE_TRANSPORT;
625 case MODE_TUNNEL:
626 return XFRM_MODE_TUNNEL;
627 case MODE_BEET:
628 return XFRM_MODE_BEET;
629 default:
630 return mode;
631 }
632 }
633
634 /**
635 * Convert a host_t to a struct xfrm_address
636 */
637 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
638 {
639 chunk_t chunk = host->get_address(host);
640 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
641 }
642
643 /**
644 * Convert a struct xfrm_address to a host_t
645 */
646 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
647 {
648 chunk_t chunk;
649
650 switch (family)
651 {
652 case AF_INET:
653 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
654 break;
655 case AF_INET6:
656 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
657 break;
658 default:
659 return NULL;
660 }
661 return host_create_from_chunk(family, chunk, ntohs(port));
662 }
663
664 /**
665 * Convert a traffic selector address range to subnet and its mask.
666 */
667 static void ts2subnet(traffic_selector_t* ts,
668 xfrm_address_t *net, u_int8_t *mask)
669 {
670 host_t *net_host;
671 chunk_t net_chunk;
672
673 ts->to_subnet(ts, &net_host, mask);
674 net_chunk = net_host->get_address(net_host);
675 memcpy(net, net_chunk.ptr, net_chunk.len);
676 net_host->destroy(net_host);
677 }
678
679 /**
680 * Convert a traffic selector port range to port/portmask
681 */
682 static void ts2ports(traffic_selector_t* ts,
683 u_int16_t *port, u_int16_t *mask)
684 {
685 /* Linux does not seem to accept complex portmasks. Only "any" or a
686 * single specific port is allowed. We therefore use "any" if the selector
687 * covers a port range, and the exact port if it covers one port only.
688 */
689 u_int16_t from, to;
690
691 from = ts->get_from_port(ts);
692 to = ts->get_to_port(ts);
693
694 if (from == to)
695 {
696 *port = htons(from);
697 *mask = ~0;
698 }
699 else
700 {
701 *port = 0;
702 *mask = 0;
703 }
704 }
705
706 /**
707 * Convert a pair of traffic_selectors to an xfrm_selector
708 */
709 static struct xfrm_selector ts2selector(traffic_selector_t *src,
710 traffic_selector_t *dst)
711 {
712 struct xfrm_selector sel;
713
714 memset(&sel, 0, sizeof(sel));
715 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
716 /* src or dest proto may be "any" (0), use the more restrictive one */
717 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
718 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
719 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
720 ts2ports(dst, &sel.dport, &sel.dport_mask);
721 ts2ports(src, &sel.sport, &sel.sport_mask);
722 sel.ifindex = 0;
723 sel.user = 0;
724
725 return sel;
726 }
727
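/*
 * Example (added for illustration): a selector pair
 * 10.0.1.0/24[tcp] === 10.0.2.5/32[tcp/80] is mapped to family = AF_INET,
 * proto = IPPROTO_TCP, saddr/prefixlen_s = 10.0.1.0/24,
 * daddr/prefixlen_d = 10.0.2.5/32, sport = 0 with sport_mask = 0 (any source
 * port, see ts2ports() above) and dport = htons(80) with dport_mask = 0xffff.
 */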
728 /**
729 * Convert an xfrm_selector to a src|dst traffic_selector
730 */
731 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
732 {
733 u_char *addr;
734 u_int8_t prefixlen;
735 u_int16_t port = 0;
736 host_t *host = NULL;
737
738 if (src)
739 {
740 addr = (u_char*)&sel->saddr;
741 prefixlen = sel->prefixlen_s;
742 if (sel->sport_mask)
743 {
744 port = htons(sel->sport);
745 }
746 }
747 else
748 {
749 addr = (u_char*)&sel->daddr;
750 prefixlen = sel->prefixlen_d;
751 if (sel->dport_mask)
752 {
753 port = htons(sel->dport);
754 }
755 }
756
757 /* The Linux 2.6 kernel does not set the selector's family field,
758 * so as a kludge we additionally test the prefix length.
759 */
760 if (sel->family == AF_INET || sel->prefixlen_s == 32)
761 {
762 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
763 }
764 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
765 {
766 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
767 }
768
769 if (host)
770 {
771 return traffic_selector_create_from_subnet(host, prefixlen,
772 sel->proto, port);
773 }
774 return NULL;
775 }
776
777 /**
778 * Process a XFRM_MSG_ACQUIRE from kernel
779 */
780 static void process_acquire(private_kernel_netlink_ipsec_t *this,
781 struct nlmsghdr *hdr)
782 {
783 struct xfrm_user_acquire *acquire;
784 struct rtattr *rta;
785 size_t rtasize;
786 traffic_selector_t *src_ts, *dst_ts;
787 u_int32_t reqid = 0;
788 int proto = 0;
789
790 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
791 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
792 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
793
794 DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");
795
796 while (RTA_OK(rta, rtasize))
797 {
798 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
799
800 if (rta->rta_type == XFRMA_TMPL)
801 {
802 struct xfrm_user_tmpl* tmpl;
803 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
804 reqid = tmpl->reqid;
805 proto = tmpl->id.proto;
806 }
807 rta = RTA_NEXT(rta, rtasize);
808 }
809 switch (proto)
810 {
811 case 0:
812 case IPPROTO_ESP:
813 case IPPROTO_AH:
814 break;
815 default:
816 /* acquire for AH/ESP only, not for IPCOMP */
817 return;
818 }
819 src_ts = selector2ts(&acquire->sel, TRUE);
820 dst_ts = selector2ts(&acquire->sel, FALSE);
821
822 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
823 dst_ts);
824 }
825
826 /**
827 * Process a XFRM_MSG_EXPIRE from kernel
828 */
829 static void process_expire(private_kernel_netlink_ipsec_t *this,
830 struct nlmsghdr *hdr)
831 {
832 struct xfrm_user_expire *expire;
833 u_int32_t spi, reqid;
834 u_int8_t protocol;
835
836 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
837 protocol = expire->state.id.proto;
838 spi = expire->state.id.spi;
839 reqid = expire->state.reqid;
840
841 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
842
843 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
844 {
845 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
846 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
847 return;
848 }
849
850 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
851 spi, expire->hard != 0);
852 }
853
854 /**
855 * Process a XFRM_MSG_MIGRATE from kernel
856 */
857 static void process_migrate(private_kernel_netlink_ipsec_t *this,
858 struct nlmsghdr *hdr)
859 {
860 struct xfrm_userpolicy_id *policy_id;
861 struct rtattr *rta;
862 size_t rtasize;
863 traffic_selector_t *src_ts, *dst_ts;
864 host_t *local = NULL, *remote = NULL;
865 host_t *old_src = NULL, *old_dst = NULL;
866 host_t *new_src = NULL, *new_dst = NULL;
867 u_int32_t reqid = 0;
868 policy_dir_t dir;
869
870 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
871 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
872 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
873
874 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
875
876 src_ts = selector2ts(&policy_id->sel, TRUE);
877 dst_ts = selector2ts(&policy_id->sel, FALSE);
878 dir = (policy_dir_t)policy_id->dir;
879
880 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
881
882 while (RTA_OK(rta, rtasize))
883 {
884 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
885 if (rta->rta_type == XFRMA_KMADDRESS)
886 {
887 struct xfrm_user_kmaddress *kmaddress;
888
889 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
890 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
891 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
892 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
893 }
894 else if (rta->rta_type == XFRMA_MIGRATE)
895 {
896 struct xfrm_user_migrate *migrate;
897
898 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
899 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
900 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
901 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
902 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
903 reqid = migrate->reqid;
904 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
905 old_src, old_dst, new_src, new_dst, reqid);
906 DESTROY_IF(old_src);
907 DESTROY_IF(old_dst);
908 DESTROY_IF(new_src);
909 DESTROY_IF(new_dst);
910 }
911 rta = RTA_NEXT(rta, rtasize);
912 }
913
914 if (src_ts && dst_ts && local && remote)
915 {
916 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
917 src_ts, dst_ts, dir, local, remote);
918 }
919 else
920 {
921 DESTROY_IF(src_ts);
922 DESTROY_IF(dst_ts);
923 DESTROY_IF(local);
924 DESTROY_IF(remote);
925 }
926 }
927
928 /**
929 * Process a XFRM_MSG_MAPPING from kernel
930 */
931 static void process_mapping(private_kernel_netlink_ipsec_t *this,
932 struct nlmsghdr *hdr)
933 {
934 struct xfrm_user_mapping *mapping;
935 u_int32_t spi, reqid;
936
937 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
938 spi = mapping->id.spi;
939 reqid = mapping->reqid;
940
941 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
942
943 if (mapping->id.proto == IPPROTO_ESP)
944 {
945 host_t *host;
946 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
947 mapping->new_sport);
948 if (host)
949 {
950 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
951 spi, host);
952 }
953 }
954 }
955
956 /**
957 * Receives events from kernel
958 */
959 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
960 {
961 char response[1024];
962 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
963 struct sockaddr_nl addr;
964 socklen_t addr_len = sizeof(addr);
965 int len;
966 bool oldstate;
967
968 oldstate = thread_cancelability(TRUE);
969 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
970 (struct sockaddr*)&addr, &addr_len);
971 thread_cancelability(oldstate);
972
973 if (len < 0)
974 {
975 switch (errno)
976 {
977 case EINTR:
978 /* interrupted, try again */
979 return JOB_REQUEUE_DIRECT;
980 case EAGAIN:
981 /* no data ready, select again */
982 return JOB_REQUEUE_DIRECT;
983 default:
984 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
985 sleep(1);
986 return JOB_REQUEUE_FAIR;
987 }
988 }
989
990 if (addr.nl_pid != 0)
991 { /* not from kernel. not interested, try another one */
992 return JOB_REQUEUE_DIRECT;
993 }
994
995 while (NLMSG_OK(hdr, len))
996 {
997 switch (hdr->nlmsg_type)
998 {
999 case XFRM_MSG_ACQUIRE:
1000 process_acquire(this, hdr);
1001 break;
1002 case XFRM_MSG_EXPIRE:
1003 process_expire(this, hdr);
1004 break;
1005 case XFRM_MSG_MIGRATE:
1006 process_migrate(this, hdr);
1007 break;
1008 case XFRM_MSG_MAPPING:
1009 process_mapping(this, hdr);
1010 break;
1011 default:
1012 DBG1(DBG_KNL, "received unknown event from xfrm event "
1013 "socket: %d", hdr->nlmsg_type);
1014 break;
1015 }
1016 hdr = NLMSG_NEXT(hdr, len);
1017 }
1018 return JOB_REQUEUE_DIRECT;
1019 }
1020
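/*
 * Sketch (an assumption, since the constructor is not part of this excerpt):
 * the socket read above is a raw NETLINK_XFRM socket bound to the multicast
 * groups matching the message types handled here, roughly:
 *
 *   struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
 *   int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *   addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
 *                    XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
 *   if (s < 0 || bind(s, (struct sockaddr*)&addr, sizeof(addr)) < 0)
 *   {
 *       // creating or binding the event socket failed
 *   }
 */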
1021 /**
1022 * Get an SPI for a specific protocol from the kernel.
1023 */
1024 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
1025 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
1026 u_int32_t reqid, u_int32_t *spi)
1027 {
1028 netlink_buf_t request;
1029 struct nlmsghdr *hdr, *out;
1030 struct xfrm_userspi_info *userspi;
1031 u_int32_t received_spi = 0;
1032 size_t len;
1033
1034 memset(&request, 0, sizeof(request));
1035
1036 hdr = (struct nlmsghdr*)request;
1037 hdr->nlmsg_flags = NLM_F_REQUEST;
1038 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
1039 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
1040
1041 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
1042 host2xfrm(src, &userspi->info.saddr);
1043 host2xfrm(dst, &userspi->info.id.daddr);
1044 userspi->info.id.proto = proto;
1045 userspi->info.mode = XFRM_MODE_TUNNEL;
1046 userspi->info.reqid = reqid;
1047 userspi->info.family = src->get_family(src);
1048 userspi->min = min;
1049 userspi->max = max;
1050
1051 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1052 {
1053 hdr = out;
1054 while (NLMSG_OK(hdr, len))
1055 {
1056 switch (hdr->nlmsg_type)
1057 {
1058 case XFRM_MSG_NEWSA:
1059 {
1060 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
1061 received_spi = usersa->id.spi;
1062 break;
1063 }
1064 case NLMSG_ERROR:
1065 {
1066 struct nlmsgerr *err = NLMSG_DATA(hdr);
1067 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
1068 strerror(-err->error), -err->error);
1069 break;
1070 }
1071 default:
1072 hdr = NLMSG_NEXT(hdr, len);
1073 continue;
1074 case NLMSG_DONE:
1075 break;
1076 }
1077 break;
1078 }
1079 free(out);
1080 }
1081
1082 if (received_spi == 0)
1083 {
1084 return FAILED;
1085 }
1086
1087 *spi = received_spi;
1088 return SUCCESS;
1089 }
1090
1091 METHOD(kernel_ipsec_t, get_spi, status_t,
1092 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1093 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1094 {
1095 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1096
1097 if (get_spi_internal(this, src, dst, protocol,
1098 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1099 {
1100 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1101 return FAILED;
1102 }
1103
1104 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1105 return SUCCESS;
1106 }
1107
1108 METHOD(kernel_ipsec_t, get_cpi, status_t,
1109 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1110 u_int32_t reqid, u_int16_t *cpi)
1111 {
1112 u_int32_t received_spi = 0;
1113
1114 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1115
1116 if (get_spi_internal(this, src, dst, IPPROTO_COMP,
1117 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1118 {
1119 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1120 return FAILED;
1121 }
1122
1123 *cpi = htons((u_int16_t)ntohl(received_spi));
1124
1125 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1126 return SUCCESS;
1127 }
1128
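/*
 * Note on the conversion above (added for clarity): the kernel allocates
 * IPComp "SPIs" in the range 0x100-0xEFFF as 32-bit values in network order,
 * while a CPI is only 16 bits wide.  htons((u_int16_t)ntohl(received_spi))
 * keeps the low 16 bits in network order; the reverse, htonl(ntohs(cpi)), is
 * what add_sa() and del_sa() use to rebuild the SPI for the IPComp SA.
 */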
1129 METHOD(kernel_ipsec_t, add_sa, status_t,
1130 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1131 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
1132 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
1133 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
1134 u_int16_t cpi, bool encap, bool esn, bool inbound,
1135 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
1136 {
1137 netlink_buf_t request;
1138 char *alg_name;
1139 struct nlmsghdr *hdr;
1140 struct xfrm_usersa_info *sa;
1141 u_int16_t icv_size = 64;
1142 status_t status = FAILED;
1143
1144 /* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
1145 * we are in the recursive call below */
1146 if (ipcomp != IPCOMP_NONE && cpi != 0)
1147 {
1148 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
1149 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
1150 tfc, &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED,
1151 chunk_empty, mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
1152 ipcomp = IPCOMP_NONE;
1153 /* use transport mode ESP SA, IPComp uses tunnel mode */
1154 mode = MODE_TRANSPORT;
1155 }
1156
1157 memset(&request, 0, sizeof(request));
1158
1159 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} (mark "
1160 "%u/0x%08x)", ntohl(spi), reqid, mark.value, mark.mask);
1161
1162 hdr = (struct nlmsghdr*)request;
1163 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1164 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
1165 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1166
1167 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1168 host2xfrm(src, &sa->saddr);
1169 host2xfrm(dst, &sa->id.daddr);
1170 sa->id.spi = spi;
1171 sa->id.proto = protocol;
1172 sa->family = src->get_family(src);
1173 sa->mode = mode2kernel(mode);
1174 switch (mode)
1175 {
1176 case MODE_TUNNEL:
1177 sa->flags |= XFRM_STATE_AF_UNSPEC;
1178 break;
1179 case MODE_BEET:
1180 case MODE_TRANSPORT:
1181 if (src_ts && dst_ts)
1182 {
1183 sa->sel = ts2selector(src_ts, dst_ts);
1184 }
1185 break;
1186 default:
1187 break;
1188 }
1189
1190 sa->reqid = reqid;
1191 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1192 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1193 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1194 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1195 /* we use lifetimes since added, not since used */
1196 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1197 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1198 sa->lft.soft_use_expires_seconds = 0;
1199 sa->lft.hard_use_expires_seconds = 0;
1200
1201 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
1202
1203 switch (enc_alg)
1204 {
1205 case ENCR_UNDEFINED:
1206 /* no encryption */
1207 break;
1208 case ENCR_AES_CCM_ICV16:
1209 case ENCR_AES_GCM_ICV16:
1210 case ENCR_NULL_AUTH_AES_GMAC:
1211 case ENCR_CAMELLIA_CCM_ICV16:
1212 icv_size += 32;
1213 /* FALL */
1214 case ENCR_AES_CCM_ICV12:
1215 case ENCR_AES_GCM_ICV12:
1216 case ENCR_CAMELLIA_CCM_ICV12:
1217 icv_size += 32;
1218 /* FALL */
1219 case ENCR_AES_CCM_ICV8:
1220 case ENCR_AES_GCM_ICV8:
1221 case ENCR_CAMELLIA_CCM_ICV8:
1222 {
1223 struct xfrm_algo_aead *algo;
1224
1225 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1226 if (alg_name == NULL)
1227 {
1228 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1229 encryption_algorithm_names, enc_alg);
1230 goto failed;
1231 }
1232 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1233 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1234
1235 rthdr->rta_type = XFRMA_ALG_AEAD;
1236 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) +
1237 enc_key.len);
1238 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1239 if (hdr->nlmsg_len > sizeof(request))
1240 {
1241 goto failed;
1242 }
1243
1244 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1245 algo->alg_key_len = enc_key.len * 8;
1246 algo->alg_icv_len = icv_size;
1247 strcpy(algo->alg_name, alg_name);
1248 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1249
1250 rthdr = XFRM_RTA_NEXT(rthdr);
1251 break;
1252 }
1253 default:
1254 {
1255 struct xfrm_algo *algo;
1256
1257 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1258 if (alg_name == NULL)
1259 {
1260 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1261 encryption_algorithm_names, enc_alg);
1262 goto failed;
1263 }
1264 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1265 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1266
1267 rthdr->rta_type = XFRMA_ALG_CRYPT;
1268 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1269 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1270 if (hdr->nlmsg_len > sizeof(request))
1271 {
1272 goto failed;
1273 }
1274
1275 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1276 algo->alg_key_len = enc_key.len * 8;
1277 strcpy(algo->alg_name, alg_name);
1278 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1279
1280 rthdr = XFRM_RTA_NEXT(rthdr);
1281 }
1282 }
1283
1284 if (int_alg != AUTH_UNDEFINED)
1285 {
1286 u_int trunc_len = 0;
1287
1288 alg_name = lookup_algorithm(integrity_algs, int_alg);
1289 if (alg_name == NULL)
1290 {
1291 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1292 integrity_algorithm_names, int_alg);
1293 goto failed;
1294 }
1295 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1296 integrity_algorithm_names, int_alg, int_key.len * 8);
1297
1298 switch (int_alg)
1299 {
1300 case AUTH_HMAC_MD5_128:
1301 case AUTH_HMAC_SHA2_256_128:
1302 trunc_len = 128;
1303 break;
1304 case AUTH_HMAC_SHA1_160:
1305 trunc_len = 160;
1306 break;
1307 default:
1308 break;
1309 }
1310
1311 if (trunc_len)
1312 {
1313 struct xfrm_algo_auth* algo;
1314
1315 /* the kernel truncates SHA-256 to 96 bits by default; newer kernels
1316 * support an explicit truncation length via XFRMA_ALG_AUTH_TRUNC.
1317 * We also use this attribute for untruncated MD5 and SHA1. */
1318 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1319 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) +
1320 int_key.len);
1321
1322 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1323 if (hdr->nlmsg_len > sizeof(request))
1324 {
1325 goto failed;
1326 }
1327
1328 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1329 algo->alg_key_len = int_key.len * 8;
1330 algo->alg_trunc_len = trunc_len;
1331 strcpy(algo->alg_name, alg_name);
1332 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1333 }
1334 else
1335 {
1336 struct xfrm_algo* algo;
1337
1338 rthdr->rta_type = XFRMA_ALG_AUTH;
1339 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1340
1341 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1342 if (hdr->nlmsg_len > sizeof(request))
1343 {
1344 goto failed;
1345 }
1346
1347 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1348 algo->alg_key_len = int_key.len * 8;
1349 strcpy(algo->alg_name, alg_name);
1350 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1351 }
1352 rthdr = XFRM_RTA_NEXT(rthdr);
1353 }
1354
1355 if (ipcomp != IPCOMP_NONE)
1356 {
1357 rthdr->rta_type = XFRMA_ALG_COMP;
1358 alg_name = lookup_algorithm(compression_algs, ipcomp);
1359 if (alg_name == NULL)
1360 {
1361 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1362 ipcomp_transform_names, ipcomp);
1363 goto failed;
1364 }
1365 DBG2(DBG_KNL, " using compression algorithm %N",
1366 ipcomp_transform_names, ipcomp);
1367
1368 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1369 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1370 if (hdr->nlmsg_len > sizeof(request))
1371 {
1372 goto failed;
1373 }
1374
1375 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1376 algo->alg_key_len = 0;
1377 strcpy(algo->alg_name, alg_name);
1378
1379 rthdr = XFRM_RTA_NEXT(rthdr);
1380 }
1381
1382 if (encap)
1383 {
1384 struct xfrm_encap_tmpl *tmpl;
1385
1386 rthdr->rta_type = XFRMA_ENCAP;
1387 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1388
1389 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1390 if (hdr->nlmsg_len > sizeof(request))
1391 {
1392 goto failed;
1393 }
1394
1395 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
1396 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1397 tmpl->encap_sport = htons(src->get_port(src));
1398 tmpl->encap_dport = htons(dst->get_port(dst));
1399 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1400 /* encap_oa could probably be derived from the
1401 * traffic selectors [rfc4306, p39]. In the netlink kernel
1402 * implementation pluto does the same as we do here but it uses
1403 * encap_oa in the pfkey implementation.
1404 * BUT as /usr/src/linux/net/key/af_key.c indicates the kernel ignores
1405 * it anyway
1406 * -> does that mean that NAT-T encap doesn't work in transport mode?
1407 * No. The reason the kernel ignores NAT-OA is that it recomputes
1408 * (or, rather, just ignores) the checksum. If packets pass the IPsec
1409 * checks it marks them "checksum ok" so OA isn't needed. */
1410 rthdr = XFRM_RTA_NEXT(rthdr);
1411 }
1412
1413 if (mark.value)
1414 {
1415 struct xfrm_mark *mrk;
1416
1417 rthdr->rta_type = XFRMA_MARK;
1418 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1419
1420 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1421 if (hdr->nlmsg_len > sizeof(request))
1422 {
1423 goto failed;
1424 }
1425
1426 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1427 mrk->v = mark.value;
1428 mrk->m = mark.mask;
1429 rthdr = XFRM_RTA_NEXT(rthdr);
1430 }
1431
1432 if (tfc)
1433 {
1434 u_int32_t *tfcpad;
1435
1436 rthdr->rta_type = XFRMA_TFCPAD;
1437 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1438
1439 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1440 if (hdr->nlmsg_len > sizeof(request))
1441 {
1442 goto failed;
1443 }
1444
1445 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1446 *tfcpad = tfc;
1447 rthdr = XFRM_RTA_NEXT(rthdr);
1448 }
1449
1450 if (protocol != IPPROTO_COMP)
1451 {
1452 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1453 {
1454 /* for ESN or larger replay windows we need the new
1455 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1456 struct xfrm_replay_state_esn *replay;
1457
1458 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1459 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1460 (this->replay_window + 7) / 8);
1461
1462 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1463 if (hdr->nlmsg_len > sizeof(request))
1464 {
1465 goto failed;
1466 }
1467
1468 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1469 /* bmp_len contains the number of __u32's */
1470 replay->bmp_len = this->replay_bmp;
1471 replay->replay_window = this->replay_window;
1472 DBG2(DBG_KNL, " using replay window of %u packets",
1473 this->replay_window);
1474
1475 rthdr = XFRM_RTA_NEXT(rthdr);
1476 if (esn)
1477 {
1478 DBG2(DBG_KNL, " using extended sequence numbers (ESN)");
1479 sa->flags |= XFRM_STATE_ESN;
1480 }
1481 }
1482 else
1483 {
1484 sa->replay_window = DEFAULT_REPLAY_WINDOW;
1485 }
1486 }
1487
1488 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1489 {
1490 if (mark.value)
1491 {
1492 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1493 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
1494 }
1495 else
1496 {
1497 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1498 }
1499 goto failed;
1500 }
1501
1502 status = SUCCESS;
1503
1504 failed:
1505 memwipe(request, sizeof(request));
1506 return status;
1507 }
1508
1509 /**
1510 * Get the ESN replay state (i.e. sequence numbers) of an SA.
1511 *
1512 * Allocates the replay state structure received from the kernel into one of the output pointers.
1513 */
1514 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1515 u_int32_t spi, u_int8_t protocol, host_t *dst,
1516 struct xfrm_replay_state_esn **replay_esn,
1517 struct xfrm_replay_state **replay)
1518 {
1519 netlink_buf_t request;
1520 struct nlmsghdr *hdr, *out = NULL;
1521 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1522 size_t len;
1523 struct rtattr *rta;
1524 size_t rtasize;
1525
1526 memset(&request, 0, sizeof(request));
1527
1528 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1529 ntohl(spi));
1530
1531 hdr = (struct nlmsghdr*)request;
1532 hdr->nlmsg_flags = NLM_F_REQUEST;
1533 hdr->nlmsg_type = XFRM_MSG_GETAE;
1534 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1535
1536 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1537 aevent_id->flags = XFRM_AE_RVAL;
1538
1539 host2xfrm(dst, &aevent_id->sa_id.daddr);
1540 aevent_id->sa_id.spi = spi;
1541 aevent_id->sa_id.proto = protocol;
1542 aevent_id->sa_id.family = dst->get_family(dst);
1543
1544 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1545 {
1546 hdr = out;
1547 while (NLMSG_OK(hdr, len))
1548 {
1549 switch (hdr->nlmsg_type)
1550 {
1551 case XFRM_MSG_NEWAE:
1552 {
1553 out_aevent = NLMSG_DATA(hdr);
1554 break;
1555 }
1556 case NLMSG_ERROR:
1557 {
1558 struct nlmsgerr *err = NLMSG_DATA(hdr);
1559 DBG1(DBG_KNL, "querying replay state from SAD entry "
1560 "failed: %s (%d)", strerror(-err->error),
1561 -err->error);
1562 break;
1563 }
1564 default:
1565 hdr = NLMSG_NEXT(hdr, len);
1566 continue;
1567 case NLMSG_DONE:
1568 break;
1569 }
1570 break;
1571 }
1572 }
1573
1574 if (out_aevent)
1575 {
1576 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1577 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1578 while (RTA_OK(rta, rtasize))
1579 {
1580 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1581 RTA_PAYLOAD(rta) == sizeof(**replay))
1582 {
1583 *replay = malloc(RTA_PAYLOAD(rta));
1584 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1585 break;
1586 }
1587 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1588 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1589 {
1590 *replay_esn = malloc(RTA_PAYLOAD(rta));
1591 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1592 break;
1593 }
1594 rta = RTA_NEXT(rta, rtasize);
1595 }
1596 }
1597 free(out);
1598 }
1599
1600 METHOD(kernel_ipsec_t, query_sa, status_t,
1601 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1602 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1603 {
1604 netlink_buf_t request;
1605 struct nlmsghdr *out = NULL, *hdr;
1606 struct xfrm_usersa_id *sa_id;
1607 struct xfrm_usersa_info *sa = NULL;
1608 status_t status = FAILED;
1609 size_t len;
1610
1611 memset(&request, 0, sizeof(request));
1612
1613 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%08x)",
1614 ntohl(spi), mark.value, mark.mask);
1615
1616 hdr = (struct nlmsghdr*)request;
1617 hdr->nlmsg_flags = NLM_F_REQUEST;
1618 hdr->nlmsg_type = XFRM_MSG_GETSA;
1619 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1620
1621 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1622 host2xfrm(dst, &sa_id->daddr);
1623 sa_id->spi = spi;
1624 sa_id->proto = protocol;
1625 sa_id->family = dst->get_family(dst);
1626
1627 if (mark.value)
1628 {
1629 struct xfrm_mark *mrk;
1630 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1631
1632 rthdr->rta_type = XFRMA_MARK;
1633 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1634 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1635 if (hdr->nlmsg_len > sizeof(request))
1636 {
1637 return FAILED;
1638 }
1639
1640 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1641 mrk->v = mark.value;
1642 mrk->m = mark.mask;
1643 }
1644
1645 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1646 {
1647 hdr = out;
1648 while (NLMSG_OK(hdr, len))
1649 {
1650 switch (hdr->nlmsg_type)
1651 {
1652 case XFRM_MSG_NEWSA:
1653 {
1654 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1655 break;
1656 }
1657 case NLMSG_ERROR:
1658 {
1659 struct nlmsgerr *err = NLMSG_DATA(hdr);
1660
1661 if (mark.value)
1662 {
1663 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1664 "(mark %u/0x%08x) failed: %s (%d)",
1665 ntohl(spi), mark.value, mark.mask,
1666 strerror(-err->error), -err->error);
1667 }
1668 else
1669 {
1670 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1671 "failed: %s (%d)", ntohl(spi),
1672 strerror(-err->error), -err->error);
1673 }
1674 break;
1675 }
1676 default:
1677 hdr = NLMSG_NEXT(hdr, len);
1678 continue;
1679 case NLMSG_DONE:
1680 break;
1681 }
1682 break;
1683 }
1684 }
1685
1686 if (sa == NULL)
1687 {
1688 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1689 }
1690 else
1691 {
1692 *bytes = sa->curlft.bytes;
1693 status = SUCCESS;
1694 }
1695 memwipe(out, len);
1696 free(out);
1697 return status;
1698 }
1699
1700 METHOD(kernel_ipsec_t, del_sa, status_t,
1701 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1702 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1703 {
1704 netlink_buf_t request;
1705 struct nlmsghdr *hdr;
1706 struct xfrm_usersa_id *sa_id;
1707
1708 /* if IPComp was used, we first delete the additional IPComp SA */
1709 if (cpi)
1710 {
1711 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1712 }
1713
1714 memset(&request, 0, sizeof(request));
1715
1716 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%08x)",
1717 ntohl(spi), mark.value, mark.mask);
1718
1719 hdr = (struct nlmsghdr*)request;
1720 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1721 hdr->nlmsg_type = XFRM_MSG_DELSA;
1722 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1723
1724 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1725 host2xfrm(dst, &sa_id->daddr);
1726 sa_id->spi = spi;
1727 sa_id->proto = protocol;
1728 sa_id->family = dst->get_family(dst);
1729
1730 if (mark.value)
1731 {
1732 struct xfrm_mark *mrk;
1733 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1734
1735 rthdr->rta_type = XFRMA_MARK;
1736 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1737 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1738 if (hdr->nlmsg_len > sizeof(request))
1739 {
1740 return FAILED;
1741 }
1742
1743 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1744 mrk->v = mark.value;
1745 mrk->m = mark.mask;
1746 }
1747
1748 switch (this->socket_xfrm->send_ack(this->socket_xfrm, hdr))
1749 {
1750 case SUCCESS:
1751 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%08x)",
1752 ntohl(spi), mark.value, mark.mask);
1753 return SUCCESS;
1754 case NOT_FOUND:
1755 return NOT_FOUND;
1756 default:
1757 if (mark.value)
1758 {
1759 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1760 "(mark %u/0x%08x)", ntohl(spi), mark.value, mark.mask);
1761 }
1762 else
1763 {
1764 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x",
1765 ntohl(spi));
1766 }
1767 return FAILED;
1768 }
1769 }
1770
1771 METHOD(kernel_ipsec_t, update_sa, status_t,
1772 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1773 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1774 bool old_encap, bool new_encap, mark_t mark)
1775 {
1776 netlink_buf_t request;
1777 u_char *pos;
1778 struct nlmsghdr *hdr, *out = NULL;
1779 struct xfrm_usersa_id *sa_id;
1780 struct xfrm_usersa_info *out_sa = NULL, *sa;
1781 size_t len;
1782 struct rtattr *rta;
1783 size_t rtasize;
1784 struct xfrm_encap_tmpl* tmpl = NULL;
1785 struct xfrm_replay_state *replay = NULL;
1786 struct xfrm_replay_state_esn *replay_esn = NULL;
1787 status_t status = FAILED;
1788
1789 /* if IPComp is used, we first update the IPComp SA */
1790 if (cpi)
1791 {
1792 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1793 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1794 }
1795
1796 memset(&request, 0, sizeof(request));
1797
1798 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1799
1800 /* query the existing SA first */
1801 hdr = (struct nlmsghdr*)request;
1802 hdr->nlmsg_flags = NLM_F_REQUEST;
1803 hdr->nlmsg_type = XFRM_MSG_GETSA;
1804 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1805
1806 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1807 host2xfrm(dst, &sa_id->daddr);
1808 sa_id->spi = spi;
1809 sa_id->proto = protocol;
1810 sa_id->family = dst->get_family(dst);
1811
1812 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1813 {
1814 hdr = out;
1815 while (NLMSG_OK(hdr, len))
1816 {
1817 switch (hdr->nlmsg_type)
1818 {
1819 case XFRM_MSG_NEWSA:
1820 {
1821 out_sa = NLMSG_DATA(hdr);
1822 break;
1823 }
1824 case NLMSG_ERROR:
1825 {
1826 struct nlmsgerr *err = NLMSG_DATA(hdr);
1827 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1828 strerror(-err->error), -err->error);
1829 break;
1830 }
1831 default:
1832 hdr = NLMSG_NEXT(hdr, len);
1833 continue;
1834 case NLMSG_DONE:
1835 break;
1836 }
1837 break;
1838 }
1839 }
1840 if (out_sa == NULL)
1841 {
1842 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1843 goto failed;
1844 }
1845
1846 get_replay_state(this, spi, protocol, dst, &replay_esn, &replay);
1847
1848 /* delete the old SA (without affecting the IPComp SA) */
1849 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1850 {
1851 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x",
1852 ntohl(spi));
1853 goto failed;
1854 }
1855
1856 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1857 ntohl(spi), src, dst, new_src, new_dst);
1858 /* copy over the SA from out to request */
1859 hdr = (struct nlmsghdr*)request;
1860 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1861 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1862 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1863 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1864 sa = NLMSG_DATA(hdr);
1865 sa->family = new_dst->get_family(new_dst);
1866
1867 if (!src->ip_equals(src, new_src))
1868 {
1869 host2xfrm(new_src, &sa->saddr);
1870 }
1871 if (!dst->ip_equals(dst, new_dst))
1872 {
1873 host2xfrm(new_dst, &sa->id.daddr);
1874 }
1875
1876 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1877 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1878 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1879 while (RTA_OK(rta, rtasize))
1880 {
1881 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1882 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1883 {
1884 if (rta->rta_type == XFRMA_ENCAP)
1885 { /* update encap tmpl */
1886 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1887 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1888 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1889 }
1890 memcpy(pos, rta, rta->rta_len);
1891 pos += RTA_ALIGN(rta->rta_len);
1892 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1893 }
1894 rta = RTA_NEXT(rta, rtasize);
1895 }
1896
1897 rta = (struct rtattr*)pos;
1898 if (tmpl == NULL && new_encap)
1899 { /* add tmpl if we are enabling it */
1900 rta->rta_type = XFRMA_ENCAP;
1901 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1902
1903 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1904 if (hdr->nlmsg_len > sizeof(request))
1905 {
1906 goto failed;
1907 }
1908
1909 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1910 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1911 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1912 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1913 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1914
1915 rta = XFRM_RTA_NEXT(rta);
1916 }
1917
1918 if (replay_esn)
1919 {
1920 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1921 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1922 this->replay_bmp);
1923
1924 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1925 if (hdr->nlmsg_len > sizeof(request))
1926 {
1927 goto failed;
1928 }
1929 memcpy(RTA_DATA(rta), replay_esn,
1930 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1931
1932 rta = XFRM_RTA_NEXT(rta);
1933 }
1934 else if (replay)
1935 {
1936 rta->rta_type = XFRMA_REPLAY_VAL;
1937 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1938
1939 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1940 if (hdr->nlmsg_len > sizeof(request))
1941 {
1942 goto failed;
1943 }
1944 memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
1945
1946 rta = XFRM_RTA_NEXT(rta);
1947 }
1948 else
1949 {
1950 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1951 "with SPI %.8x", ntohl(spi));
1952 }
1953
1954 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1955 {
1956 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1957 goto failed;
1958 }
1959
1960 status = SUCCESS;
1961 failed:
1962 free(replay);
1963 free(replay_esn);
1964 memwipe(out, len);
1965 memwipe(request, sizeof(request));
1966 free(out);
1967
1968 return status;
1969 }
1970
1971 METHOD(kernel_ipsec_t, flush_sas, status_t,
1972 private_kernel_netlink_ipsec_t *this)
1973 {
1974 netlink_buf_t request;
1975 struct nlmsghdr *hdr;
1976 struct xfrm_usersa_flush *flush;
1977
1978 memset(&request, 0, sizeof(request));
1979
1980 DBG2(DBG_KNL, "flushing all SAD entries");
1981
1982 hdr = (struct nlmsghdr*)request;
1983 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1984 hdr->nlmsg_type = XFRM_MSG_FLUSHSA;
1985 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
1986
1987 flush = (struct xfrm_usersa_flush*)NLMSG_DATA(hdr);
1988 flush->proto = IPSEC_PROTO_ANY;
1989
1990 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1991 {
1992 DBG1(DBG_KNL, "unable to flush SAD entries");
1993 return FAILED;
1994 }
1995 return SUCCESS;
1996 }
1997
1998 /**
1999 * Add or update a policy in the kernel.
2000 *
2001 * Note: The mutex has to be locked when entering this function
2002 * and is unlocked here in any case.
2003 */
2004 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
2005 policy_entry_t *policy, policy_sa_t *mapping, bool update)
2006 {
2007 netlink_buf_t request;
2008 policy_entry_t clone;
2009 ipsec_sa_t *ipsec = mapping->sa;
2010 struct xfrm_userpolicy_info *policy_info;
2011 struct nlmsghdr *hdr;
2012 int i;
2013
2014 /* clone the policy so we are able to check it out again later */
2015 memcpy(&clone, policy, sizeof(policy_entry_t));
2016
2017 memset(&request, 0, sizeof(request));
2018 hdr = (struct nlmsghdr*)request;
2019 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2020 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
2021 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
2022
2023 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2024 policy_info->sel = policy->sel;
2025 policy_info->dir = policy->direction;
2026
2027 /* use the priority calculated for this SA mapping (see get_priority()) */
2028 policy_info->priority = mapping->priority;
2029 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
2030 : XFRM_POLICY_BLOCK;
2031 policy_info->share = XFRM_SHARE_ANY;
2032
2033 /* policies don't expire */
2034 policy_info->lft.soft_byte_limit = XFRM_INF;
2035 policy_info->lft.soft_packet_limit = XFRM_INF;
2036 policy_info->lft.hard_byte_limit = XFRM_INF;
2037 policy_info->lft.hard_packet_limit = XFRM_INF;
2038 policy_info->lft.soft_add_expires_seconds = 0;
2039 policy_info->lft.hard_add_expires_seconds = 0;
2040 policy_info->lft.soft_use_expires_seconds = 0;
2041 policy_info->lft.hard_use_expires_seconds = 0;
2042
2043 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
2044
2045 if (mapping->type == POLICY_IPSEC)
2046 {
2047 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
2048 struct {
2049 u_int8_t proto;
2050 bool use;
2051 } protos[] = {
2052 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
2053 { IPPROTO_ESP, ipsec->cfg.esp.use },
2054 { IPPROTO_AH, ipsec->cfg.ah.use },
2055 };
2056 ipsec_mode_t proto_mode = ipsec->cfg.mode;
2057
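/* one xfrm_user_tmpl is appended per protocol in use (IPComp, ESP, AH); only
 * the first template gets the negotiated mode, any further ones fall back to
 * transport mode below */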
2058 rthdr->rta_type = XFRMA_TMPL;
2059 rthdr->rta_len = 0; /* actual length is set below */
2060
2061 for (i = 0; i < countof(protos); i++)
2062 {
2063 if (!protos[i].use)
2064 {
2065 continue;
2066 }
2067
2068 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
2069 hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
2070 if (hdr->nlmsg_len > sizeof(request))
2071 {
2072 this->mutex->unlock(this->mutex);
2073 return FAILED;
2074 }
2075
2076 tmpl->reqid = ipsec->cfg.reqid;
2077 tmpl->id.proto = protos[i].proto;
2078 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0; /* any algorithm */
2079 tmpl->mode = mode2kernel(proto_mode);
2080 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2081 policy->direction != POLICY_OUT;
2082 tmpl->family = ipsec->src->get_family(ipsec->src);
2083
2084 if (proto_mode == MODE_TUNNEL)
2085 { /* only for tunnel mode */
2086 host2xfrm(ipsec->src, &tmpl->saddr);
2087 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2088 }
2089
2090 tmpl++;
2091
2092 /* use transport mode for other SAs */
2093 proto_mode = MODE_TRANSPORT;
2094 }
2095
2096 rthdr = XFRM_RTA_NEXT(rthdr);
2097 }
2098
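/* an XFRMA_MARK attribute limits the policy to packets whose mark matches the
 * configured value under the given mask */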
2099 if (ipsec->mark.value)
2100 {
2101 struct xfrm_mark *mrk;
2102
2103 rthdr->rta_type = XFRMA_MARK;
2104 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2105
2106 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2107 if (hdr->nlmsg_len > sizeof(request))
2108 {
2109 this->mutex->unlock(this->mutex);
2110 return FAILED;
2111 }
2112
2113 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2114 mrk->v = ipsec->mark.value;
2115 mrk->m = ipsec->mark.mask;
2116 }
2117 this->mutex->unlock(this->mutex);
2118
2119 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2120 {
2121 return FAILED;
2122 }
2123
2124 /* find the policy again */
2125 this->mutex->lock(this->mutex);
2126 policy = this->policies->get(this->policies, &clone);
2127 if (!policy ||
2128 policy->used_by->find_first(policy->used_by,
2129 NULL, (void**)&mapping) != SUCCESS)
2130 { /* policy or mapping is already gone, ignore */
2131 this->mutex->unlock(this->mutex);
2132 return SUCCESS;
2133 }
2134
2135 /* install a route, if:
2136 * - this is a forward policy (to just get one for each child)
2137 * - we are in tunnel/BEET mode
2138 * - routing is not disabled via strongswan.conf
2139 */
2140 if (policy->direction == POLICY_FWD &&
2141 ipsec->cfg.mode != MODE_TRANSPORT && this->install_routes)
2142 {
2143 route_entry_t *route = malloc_thing(route_entry_t);
2144 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2145
2146 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2147 fwd->dst_ts, &route->src_ip) == SUCCESS)
2148 {
2149 /* get the nexthop to src (src as we are in POLICY_FWD) */
2150 route->gateway = hydra->kernel_interface->get_nexthop(
2151 hydra->kernel_interface, ipsec->src);
2152 /* install route via outgoing interface */
2153 route->if_name = hydra->kernel_interface->get_interface(
2154 hydra->kernel_interface, ipsec->dst);
2155 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2156 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2157 route->prefixlen = policy->sel.prefixlen_s;
2158
2159 if (!route->if_name)
2160 {
2161 this->mutex->unlock(this->mutex);
2162 route_entry_destroy(route);
2163 return SUCCESS;
2164 }
2165
2166 if (policy->route)
2167 {
2168 route_entry_t *old = policy->route;
2169 if (route_entry_equals(old, route))
2170 {
2171 this->mutex->unlock(this->mutex);
2172 route_entry_destroy(route);
2173 return SUCCESS;
2174 }
2175 /* uninstall previously installed route */
2176 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2177 old->dst_net, old->prefixlen, old->gateway,
2178 old->src_ip, old->if_name) != SUCCESS)
2179 {
2180 DBG1(DBG_KNL, "error uninstalling route installed with "
2181 "policy %R === %R %N", fwd->src_ts,
2182 fwd->dst_ts, policy_dir_names,
2183 policy->direction);
2184 }
2185 route_entry_destroy(old);
2186 policy->route = NULL;
2187 }
2188
2189 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2190 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2191 switch (hydra->kernel_interface->add_route(
2192 hydra->kernel_interface, route->dst_net,
2193 route->prefixlen, route->gateway,
2194 route->src_ip, route->if_name))
2195 {
2196 default:
2197 DBG1(DBG_KNL, "unable to install source route for %H",
2198 route->src_ip);
2199 /* FALL */
2200 case ALREADY_DONE:
2201 /* route exists, do not uninstall */
2202 route_entry_destroy(route);
2203 break;
2204 case SUCCESS:
2205 /* cache the installed route */
2206 policy->route = route;
2207 break;
2208 }
2209 }
2210 else
2211 {
2212 free(route);
2213 }
2214 }
2215 this->mutex->unlock(this->mutex);
2216 return SUCCESS;
2217 }
2218
2219 METHOD(kernel_ipsec_t, add_policy, status_t,
2220 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
2221 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
2222 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
2223 mark_t mark, policy_priority_t priority)
2224 {
2225 policy_entry_t *policy, *current;
2226 policy_sa_t *assigned_sa, *current_sa;
2227 enumerator_t *enumerator;
2228 bool found = FALSE, update = TRUE;
2229
2230 /* create a policy */
2231 INIT(policy,
2232 .sel = ts2selector(src_ts, dst_ts),
2233 .mark = mark.value & mark.mask,
2234 .direction = direction,
2235 );
2236
2237 /* find the policy, which matches EXACTLY */
2238 this->mutex->lock(this->mutex);
2239 current = this->policies->get(this->policies, policy);
2240 if (current)
2241 {
2242 /* use existing policy */
2243 DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%08x) "
2244 "already exists, increasing refcount",
2245 src_ts, dst_ts, policy_dir_names, direction,
2246 mark.value, mark.mask);
2247 policy_entry_destroy(this, policy);
2248 policy = current;
2249 found = TRUE;
2250 }
2251 else
2252 { /* use the new one, if we have no such policy */
2253 policy->used_by = linked_list_create();
2254 this->policies->put(this->policies, policy, policy);
2255 }
2256
2257 /* cache the assigned IPsec SA */
2258 assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
2259 dst_ts, mark, sa);
2260 assigned_sa->priority = get_priority(policy, priority);
2261
2262 if (this->policy_history)
2263 { /* insert the SA according to its priority */
2264 enumerator = policy->used_by->create_enumerator(policy->used_by);
2265 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2266 {
2267 if (current_sa->priority >= assigned_sa->priority)
2268 {
2269 break;
2270 }
2271 update = FALSE;
2272 }
2273 policy->used_by->insert_before(policy->used_by, enumerator,
2274 assigned_sa);
2275 enumerator->destroy(enumerator);
2276 }
2277 else
2278 { /* simply insert it last and only update if it is not installed yet */
2279 policy->used_by->insert_last(policy->used_by, assigned_sa);
2280 update = !found;
2281 }
2282
2283 if (!update)
2284 { /* we don't update the policy if the priority is lower than that of
2285 * the currently installed one */
2286 this->mutex->unlock(this->mutex);
2287 return SUCCESS;
2288 }
2289
2290 DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%08x)",
2291 found ? "updating" : "adding", src_ts, dst_ts,
2292 policy_dir_names, direction, mark.value, mark.mask);
2293
2294 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2295 {
2296 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2297 found ? "update" : "add", src_ts, dst_ts,
2298 policy_dir_names, direction);
2299 return FAILED;
2300 }
2301 return SUCCESS;
2302 }
2303
2304 METHOD(kernel_ipsec_t, query_policy, status_t,
2305 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2306 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2307 u_int32_t *use_time)
2308 {
2309 netlink_buf_t request;
2310 struct nlmsghdr *out = NULL, *hdr;
2311 struct xfrm_userpolicy_id *policy_id;
2312 struct xfrm_userpolicy_info *policy = NULL;
2313 size_t len;
2314
2315 memset(&request, 0, sizeof(request));
2316
2317 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%08x)",
2318 src_ts, dst_ts, policy_dir_names, direction,
2319 mark.value, mark.mask);
2320
2321 hdr = (struct nlmsghdr*)request;
2322 hdr->nlmsg_flags = NLM_F_REQUEST;
2323 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2324 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2325
2326 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2327 policy_id->sel = ts2selector(src_ts, dst_ts);
2328 policy_id->dir = direction;
2329
2330 if (mark.value)
2331 {
2332 struct xfrm_mark *mrk;
2333 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2334
2335 rthdr->rta_type = XFRMA_MARK;
2336 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2337
2338 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2339 if (hdr->nlmsg_len > sizeof(request))
2340 {
2341 return FAILED;
2342 }
2343
2344 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2345 mrk->v = mark.value;
2346 mrk->m = mark.mask;
2347 }
2348
2349 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2350 {
2351 hdr = out;
2352 while (NLMSG_OK(hdr, len))
2353 {
2354 switch (hdr->nlmsg_type)
2355 {
2356 case XFRM_MSG_NEWPOLICY:
2357 {
2358 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2359 break;
2360 }
2361 case NLMSG_ERROR:
2362 {
2363 struct nlmsgerr *err = NLMSG_DATA(hdr);
2364 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2365 strerror(-err->error), -err->error);
2366 break;
2367 }
2368 default:
2369 hdr = NLMSG_NEXT(hdr, len);
2370 continue;
2371 case NLMSG_DONE:
2372 break;
2373 }
2374 break;
2375 }
2376 }
2377
2378 if (policy == NULL)
2379 {
2380 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2381 policy_dir_names, direction);
2382 free(out);
2383 return FAILED;
2384 }
2385
2386 if (policy->curlft.use_time)
2387 {
2388 /* we need the monotonic time, but the kernel returns system time. */
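/* subtract the elapsed time (system time minus use_time) from the current
 * monotonic time to express the use time in the monotonic timebase */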
2389 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2390 }
2391 else
2392 {
2393 *use_time = 0;
2394 }
2395
2396 free(out);
2397 return SUCCESS;
2398 }
2399
2400 METHOD(kernel_ipsec_t, del_policy, status_t,
2401 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2402 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2403 mark_t mark, policy_priority_t prio)
2404 {
2405 policy_entry_t *current, policy;
2406 enumerator_t *enumerator;
2407 policy_sa_t *mapping;
2408 netlink_buf_t request;
2409 struct nlmsghdr *hdr;
2410 struct xfrm_userpolicy_id *policy_id;
2411 bool is_installed = TRUE;
2412 u_int32_t priority;
2413
2414 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x)",
2415 src_ts, dst_ts, policy_dir_names, direction,
2416 mark.value, mark.mask);
2417
2418 /* create a policy */
2419 memset(&policy, 0, sizeof(policy_entry_t));
2420 policy.sel = ts2selector(src_ts, dst_ts);
2421 policy.mark = mark.value & mark.mask;
2422 policy.direction = direction;
2423
2424 /* find the policy */
2425 this->mutex->lock(this->mutex);
2426 current = this->policies->get(this->policies, &policy);
2427 if (!current)
2428 {
2429 if (mark.value)
2430 {
2431 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x) "
2432 "failed, not found", src_ts, dst_ts, policy_dir_names,
2433 direction, mark.value, mark.mask);
2434 }
2435 else
2436 {
2437 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2438 src_ts, dst_ts, policy_dir_names, direction);
2439 }
2440 this->mutex->unlock(this->mutex);
2441 return NOT_FOUND;
2442 }
2443
2444 if (this->policy_history)
2445 { /* remove mapping to SA by reqid and priority */
2446 priority = get_priority(current, prio);
2447 enumerator = current->used_by->create_enumerator(current->used_by);
2448 while (enumerator->enumerate(enumerator, (void**)&mapping))
2449 {
2450 if (reqid == mapping->sa->cfg.reqid &&
2451 priority == mapping->priority)
2452 {
2453 current->used_by->remove_at(current->used_by, enumerator);
2454 policy_sa_destroy(mapping, &direction, this);
2455 break;
2456 }
2457 is_installed = FALSE;
2458 }
2459 enumerator->destroy(enumerator);
2460 }
2461 else
2462 { /* remove one of the SAs but don't update the policy */
2463 current->used_by->remove_last(current->used_by, (void**)&mapping);
2464 policy_sa_destroy(mapping, &direction, this);
2465 is_installed = FALSE;
2466 }
2467
2468 if (current->used_by->get_count(current->used_by) > 0)
2469 { /* policy is used by more SAs, keep in kernel */
2470 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2471 if (!is_installed)
2472 { /* no need to update as the policy was not installed for this SA */
2473 this->mutex->unlock(this->mutex);
2474 return SUCCESS;
2475 }
2476
2477 DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%08x)",
2478 src_ts, dst_ts, policy_dir_names, direction,
2479 mark.value, mark.mask);
2480
2481 current->used_by->get_first(current->used_by, (void**)&mapping);
2482 if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
2483 {
2484 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2485 src_ts, dst_ts, policy_dir_names, direction);
2486 return FAILED;
2487 }
2488 return SUCCESS;
2489 }
2490
2491 memset(&request, 0, sizeof(request));
2492
2493 hdr = (struct nlmsghdr*)request;
2494 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2495 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2496 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2497
2498 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2499 policy_id->sel = current->sel;
2500 policy_id->dir = direction;
2501
2502 if (mark.value)
2503 {
2504 struct xfrm_mark *mrk;
2505 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2506
2507 rthdr->rta_type = XFRMA_MARK;
2508 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2509 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2510 if (hdr->nlmsg_len > sizeof(request))
2511 {
2512 this->mutex->unlock(this->mutex);
2513 return FAILED;
2514 }
2515
2516 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2517 mrk->v = mark.value;
2518 mrk->m = mark.mask;
2519 }
2520
2521 if (current->route)
2522 {
2523 route_entry_t *route = current->route;
2524 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2525 route->dst_net, route->prefixlen, route->gateway,
2526 route->src_ip, route->if_name) != SUCCESS)
2527 {
2528 DBG1(DBG_KNL, "error uninstalling route installed with "
2529 "policy %R === %R %N", src_ts, dst_ts,
2530 policy_dir_names, direction);
2531 }
2532 }
2533
2534 this->policies->remove(this->policies, current);
2535 policy_entry_destroy(this, current);
2536 this->mutex->unlock(this->mutex);
2537
2538 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2539 {
2540 if (mark.value)
2541 {
2542 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2543 "(mark %u/0x%08x)", src_ts, dst_ts, policy_dir_names,
2544 direction, mark.value, mark.mask);
2545 }
2546 else
2547 {
2548 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2549 src_ts, dst_ts, policy_dir_names, direction);
2550 }
2551 return FAILED;
2552 }
2553 return SUCCESS;
2554 }
2555
2556 METHOD(kernel_ipsec_t, flush_policies, status_t,
2557 private_kernel_netlink_ipsec_t *this)
2558 {
2559 netlink_buf_t request;
2560 struct nlmsghdr *hdr;
2561
2562 memset(&request, 0, sizeof(request));
2563
2564 DBG2(DBG_KNL, "flushing all policies from SPD");
2565
2566 hdr = (struct nlmsghdr*)request;
2567 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2568 hdr->nlmsg_type = XFRM_MSG_FLUSHPOLICY;
2569 hdr->nlmsg_len = NLMSG_LENGTH(0); /* no data associated */
2570
2571 /* by adding an rtattr of type XFRMA_POLICY_TYPE we could restrict this
2572 * to main or sub policies (default is main) */
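/* a rough, untested sketch of how such an attribute could be attached, using
 * the definitions from linux/xfrm.h (not done here):
 *
 *   struct xfrm_userpolicy_type *type;
 *   struct rtattr *rta = (struct rtattr*)NLMSG_DATA(hdr);
 *
 *   rta->rta_type = XFRMA_POLICY_TYPE;
 *   rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_userpolicy_type));
 *   hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
 *   type = (struct xfrm_userpolicy_type*)RTA_DATA(rta);
 *   type->type = XFRM_POLICY_TYPE_SUB;
 */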
2573
2574 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2575 {
2576 DBG1(DBG_KNL, "unable to flush SPD entries");
2577 return FAILED;
2578 }
2579 return SUCCESS;
2580 }
2581
2582
2583 METHOD(kernel_ipsec_t, bypass_socket, bool,
2584 private_kernel_netlink_ipsec_t *this, int fd, int family)
2585 {
2586 struct xfrm_userpolicy_info policy;
2587 u_int sol, ipsec_policy;
2588
2589 switch (family)
2590 {
2591 case AF_INET:
2592 sol = SOL_IP;
2593 ipsec_policy = IP_XFRM_POLICY;
2594 break;
2595 case AF_INET6:
2596 sol = SOL_IPV6;
2597 ipsec_policy = IPV6_XFRM_POLICY;
2598 break;
2599 default:
2600 return FALSE;
2601 }
2602
2603 memset(&policy, 0, sizeof(policy));
2604 policy.action = XFRM_POLICY_ALLOW;
2605 policy.sel.family = family;
2606
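/* attach ALLOW policies with an empty selector to the socket itself; such
 * per-socket policies exempt the daemon's own (IKE) traffic from the
 * installed IPsec policies */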
2607 policy.dir = XFRM_POLICY_OUT;
2608 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2609 {
2610 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2611 strerror(errno));
2612 return FALSE;
2613 }
2614 policy.dir = XFRM_POLICY_IN;
2615 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2616 {
2617 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2618 strerror(errno));
2619 return FALSE;
2620 }
2621 return TRUE;
2622 }
2623
2624 METHOD(kernel_ipsec_t, enable_udp_decap, bool,
2625 private_kernel_netlink_ipsec_t *this, int fd, int family, u_int16_t port)
2626 {
2627 int type = UDP_ENCAP_ESPINUDP;
2628
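/* flag the socket for RFC 3948 UDP encapsulation, so the kernel detects and
 * decapsulates ESP-in-UDP packets received on it */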
2629 if (setsockopt(fd, SOL_UDP, UDP_ENCAP, &type, sizeof(type)) < 0)
2630 {
2631 DBG1(DBG_KNL, "unable to set UDP_ENCAP: %s", strerror(errno));
2632 return FALSE;
2633 }
2634 return TRUE;
2635 }
2636
2637 METHOD(kernel_ipsec_t, destroy, void,
2638 private_kernel_netlink_ipsec_t *this)
2639 {
2640 enumerator_t *enumerator;
2641 policy_entry_t *policy;
2642
2643 if (this->socket_xfrm_events > 0)
2644 {
2645 close(this->socket_xfrm_events);
2646 }
2647 DESTROY_IF(this->socket_xfrm);
2648 enumerator = this->policies->create_enumerator(this->policies);
2649 while (enumerator->enumerate(enumerator, &policy, &policy))
2650 {
2651 policy_entry_destroy(this, policy);
2652 }
2653 enumerator->destroy(enumerator);
2654 this->policies->destroy(this->policies);
2655 this->sas->destroy(this->sas);
2656 this->mutex->destroy(this->mutex);
2657 free(this);
2658 }
2659
2660 /*
2661 * Described in header.
2662 */
2663 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2664 {
2665 private_kernel_netlink_ipsec_t *this;
2666 bool register_for_events = TRUE;
2667 int fd;
2668
2669 INIT(this,
2670 .public = {
2671 .interface = {
2672 .get_spi = _get_spi,
2673 .get_cpi = _get_cpi,
2674 .add_sa = _add_sa,
2675 .update_sa = _update_sa,
2676 .query_sa = _query_sa,
2677 .del_sa = _del_sa,
2678 .flush_sas = _flush_sas,
2679 .add_policy = _add_policy,
2680 .query_policy = _query_policy,
2681 .del_policy = _del_policy,
2682 .flush_policies = _flush_policies,
2683 .bypass_socket = _bypass_socket,
2684 .enable_udp_decap = _enable_udp_decap,
2685 .destroy = _destroy,
2686 },
2687 },
2688 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2689 (hashtable_equals_t)policy_equals, 32),
2690 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2691 (hashtable_equals_t)ipsec_sa_equals, 32),
2692 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2693 .policy_history = TRUE,
2694 .install_routes = lib->settings->get_bool(lib->settings,
2695 "%s.install_routes", TRUE, hydra->daemon),
2696 .replay_window = lib->settings->get_int(lib->settings,
2697 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2698 );
2699
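/* number of 32-bit words needed to hold a replay bitmap of replay_window bits */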
2700 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2701 (sizeof(u_int32_t) * 8);
2702
2703 if (streq(hydra->daemon, "pluto"))
2704 { /* no routes for pluto, they are installed via updown script */
2705 this->install_routes = FALSE;
2706 /* no policy history for pluto */
2707 this->policy_history = FALSE;
2708 }
2709 else if (streq(hydra->daemon, "starter"))
2710 { /* starter has no threads, so we do not register for kernel events */
2711 register_for_events = FALSE;
2712 }
2713
2714 /* raise the kernel's expiration timeout for allocated SPIs (larval SAs) to 165 seconds */
2715 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2716 if (fd >= 0)
2717 {
2718 ignore_result(write(fd, "165", 3));
2719 close(fd);
2720 }
2721
2722 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2723 if (!this->socket_xfrm)
2724 {
2725 destroy(this);
2726 return NULL;
2727 }
2728
2729 if (register_for_events)
2730 {
2731 struct sockaddr_nl addr;
2732
2733 memset(&addr, 0, sizeof(addr));
2734 addr.nl_family = AF_NETLINK;
2735
2736 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2737 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2738 if (this->socket_xfrm_events <= 0)
2739 {
2740 DBG1(DBG_KNL, "unable to create XFRM event socket");
2741 destroy(this);
2742 return NULL;
2743 }
2744 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2745 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2746 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2747 {
2748 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2749 destroy(this);
2750 return NULL;
2751 }
2752 lib->processor->queue_job(lib->processor,
2753 (job_t*)callback_job_create_with_prio(
2754 (callback_job_cb_t)receive_events, this, NULL,
2755 (callback_job_cancel_t)return_false, JOB_PRIO_CRITICAL));
2756 }
2757
2758 return &this->public;
2759 }
2760