Compile warning fixed in kernel interfaces.
src/libhydra/plugins/kernel_netlink/kernel_netlink_ipsec.c
1 /*
2 * Copyright (C) 2006-2011 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <utils/linked_list.h>
44 #include <processing/jobs/callback_job.h>
45
46 /** Required for Linux 2.6.26 kernel and later */
47 #ifndef XFRM_STATE_AF_UNSPEC
48 #define XFRM_STATE_AF_UNSPEC 32
49 #endif
50
51 /** From linux/in.h */
52 #ifndef IP_XFRM_POLICY
53 #define IP_XFRM_POLICY 17
54 #endif
55
56 /** Missing on uclibc */
57 #ifndef IPV6_XFRM_POLICY
58 #define IPV6_XFRM_POLICY 34
59 #endif /*IPV6_XFRM_POLICY*/
60
61 /** Default priority of installed policies */
62 #define PRIO_BASE 512
63
64 /** Default replay window size, if not set using charon.replay_window */
65 #define DEFAULT_REPLAY_WINDOW 32
66
67 /**
68 * Map the limit for bytes and packets to XFRM_INF by default
69 */
70 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
71
72 /**
73 * Create ORable bitfield of XFRM NL groups
74 */
75 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
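/*
 * Illustrative sketch (editor's addition, not part of the original sources):
 * XFRMNLGRP() builds the per-group bit for nl_groups when binding a netlink
 * socket to XFRM multicast groups, e.g. to receive the events handled by
 * receive_events() below:
 *
 *   struct sockaddr_nl addr = {
 *       .nl_family = AF_NETLINK,
 *       .nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
 *                    XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING),
 *   };
 *   bind(fd, (struct sockaddr*)&addr, sizeof(addr));
 */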
76
77 /**
78 * Returns a pointer to the first rtattr following the nlmsghdr *nlh and the
79 * 'usual' netlink data x like 'struct xfrm_usersa_info'
80 */
81 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + \
82 NLMSG_ALIGN(sizeof(x))))
83 /**
84 * Returns a pointer to the next rtattr following rta.
85 * !!! Do not use this to parse messages. Use RTA_NEXT and RTA_OK instead !!!
86 */
87 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + \
88 RTA_ALIGN((rta)->rta_len)))
89 /**
90 * Returns the total size of attached rta data
91 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
92 */
93 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
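/*
 * Usage sketch (editor's addition, not part of the original sources): the
 * XFRM_RTA*() macros above are meant for building requests; received messages
 * are parsed with the standard RTA_OK()/RTA_NEXT() macros instead, as done in
 * process_acquire() below:
 *
 *   struct rtattr *rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
 *   size_t rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
 *   while (RTA_OK(rta, rtasize))
 *   {
 *       switch (rta->rta_type) { ... }
 *       rta = RTA_NEXT(rta, rtasize);
 *   }
 */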
94
95 typedef struct kernel_algorithm_t kernel_algorithm_t;
96
97 /**
98 * Mapping of IKEv2 kernel identifier to linux crypto API names
99 */
100 struct kernel_algorithm_t {
101 /**
102 * Identifier specified in IKEv2
103 */
104 int ikev2;
105
106 /**
107 * Name of the algorithm in linux crypto API
108 */
109 char *name;
110 };
111
112 ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
113 "XFRM_MSG_NEWSA",
114 "XFRM_MSG_DELSA",
115 "XFRM_MSG_GETSA",
116 "XFRM_MSG_NEWPOLICY",
117 "XFRM_MSG_DELPOLICY",
118 "XFRM_MSG_GETPOLICY",
119 "XFRM_MSG_ALLOCSPI",
120 "XFRM_MSG_ACQUIRE",
121 "XFRM_MSG_EXPIRE",
122 "XFRM_MSG_UPDPOLICY",
123 "XFRM_MSG_UPDSA",
124 "XFRM_MSG_POLEXPIRE",
125 "XFRM_MSG_FLUSHSA",
126 "XFRM_MSG_FLUSHPOLICY",
127 "XFRM_MSG_NEWAE",
128 "XFRM_MSG_GETAE",
129 "XFRM_MSG_REPORT",
130 "XFRM_MSG_MIGRATE",
131 "XFRM_MSG_NEWSADINFO",
132 "XFRM_MSG_GETSADINFO",
133 "XFRM_MSG_NEWSPDINFO",
134 "XFRM_MSG_GETSPDINFO",
135 "XFRM_MSG_MAPPING"
136 );
137
138 ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_KMADDRESS,
139 "XFRMA_UNSPEC",
140 "XFRMA_ALG_AUTH",
141 "XFRMA_ALG_CRYPT",
142 "XFRMA_ALG_COMP",
143 "XFRMA_ENCAP",
144 "XFRMA_TMPL",
145 "XFRMA_SA",
146 "XFRMA_POLICY",
147 "XFRMA_SEC_CTX",
148 "XFRMA_LTIME_VAL",
149 "XFRMA_REPLAY_VAL",
150 "XFRMA_REPLAY_THRESH",
151 "XFRMA_ETIMER_THRESH",
152 "XFRMA_SRCADDR",
153 "XFRMA_COADDR",
154 "XFRMA_LASTUSED",
155 "XFRMA_POLICY_TYPE",
156 "XFRMA_MIGRATE",
157 "XFRMA_ALG_AEAD",
158 "XFRMA_KMADDRESS"
159 );
160
161 #define END_OF_LIST -1
162
163 /**
164 * Algorithms for encryption
165 */
166 static kernel_algorithm_t encryption_algs[] = {
167 /* {ENCR_DES_IV64, "***" }, */
168 {ENCR_DES, "des" },
169 {ENCR_3DES, "des3_ede" },
170 /* {ENCR_RC5, "***" }, */
171 /* {ENCR_IDEA, "***" }, */
172 {ENCR_CAST, "cast128" },
173 {ENCR_BLOWFISH, "blowfish" },
174 /* {ENCR_3IDEA, "***" }, */
175 /* {ENCR_DES_IV32, "***" }, */
176 {ENCR_NULL, "cipher_null" },
177 {ENCR_AES_CBC, "aes" },
178 {ENCR_AES_CTR, "rfc3686(ctr(aes))" },
179 {ENCR_AES_CCM_ICV8, "rfc4309(ccm(aes))" },
180 {ENCR_AES_CCM_ICV12, "rfc4309(ccm(aes))" },
181 {ENCR_AES_CCM_ICV16, "rfc4309(ccm(aes))" },
182 {ENCR_AES_GCM_ICV8, "rfc4106(gcm(aes))" },
183 {ENCR_AES_GCM_ICV12, "rfc4106(gcm(aes))" },
184 {ENCR_AES_GCM_ICV16, "rfc4106(gcm(aes))" },
185 {ENCR_NULL_AUTH_AES_GMAC, "rfc4543(gcm(aes))" },
186 {ENCR_CAMELLIA_CBC, "cbc(camellia)" },
187 /* {ENCR_CAMELLIA_CTR, "***" }, */
188 /* {ENCR_CAMELLIA_CCM_ICV8, "***" }, */
189 /* {ENCR_CAMELLIA_CCM_ICV12, "***" }, */
190 /* {ENCR_CAMELLIA_CCM_ICV16, "***" }, */
191 {ENCR_SERPENT_CBC, "serpent" },
192 {ENCR_TWOFISH_CBC, "twofish" },
193 {END_OF_LIST, NULL }
194 };
195
196 /**
197 * Algorithms for integrity protection
198 */
199 static kernel_algorithm_t integrity_algs[] = {
200 {AUTH_HMAC_MD5_96, "md5" },
201 {AUTH_HMAC_SHA1_96, "sha1" },
202 {AUTH_HMAC_SHA2_256_96, "sha256" },
203 {AUTH_HMAC_SHA2_256_128, "hmac(sha256)" },
204 {AUTH_HMAC_SHA2_384_192, "hmac(sha384)" },
205 {AUTH_HMAC_SHA2_512_256, "hmac(sha512)" },
206 /* {AUTH_DES_MAC, "***" }, */
207 /* {AUTH_KPDK_MD5, "***" }, */
208 {AUTH_AES_XCBC_96, "xcbc(aes)" },
209 {END_OF_LIST, NULL }
210 };
211
212 /**
213 * Algorithms for IPComp
214 */
215 static kernel_algorithm_t compression_algs[] = {
216 /* {IPCOMP_OUI, "***" }, */
217 {IPCOMP_DEFLATE, "deflate" },
218 {IPCOMP_LZS, "lzs" },
219 {IPCOMP_LZJH, "lzjh" },
220 {END_OF_LIST, NULL }
221 };
222
223 /**
224 * Look up the name of a kernel algorithm by its IKEv2 identifier
225 */
226 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
227 {
228 while (list->ikev2 != END_OF_LIST)
229 {
230 if (list->ikev2 == ikev2)
231 {
232 return list->name;
233 }
234 list++;
235 }
236 return NULL;
237 }
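/*
 * Illustrative usage (editor's addition, not part of the original sources):
 *
 *   lookup_algorithm(encryption_algs, ENCR_AES_CBC);       returns "aes"
 *   lookup_algorithm(integrity_algs, AUTH_HMAC_SHA1_96);   returns "sha1"
 *   lookup_algorithm(encryption_algs, ENCR_IDEA);          returns NULL (unmapped)
 */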
238
239 typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;
240
241 /**
242 * Private variables and functions of kernel_netlink class.
243 */
244 struct private_kernel_netlink_ipsec_t {
245 /**
246 * Public part of the kernel_netlink_t object
247 */
248 kernel_netlink_ipsec_t public;
249
250 /**
251 * Mutex to lock access to installed policies
252 */
253 mutex_t *mutex;
254
255 /**
256 * Hash table of installed policies (policy_entry_t)
257 */
258 hashtable_t *policies;
259
260 /**
261 * Hash table of IPsec SAs using policies (ipsec_sa_t)
262 */
263 hashtable_t *sas;
264
265 /**
266 * Job receiving netlink events
267 */
268 callback_job_t *job;
269
270 /**
271 * Netlink xfrm socket (IPsec)
272 */
273 netlink_socket_t *socket_xfrm;
274
275 /**
276 * Netlink xfrm socket to receive acquire and expire events
277 */
278 int socket_xfrm_events;
279
280 /**
281 * Whether to install routes along policies
282 */
283 bool install_routes;
284
285 /**
286 * Whether to track the history of a policy
287 */
288 bool policy_history;
289
290 /**
291 * Size of the replay window, in packets
292 */
293 u_int32_t replay_window;
294
295 /**
296 * Size of the replay window bitmap, in bytes
297 */
298 u_int32_t replay_bmp;
299 };
300
301 typedef struct route_entry_t route_entry_t;
302
303 /**
304 * Installed routing entry
305 */
306 struct route_entry_t {
307 /** Name of the interface the route is bound to */
308 char *if_name;
309
310 /** Source ip of the route */
311 host_t *src_ip;
312
313 /** Gateway for this route */
314 host_t *gateway;
315
316 /** Destination net */
317 chunk_t dst_net;
318
319 /** Destination net prefixlen */
320 u_int8_t prefixlen;
321 };
322
323 /**
324 * Destroy a route_entry_t object
325 */
326 static void route_entry_destroy(route_entry_t *this)
327 {
328 free(this->if_name);
329 this->src_ip->destroy(this->src_ip);
330 DESTROY_IF(this->gateway);
331 chunk_free(&this->dst_net);
332 free(this);
333 }
334
335 /**
336 * Compare two route_entry_t objects
337 */
338 static bool route_entry_equals(route_entry_t *a, route_entry_t *b)
339 {
340 return a->if_name && b->if_name && streq(a->if_name, b->if_name) &&
341 a->src_ip->equals(a->src_ip, b->src_ip) &&
342 a->gateway->equals(a->gateway, b->gateway) &&
343 chunk_equals(a->dst_net, b->dst_net) && a->prefixlen == b->prefixlen;
344 }
345
346 typedef struct ipsec_sa_t ipsec_sa_t;
347
348 /**
349 * IPsec SA assigned to a policy.
350 */
351 struct ipsec_sa_t {
352 /** Source address of this SA */
353 host_t *src;
354
355 /** Destination address of this SA */
356 host_t *dst;
357
358 /** Optional mark */
359 mark_t mark;
360
361 /** Description of this SA */
362 ipsec_sa_cfg_t cfg;
363
364 /** Reference count for this SA */
365 refcount_t refcount;
366 };
367
368 /**
369 * Hash function for ipsec_sa_t objects
370 */
371 static u_int ipsec_sa_hash(ipsec_sa_t *sa)
372 {
373 return chunk_hash_inc(sa->src->get_address(sa->src),
374 chunk_hash_inc(sa->dst->get_address(sa->dst),
375 chunk_hash_inc(chunk_from_thing(sa->mark),
376 chunk_hash(chunk_from_thing(sa->cfg)))));
377 }
378
379 /**
380 * Equality function for ipsec_sa_t objects
381 */
382 static bool ipsec_sa_equals(ipsec_sa_t *sa, ipsec_sa_t *other_sa)
383 {
384 return sa->src->ip_equals(sa->src, other_sa->src) &&
385 sa->dst->ip_equals(sa->dst, other_sa->dst) &&
386 memeq(&sa->mark, &other_sa->mark, sizeof(mark_t)) &&
387 memeq(&sa->cfg, &other_sa->cfg, sizeof(ipsec_sa_cfg_t));
388 }
389
390 /**
391 * Allocate or reference an IPsec SA object
392 */
393 static ipsec_sa_t *ipsec_sa_create(private_kernel_netlink_ipsec_t *this,
394 host_t *src, host_t *dst, mark_t mark,
395 ipsec_sa_cfg_t *cfg)
396 {
397 ipsec_sa_t *sa, *found;
398 INIT(sa,
399 .src = src,
400 .dst = dst,
401 .mark = mark,
402 .cfg = *cfg,
403 );
404 found = this->sas->get(this->sas, sa);
405 if (!found)
406 {
407 sa->src = src->clone(src);
408 sa->dst = dst->clone(dst);
409 this->sas->put(this->sas, sa, sa);
410 }
411 else
412 {
413 free(sa);
414 sa = found;
415 }
416 ref_get(&sa->refcount);
417 return sa;
418 }
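/* Note (editor's addition): SAs with identical src, dst, mark and config are
 * shared via the "sas" hashtable and reference-counted, so multiple policies
 * referring to the same SA reuse a single entry. */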
419
420 /**
421 * Release and destroy an IPsec SA object
422 */
423 static void ipsec_sa_destroy(private_kernel_netlink_ipsec_t *this,
424 ipsec_sa_t *sa)
425 {
426 if (ref_put(&sa->refcount))
427 {
428 this->sas->remove(this->sas, sa);
429 DESTROY_IF(sa->src);
430 DESTROY_IF(sa->dst);
431 free(sa);
432 }
433 }
434
435 typedef struct policy_sa_t policy_sa_t;
436 typedef struct policy_sa_fwd_t policy_sa_fwd_t;
437
438 /**
439 * Mapping between a policy and an IPsec SA.
440 */
441 struct policy_sa_t {
442 /** Priority assigned to the policy when installed with this SA */
443 u_int32_t priority;
444
445 /** Type of the policy */
446 policy_type_t type;
447
448 /** Assigned SA */
449 ipsec_sa_t *sa;
450 };
451
452 /**
453 * For forward policies we also cache the traffic selectors in order to install
454 * the route.
455 */
456 struct policy_sa_fwd_t {
457 /** Generic interface */
458 policy_sa_t generic;
459
460 /** Source traffic selector of this policy */
461 traffic_selector_t *src_ts;
462
463 /** Destination traffic selector of this policy */
464 traffic_selector_t *dst_ts;
465 };
466
467 /**
468 * Create a policy_sa(_fwd)_t object
469 */
470 static policy_sa_t *policy_sa_create(private_kernel_netlink_ipsec_t *this,
471 policy_dir_t dir, policy_type_t type, host_t *src, host_t *dst,
472 traffic_selector_t *src_ts, traffic_selector_t *dst_ts, mark_t mark,
473 ipsec_sa_cfg_t *cfg)
474 {
475 policy_sa_t *policy;
476
477 if (dir == POLICY_FWD)
478 {
479 policy_sa_fwd_t *fwd;
480 INIT(fwd,
481 .src_ts = src_ts->clone(src_ts),
482 .dst_ts = dst_ts->clone(dst_ts),
483 );
484 policy = &fwd->generic;
485 }
486 else
487 {
488 INIT(policy, .priority = 0);
489 }
490 policy->type = type;
491 policy->sa = ipsec_sa_create(this, src, dst, mark, cfg);
492 return policy;
493 }
494
495 /**
496 * Destroy a policy_sa(_fwd)_t object
497 */
498 static void policy_sa_destroy(policy_sa_t *policy, policy_dir_t *dir,
499 private_kernel_netlink_ipsec_t *this)
500 {
501 if (*dir == POLICY_FWD)
502 {
503 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)policy;
504 fwd->src_ts->destroy(fwd->src_ts);
505 fwd->dst_ts->destroy(fwd->dst_ts);
506 }
507 ipsec_sa_destroy(this, policy->sa);
508 free(policy);
509 }
510
511 typedef struct policy_entry_t policy_entry_t;
512
513 /**
514 * Installed kernel policy.
515 */
516 struct policy_entry_t {
517
518 /** Direction of this policy: in, out, forward */
519 u_int8_t direction;
520
521 /** Parameters of installed policy */
522 struct xfrm_selector sel;
523
524 /** Optional mark */
525 u_int32_t mark;
526
527 /** Associated route installed for this policy */
528 route_entry_t *route;
529
530 /** List of SAs this policy is used by, ordered by priority */
531 linked_list_t *used_by;
532 };
533
534 /**
535 * Destroy a policy_entry_t object
536 */
537 static void policy_entry_destroy(private_kernel_netlink_ipsec_t *this,
538 policy_entry_t *policy)
539 {
540 if (policy->route)
541 {
542 route_entry_destroy(policy->route);
543 }
544 if (policy->used_by)
545 {
546 policy->used_by->invoke_function(policy->used_by,
547 (linked_list_invoke_t)policy_sa_destroy,
548 &policy->direction, this);
549 policy->used_by->destroy(policy->used_by);
550 }
551 free(policy);
552 }
553
554 /**
555 * Hash function for policy_entry_t objects
556 */
557 static u_int policy_hash(policy_entry_t *key)
558 {
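/* hashes the selector plus the u_int32_t mark member that directly follows
 * it in policy_entry_t */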
559 chunk_t chunk = chunk_create((void*)&key->sel,
560 sizeof(struct xfrm_selector) + sizeof(u_int32_t));
561 return chunk_hash(chunk);
562 }
563
564 /**
565 * Equality function for policy_entry_t objects
566 */
567 static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
568 {
569 return memeq(&key->sel, &other_key->sel,
570 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
571 key->direction == other_key->direction;
572 }
573
574 /**
575 * Calculate the priority of a policy
576 */
577 static inline u_int32_t get_priority(policy_entry_t *policy,
578 policy_priority_t prio)
579 {
580 u_int32_t priority = PRIO_BASE;
581 switch (prio)
582 {
583 case POLICY_PRIORITY_FALLBACK:
584 priority <<= 1;
585 /* fall-through */
586 case POLICY_PRIORITY_ROUTED:
587 priority <<= 1;
588 /* fall-through */
589 case POLICY_PRIORITY_DEFAULT:
590 break;
591 }
592 /* calculate priority based on selector size, small size = high prio */
593 priority -= policy->sel.prefixlen_s;
594 priority -= policy->sel.prefixlen_d;
595 priority <<= 2; /* make some room for the two flags */
596 priority += policy->sel.sport_mask || policy->sel.dport_mask ? 0 : 2;
597 priority += policy->sel.proto ? 0 : 1;
598 return priority;
599 }
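/*
 * Worked example (editor's addition, not part of the original sources), for
 * POLICY_PRIORITY_DEFAULT with PRIO_BASE 512:
 *
 *   host-to-host policy, /32 <-> /32, specific protocol and ports:
 *     ((512 - 32 - 32) << 2) + 0 + 0 = 1792
 *   catch-all policy, 0.0.0.0/0 <-> 0.0.0.0/0, any protocol and port:
 *     ((512 - 0 - 0) << 2) + 2 + 1 = 2051
 *
 * Since the kernel prefers policies with a lower priority value, the more
 * specific policy wins.
 */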
600
601 /**
602 * Convert the general ipsec mode to the one defined in xfrm.h
603 */
604 static u_int8_t mode2kernel(ipsec_mode_t mode)
605 {
606 switch (mode)
607 {
608 case MODE_TRANSPORT:
609 return XFRM_MODE_TRANSPORT;
610 case MODE_TUNNEL:
611 return XFRM_MODE_TUNNEL;
612 case MODE_BEET:
613 return XFRM_MODE_BEET;
614 default:
615 return mode;
616 }
617 }
618
619 /**
620 * Convert a host_t to a struct xfrm_address
621 */
622 static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
623 {
624 chunk_t chunk = host->get_address(host);
625 memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
626 }
627
628 /**
629 * Convert a struct xfrm_address to a host_t
630 */
631 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
632 {
633 chunk_t chunk;
634
635 switch (family)
636 {
637 case AF_INET:
638 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
639 break;
640 case AF_INET6:
641 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
642 break;
643 default:
644 return NULL;
645 }
646 return host_create_from_chunk(family, chunk, ntohs(port));
647 }
648
649 /**
650 * Convert a traffic selector address range to subnet and its mask.
651 */
652 static void ts2subnet(traffic_selector_t* ts,
653 xfrm_address_t *net, u_int8_t *mask)
654 {
655 host_t *net_host;
656 chunk_t net_chunk;
657
658 ts->to_subnet(ts, &net_host, mask);
659 net_chunk = net_host->get_address(net_host);
660 memcpy(net, net_chunk.ptr, net_chunk.len);
661 net_host->destroy(net_host);
662 }
663
664 /**
665 * Convert a traffic selector port range to port/portmask
666 */
667 static void ts2ports(traffic_selector_t* ts,
668 u_int16_t *port, u_int16_t *mask)
669 {
670 /* Linux does not seem to accept complex port masks: only "any" or
671 * one specific port is allowed. We therefore install "any" if the
672 * selector covers a port range, and the exact port if it covers a
673 * single port only. */
674 u_int16_t from, to;
675
676 from = ts->get_from_port(ts);
677 to = ts->get_to_port(ts);
678
679 if (from == to)
680 {
681 *port = htons(from);
682 *mask = ~0;
683 }
684 else
685 {
686 *port = 0;
687 *mask = 0;
688 }
689 }
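/*
 * Illustrative examples (editor's addition, not part of the original sources):
 *
 *   selector covering a single port, e.g. 443-443:  *port = htons(443), *mask = 0xffff
 *   selector covering a range, e.g. 1024-65535:     *port = 0,          *mask = 0x0000
 */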
690
691 /**
692 * Convert a pair of traffic_selectors to an xfrm_selector
693 */
694 static struct xfrm_selector ts2selector(traffic_selector_t *src,
695 traffic_selector_t *dst)
696 {
697 struct xfrm_selector sel;
698
699 memset(&sel, 0, sizeof(sel));
700 sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
701 /* src or dest proto may be "any" (0), use more restrictive one */
702 sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
703 ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
704 ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
705 ts2ports(dst, &sel.dport, &sel.dport_mask);
706 ts2ports(src, &sel.sport, &sel.sport_mask);
707 sel.ifindex = 0;
708 sel.user = 0;
709
710 return sel;
711 }
712
713 /**
714 * Convert an xfrm_selector to a src|dst traffic_selector
715 */
716 static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
717 {
718 u_char *addr;
719 u_int8_t prefixlen;
720 u_int16_t port = 0;
721 host_t *host = NULL;
722
723 if (src)
724 {
725 addr = (u_char*)&sel->saddr;
726 prefixlen = sel->prefixlen_s;
727 if (sel->sport_mask)
728 {
729 port = htons(sel->sport);
730 }
731 }
732 else
733 {
734 addr = (u_char*)&sel->daddr;
735 prefixlen = sel->prefixlen_d;
736 if (sel->dport_mask)
737 {
738 port = htons(sel->dport);
739 }
740 }
741
742 /* The Linux 2.6 kernel does not set the selector's family field,
743 * so as a kludge we additionally test the prefix length.
744 */
745 if (sel->family == AF_INET || sel->prefixlen_s == 32)
746 {
747 host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
748 }
749 else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
750 {
751 host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
752 }
753
754 if (host)
755 {
756 return traffic_selector_create_from_subnet(host, prefixlen,
757 sel->proto, port);
758 }
759 return NULL;
760 }
761
762 /**
763 * Process a XFRM_MSG_ACQUIRE from kernel
764 */
765 static void process_acquire(private_kernel_netlink_ipsec_t *this,
766 struct nlmsghdr *hdr)
767 {
768 struct xfrm_user_acquire *acquire;
769 struct rtattr *rta;
770 size_t rtasize;
771 traffic_selector_t *src_ts, *dst_ts;
772 u_int32_t reqid = 0;
773 int proto = 0;
774
775 acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
776 rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
777 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);
778
779 DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");
780
781 while (RTA_OK(rta, rtasize))
782 {
783 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
784
785 if (rta->rta_type == XFRMA_TMPL)
786 {
787 struct xfrm_user_tmpl* tmpl;
788 tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
789 reqid = tmpl->reqid;
790 proto = tmpl->id.proto;
791 }
792 rta = RTA_NEXT(rta, rtasize);
793 }
794 switch (proto)
795 {
796 case 0:
797 case IPPROTO_ESP:
798 case IPPROTO_AH:
799 break;
800 default:
801 /* acquire for AH/ESP only, not for IPCOMP */
802 return;
803 }
804 src_ts = selector2ts(&acquire->sel, TRUE);
805 dst_ts = selector2ts(&acquire->sel, FALSE);
806
807 hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
808 dst_ts);
809 }
810
811 /**
812 * Process a XFRM_MSG_EXPIRE from kernel
813 */
814 static void process_expire(private_kernel_netlink_ipsec_t *this,
815 struct nlmsghdr *hdr)
816 {
817 struct xfrm_user_expire *expire;
818 u_int32_t spi, reqid;
819 u_int8_t protocol;
820
821 expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
822 protocol = expire->state.id.proto;
823 spi = expire->state.id.spi;
824 reqid = expire->state.reqid;
825
826 DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");
827
828 if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
829 {
830 DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
831 "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
832 return;
833 }
834
835 hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
836 spi, expire->hard != 0);
837 }
838
839 /**
840 * Process a XFRM_MSG_MIGRATE from kernel
841 */
842 static void process_migrate(private_kernel_netlink_ipsec_t *this,
843 struct nlmsghdr *hdr)
844 {
845 struct xfrm_userpolicy_id *policy_id;
846 struct rtattr *rta;
847 size_t rtasize;
848 traffic_selector_t *src_ts, *dst_ts;
849 host_t *local = NULL, *remote = NULL;
850 host_t *old_src = NULL, *old_dst = NULL;
851 host_t *new_src = NULL, *new_dst = NULL;
852 u_int32_t reqid = 0;
853 policy_dir_t dir;
854
855 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
856 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
857 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
858
859 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
860
861 src_ts = selector2ts(&policy_id->sel, TRUE);
862 dst_ts = selector2ts(&policy_id->sel, FALSE);
863 dir = (policy_dir_t)policy_id->dir;
864
865 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names, dir);
866
867 while (RTA_OK(rta, rtasize))
868 {
869 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
870 if (rta->rta_type == XFRMA_KMADDRESS)
871 {
872 struct xfrm_user_kmaddress *kmaddress;
873
874 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
875 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
876 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
877 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
878 }
879 else if (rta->rta_type == XFRMA_MIGRATE)
880 {
881 struct xfrm_user_migrate *migrate;
882
883 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
884 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
885 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
886 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
887 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
888 reqid = migrate->reqid;
889 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
890 old_src, old_dst, new_src, new_dst, reqid);
891 DESTROY_IF(old_src);
892 DESTROY_IF(old_dst);
893 DESTROY_IF(new_src);
894 DESTROY_IF(new_dst);
895 }
896 rta = RTA_NEXT(rta, rtasize);
897 }
898
899 if (src_ts && dst_ts && local && remote)
900 {
901 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
902 src_ts, dst_ts, dir, local, remote);
903 }
904 else
905 {
906 DESTROY_IF(src_ts);
907 DESTROY_IF(dst_ts);
908 DESTROY_IF(local);
909 DESTROY_IF(remote);
910 }
911 }
912
913 /**
914 * Process a XFRM_MSG_MAPPING from kernel
915 */
916 static void process_mapping(private_kernel_netlink_ipsec_t *this,
917 struct nlmsghdr *hdr)
918 {
919 struct xfrm_user_mapping *mapping;
920 u_int32_t spi, reqid;
921
922 mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
923 spi = mapping->id.spi;
924 reqid = mapping->reqid;
925
926 DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");
927
928 if (mapping->id.proto == IPPROTO_ESP)
929 {
930 host_t *host;
931 host = xfrm2host(mapping->id.family, &mapping->new_saddr,
932 mapping->new_sport);
933 if (host)
934 {
935 hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
936 spi, host);
937 }
938 }
939 }
940
941 /**
942 * Receives events from kernel
943 */
944 static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
945 {
946 char response[1024];
947 struct nlmsghdr *hdr = (struct nlmsghdr*)response;
948 struct sockaddr_nl addr;
949 socklen_t addr_len = sizeof(addr);
950 int len;
951 bool oldstate;
952
953 oldstate = thread_cancelability(TRUE);
954 len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
955 (struct sockaddr*)&addr, &addr_len);
956 thread_cancelability(oldstate);
957
958 if (len < 0)
959 {
960 switch (errno)
961 {
962 case EINTR:
963 /* interrupted, try again */
964 return JOB_REQUEUE_DIRECT;
965 case EAGAIN:
966 /* no data ready, select again */
967 return JOB_REQUEUE_DIRECT;
968 default:
969 DBG1(DBG_KNL, "unable to receive from xfrm event socket");
970 sleep(1);
971 return JOB_REQUEUE_FAIR;
972 }
973 }
974
975 if (addr.nl_pid != 0)
976 { /* not from kernel. not interested, try another one */
977 return JOB_REQUEUE_DIRECT;
978 }
979
980 while (NLMSG_OK(hdr, len))
981 {
982 switch (hdr->nlmsg_type)
983 {
984 case XFRM_MSG_ACQUIRE:
985 process_acquire(this, hdr);
986 break;
987 case XFRM_MSG_EXPIRE:
988 process_expire(this, hdr);
989 break;
990 case XFRM_MSG_MIGRATE:
991 process_migrate(this, hdr);
992 break;
993 case XFRM_MSG_MAPPING:
994 process_mapping(this, hdr);
995 break;
996 default:
997 DBG1(DBG_KNL, "received unknown event from xfrm event "
998 "socket: %d", hdr->nlmsg_type);
999 break;
1000 }
1001 hdr = NLMSG_NEXT(hdr, len);
1002 }
1003 return JOB_REQUEUE_DIRECT;
1004 }
1005
1006 /**
1007 * Get an SPI for a specific protocol from the kernel.
1008 */
1009 static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
1010 host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
1011 u_int32_t reqid, u_int32_t *spi)
1012 {
1013 netlink_buf_t request;
1014 struct nlmsghdr *hdr, *out;
1015 struct xfrm_userspi_info *userspi;
1016 u_int32_t received_spi = 0;
1017 size_t len;
1018
1019 memset(&request, 0, sizeof(request));
1020
1021 hdr = (struct nlmsghdr*)request;
1022 hdr->nlmsg_flags = NLM_F_REQUEST;
1023 hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
1024 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));
1025
1026 userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
1027 host2xfrm(src, &userspi->info.saddr);
1028 host2xfrm(dst, &userspi->info.id.daddr);
1029 userspi->info.id.proto = proto;
1030 userspi->info.mode = XFRM_MODE_TUNNEL;
1031 userspi->info.reqid = reqid;
1032 userspi->info.family = src->get_family(src);
1033 userspi->min = min;
1034 userspi->max = max;
1035
1036 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1037 {
1038 hdr = out;
1039 while (NLMSG_OK(hdr, len))
1040 {
1041 switch (hdr->nlmsg_type)
1042 {
1043 case XFRM_MSG_NEWSA:
1044 {
1045 struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
1046 received_spi = usersa->id.spi;
1047 break;
1048 }
1049 case NLMSG_ERROR:
1050 {
1051 struct nlmsgerr *err = NLMSG_DATA(hdr);
1052 DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
1053 strerror(-err->error), -err->error);
1054 break;
1055 }
1056 default:
1057 hdr = NLMSG_NEXT(hdr, len);
1058 continue;
1059 case NLMSG_DONE:
1060 break;
1061 }
1062 break;
1063 }
1064 free(out);
1065 }
1066
1067 if (received_spi == 0)
1068 {
1069 return FAILED;
1070 }
1071
1072 *spi = received_spi;
1073 return SUCCESS;
1074 }
1075
1076 METHOD(kernel_ipsec_t, get_spi, status_t,
1077 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1078 u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
1079 {
1080 DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);
1081
1082 if (get_spi_internal(this, src, dst, protocol,
1083 0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
1084 {
1085 DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
1086 return FAILED;
1087 }
1088
1089 DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);
1090 return SUCCESS;
1091 }
1092
1093 METHOD(kernel_ipsec_t, get_cpi, status_t,
1094 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1095 u_int32_t reqid, u_int16_t *cpi)
1096 {
1097 u_int32_t received_spi = 0;
1098
1099 DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);
1100
1101 if (get_spi_internal(this, src, dst, IPPROTO_COMP,
1102 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
1103 {
1104 DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
1105 return FAILED;
1106 }
1107
1108 *cpi = htons((u_int16_t)ntohl(received_spi));
1109
1110 DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);
1111 return SUCCESS;
1112 }
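/*
 * Note (editor's addition): CPIs are allocated through the same
 * XFRM_MSG_ALLOCSPI mechanism as SPIs, so the 32-bit value returned in network
 * order is converted to host order, truncated to 16 bits and converted back.
 * For example, a returned value of htonl(0x1234) yields the CPI htons(0x1234).
 */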
1113
1114 METHOD(kernel_ipsec_t, add_sa, status_t,
1115 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1116 u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
1117 u_int32_t tfc, lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
1118 u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
1119 u_int16_t cpi, bool encap, bool esn, bool inbound,
1120 traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
1121 {
1122 netlink_buf_t request;
1123 char *alg_name;
1124 struct nlmsghdr *hdr;
1125 struct xfrm_usersa_info *sa;
1126 u_int16_t icv_size = 64;
1127 status_t status = FAILED;
1128
1129 /* If IPComp is used, we install an additional IPComp SA. If the CPI is 0,
1130 * we are already inside the recursive call made below. */
1131 if (ipcomp != IPCOMP_NONE && cpi != 0)
1132 {
1133 lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
1134 add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
1135 tfc, &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED,
1136 chunk_empty, mode, ipcomp, 0, FALSE, FALSE, inbound, NULL, NULL);
1137 ipcomp = IPCOMP_NONE;
1138 /* use transport mode ESP SA, IPComp uses tunnel mode */
1139 mode = MODE_TRANSPORT;
1140 }
1141
1142 memset(&request, 0, sizeof(request));
1143
1144 if (mark.value)
1145 {
1146 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u} (mark "
1147 "%u/0x%8x)", ntohl(spi), reqid, mark.value, mark.mask);
1148 }
1149 else
1150 {
1151 DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u}",
1152 ntohl(spi), reqid);
1153 }
1154 hdr = (struct nlmsghdr*)request;
1155 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1156 hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
1157 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1158
1159 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1160 host2xfrm(src, &sa->saddr);
1161 host2xfrm(dst, &sa->id.daddr);
1162 sa->id.spi = spi;
1163 sa->id.proto = protocol;
1164 sa->family = src->get_family(src);
1165 sa->mode = mode2kernel(mode);
1166 switch (mode)
1167 {
1168 case MODE_TUNNEL:
1169 sa->flags |= XFRM_STATE_AF_UNSPEC;
1170 break;
1171 case MODE_BEET:
1172 case MODE_TRANSPORT:
1173 if(src_ts && dst_ts)
1174 {
1175 sa->sel = ts2selector(src_ts, dst_ts);
1176 }
1177 break;
1178 default:
1179 break;
1180 }
1181
1182 sa->reqid = reqid;
1183 sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
1184 sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
1185 sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
1186 sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
1187 /* we use lifetimes since added, not since used */
1188 sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
1189 sa->lft.hard_add_expires_seconds = lifetime->time.life;
1190 sa->lft.soft_use_expires_seconds = 0;
1191 sa->lft.hard_use_expires_seconds = 0;
1192
1193 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);
1194
1195 switch (enc_alg)
1196 {
1197 case ENCR_UNDEFINED:
1198 /* no encryption */
1199 break;
1200 case ENCR_AES_CCM_ICV16:
1201 case ENCR_AES_GCM_ICV16:
1202 case ENCR_NULL_AUTH_AES_GMAC:
1203 case ENCR_CAMELLIA_CCM_ICV16:
1204 icv_size += 32;
1205 /* FALL */
1206 case ENCR_AES_CCM_ICV12:
1207 case ENCR_AES_GCM_ICV12:
1208 case ENCR_CAMELLIA_CCM_ICV12:
1209 icv_size += 32;
1210 /* FALL */
1211 case ENCR_AES_CCM_ICV8:
1212 case ENCR_AES_GCM_ICV8:
1213 case ENCR_CAMELLIA_CCM_ICV8:
1214 {
1215 struct xfrm_algo_aead *algo;
1216
1217 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1218 if (alg_name == NULL)
1219 {
1220 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1221 encryption_algorithm_names, enc_alg);
1222 goto failed;
1223 }
1224 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1225 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1226
1227 rthdr->rta_type = XFRMA_ALG_AEAD;
1228 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) +
1229 enc_key.len);
1230 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1231 if (hdr->nlmsg_len > sizeof(request))
1232 {
1233 goto failed;
1234 }
1235
1236 algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
1237 algo->alg_key_len = enc_key.len * 8;
1238 algo->alg_icv_len = icv_size;
1239 strcpy(algo->alg_name, alg_name);
1240 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1241
1242 rthdr = XFRM_RTA_NEXT(rthdr);
1243 break;
1244 }
1245 default:
1246 {
1247 struct xfrm_algo *algo;
1248
1249 alg_name = lookup_algorithm(encryption_algs, enc_alg);
1250 if (alg_name == NULL)
1251 {
1252 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1253 encryption_algorithm_names, enc_alg);
1254 goto failed;
1255 }
1256 DBG2(DBG_KNL, " using encryption algorithm %N with key size %d",
1257 encryption_algorithm_names, enc_alg, enc_key.len * 8);
1258
1259 rthdr->rta_type = XFRMA_ALG_CRYPT;
1260 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
1261 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1262 if (hdr->nlmsg_len > sizeof(request))
1263 {
1264 goto failed;
1265 }
1266
1267 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1268 algo->alg_key_len = enc_key.len * 8;
1269 strcpy(algo->alg_name, alg_name);
1270 memcpy(algo->alg_key, enc_key.ptr, enc_key.len);
1271
1272 rthdr = XFRM_RTA_NEXT(rthdr);
1273 }
1274 }
1275
1276 if (int_alg != AUTH_UNDEFINED)
1277 {
1278 alg_name = lookup_algorithm(integrity_algs, int_alg);
1279 if (alg_name == NULL)
1280 {
1281 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1282 integrity_algorithm_names, int_alg);
1283 goto failed;
1284 }
1285 DBG2(DBG_KNL, " using integrity algorithm %N with key size %d",
1286 integrity_algorithm_names, int_alg, int_key.len * 8);
1287
1288 if (int_alg == AUTH_HMAC_SHA2_256_128)
1289 {
1290 struct xfrm_algo_auth* algo;
1291
1292 /* the kernel uses SHA256 with 96 bit truncation by default,
1293 * use specified truncation size supported by newer kernels */
1294 rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
1295 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) +
1296 int_key.len);
1297
1298 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1299 if (hdr->nlmsg_len > sizeof(request))
1300 {
1301 goto failed;
1302 }
1303
1304 algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
1305 algo->alg_key_len = int_key.len * 8;
1306 algo->alg_trunc_len = 128;
1307 strcpy(algo->alg_name, alg_name);
1308 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1309 }
1310 else
1311 {
1312 struct xfrm_algo* algo;
1313
1314 rthdr->rta_type = XFRMA_ALG_AUTH;
1315 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);
1316
1317 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1318 if (hdr->nlmsg_len > sizeof(request))
1319 {
1320 goto failed;
1321 }
1322
1323 algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1324 algo->alg_key_len = int_key.len * 8;
1325 strcpy(algo->alg_name, alg_name);
1326 memcpy(algo->alg_key, int_key.ptr, int_key.len);
1327 }
1328 rthdr = XFRM_RTA_NEXT(rthdr);
1329 }
1330
1331 if (ipcomp != IPCOMP_NONE)
1332 {
1333 rthdr->rta_type = XFRMA_ALG_COMP;
1334 alg_name = lookup_algorithm(compression_algs, ipcomp);
1335 if (alg_name == NULL)
1336 {
1337 DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
1338 ipcomp_transform_names, ipcomp);
1339 goto failed;
1340 }
1341 DBG2(DBG_KNL, " using compression algorithm %N",
1342 ipcomp_transform_names, ipcomp);
1343
1344 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
1345 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1346 if (hdr->nlmsg_len > sizeof(request))
1347 {
1348 goto failed;
1349 }
1350
1351 struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
1352 algo->alg_key_len = 0;
1353 strcpy(algo->alg_name, alg_name);
1354
1355 rthdr = XFRM_RTA_NEXT(rthdr);
1356 }
1357
1358 if (encap)
1359 {
1360 struct xfrm_encap_tmpl *tmpl;
1361
1362 rthdr->rta_type = XFRMA_ENCAP;
1363 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1364
1365 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1366 if (hdr->nlmsg_len > sizeof(request))
1367 {
1368 goto failed;
1369 }
1370
1371 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
1372 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1373 tmpl->encap_sport = htons(src->get_port(src));
1374 tmpl->encap_dport = htons(dst->get_port(dst));
1375 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1376 /* encap_oa could probably be derived from the
1377 * traffic selectors [rfc4306, p39]. In the netlink kernel
1378 * implementation pluto does the same as we do here but it uses
1379 * encap_oa in the pfkey implementation.
1380 * BUT as /usr/src/linux/net/key/af_key.c indicates the kernel ignores
1381 * it anyway
1382 * -> does that mean that NAT-T encap doesn't work in transport mode?
1383 * No. The reason the kernel ignores NAT-OA is that it recomputes
1384 * (or, rather, just ignores) the checksum. If packets pass the IPsec
1385 * checks it marks them "checksum ok" so OA isn't needed. */
1386 rthdr = XFRM_RTA_NEXT(rthdr);
1387 }
1388
1389 if (mark.value)
1390 {
1391 struct xfrm_mark *mrk;
1392
1393 rthdr->rta_type = XFRMA_MARK;
1394 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1395
1396 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1397 if (hdr->nlmsg_len > sizeof(request))
1398 {
1399 goto failed;
1400 }
1401
1402 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1403 mrk->v = mark.value;
1404 mrk->m = mark.mask;
1405 rthdr = XFRM_RTA_NEXT(rthdr);
1406 }
1407
1408 if (tfc)
1409 {
1410 u_int32_t *tfcpad;
1411
1412 rthdr->rta_type = XFRMA_TFCPAD;
1413 rthdr->rta_len = RTA_LENGTH(sizeof(u_int32_t));
1414
1415 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1416 if (hdr->nlmsg_len > sizeof(request))
1417 {
1418 goto failed;
1419 }
1420
1421 tfcpad = (u_int32_t*)RTA_DATA(rthdr);
1422 *tfcpad = tfc;
1423 rthdr = XFRM_RTA_NEXT(rthdr);
1424 }
1425
1426 if (protocol != IPPROTO_COMP)
1427 {
1428 if (esn || this->replay_window > DEFAULT_REPLAY_WINDOW)
1429 {
1430 /* for ESN or larger replay windows we need the new
1431 * XFRMA_REPLAY_ESN_VAL attribute to configure a bitmap */
1432 struct xfrm_replay_state_esn *replay;
1433
1434 rthdr->rta_type = XFRMA_REPLAY_ESN_VAL;
1435 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1436 (this->replay_window + 7) / 8);
1437
1438 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1439 if (hdr->nlmsg_len > sizeof(request))
1440 {
1441 goto failed;
1442 }
1443
1444 replay = (struct xfrm_replay_state_esn*)RTA_DATA(rthdr);
1445 /* bmp_len contains the number of __u32's */
1446 replay->bmp_len = this->replay_bmp;
1447 replay->replay_window = this->replay_window;
1448 DBG2(DBG_KNL, " using replay window of %u packets",
1449 this->replay_window);
1450
1451 rthdr = XFRM_RTA_NEXT(rthdr);
1452 if (esn)
1453 {
1454 DBG2(DBG_KNL, " using extended sequence numbers (ESN)");
1455 sa->flags |= XFRM_STATE_ESN;
1456 }
1457 }
1458 else
1459 {
1460 sa->replay_window = DEFAULT_REPLAY_WINDOW;
1461 }
1462 }
1463
1464 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1465 {
1466 if (mark.value)
1467 {
1468 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x "
1469 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1470 }
1471 else
1472 {
1473 DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
1474 }
1475 goto failed;
1476 }
1477
1478 status = SUCCESS;
1479
1480 failed:
1481 memwipe(request, sizeof(request));
1482 return status;
1483 }
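/*
 * Note (editor's addition): for a CHILD_SA negotiated with IPComp, the
 * recursion at the top of add_sa() installs two kernel states: an
 * IPPROTO_COMP SA in the negotiated (e.g. tunnel) mode, plus the ESP/AH SA
 * itself, which is then installed in transport mode on top of it.
 */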
1484
1485 /**
1486 * Get the replay state (i.e. sequence numbers for replay protection) of an SA.
1487 *
1488 * Allocates the replay state structure received from the kernel into one of the given output pointers (ESN or legacy format).
1489 */
1490 static void get_replay_state(private_kernel_netlink_ipsec_t *this,
1491 u_int32_t spi, u_int8_t protocol, host_t *dst,
1492 struct xfrm_replay_state_esn **replay_esn,
1493 struct xfrm_replay_state **replay)
1494 {
1495 netlink_buf_t request;
1496 struct nlmsghdr *hdr, *out = NULL;
1497 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1498 size_t len;
1499 struct rtattr *rta;
1500 size_t rtasize;
1501
1502 memset(&request, 0, sizeof(request));
1503
1504 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x",
1505 ntohl(spi));
1506
1507 hdr = (struct nlmsghdr*)request;
1508 hdr->nlmsg_flags = NLM_F_REQUEST;
1509 hdr->nlmsg_type = XFRM_MSG_GETAE;
1510 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1511
1512 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1513 aevent_id->flags = XFRM_AE_RVAL;
1514
1515 host2xfrm(dst, &aevent_id->sa_id.daddr);
1516 aevent_id->sa_id.spi = spi;
1517 aevent_id->sa_id.proto = protocol;
1518 aevent_id->sa_id.family = dst->get_family(dst);
1519
1520 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1521 {
1522 hdr = out;
1523 while (NLMSG_OK(hdr, len))
1524 {
1525 switch (hdr->nlmsg_type)
1526 {
1527 case XFRM_MSG_NEWAE:
1528 {
1529 out_aevent = NLMSG_DATA(hdr);
1530 break;
1531 }
1532 case NLMSG_ERROR:
1533 {
1534 struct nlmsgerr *err = NLMSG_DATA(hdr);
1535 DBG1(DBG_KNL, "querying replay state from SAD entry "
1536 "failed: %s (%d)", strerror(-err->error),
1537 -err->error);
1538 break;
1539 }
1540 default:
1541 hdr = NLMSG_NEXT(hdr, len);
1542 continue;
1543 case NLMSG_DONE:
1544 break;
1545 }
1546 break;
1547 }
1548 }
1549
1550 if (out_aevent)
1551 {
1552 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1553 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1554 while (RTA_OK(rta, rtasize))
1555 {
1556 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1557 RTA_PAYLOAD(rta) == sizeof(**replay))
1558 {
1559 *replay = malloc(RTA_PAYLOAD(rta));
1560 memcpy(*replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1561 break;
1562 }
1563 if (rta->rta_type == XFRMA_REPLAY_ESN_VAL &&
1564 RTA_PAYLOAD(rta) >= sizeof(**replay_esn) + this->replay_bmp)
1565 {
1566 *replay_esn = malloc(RTA_PAYLOAD(rta));
1567 memcpy(*replay_esn, RTA_DATA(rta), RTA_PAYLOAD(rta));
1568 break;
1569 }
1570 rta = RTA_NEXT(rta, rtasize);
1571 }
1572 }
1573 free(out);
1574 }
1575
1576 METHOD(kernel_ipsec_t, query_sa, status_t,
1577 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1578 u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
1579 {
1580 netlink_buf_t request;
1581 struct nlmsghdr *out = NULL, *hdr;
1582 struct xfrm_usersa_id *sa_id;
1583 struct xfrm_usersa_info *sa = NULL;
1584 status_t status = FAILED;
1585 size_t len;
1586
1587 memset(&request, 0, sizeof(request));
1588
1589 if (mark.value)
1590 {
1591 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%8x)",
1592 ntohl(spi), mark.value, mark.mask);
1593 }
1594 else
1595 {
1596 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x", ntohl(spi));
1597 }
1598 hdr = (struct nlmsghdr*)request;
1599 hdr->nlmsg_flags = NLM_F_REQUEST;
1600 hdr->nlmsg_type = XFRM_MSG_GETSA;
1601 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1602
1603 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1604 host2xfrm(dst, &sa_id->daddr);
1605 sa_id->spi = spi;
1606 sa_id->proto = protocol;
1607 sa_id->family = dst->get_family(dst);
1608
1609 if (mark.value)
1610 {
1611 struct xfrm_mark *mrk;
1612 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1613
1614 rthdr->rta_type = XFRMA_MARK;
1615 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1616 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1617 if (hdr->nlmsg_len > sizeof(request))
1618 {
1619 return FAILED;
1620 }
1621
1622 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1623 mrk->v = mark.value;
1624 mrk->m = mark.mask;
1625 }
1626
1627 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1628 {
1629 hdr = out;
1630 while (NLMSG_OK(hdr, len))
1631 {
1632 switch (hdr->nlmsg_type)
1633 {
1634 case XFRM_MSG_NEWSA:
1635 {
1636 sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
1637 break;
1638 }
1639 case NLMSG_ERROR:
1640 {
1641 struct nlmsgerr *err = NLMSG_DATA(hdr);
1642
1643 if (mark.value)
1644 {
1645 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1646 "(mark %u/0x%8x) failed: %s (%d)",
1647 ntohl(spi), mark.value, mark.mask,
1648 strerror(-err->error), -err->error);
1649 }
1650 else
1651 {
1652 DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
1653 "failed: %s (%d)", ntohl(spi),
1654 strerror(-err->error), -err->error);
1655 }
1656 break;
1657 }
1658 default:
1659 hdr = NLMSG_NEXT(hdr, len);
1660 continue;
1661 case NLMSG_DONE:
1662 break;
1663 }
1664 break;
1665 }
1666 }
1667
1668 if (sa == NULL)
1669 {
1670 DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
1671 }
1672 else
1673 {
1674 *bytes = sa->curlft.bytes;
1675 status = SUCCESS;
1676 }
1677 memwipe(out, len);
1678 free(out);
1679 return status;
1680 }
1681
1682 METHOD(kernel_ipsec_t, del_sa, status_t,
1683 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1684 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1685 {
1686 netlink_buf_t request;
1687 struct nlmsghdr *hdr;
1688 struct xfrm_usersa_id *sa_id;
1689
1690 /* if IPComp was used, we first delete the additional IPComp SA */
1691 if (cpi)
1692 {
1693 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1694 }
1695
1696 memset(&request, 0, sizeof(request));
1697
1698 if (mark.value)
1699 {
1700 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%8x)",
1701 ntohl(spi), mark.value, mark.mask);
1702 }
1703 else
1704 {
1705 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x", ntohl(spi));
1706 }
1707 hdr = (struct nlmsghdr*)request;
1708 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1709 hdr->nlmsg_type = XFRM_MSG_DELSA;
1710 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1711
1712 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1713 host2xfrm(dst, &sa_id->daddr);
1714 sa_id->spi = spi;
1715 sa_id->proto = protocol;
1716 sa_id->family = dst->get_family(dst);
1717
1718 if (mark.value)
1719 {
1720 struct xfrm_mark *mrk;
1721 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1722
1723 rthdr->rta_type = XFRMA_MARK;
1724 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1725 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
1726 if (hdr->nlmsg_len > sizeof(request))
1727 {
1728 return FAILED;
1729 }
1730
1731 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1732 mrk->v = mark.value;
1733 mrk->m = mark.mask;
1734 }
1735
1736 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1737 {
1738 if (mark.value)
1739 {
1740 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1741 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1742 }
1743 else
1744 {
1745 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x",
1746 ntohl(spi));
1747 }
1748 return FAILED;
1749 }
1750 if (mark.value)
1751 {
1752 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%8x)",
1753 ntohl(spi), mark.value, mark.mask);
1754 }
1755 else
1756 {
1757 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x", ntohl(spi));
1758 }
1759 return SUCCESS;
1760 }
1761
1762 METHOD(kernel_ipsec_t, update_sa, status_t,
1763 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1764 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1765 bool old_encap, bool new_encap, mark_t mark)
1766 {
1767 netlink_buf_t request;
1768 u_char *pos;
1769 struct nlmsghdr *hdr, *out = NULL;
1770 struct xfrm_usersa_id *sa_id;
1771 struct xfrm_usersa_info *out_sa = NULL, *sa;
1772 size_t len;
1773 struct rtattr *rta;
1774 size_t rtasize;
1775 struct xfrm_encap_tmpl* tmpl = NULL;
1776 struct xfrm_replay_state *replay = NULL;
1777 struct xfrm_replay_state_esn *replay_esn = NULL;
1778 status_t status = FAILED;
1779
1780 /* if IPComp is used, we first update the IPComp SA */
1781 if (cpi)
1782 {
1783 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1784 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1785 }
1786
1787 memset(&request, 0, sizeof(request));
1788
1789 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1790
1791 /* query the existing SA first */
1792 hdr = (struct nlmsghdr*)request;
1793 hdr->nlmsg_flags = NLM_F_REQUEST;
1794 hdr->nlmsg_type = XFRM_MSG_GETSA;
1795 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1796
1797 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1798 host2xfrm(dst, &sa_id->daddr);
1799 sa_id->spi = spi;
1800 sa_id->proto = protocol;
1801 sa_id->family = dst->get_family(dst);
1802
1803 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1804 {
1805 hdr = out;
1806 while (NLMSG_OK(hdr, len))
1807 {
1808 switch (hdr->nlmsg_type)
1809 {
1810 case XFRM_MSG_NEWSA:
1811 {
1812 out_sa = NLMSG_DATA(hdr);
1813 break;
1814 }
1815 case NLMSG_ERROR:
1816 {
1817 struct nlmsgerr *err = NLMSG_DATA(hdr);
1818 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1819 strerror(-err->error), -err->error);
1820 break;
1821 }
1822 default:
1823 hdr = NLMSG_NEXT(hdr, len);
1824 continue;
1825 case NLMSG_DONE:
1826 break;
1827 }
1828 break;
1829 }
1830 }
1831 if (out_sa == NULL)
1832 {
1833 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1834 goto failed;
1835 }
1836
1837 get_replay_state(this, spi, protocol, dst, &replay_esn, &replay);
1838
1839 /* delete the old SA (without affecting the IPComp SA) */
1840 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1841 {
1842 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x",
1843 ntohl(spi));
1844 goto failed;
1845 }
1846
1847 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1848 ntohl(spi), src, dst, new_src, new_dst);
1849 /* copy over the SA from out to request */
1850 hdr = (struct nlmsghdr*)request;
1851 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1852 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1853 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1854 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1855 sa = NLMSG_DATA(hdr);
1856 sa->family = new_dst->get_family(new_dst);
1857
1858 if (!src->ip_equals(src, new_src))
1859 {
1860 host2xfrm(new_src, &sa->saddr);
1861 }
1862 if (!dst->ip_equals(dst, new_dst))
1863 {
1864 host2xfrm(new_dst, &sa->id.daddr);
1865 }
1866
1867 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1868 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1869 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1870 while(RTA_OK(rta, rtasize))
1871 {
1872 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1873 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1874 {
1875 if (rta->rta_type == XFRMA_ENCAP)
1876 { /* update encap tmpl */
1877 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1878 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1879 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1880 }
1881 memcpy(pos, rta, rta->rta_len);
1882 pos += RTA_ALIGN(rta->rta_len);
1883 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1884 }
1885 rta = RTA_NEXT(rta, rtasize);
1886 }
1887
1888 rta = (struct rtattr*)pos;
1889 if (tmpl == NULL && new_encap)
1890 { /* add tmpl if we are enabling it */
1891 rta->rta_type = XFRMA_ENCAP;
1892 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1893
1894 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1895 if (hdr->nlmsg_len > sizeof(request))
1896 {
1897 goto failed;
1898 }
1899
1900 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1901 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1902 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1903 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1904 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1905
1906 rta = XFRM_RTA_NEXT(rta);
1907 }
1908
1909 if (replay_esn)
1910 {
1911 rta->rta_type = XFRMA_REPLAY_ESN_VAL;
1912 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state_esn) +
1913 this->replay_bmp);
1914
1915 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1916 if (hdr->nlmsg_len > sizeof(request))
1917 {
1918 goto failed;
1919 }
1920 memcpy(RTA_DATA(rta), replay_esn,
1921 sizeof(struct xfrm_replay_state_esn) + this->replay_bmp);
1922
1923 rta = XFRM_RTA_NEXT(rta);
1924 }
1925 else if (replay)
1926 {
1927 rta->rta_type = XFRMA_REPLAY_VAL;
1928 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1929
1930 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1931 if (hdr->nlmsg_len > sizeof(request))
1932 {
1933 goto failed;
1934 }
1935 memcpy(RTA_DATA(rta), replay, sizeof(struct xfrm_replay_state));
1936
1937 rta = XFRM_RTA_NEXT(rta);
1938 }
1939 else
1940 {
1941 DBG1(DBG_KNL, "unable to copy replay state from old SAD entry "
1942 "with SPI %.8x", ntohl(spi));
1943 }
1944
1945 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1946 {
1947 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1948 goto failed;
1949 }
1950
1951 status = SUCCESS;
1952 failed:
1953 free(replay);
1954 free(replay_esn);
1955 memwipe(out, len);
1956 free(out);
1957
1958 return status;
1959 }
1960
1961 METHOD(kernel_ipsec_t, flush_sas, status_t,
1962 private_kernel_netlink_ipsec_t *this)
1963 {
1964 netlink_buf_t request;
1965 struct nlmsghdr *hdr;
1966 struct xfrm_usersa_flush *flush;
1967
1968 memset(&request, 0, sizeof(request));
1969
1970 DBG2(DBG_KNL, "flushing all SAD entries");
1971
1972 hdr = (struct nlmsghdr*)request;
1973 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1974 hdr->nlmsg_type = XFRM_MSG_FLUSHSA;
1975 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush));
1976
1977 flush = (struct xfrm_usersa_flush*)NLMSG_DATA(hdr);
1978 flush->proto = IPSEC_PROTO_ANY;
1979
1980 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1981 {
1982 DBG1(DBG_KNL, "unable to flush SAD entries");
1983 return FAILED;
1984 }
1985 return SUCCESS;
1986 }
1987
1988 /**
1989 * Add or update a policy in the kernel.
1990 *
1991 * Note: The mutex has to be locked when entering this function.
1992 */
1993 static status_t add_policy_internal(private_kernel_netlink_ipsec_t *this,
1994 policy_entry_t *policy, policy_sa_t *mapping, bool update)
1995 {
1996 netlink_buf_t request;
1997 policy_entry_t clone;
1998 ipsec_sa_t *ipsec = mapping->sa;
1999 struct xfrm_userpolicy_info *policy_info;
2000 struct nlmsghdr *hdr;
2001 int i;
2002
2003 /* clone the policy so we are able to check it out again later */
2004 memcpy(&clone, policy, sizeof(policy_entry_t));
2005
2006 memset(&request, 0, sizeof(request));
2007 hdr = (struct nlmsghdr*)request;
2008 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2009 hdr->nlmsg_type = update ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
2010 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));
2011
2012 policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2013 policy_info->sel = policy->sel;
2014 policy_info->dir = policy->direction;
2015
2016 /* calculate priority based on selector size, small size = high prio */
2017 policy_info->priority = mapping->priority;
2018 policy_info->action = mapping->type != POLICY_DROP ? XFRM_POLICY_ALLOW
2019 : XFRM_POLICY_BLOCK;
2020 policy_info->share = XFRM_SHARE_ANY;
2021
2022 /* policies don't expire */
2023 policy_info->lft.soft_byte_limit = XFRM_INF;
2024 policy_info->lft.soft_packet_limit = XFRM_INF;
2025 policy_info->lft.hard_byte_limit = XFRM_INF;
2026 policy_info->lft.hard_packet_limit = XFRM_INF;
2027 policy_info->lft.soft_add_expires_seconds = 0;
2028 policy_info->lft.hard_add_expires_seconds = 0;
2029 policy_info->lft.soft_use_expires_seconds = 0;
2030 policy_info->lft.hard_use_expires_seconds = 0;
2031
2032 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
2033
2034 if (mapping->type == POLICY_IPSEC)
2035 {
2036 struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);
2037 struct {
2038 u_int8_t proto;
2039 bool use;
2040 } protos[] = {
2041 { IPPROTO_COMP, ipsec->cfg.ipcomp.transform != IPCOMP_NONE },
2042 { IPPROTO_ESP, ipsec->cfg.esp.use },
2043 { IPPROTO_AH, ipsec->cfg.ah.use },
2044 };
2045 ipsec_mode_t proto_mode = ipsec->cfg.mode;
2046
2047 rthdr->rta_type = XFRMA_TMPL;
2048 rthdr->rta_len = 0; /* actual length is set below */
2049
2050 for (i = 0; i < countof(protos); i++)
2051 {
2052 if (!protos[i].use)
2053 {
2054 continue;
2055 }
2056
2057 rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
2058 hdr->nlmsg_len += RTA_ALIGN(RTA_LENGTH(sizeof(struct xfrm_user_tmpl)));
2059 if (hdr->nlmsg_len > sizeof(request))
2060 {
this->mutex->unlock(this->mutex);
2061 return FAILED;
2062 }
2063
2064 tmpl->reqid = ipsec->cfg.reqid;
2065 tmpl->id.proto = protos[i].proto;
2066 tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
2067 tmpl->mode = mode2kernel(proto_mode);
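/* IPComp is optional on inbound/forward policies, as small packets may be sent uncompressed */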
2068 tmpl->optional = protos[i].proto == IPPROTO_COMP &&
2069 policy->direction != POLICY_OUT;
2070 tmpl->family = ipsec->src->get_family(ipsec->src);
2071
2072 if (proto_mode == MODE_TUNNEL)
2073 { /* only for tunnel mode */
2074 host2xfrm(ipsec->src, &tmpl->saddr);
2075 host2xfrm(ipsec->dst, &tmpl->id.daddr);
2076 }
2077
2078 tmpl++;
2079
2080 /* use transport mode for other SAs */
2081 proto_mode = MODE_TRANSPORT;
2082 }
2083
2084 rthdr = XFRM_RTA_NEXT(rthdr);
2085 }
2086
2087 if (ipsec->mark.value)
2088 {
2089 struct xfrm_mark *mrk;
2090
2091 rthdr->rta_type = XFRMA_MARK;
2092 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2093
2094 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2095 if (hdr->nlmsg_len > sizeof(request))
2096 {
this->mutex->unlock(this->mutex);
2097 return FAILED;
2098 }
2099
2100 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2101 mrk->v = ipsec->mark.value;
2102 mrk->m = ipsec->mark.mask;
2103 }
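/* don't hold the mutex while waiting for the kernel to acknowledge the request */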
2104 this->mutex->unlock(this->mutex);
2105
2106 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2107 {
2108 return FAILED;
2109 }
2110
2111 /* find the policy again */
2112 this->mutex->lock(this->mutex);
2113 policy = this->policies->get(this->policies, &clone);
2114 if (!policy ||
2115 policy->used_by->find_first(policy->used_by,
2116 NULL, (void**)&mapping) != SUCCESS)
2117 { /* policy or mapping is already gone, ignore */
2118 this->mutex->unlock(this->mutex);
2119 return SUCCESS;
2120 }
2121
2122 /* install a route, if:
2123 * - this is a forward policy (to just get one for each child)
2124 * - we are in tunnel/BEET mode
2125 * - routing is not disabled via strongswan.conf
2126 */
2127 if (policy->direction == POLICY_FWD &&
2128 ipsec->cfg.mode != MODE_TRANSPORT && this->install_routes)
2129 {
2130 route_entry_t *route = malloc_thing(route_entry_t);
2131 policy_sa_fwd_t *fwd = (policy_sa_fwd_t*)mapping;
2132
2133 if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
2134 fwd->dst_ts, &route->src_ip) == SUCCESS)
2135 {
2136 /* get the nexthop to src (src as we are in POLICY_FWD) */
2137 route->gateway = hydra->kernel_interface->get_nexthop(
2138 hydra->kernel_interface, ipsec->src);
2139 /* install route via outgoing interface */
2140 route->if_name = hydra->kernel_interface->get_interface(
2141 hydra->kernel_interface, ipsec->dst);
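/* the route's destination is the remote network, i.e. the source selector of this forward policy */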
2142 route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
2143 memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
2144 route->prefixlen = policy->sel.prefixlen_s;
2145
2146 if (!route->if_name)
2147 {
2148 this->mutex->unlock(this->mutex);
2149 route_entry_destroy(route);
2150 return SUCCESS;
2151 }
2152
2153 if (policy->route)
2154 {
2155 route_entry_t *old = policy->route;
2156 if (route_entry_equals(old, route))
2157 { /* keep previously installed route */
2158 this->mutex->unlock(this->mutex);
2159 route_entry_destroy(route);
2160 return SUCCESS;
2161 }
2162 /* uninstall previously installed route */
2163 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2164 old->dst_net, old->prefixlen, old->gateway,
2165 old->src_ip, old->if_name) != SUCCESS)
2166 {
2167 DBG1(DBG_KNL, "error uninstalling route installed with "
2168 "policy %R === %R %N", fwd->src_ts,
2169 fwd->dst_ts, policy_dir_names,
2170 policy->direction);
2171 }
2172 route_entry_destroy(old);
2173 policy->route = NULL;
2174 }
2175
2176 DBG2(DBG_KNL, "installing route: %R via %H src %H dev %s",
2177 fwd->src_ts, route->gateway, route->src_ip, route->if_name);
2178 switch (hydra->kernel_interface->add_route(
2179 hydra->kernel_interface, route->dst_net,
2180 route->prefixlen, route->gateway,
2181 route->src_ip, route->if_name))
2182 {
2183 default:
2184 DBG1(DBG_KNL, "unable to install source route for %H",
2185 route->src_ip);
2186 /* FALL */
2187 case ALREADY_DONE:
2188 /* route exists, do not uninstall */
2189 route_entry_destroy(route);
2190 break;
2191 case SUCCESS:
2192 /* cache the installed route */
2193 policy->route = route;
2194 break;
2195 }
2196 }
2197 else
2198 {
2199 free(route);
2200 }
2201 }
2202 this->mutex->unlock(this->mutex);
2203 return SUCCESS;
2204 }
2205
2206 METHOD(kernel_ipsec_t, add_policy, status_t,
2207 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
2208 traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
2209 policy_dir_t direction, policy_type_t type, ipsec_sa_cfg_t *sa,
2210 mark_t mark, policy_priority_t priority)
2211 {
2212 policy_entry_t *policy, *current;
2213 policy_sa_t *assigned_sa, *current_sa;
2214 enumerator_t *enumerator;
2215 bool found = FALSE, update = TRUE;
2216
2217 /* create a policy */
2218 INIT(policy,
2219 .sel = ts2selector(src_ts, dst_ts),
2220 .mark = mark.value & mark.mask,
2221 .direction = direction,
2222 );
2223
2224 /* find the policy, which matches EXACTLY */
2225 this->mutex->lock(this->mutex);
2226 current = this->policies->get(this->policies, policy);
2227 if (current)
2228 {
2229 /* use existing policy */
2230 if (mark.value)
2231 {
2232 DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%08x) "
2233 "already exists, increasing refcount",
2234 src_ts, dst_ts, policy_dir_names, direction,
2235 mark.value, mark.mask);
2236 }
2237 else
2238 {
2239 DBG2(DBG_KNL, "policy %R === %R %N "
2240 "already exists, increasing refcount",
2241 src_ts, dst_ts, policy_dir_names, direction);
2242 }
2243 policy_entry_destroy(this, policy);
2244 policy = current;
2245 found = TRUE;
2246 }
2247 else
2248 { /* use the new one, if we have no such policy */
2249 policy->used_by = linked_list_create();
2250 this->policies->put(this->policies, policy, policy);
2251 }
2252
2253 /* cache the assigned IPsec SA */
2254 assigned_sa = policy_sa_create(this, direction, type, src, dst, src_ts,
2255 dst_ts, mark, sa);
2256 assigned_sa->priority = get_priority(policy, priority);
2257
2258 if (this->policy_history)
2259 { /* insert the SA according to its priority */
2260 enumerator = policy->used_by->create_enumerator(policy->used_by);
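/* the policy is only (re)installed in the kernel if the new mapping ends up first in the list, i.e. has the lowest priority value (highest precedence) */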
2261 while (enumerator->enumerate(enumerator, (void**)&current_sa))
2262 {
2263 if (current_sa->priority >= assigned_sa->priority)
2264 {
2265 break;
2266 }
2267 update = FALSE;
2268 }
2269 policy->used_by->insert_before(policy->used_by, enumerator,
2270 assigned_sa);
2271 enumerator->destroy(enumerator);
2272 }
2273 else
2274 { /* simply insert it last and only update if it is not installed yet */
2275 policy->used_by->insert_last(policy->used_by, assigned_sa);
2276 update = !found;
2277 }
2278
2279 if (!update)
2280 { /* don't update the policy in the kernel if a mapping with a higher
2281 * precedence (lower priority value) is already installed */
2282 this->mutex->unlock(this->mutex);
2283 return SUCCESS;
2284 }
2285
2286 if (mark.value)
2287 {
2288 DBG2(DBG_KNL, "%s policy %R === %R %N (mark %u/0x%08x)",
2289 found ? "updating" : "adding", src_ts, dst_ts,
2290 policy_dir_names, direction, mark.value, mark.mask);
2291 }
2292 else
2293 {
2294 DBG2(DBG_KNL, "%s policy %R === %R %N",
2295 found ? "updating" : "adding", src_ts, dst_ts,
2296 policy_dir_names, direction);
2297 }
2298
2299 if (add_policy_internal(this, policy, assigned_sa, found) != SUCCESS)
2300 {
2301 DBG1(DBG_KNL, "unable to %s policy %R === %R %N",
2302 found ? "update" : "add", src_ts, dst_ts,
2303 policy_dir_names, direction);
2304 return FAILED;
2305 }
2306 return SUCCESS;
2307 }
2308
2309 METHOD(kernel_ipsec_t, query_policy, status_t,
2310 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2311 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
2312 u_int32_t *use_time)
2313 {
2314 netlink_buf_t request;
2315 struct nlmsghdr *out = NULL, *hdr;
2316 struct xfrm_userpolicy_id *policy_id;
2317 struct xfrm_userpolicy_info *policy = NULL;
2318 size_t len;
2319
2320 memset(&request, 0, sizeof(request));
2321
2322 if (mark.value)
2323 {
2324 DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%08x)",
2325 src_ts, dst_ts, policy_dir_names, direction,
2326 mark.value, mark.mask);
2327 }
2328 else
2329 {
2330 DBG2(DBG_KNL, "querying policy %R === %R %N", src_ts, dst_ts,
2331 policy_dir_names, direction);
2332 }
2333 hdr = (struct nlmsghdr*)request;
2334 hdr->nlmsg_flags = NLM_F_REQUEST;
2335 hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
2336 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2337
2338 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2339 policy_id->sel = ts2selector(src_ts, dst_ts);
2340 policy_id->dir = direction;
2341
2342 if (mark.value)
2343 {
2344 struct xfrm_mark *mrk;
2345 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2346
2347 rthdr->rta_type = XFRMA_MARK;
2348 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2349
2350 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2351 if (hdr->nlmsg_len > sizeof(request))
2352 {
2353 return FAILED;
2354 }
2355
2356 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2357 mrk->v = mark.value;
2358 mrk->m = mark.mask;
2359 }
2360
2361 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
2362 {
2363 hdr = out;
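/* parse the response, a single XFRM_MSG_NEWPOLICY is expected */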
2364 while (NLMSG_OK(hdr, len))
2365 {
2366 switch (hdr->nlmsg_type)
2367 {
2368 case XFRM_MSG_NEWPOLICY:
2369 {
2370 policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
2371 break;
2372 }
2373 case NLMSG_ERROR:
2374 {
2375 struct nlmsgerr *err = NLMSG_DATA(hdr);
2376 DBG1(DBG_KNL, "querying policy failed: %s (%d)",
2377 strerror(-err->error), -err->error);
2378 break;
2379 }
2380 default:
2381 hdr = NLMSG_NEXT(hdr, len);
2382 continue;
2383 case NLMSG_DONE:
2384 break;
2385 }
2386 break;
2387 }
2388 }
2389
2390 if (policy == NULL)
2391 {
2392 DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
2393 policy_dir_names, direction);
2394 free(out);
2395 return FAILED;
2396 }
2397
2398 if (policy->curlft.use_time)
2399 {
2400 /* we need the monotonic time, but the kernel returns system time. */
2401 *use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
2402 }
2403 else
2404 {
2405 *use_time = 0;
2406 }
2407
2408 free(out);
2409 return SUCCESS;
2410 }
2411
2412 METHOD(kernel_ipsec_t, del_policy, status_t,
2413 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
2414 traffic_selector_t *dst_ts, policy_dir_t direction, u_int32_t reqid,
2415 mark_t mark, policy_priority_t prio)
2416 {
2417 policy_entry_t *current, policy;
2418 enumerator_t *enumerator;
2419 policy_sa_t *mapping;
2420 netlink_buf_t request;
2421 struct nlmsghdr *hdr;
2422 struct xfrm_userpolicy_id *policy_id;
2423 bool is_installed = TRUE;
2424 u_int32_t priority;
2425
2426 if (mark.value)
2427 {
2428 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x)",
2429 src_ts, dst_ts, policy_dir_names, direction,
2430 mark.value, mark.mask);
2431 }
2432 else
2433 {
2434 DBG2(DBG_KNL, "deleting policy %R === %R %N",
2435 src_ts, dst_ts, policy_dir_names, direction);
2436 }
2437
2438 /* create a policy */
2439 memset(&policy, 0, sizeof(policy_entry_t));
2440 policy.sel = ts2selector(src_ts, dst_ts);
2441 policy.mark = mark.value & mark.mask;
2442 policy.direction = direction;
2443
2444 /* find the policy */
2445 this->mutex->lock(this->mutex);
2446 current = this->policies->get(this->policies, &policy);
2447 if (!current)
2448 {
2449 if (mark.value)
2450 {
2451 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%08x) "
2452 "failed, not found", src_ts, dst_ts, policy_dir_names,
2453 direction, mark.value, mark.mask);
2454 }
2455 else
2456 {
2457 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2458 src_ts, dst_ts, policy_dir_names, direction);
2459 }
2460 this->mutex->unlock(this->mutex);
2461 return NOT_FOUND;
2462 }
2463
2464 if (this->policy_history)
2465 { /* remove mapping to SA by reqid and priority */
2466 priority = get_priority(current, prio);
2467 enumerator = current->used_by->create_enumerator(current->used_by);
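/* if the matching mapping is not the first in the list, it was never installed in the kernel, so the policy does not have to be updated */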
2468 while (enumerator->enumerate(enumerator, (void**)&mapping))
2469 {
2470 if (reqid == mapping->sa->cfg.reqid &&
2471 priority == mapping->priority)
2472 {
2473 current->used_by->remove_at(current->used_by, enumerator);
2474 policy_sa_destroy(mapping, &direction, this);
2475 break;
2476 }
2477 is_installed = FALSE;
2478 }
2479 enumerator->destroy(enumerator);
2480 }
2481 else
2482 { /* remove one of the SAs but don't update the policy */
2483 current->used_by->remove_last(current->used_by, (void**)&mapping);
2484 policy_sa_destroy(mapping, &direction, this);
2485 is_installed = FALSE;
2486 }
2487
2488 if (current->used_by->get_count(current->used_by) > 0)
2489 { /* policy is used by more SAs, keep in kernel */
2490 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
2491 if (!is_installed)
2492 { /* no need to update as the policy was not installed for this SA */
2493 this->mutex->unlock(this->mutex);
2494 return SUCCESS;
2495 }
2496
2497 if (mark.value)
2498 {
2499 DBG2(DBG_KNL, "updating policy %R === %R %N (mark %u/0x%08x)",
2500 src_ts, dst_ts, policy_dir_names, direction,
2501 mark.value, mark.mask);
2502 }
2503 else
2504 {
2505 DBG2(DBG_KNL, "updating policy %R === %R %N",
2506 src_ts, dst_ts, policy_dir_names, direction);
2507 }
2508
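/* reinstall the policy with the mapping that now has the highest precedence */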
2509 current->used_by->get_first(current->used_by, (void**)&mapping);
2510 if (add_policy_internal(this, current, mapping, TRUE) != SUCCESS)
2511 {
2512 DBG1(DBG_KNL, "unable to update policy %R === %R %N",
2513 src_ts, dst_ts, policy_dir_names, direction);
2514 return FAILED;
2515 }
2516 return SUCCESS;
2517 }
2518
2519 memset(&request, 0, sizeof(request));
2520
2521 hdr = (struct nlmsghdr*)request;
2522 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2523 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2524 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2525
2526 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2527 policy_id->sel = current->sel;
2528 policy_id->dir = direction;
2529
2530 if (mark.value)
2531 {
2532 struct xfrm_mark *mrk;
2533 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2534
2535 rthdr->rta_type = XFRMA_MARK;
2536 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2537 hdr->nlmsg_len += RTA_ALIGN(rthdr->rta_len);
2538 if (hdr->nlmsg_len > sizeof(request))
2539 {
this->mutex->unlock(this->mutex);
2540 return FAILED;
2541 }
2542
2543 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2544 mrk->v = mark.value;
2545 mrk->m = mark.mask;
2546 }
2547
2548 if (current->route)
2549 {
2550 route_entry_t *route = current->route;
2551 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2552 route->dst_net, route->prefixlen, route->gateway,
2553 route->src_ip, route->if_name) != SUCCESS)
2554 {
2555 DBG1(DBG_KNL, "error uninstalling route installed with "
2556 "policy %R === %R %N", src_ts, dst_ts,
2557 policy_dir_names, direction);
2558 }
2559 }
2560
2561 this->policies->remove(this->policies, current);
2562 policy_entry_destroy(this, current);
2563 this->mutex->unlock(this->mutex);
2564
2565 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2566 {
2567 if (mark.value)
2568 {
2569 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2570 "(mark %u/0x%08x)", src_ts, dst_ts, policy_dir_names,
2571 direction, mark.value, mark.mask);
2572 }
2573 else
2574 {
2575 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2576 src_ts, dst_ts, policy_dir_names, direction);
2577 }
2578 return FAILED;
2579 }
2580 return SUCCESS;
2581 }
2582
2583 METHOD(kernel_ipsec_t, flush_policies, status_t,
2584 private_kernel_netlink_ipsec_t *this)
2585 {
2586 netlink_buf_t request;
2587 struct nlmsghdr *hdr;
2588
2589 memset(&request, 0, sizeof(request));
2590
2591 DBG2(DBG_KNL, "flushing all policies from SPD");
2592
2593 hdr = (struct nlmsghdr*)request;
2594 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2595 hdr->nlmsg_type = XFRM_MSG_FLUSHPOLICY;
2596 hdr->nlmsg_len = NLMSG_LENGTH(0); /* no data associated */
2597
2598 /* by adding an rtattr of type XFRMA_POLICY_TYPE we could restrict this
2599 * to main or sub policies (default is main) */
2600
2601 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2602 {
2603 DBG1(DBG_KNL, "unable to flush SPD entries");
2604 return FAILED;
2605 }
2606 return SUCCESS;
2607 }
2608
2610 METHOD(kernel_ipsec_t, bypass_socket, bool,
2611 private_kernel_netlink_ipsec_t *this, int fd, int family)
2612 {
2613 struct xfrm_userpolicy_info policy;
2614 u_int sol, ipsec_policy;
2615
2616 switch (family)
2617 {
2618 case AF_INET:
2619 sol = SOL_IP;
2620 ipsec_policy = IP_XFRM_POLICY;
2621 break;
2622 case AF_INET6:
2623 sol = SOL_IPV6;
2624 ipsec_policy = IPV6_XFRM_POLICY;
2625 break;
2626 default:
2627 return FALSE;
2628 }
2629
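/* install an ALLOW per-socket policy in both directions, so traffic on this socket (e.g. IKE packets) bypasses the IPsec policies in the SPD */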
2630 memset(&policy, 0, sizeof(policy));
2631 policy.action = XFRM_POLICY_ALLOW;
2632 policy.sel.family = family;
2633
2634 policy.dir = XFRM_POLICY_OUT;
2635 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2636 {
2637 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2638 strerror(errno));
2639 return FALSE;
2640 }
2641 policy.dir = XFRM_POLICY_IN;
2642 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2643 {
2644 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2645 strerror(errno));
2646 return FALSE;
2647 }
2648 return TRUE;
2649 }
2650
2651 METHOD(kernel_ipsec_t, destroy, void,
2652 private_kernel_netlink_ipsec_t *this)
2653 {
2654 enumerator_t *enumerator;
2655 policy_entry_t *policy;
2656
2657 if (this->job)
2658 {
2659 this->job->cancel(this->job);
2660 }
2661 if (this->socket_xfrm_events > 0)
2662 {
2663 close(this->socket_xfrm_events);
2664 }
2665 DESTROY_IF(this->socket_xfrm);
2666 enumerator = this->policies->create_enumerator(this->policies);
2667 while (enumerator->enumerate(enumerator, &policy, &policy))
2668 {
2669 policy_entry_destroy(this, policy);
2670 }
2671 enumerator->destroy(enumerator);
2672 this->policies->destroy(this->policies);
2673 this->sas->destroy(this->sas);
2674 this->mutex->destroy(this->mutex);
2675 free(this);
2676 }
2677
2678 /*
2679 * Described in header.
2680 */
2681 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2682 {
2683 private_kernel_netlink_ipsec_t *this;
2684 struct sockaddr_nl addr;
2685 int fd;
2686
2687 INIT(this,
2688 .public = {
2689 .interface = {
2690 .get_spi = _get_spi,
2691 .get_cpi = _get_cpi,
2692 .add_sa = _add_sa,
2693 .update_sa = _update_sa,
2694 .query_sa = _query_sa,
2695 .del_sa = _del_sa,
2696 .flush_sas = _flush_sas,
2697 .add_policy = _add_policy,
2698 .query_policy = _query_policy,
2699 .del_policy = _del_policy,
2700 .flush_policies = _flush_policies,
2701 .bypass_socket = _bypass_socket,
2702 .destroy = _destroy,
2703 },
2704 },
2705 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2706 (hashtable_equals_t)policy_equals, 32),
2707 .sas = hashtable_create((hashtable_hash_t)ipsec_sa_hash,
2708 (hashtable_equals_t)ipsec_sa_equals, 32),
2709 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2710 .policy_history = TRUE,
2711 .install_routes = lib->settings->get_bool(lib->settings,
2712 "%s.install_routes", TRUE, hydra->daemon),
2713 .replay_window = lib->settings->get_int(lib->settings,
2714 "%s.replay_window", DEFAULT_REPLAY_WINDOW, hydra->daemon),
2715 );
2716
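/* number of 32-bit words required for the configured replay window bitmap */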
2717 this->replay_bmp = (this->replay_window + sizeof(u_int32_t) * 8 - 1) /
2718 (sizeof(u_int32_t) * 8);
2719
2720 if (streq(hydra->daemon, "pluto"))
2721 { /* no routes for pluto, they are installed via updown script */
2722 this->install_routes = FALSE;
2723 /* no policy history for pluto */
2724 this->policy_history = FALSE;
2725 }
2726
2727 /* limit the lifetime of SPIs allocated by the kernel (acquire state) to 165 seconds */
2728 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2729 if (fd >= 0)
2730 {
2731 ignore_result(write(fd, "165", 3));
2732 close(fd);
2733 }
2734
2735 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2736 if (!this->socket_xfrm)
2737 {
2738 destroy(this);
2739 return NULL;
2740 }
2741
2742 memset(&addr, 0, sizeof(addr));
2743 addr.nl_family = AF_NETLINK;
2744
2745 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2746 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2747 if (this->socket_xfrm_events <= 0)
2748 {
2749 DBG1(DBG_KNL, "unable to create XFRM event socket");
2750 destroy(this);
2751 return NULL;
2752 }
2753 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2754 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2755 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2756 {
2757 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2758 destroy(this);
2759 return NULL;
2760 }
2761 this->job = callback_job_create_with_prio((callback_job_cb_t)receive_events,
2762 this, NULL, NULL, JOB_PRIO_CRITICAL);
2763 lib->processor->queue_job(lib->processor, (job_t*)this->job);
2764
2765 return &this->public;
2766 }
2767