1db6ee9e3b6cd371efc45222597c73ab73a86aae
[strongswan.git] / src / libhydra / plugins / kernel_netlink / kernel_netlink_ipsec.c
1 /*
2 * Copyright (C) 2006-2010 Tobias Brunner
3 * Copyright (C) 2005-2009 Martin Willi
4 * Copyright (C) 2008 Andreas Steffen
5 * Copyright (C) 2006-2007 Fabian Hartmann, Noah Heusser
6 * Copyright (C) 2006 Daniel Roethlisberger
7 * Copyright (C) 2005 Jan Hutter
8 * Hochschule fuer Technik Rapperswil
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 */
20
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <stdint.h>
24 #include <linux/ipsec.h>
25 #include <linux/netlink.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/xfrm.h>
28 #include <linux/udp.h>
29 #include <unistd.h>
30 #include <time.h>
31 #include <errno.h>
32 #include <string.h>
33 #include <fcntl.h>
34
35 #include "kernel_netlink_ipsec.h"
36 #include "kernel_netlink_shared.h"
37
38 #include <hydra.h>
39 #include <debug.h>
40 #include <threading/thread.h>
41 #include <threading/mutex.h>
42 #include <utils/hashtable.h>
43 #include <processing/jobs/callback_job.h>
44
45 /** required for Linux 2.6.26 kernel and later */
46 #ifndef XFRM_STATE_AF_UNSPEC
47 #define XFRM_STATE_AF_UNSPEC 32
48 #endif
49
50 /** from linux/in.h */
51 #ifndef IP_XFRM_POLICY
52 #define IP_XFRM_POLICY 17
53 #endif
54
55 /* missing on uclibc */
56 #ifndef IPV6_XFRM_POLICY
57 #define IPV6_XFRM_POLICY 34
58 #endif /*IPV6_XFRM_POLICY*/
59
60 /** default priority of installed policies */
61 #define PRIO_LOW 3000
62 #define PRIO_HIGH 2000
63
64 /**
65 * map the limit for bytes and packets to XFRM_INF per default
66 */
67 #define XFRM_LIMIT(x) ((x) == 0 ? XFRM_INF : (x))
68
69 /**
70 * Create ORable bitfield of XFRM NL groups
71 */
72 #define XFRMNLGRP(x) (1<<(XFRMNLGRP_##x-1))
73
74 /**
75 * returns a pointer to the first rtattr following the nlmsghdr *nlh and the
76 * 'usual' netlink data x like 'struct xfrm_usersa_info'
77 */
78 #define XFRM_RTA(nlh, x) ((struct rtattr*)(NLMSG_DATA(nlh) + NLMSG_ALIGN(sizeof(x))))
79 /**
80 * returns a pointer to the next rtattr following rta.
81 * !!! do not use this to parse messages. use RTA_NEXT and RTA_OK instead !!!
82 */
83 #define XFRM_RTA_NEXT(rta) ((struct rtattr*)(((char*)(rta)) + RTA_ALIGN((rta)->rta_len)))
84 /**
85 * returns the total size of attached rta data
86 * (after 'usual' netlink data x like 'struct xfrm_usersa_info')
87 */
88 #define XFRM_PAYLOAD(nlh, x) NLMSG_PAYLOAD(nlh, sizeof(x))
89
typedef struct kernel_algorithm_t kernel_algorithm_t;

/**
 * Mapping of an IKEv2 algorithm identifier to its Linux crypto API name.
 * Used as an entry type for the lookup tables below.
 */
struct kernel_algorithm_t {
	/**
	 * Identifier specified in IKEv2
	 */
	int ikev2;

	/**
	 * Name of the algorithm in linux crypto API
	 */
	char *name;
};
106
/**
 * Printable names for XFRM netlink message types (for debug output).
 */
ENUM(xfrm_msg_names, XFRM_MSG_NEWSA, XFRM_MSG_MAPPING,
	"XFRM_MSG_NEWSA",
	"XFRM_MSG_DELSA",
	"XFRM_MSG_GETSA",
	"XFRM_MSG_NEWPOLICY",
	"XFRM_MSG_DELPOLICY",
	"XFRM_MSG_GETPOLICY",
	"XFRM_MSG_ALLOCSPI",
	"XFRM_MSG_ACQUIRE",
	"XFRM_MSG_EXPIRE",
	"XFRM_MSG_UPDPOLICY",
	"XFRM_MSG_UPDSA",
	"XFRM_MSG_POLEXPIRE",
	"XFRM_MSG_FLUSHSA",
	"XFRM_MSG_FLUSHPOLICY",
	"XFRM_MSG_NEWAE",
	"XFRM_MSG_GETAE",
	"XFRM_MSG_REPORT",
	"XFRM_MSG_MIGRATE",
	"XFRM_MSG_NEWSADINFO",
	"XFRM_MSG_GETSADINFO",
	"XFRM_MSG_NEWSPDINFO",
	"XFRM_MSG_GETSPDINFO",
	"XFRM_MSG_MAPPING"
);
132
/**
 * Printable names for XFRM netlink attribute types (for debug output).
 */
ENUM(xfrm_attr_type_names, XFRMA_UNSPEC, XFRMA_KMADDRESS,
	"XFRMA_UNSPEC",
	"XFRMA_ALG_AUTH",
	"XFRMA_ALG_CRYPT",
	"XFRMA_ALG_COMP",
	"XFRMA_ENCAP",
	"XFRMA_TMPL",
	"XFRMA_SA",
	"XFRMA_POLICY",
	"XFRMA_SEC_CTX",
	"XFRMA_LTIME_VAL",
	"XFRMA_REPLAY_VAL",
	"XFRMA_REPLAY_THRESH",
	"XFRMA_ETIMER_THRESH",
	"XFRMA_SRCADDR",
	"XFRMA_COADDR",
	"XFRMA_LASTUSED",
	"XFRMA_POLICY_TYPE",
	"XFRMA_MIGRATE",
	"XFRMA_ALG_AEAD",
	"XFRMA_KMADDRESS"
);
155
156 #define END_OF_LIST -1
157
/**
 * Algorithms for encryption.
 * Entries commented out have no kernel crypto API equivalent.
 * The rfc4106/rfc4309/rfc4543 wrappers select the AEAD variants; the ICV
 * length is passed separately via xfrm_algo_aead when the SA is installed.
 */
static kernel_algorithm_t encryption_algs[] = {
/*	{ENCR_DES_IV64,				"***"				}, */
	{ENCR_DES,					"des"				},
	{ENCR_3DES,					"des3_ede"			},
/*	{ENCR_RC5,					"***"				}, */
/*	{ENCR_IDEA,					"***"				}, */
	{ENCR_CAST,					"cast128"			},
	{ENCR_BLOWFISH,				"blowfish"			},
/*	{ENCR_3IDEA,				"***"				}, */
/*	{ENCR_DES_IV32,				"***"				}, */
	{ENCR_NULL,					"cipher_null"		},
	{ENCR_AES_CBC,				"aes"				},
	{ENCR_AES_CTR,				"rfc3686(ctr(aes))"	},
	{ENCR_AES_CCM_ICV8,			"rfc4309(ccm(aes))"	},
	{ENCR_AES_CCM_ICV12,		"rfc4309(ccm(aes))"	},
	{ENCR_AES_CCM_ICV16,		"rfc4309(ccm(aes))"	},
	{ENCR_AES_GCM_ICV8,			"rfc4106(gcm(aes))"	},
	{ENCR_AES_GCM_ICV12,		"rfc4106(gcm(aes))"	},
	{ENCR_AES_GCM_ICV16,		"rfc4106(gcm(aes))"	},
	{ENCR_NULL_AUTH_AES_GMAC,	"rfc4543(gcm(aes))"	},
	{ENCR_CAMELLIA_CBC,			"cbc(camellia)"		},
/*	{ENCR_CAMELLIA_CTR,			"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV8,	"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV12,	"***"				}, */
/*	{ENCR_CAMELLIA_CCM_ICV16,	"***"				}, */
	{END_OF_LIST,				NULL				}
};
188
/**
 * Algorithms for integrity protection.
 * NOTE(review): the short names ("md5", "sha1", "sha256") select the
 * kernel's default truncation for that hash; for SHA2-256 with 128 bit
 * truncation the explicit "hmac(sha256)" name is used together with
 * XFRMA_ALG_AUTH_TRUNC in add_sa() — confirm against target kernel version.
 */
static kernel_algorithm_t integrity_algs[] = {
	{AUTH_HMAC_MD5_96,			"md5"				},
	{AUTH_HMAC_SHA1_96,			"sha1"				},
	{AUTH_HMAC_SHA2_256_96,		"sha256"			},
	{AUTH_HMAC_SHA2_256_128,	"hmac(sha256)"		},
	{AUTH_HMAC_SHA2_384_192,	"hmac(sha384)"		},
	{AUTH_HMAC_SHA2_512_256,	"hmac(sha512)"		},
/*	{AUTH_DES_MAC,				"***"				}, */
/*	{AUTH_KPDK_MD5,				"***"				}, */
	{AUTH_AES_XCBC_96,			"xcbc(aes)"			},
	{END_OF_LIST,				NULL				}
};
204
/**
 * Algorithms for IPComp.
 */
static kernel_algorithm_t compression_algs[] = {
/*	{IPCOMP_OUI,				"***"				}, */
	{IPCOMP_DEFLATE,			"deflate"			},
	{IPCOMP_LZS,				"lzs"				},
	{IPCOMP_LZJH,				"lzjh"				},
	{END_OF_LIST,				NULL				}
};
215
216 /**
217 * Look up a kernel algorithm name and its key size
218 */
219 static char* lookup_algorithm(kernel_algorithm_t *list, int ikev2)
220 {
221 while (list->ikev2 != END_OF_LIST)
222 {
223 if (list->ikev2 == ikev2)
224 {
225 return list->name;
226 }
227 list++;
228 }
229 return NULL;
230 }
231
typedef struct route_entry_t route_entry_t;

/**
 * An installed routing entry, remembered so it can be removed again when
 * the policy that caused its installation is deleted.
 */
struct route_entry_t {
	/** Name of the interface the route is bound to */
	char *if_name;

	/** Source ip of the route */
	host_t *src_ip;

	/** gateway for this route (optional, may be NULL) */
	host_t *gateway;

	/** Destination net */
	chunk_t dst_net;

	/** Destination net prefixlen */
	u_int8_t prefixlen;
};
253
254 /**
255 * destroy an route_entry_t object
256 */
257 static void route_entry_destroy(route_entry_t *this)
258 {
259 free(this->if_name);
260 this->src_ip->destroy(this->src_ip);
261 DESTROY_IF(this->gateway);
262 chunk_free(&this->dst_net);
263 free(this);
264 }
265
typedef struct policy_entry_t policy_entry_t;

/**
 * An installed kernel policy, tracked for refcounting and route cleanup.
 *
 * NOTE: policy_hash()/policy_equals() hash and compare the 'sel' and
 * 'mark' members as one contiguous memory region, so 'mark' must stay
 * declared directly after 'sel'.
 */
struct policy_entry_t {

	/** direction of this policy: in, out, forward */
	u_int8_t direction;

	/** parameters of installed policy */
	struct xfrm_selector sel;

	/** optional mark */
	u_int32_t mark;

	/** associated route installed for this policy */
	route_entry_t *route;

	/** by how many CHILD_SA's this policy is used */
	u_int refcount;
};
288
/**
 * Hash function for policy_entry_t objects.
 *
 * Hashes the selector plus the mark in one pass; this assumes 'mark'
 * immediately follows 'sel' in policy_entry_t without padding between
 * them (see the struct declaration).
 */
static u_int policy_hash(policy_entry_t *key)
{
	chunk_t chunk = chunk_create((void*)&key->sel,
							sizeof(struct xfrm_selector) + sizeof(u_int32_t));
	return chunk_hash(chunk);
}
298
/**
 * Equality function for policy_entry_t objects.
 *
 * Two policies are equal if selector, mark (compared as one contiguous
 * region, like in policy_hash()) and direction all match.
 */
static bool policy_equals(policy_entry_t *key, policy_entry_t *other_key)
{
	return memeq(&key->sel, &other_key->sel,
				 sizeof(struct xfrm_selector) + sizeof(u_int32_t)) &&
		   key->direction == other_key->direction;
}
308
typedef struct private_kernel_netlink_ipsec_t private_kernel_netlink_ipsec_t;

/**
 * Private variables and functions of kernel_netlink class.
 */
struct private_kernel_netlink_ipsec_t {
	/**
	 * Public part of the kernel_netlink_t object.
	 */
	kernel_netlink_ipsec_t public;

	/**
	 * mutex to lock access to various lists (e.g. the policies table)
	 */
	mutex_t *mutex;

	/**
	 * Hash table of installed policies (policy_entry_t), keyed by
	 * policy_hash()/policy_equals()
	 */
	hashtable_t *policies;

	/**
	 * job receiving netlink events (runs receive_events())
	 */
	callback_job_t *job;

	/**
	 * Netlink xfrm socket (IPsec) for request/response exchanges
	 */
	netlink_socket_t *socket_xfrm;

	/**
	 * netlink xfrm socket to receive acquire and expire events
	 */
	int socket_xfrm_events;

	/**
	 * whether to install routes along policies
	 */
	bool install_routes;
};
350
351 /**
352 * convert the general ipsec mode to the one defined in xfrm.h
353 */
354 static u_int8_t mode2kernel(ipsec_mode_t mode)
355 {
356 switch (mode)
357 {
358 case MODE_TRANSPORT:
359 return XFRM_MODE_TRANSPORT;
360 case MODE_TUNNEL:
361 return XFRM_MODE_TUNNEL;
362 case MODE_BEET:
363 return XFRM_MODE_BEET;
364 default:
365 return mode;
366 }
367 }
368
/**
 * Copy the raw address bytes of a host_t into a struct xfrm_address.
 * The copy is truncated to sizeof(xfrm_address_t) if the host's address
 * chunk is larger.
 */
static void host2xfrm(host_t *host, xfrm_address_t *xfrm)
{
	chunk_t chunk = host->get_address(host);
	memcpy(xfrm, chunk.ptr, min(chunk.len, sizeof(xfrm_address_t)));
}
377
378 /**
379 * convert a struct xfrm_address to a host_t
380 */
381 static host_t* xfrm2host(int family, xfrm_address_t *xfrm, u_int16_t port)
382 {
383 chunk_t chunk;
384
385 switch (family)
386 {
387 case AF_INET:
388 chunk = chunk_create((u_char*)&xfrm->a4, sizeof(xfrm->a4));
389 break;
390 case AF_INET6:
391 chunk = chunk_create((u_char*)&xfrm->a6, sizeof(xfrm->a6));
392 break;
393 default:
394 return NULL;
395 }
396 return host_create_from_chunk(family, chunk, ntohs(port));
397 }
398
/**
 * Convert a traffic selector address range to subnet and its mask.
 *
 * The subnet address bytes are written to *net and the prefix length
 * to *mask.
 */
static void ts2subnet(traffic_selector_t* ts,
					  xfrm_address_t *net, u_int8_t *mask)
{
	host_t *net_host;
	chunk_t net_chunk;

	ts->to_subnet(ts, &net_host, mask);
	net_chunk = net_host->get_address(net_host);
	memcpy(net, net_chunk.ptr, net_chunk.len);
	net_host->destroy(net_host);
}
413
414 /**
415 * convert a traffic selector port range to port/portmask
416 */
417 static void ts2ports(traffic_selector_t* ts,
418 u_int16_t *port, u_int16_t *mask)
419 {
420 /* linux does not seem to accept complex portmasks. Only
421 * any or a specific port is allowed. We set to any, if we have
422 * a port range, or to a specific, if we have one port only.
423 */
424 u_int16_t from, to;
425
426 from = ts->get_from_port(ts);
427 to = ts->get_to_port(ts);
428
429 if (from == to)
430 {
431 *port = htons(from);
432 *mask = ~0;
433 }
434 else
435 {
436 *port = 0;
437 *mask = 0;
438 }
439 }
440
/**
 * Convert a pair of traffic_selectors to a xfrm_selector.
 *
 * The selector family is derived from the source TS type; subnet and
 * port fields are filled via ts2subnet()/ts2ports().
 */
static struct xfrm_selector ts2selector(traffic_selector_t *src,
										traffic_selector_t *dst)
{
	struct xfrm_selector sel;

	memset(&sel, 0, sizeof(sel));
	sel.family = (src->get_type(src) == TS_IPV4_ADDR_RANGE) ? AF_INET : AF_INET6;
	/* src or dest proto may be "any" (0), use more restrictive one */
	sel.proto = max(src->get_protocol(src), dst->get_protocol(dst));
	ts2subnet(dst, &sel.daddr, &sel.prefixlen_d);
	ts2subnet(src, &sel.saddr, &sel.prefixlen_s);
	ts2ports(dst, &sel.dport, &sel.dport_mask);
	ts2ports(src, &sel.sport, &sel.sport_mask);
	/* match any interface and user */
	sel.ifindex = 0;
	sel.user = 0;

	return sel;
}
462
/**
 * Convert a xfrm_selector to a src|dst traffic_selector.
 *
 * @param sel	kernel selector to convert
 * @param src	TRUE to build the TS from the source side, FALSE for dest
 * @return		allocated traffic selector, NULL if the family is unknown
 */
static traffic_selector_t* selector2ts(struct xfrm_selector *sel, bool src)
{
	u_char *addr;
	u_int8_t prefixlen;
	u_int16_t port = 0;
	host_t *host = NULL;

	if (src)
	{
		addr = (u_char*)&sel->saddr;
		prefixlen = sel->prefixlen_s;
		/* a non-zero mask means a single specific port (see ts2ports) */
		if (sel->sport_mask)
		{
			port = htons(sel->sport);
		}
	}
	else
	{
		addr = (u_char*)&sel->daddr;
		prefixlen = sel->prefixlen_d;
		if (sel->dport_mask)
		{
			port = htons(sel->dport);
		}
	}

	/* The Linux 2.6 kernel does not set the selector's family field,
	 * so as a kludge we additionally test the prefix length.
	 */
	if (sel->family == AF_INET || sel->prefixlen_s == 32)
	{
		host = host_create_from_chunk(AF_INET, chunk_create(addr, 4), 0);
	}
	else if (sel->family == AF_INET6 || sel->prefixlen_s == 128)
	{
		host = host_create_from_chunk(AF_INET6, chunk_create(addr, 16), 0);
	}

	if (host)
	{
		return traffic_selector_create_from_subnet(host, prefixlen,
												   sel->proto, port);
	}
	return NULL;
}
511
/**
 * process a XFRM_MSG_ACQUIRE from kernel
 *
 * Extracts reqid and protocol from the XFRMA_TMPL attribute, converts
 * the selector to a TS pair and forwards the acquire to the generic
 * kernel interface. Acquires for anything other than AH/ESP (e.g.
 * IPCOMP) are ignored.
 */
static void process_acquire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
{
	u_int32_t reqid = 0;
	int proto = 0;
	traffic_selector_t *src_ts, *dst_ts;
	struct xfrm_user_acquire *acquire;
	struct rtattr *rta;
	size_t rtasize;

	acquire = (struct xfrm_user_acquire*)NLMSG_DATA(hdr);
	rta = XFRM_RTA(hdr, struct xfrm_user_acquire);
	rtasize = XFRM_PAYLOAD(hdr, struct xfrm_user_acquire);

	DBG2(DBG_KNL, "received a XFRM_MSG_ACQUIRE");

	while (RTA_OK(rta, rtasize))
	{
		DBG2(DBG_KNL, "  %N", xfrm_attr_type_names, rta->rta_type);

		if (rta->rta_type == XFRMA_TMPL)
		{
			struct xfrm_user_tmpl* tmpl;

			tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rta);
			reqid = tmpl->reqid;
			proto = tmpl->id.proto;
		}
		rta = RTA_NEXT(rta, rtasize);
	}
	switch (proto)
	{
		case 0:
		case IPPROTO_ESP:
		case IPPROTO_AH:
			break;
		default:
			/* acquire for AH/ESP only, not for IPCOMP */
			return;
	}
	src_ts = selector2ts(&acquire->sel, TRUE);
	dst_ts = selector2ts(&acquire->sel, FALSE);

	hydra->kernel_interface->acquire(hydra->kernel_interface, reqid, src_ts,
									 dst_ts);
}
560
/**
 * process a XFRM_MSG_EXPIRE from kernel
 *
 * Forwards soft (rekey) and hard expires of AH/ESP SAs to the generic
 * kernel interface; expires of other SAs (e.g. IPCOMP) are ignored.
 */
static void process_expire(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
{
	u_int8_t protocol;
	u_int32_t spi, reqid;
	struct xfrm_user_expire *expire;

	expire = (struct xfrm_user_expire*)NLMSG_DATA(hdr);
	protocol = expire->state.id.proto;
	spi = expire->state.id.spi;
	reqid = expire->state.reqid;

	DBG2(DBG_KNL, "received a XFRM_MSG_EXPIRE");

	if (protocol != IPPROTO_ESP && protocol != IPPROTO_AH)
	{
		DBG2(DBG_KNL, "ignoring XFRM_MSG_EXPIRE for SA with SPI %.8x and "
					  "reqid {%u} which is not a CHILD_SA", ntohl(spi), reqid);
		return;
	}

	/* expire->hard distinguishes a hard expire from a soft (rekey) one */
	hydra->kernel_interface->expire(hydra->kernel_interface, reqid, protocol,
									spi, expire->hard != 0);
}
587
588 /**
589 * process a XFRM_MSG_MIGRATE from kernel
590 */
591 static void process_migrate(private_kernel_netlink_ipsec_t *this, struct nlmsghdr *hdr)
592 {
593 traffic_selector_t *src_ts, *dst_ts;
594 host_t *local = NULL, *remote = NULL;
595 host_t *old_src = NULL, *old_dst = NULL;
596 host_t *new_src = NULL, *new_dst = NULL;
597 struct xfrm_userpolicy_id *policy_id;
598 struct rtattr *rta;
599 size_t rtasize;
600 u_int32_t reqid = 0;
601 policy_dir_t dir;
602
603 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
604 rta = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
605 rtasize = XFRM_PAYLOAD(hdr, struct xfrm_userpolicy_id);
606
607 DBG2(DBG_KNL, "received a XFRM_MSG_MIGRATE");
608
609 src_ts = selector2ts(&policy_id->sel, TRUE);
610 dst_ts = selector2ts(&policy_id->sel, FALSE);
611 dir = (policy_dir_t)policy_id->dir;
612
613 DBG2(DBG_KNL, " policy: %R === %R %N", src_ts, dst_ts, policy_dir_names);
614
615 while (RTA_OK(rta, rtasize))
616 {
617 DBG2(DBG_KNL, " %N", xfrm_attr_type_names, rta->rta_type);
618 if (rta->rta_type == XFRMA_KMADDRESS)
619 {
620 struct xfrm_user_kmaddress *kmaddress;
621
622 kmaddress = (struct xfrm_user_kmaddress*)RTA_DATA(rta);
623 local = xfrm2host(kmaddress->family, &kmaddress->local, 0);
624 remote = xfrm2host(kmaddress->family, &kmaddress->remote, 0);
625 DBG2(DBG_KNL, " kmaddress: %H...%H", local, remote);
626 }
627 else if (rta->rta_type == XFRMA_MIGRATE)
628 {
629 struct xfrm_user_migrate *migrate;
630
631 migrate = (struct xfrm_user_migrate*)RTA_DATA(rta);
632 old_src = xfrm2host(migrate->old_family, &migrate->old_saddr, 0);
633 old_dst = xfrm2host(migrate->old_family, &migrate->old_daddr, 0);
634 new_src = xfrm2host(migrate->new_family, &migrate->new_saddr, 0);
635 new_dst = xfrm2host(migrate->new_family, &migrate->new_daddr, 0);
636 reqid = migrate->reqid;
637 DBG2(DBG_KNL, " migrate %H...%H to %H...%H, reqid {%u}",
638 old_src, old_dst, new_src, new_dst, reqid);
639 DESTROY_IF(old_src);
640 DESTROY_IF(old_dst);
641 DESTROY_IF(new_src);
642 DESTROY_IF(new_dst);
643 }
644 rta = RTA_NEXT(rta, rtasize);
645 }
646
647 if (src_ts && dst_ts && local && remote)
648 {
649 hydra->kernel_interface->migrate(hydra->kernel_interface, reqid,
650 src_ts, dst_ts, dir, local, remote);
651 }
652 else
653 {
654 DESTROY_IF(src_ts);
655 DESTROY_IF(dst_ts);
656 DESTROY_IF(local);
657 DESTROY_IF(remote);
658 }
659 }
660
/**
 * process a XFRM_MSG_MAPPING from kernel
 *
 * A NAT mapping change was detected for a UDP-encapsulated ESP SA;
 * forward the new remote endpoint to the generic kernel interface.
 * Mappings for non-ESP SAs are ignored.
 */
static void process_mapping(private_kernel_netlink_ipsec_t *this,
							struct nlmsghdr *hdr)
{
	u_int32_t spi, reqid;
	struct xfrm_user_mapping *mapping;
	host_t *host;

	mapping = (struct xfrm_user_mapping*)NLMSG_DATA(hdr);
	spi = mapping->id.spi;
	reqid = mapping->reqid;

	DBG2(DBG_KNL, "received a XFRM_MSG_MAPPING");

	if (mapping->id.proto == IPPROTO_ESP)
	{
		host = xfrm2host(mapping->id.family, &mapping->new_saddr,
						 mapping->new_sport);
		if (host)
		{
			hydra->kernel_interface->mapping(hydra->kernel_interface, reqid,
											 spi, host);
		}
	}
}
688
/**
 * Receives events from kernel
 *
 * Blocking callback job: reads one datagram from the xfrm event socket
 * (cancellable while blocked in recvfrom) and dispatches each contained
 * netlink message to the matching process_*() handler. Always requeues
 * itself; transient receive errors back off for a second.
 */
static job_requeue_t receive_events(private_kernel_netlink_ipsec_t *this)
{
	char response[1024];
	struct nlmsghdr *hdr = (struct nlmsghdr*)response;
	struct sockaddr_nl addr;
	socklen_t addr_len = sizeof(addr);
	int len;
	bool oldstate;

	/* allow thread cancellation only while blocked in recvfrom() */
	oldstate = thread_cancelability(TRUE);
	len = recvfrom(this->socket_xfrm_events, response, sizeof(response), 0,
				   (struct sockaddr*)&addr, &addr_len);
	thread_cancelability(oldstate);

	if (len < 0)
	{
		switch (errno)
		{
			case EINTR:
				/* interrupted, try again */
				return JOB_REQUEUE_DIRECT;
			case EAGAIN:
				/* no data ready, select again */
				return JOB_REQUEUE_DIRECT;
			default:
				DBG1(DBG_KNL, "unable to receive from xfrm event socket");
				sleep(1);
				return JOB_REQUEUE_FAIR;
		}
	}

	if (addr.nl_pid != 0)
	{	/* not from kernel. not interested, try another one */
		return JOB_REQUEUE_DIRECT;
	}

	/* a single datagram may carry multiple netlink messages */
	while (NLMSG_OK(hdr, len))
	{
		switch (hdr->nlmsg_type)
		{
			case XFRM_MSG_ACQUIRE:
				process_acquire(this, hdr);
				break;
			case XFRM_MSG_EXPIRE:
				process_expire(this, hdr);
				break;
			case XFRM_MSG_MIGRATE:
				process_migrate(this, hdr);
				break;
			case XFRM_MSG_MAPPING:
				process_mapping(this, hdr);
				break;
			default:
				DBG1(DBG_KNL, "received unknown event from xfrm event socket: %d", hdr->nlmsg_type);
				break;
		}
		hdr = NLMSG_NEXT(hdr, len);
	}
	return JOB_REQUEUE_DIRECT;
}
752
/**
 * Get an SPI for a specific protocol from the kernel.
 *
 * Sends an XFRM_MSG_ALLOCSPI request; the kernel answers with an
 * XFRM_MSG_NEWSA carrying the allocated SPI (in network byte order)
 * or an NLMSG_ERROR on failure.
 *
 * @param src		source address of the prospective SA
 * @param dst		destination address of the prospective SA
 * @param proto		IP protocol (IPPROTO_ESP/AH/COMP)
 * @param min		lower bound of the requested SPI range
 * @param max		upper bound of the requested SPI range
 * @param reqid		reqid to associate with the SA
 * @param spi		receives the allocated SPI on success
 * @return			SUCCESS, or FAILED if no SPI was allocated
 */
static status_t get_spi_internal(private_kernel_netlink_ipsec_t *this,
	host_t *src, host_t *dst, u_int8_t proto, u_int32_t min, u_int32_t max,
	u_int32_t reqid, u_int32_t *spi)
{
	netlink_buf_t request;
	struct nlmsghdr *hdr, *out;
	struct xfrm_userspi_info *userspi;
	u_int32_t received_spi = 0;
	size_t len;

	memset(&request, 0, sizeof(request));

	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_type = XFRM_MSG_ALLOCSPI;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userspi_info));

	userspi = (struct xfrm_userspi_info*)NLMSG_DATA(hdr);
	host2xfrm(src, &userspi->info.saddr);
	host2xfrm(dst, &userspi->info.id.daddr);
	userspi->info.id.proto = proto;
	userspi->info.mode = XFRM_MODE_TUNNEL;
	userspi->info.reqid = reqid;
	userspi->info.family = src->get_family(src);
	userspi->min = min;
	userspi->max = max;

	if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
	{
		hdr = out;
		while (NLMSG_OK(hdr, len))
		{
			switch (hdr->nlmsg_type)
			{
				case XFRM_MSG_NEWSA:
				{
					struct xfrm_usersa_info* usersa = NLMSG_DATA(hdr);
					received_spi = usersa->id.spi;
					break;
				}
				case NLMSG_ERROR:
				{
					struct nlmsgerr *err = NLMSG_DATA(hdr);

					DBG1(DBG_KNL, "allocating SPI failed: %s (%d)",
						 strerror(-err->error), -err->error);
					break;
				}
				default:
					hdr = NLMSG_NEXT(hdr, len);
					continue;
				case NLMSG_DONE:
					break;
			}
			/* NEWSA, ERROR and DONE all terminate the parse loop */
			break;
		}
		free(out);
	}

	if (received_spi == 0)
	{
		return FAILED;
	}

	*spi = received_spi;
	return SUCCESS;
}
823
/**
 * Allocate an ESP/AH SPI from the kernel, restricted to the
 * 0xc0000000..0xcFFFFFFF range. *spi is in network byte order.
 */
METHOD(kernel_ipsec_t, get_spi, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	u_int8_t protocol, u_int32_t reqid, u_int32_t *spi)
{
	DBG2(DBG_KNL, "getting SPI for reqid {%u}", reqid);

	if (get_spi_internal(this, src, dst, protocol,
			0xc0000000, 0xcFFFFFFF, reqid, spi) != SUCCESS)
	{
		DBG1(DBG_KNL, "unable to get SPI for reqid {%u}", reqid);
		return FAILED;
	}

	DBG2(DBG_KNL, "got SPI %.8x for reqid {%u}", ntohl(*spi), reqid);

	return SUCCESS;
}
841
/**
 * Allocate an IPComp CPI from the kernel. CPIs are 16 bit, so the SPI
 * allocation is restricted to 0x100..0xEFFF and the result is folded
 * into a 16-bit value in network byte order.
 */
METHOD(kernel_ipsec_t, get_cpi, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	u_int32_t reqid, u_int16_t *cpi)
{
	u_int32_t received_spi = 0;

	DBG2(DBG_KNL, "getting CPI for reqid {%u}", reqid);

	if (get_spi_internal(this, src, dst,
			IPPROTO_COMP, 0x100, 0xEFFF, reqid, &received_spi) != SUCCESS)
	{
		DBG1(DBG_KNL, "unable to get CPI for reqid {%u}", reqid);
		return FAILED;
	}

	*cpi = htons((u_int16_t)ntohl(received_spi));

	DBG2(DBG_KNL, "got CPI %.4x for reqid {%u}", ntohs(*cpi), reqid);

	return SUCCESS;
}
863
/**
 * Install an SA in the kernel SAD via XFRM_MSG_NEWSA/UPDSA.
 *
 * Builds a single netlink message consisting of an xfrm_usersa_info
 * header followed by rtattr-encoded algorithm, encapsulation and mark
 * attributes. If IPComp is requested (cpi != 0), an IPComp SA is
 * installed first through a recursive call, and the ESP/AH SA is then
 * switched to transport mode (the IPComp SA carries the tunnel).
 */
METHOD(kernel_ipsec_t, add_sa, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	u_int32_t spi, u_int8_t protocol, u_int32_t reqid, mark_t mark,
	lifetime_cfg_t *lifetime, u_int16_t enc_alg, chunk_t enc_key,
	u_int16_t int_alg, chunk_t int_key, ipsec_mode_t mode, u_int16_t ipcomp,
	u_int16_t cpi, bool encap, bool inbound,
	traffic_selector_t* src_ts, traffic_selector_t* dst_ts)
{
	netlink_buf_t request;
	char *alg_name;
	struct nlmsghdr *hdr;
	struct xfrm_usersa_info *sa;
	/* ICV size in bits; base for the AEAD fall-through below */
	u_int16_t icv_size = 64;

	/* if IPComp is used, we install an additional IPComp SA. if the cpi is 0
	 * we are in the recursive call below */
	if (ipcomp != IPCOMP_NONE && cpi != 0)
	{
		lifetime_cfg_t lft = {{0,0,0},{0,0,0},{0,0,0}};
		add_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, reqid, mark,
			   &lft, ENCR_UNDEFINED, chunk_empty, AUTH_UNDEFINED, chunk_empty,
			   mode, ipcomp, 0, FALSE, inbound, NULL, NULL);
		ipcomp = IPCOMP_NONE;
		/* use transport mode ESP SA, IPComp uses tunnel mode */
		mode = MODE_TRANSPORT;
	}

	memset(&request, 0, sizeof(request));

	if (mark.value)
	{
		DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u}  "
					  "(mark %u/0x%8x)", ntohl(spi), reqid, mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "adding SAD entry with SPI %.8x and reqid {%u}",
					  ntohl(spi), reqid);
	}
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	/* UPDSA for inbound SAs: the SPI was already reserved via ALLOCSPI */
	hdr->nlmsg_type = inbound ? XFRM_MSG_UPDSA : XFRM_MSG_NEWSA;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));

	sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
	host2xfrm(src, &sa->saddr);
	host2xfrm(dst, &sa->id.daddr);
	sa->id.spi = spi;
	sa->id.proto = protocol;
	sa->family = src->get_family(src);
	sa->mode = mode2kernel(mode);
	switch (mode)
	{
		case MODE_TUNNEL:
			/* allow IPv4-in-IPv6 and vice versa (Linux >= 2.6.26) */
			sa->flags |= XFRM_STATE_AF_UNSPEC;
			break;
		case MODE_BEET:
			/* BEET needs the traffic selectors installed in the SA itself */
			if(src_ts && dst_ts)
			{
				sa->sel = ts2selector(src_ts, dst_ts);
			}
			break;
		default:
			break;
	}

	sa->replay_window = (protocol == IPPROTO_COMP) ? 0 : 32;
	sa->reqid = reqid;
	/* zero lifetime values map to XFRM_INF (unlimited) */
	sa->lft.soft_byte_limit = XFRM_LIMIT(lifetime->bytes.rekey);
	sa->lft.hard_byte_limit = XFRM_LIMIT(lifetime->bytes.life);
	sa->lft.soft_packet_limit = XFRM_LIMIT(lifetime->packets.rekey);
	sa->lft.hard_packet_limit = XFRM_LIMIT(lifetime->packets.life);
	/* we use lifetimes since added, not since used */
	sa->lft.soft_add_expires_seconds = lifetime->time.rekey;
	sa->lft.hard_add_expires_seconds = lifetime->time.life;
	sa->lft.soft_use_expires_seconds = 0;
	sa->lft.hard_use_expires_seconds = 0;

	/* rtattr cursor: attributes are appended after the usersa_info */
	struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_info);

	switch (enc_alg)
	{
		case ENCR_UNDEFINED:
			/* no encryption */
			break;
		case ENCR_AES_CCM_ICV16:
		case ENCR_AES_GCM_ICV16:
		case ENCR_NULL_AUTH_AES_GMAC:
		case ENCR_CAMELLIA_CCM_ICV16:
			icv_size += 32;
			/* FALL */
		case ENCR_AES_CCM_ICV12:
		case ENCR_AES_GCM_ICV12:
		case ENCR_CAMELLIA_CCM_ICV12:
			icv_size += 32;
			/* FALL */
		case ENCR_AES_CCM_ICV8:
		case ENCR_AES_GCM_ICV8:
		case ENCR_CAMELLIA_CCM_ICV8:
		{
			/* AEAD algorithms: key and ICV length go into xfrm_algo_aead */
			struct xfrm_algo_aead *algo;

			alg_name = lookup_algorithm(encryption_algs, enc_alg);
			if (alg_name == NULL)
			{
				DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
					 encryption_algorithm_names, enc_alg);
				return FAILED;
			}
			DBG2(DBG_KNL, "  using encryption algorithm %N with key size %d",
				 encryption_algorithm_names, enc_alg, enc_key.len * 8);

			rthdr->rta_type = XFRMA_ALG_AEAD;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_aead) + enc_key.len);
			hdr->nlmsg_len += rthdr->rta_len;
			if (hdr->nlmsg_len > sizeof(request))
			{
				return FAILED;
			}

			algo = (struct xfrm_algo_aead*)RTA_DATA(rthdr);
			algo->alg_key_len = enc_key.len * 8;
			algo->alg_icv_len = icv_size;
			strcpy(algo->alg_name, alg_name);
			memcpy(algo->alg_key, enc_key.ptr, enc_key.len);

			rthdr = XFRM_RTA_NEXT(rthdr);
			break;
		}
		default:
		{
			/* classic ciphers: key goes into a plain xfrm_algo */
			struct xfrm_algo *algo;

			alg_name = lookup_algorithm(encryption_algs, enc_alg);
			if (alg_name == NULL)
			{
				DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
					 encryption_algorithm_names, enc_alg);
				return FAILED;
			}
			DBG2(DBG_KNL, "  using encryption algorithm %N with key size %d",
				 encryption_algorithm_names, enc_alg, enc_key.len * 8);

			rthdr->rta_type = XFRMA_ALG_CRYPT;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + enc_key.len);
			hdr->nlmsg_len += rthdr->rta_len;
			if (hdr->nlmsg_len > sizeof(request))
			{
				return FAILED;
			}

			algo = (struct xfrm_algo*)RTA_DATA(rthdr);
			algo->alg_key_len = enc_key.len * 8;
			strcpy(algo->alg_name, alg_name);
			memcpy(algo->alg_key, enc_key.ptr, enc_key.len);

			rthdr = XFRM_RTA_NEXT(rthdr);
		}
	}

	if (int_alg != AUTH_UNDEFINED)
	{
		alg_name = lookup_algorithm(integrity_algs, int_alg);
		if (alg_name == NULL)
		{
			DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
				 integrity_algorithm_names, int_alg);
			return FAILED;
		}
		DBG2(DBG_KNL, "  using integrity algorithm %N with key size %d",
			 integrity_algorithm_names, int_alg, int_key.len * 8);

		if (int_alg == AUTH_HMAC_SHA2_256_128)
		{
			struct xfrm_algo_auth* algo;

			/* the kernel uses SHA256 with 96 bit truncation by default,
			 * use specified truncation size supported by newer kernels */
			rthdr->rta_type = XFRMA_ALG_AUTH_TRUNC;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo_auth) + int_key.len);

			hdr->nlmsg_len += rthdr->rta_len;
			if (hdr->nlmsg_len > sizeof(request))
			{
				return FAILED;
			}

			algo = (struct xfrm_algo_auth*)RTA_DATA(rthdr);
			algo->alg_key_len = int_key.len * 8;
			algo->alg_trunc_len = 128;
			strcpy(algo->alg_name, alg_name);
			memcpy(algo->alg_key, int_key.ptr, int_key.len);
		}
		else
		{
			struct xfrm_algo* algo;

			rthdr->rta_type = XFRMA_ALG_AUTH;
			rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo) + int_key.len);

			hdr->nlmsg_len += rthdr->rta_len;
			if (hdr->nlmsg_len > sizeof(request))
			{
				return FAILED;
			}

			algo = (struct xfrm_algo*)RTA_DATA(rthdr);
			algo->alg_key_len = int_key.len * 8;
			strcpy(algo->alg_name, alg_name);
			memcpy(algo->alg_key, int_key.ptr, int_key.len);
		}
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (ipcomp != IPCOMP_NONE)
	{
		rthdr->rta_type = XFRMA_ALG_COMP;
		alg_name = lookup_algorithm(compression_algs, ipcomp);
		if (alg_name == NULL)
		{
			DBG1(DBG_KNL, "algorithm %N not supported by kernel!",
				 ipcomp_transform_names, ipcomp);
			return FAILED;
		}
		DBG2(DBG_KNL, "  using compression algorithm %N",
			 ipcomp_transform_names, ipcomp);

		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_algo));
		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		/* compression has no key */
		struct xfrm_algo* algo = (struct xfrm_algo*)RTA_DATA(rthdr);
		algo->alg_key_len = 0;
		strcpy(algo->alg_name, alg_name);

		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (encap)
	{
		/* UDP encapsulation for NAT traversal (RFC 3948) */
		struct xfrm_encap_tmpl *tmpl;

		rthdr->rta_type = XFRMA_ENCAP;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));

		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rthdr);
		tmpl->encap_type = UDP_ENCAP_ESPINUDP;
		tmpl->encap_sport = htons(src->get_port(src));
		tmpl->encap_dport = htons(dst->get_port(dst));
		memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
		/* encap_oa could probably be derived from the
		 * traffic selectors [rfc4306, p39]. In the netlink kernel implementation
		 * pluto does the same as we do here but it uses encap_oa in the
		 * pfkey implementation. BUT as /usr/src/linux/net/key/af_key.c indicates
		 * the kernel ignores it anyway
		 *   -> does that mean that NAT-T encap doesn't work in transport mode?
		 * No. The reason the kernel ignores NAT-OA is that it recomputes
		 * (or, rather, just ignores) the checksum. If packets pass
		 * the IPsec checks it marks them "checksum ok" so OA isn't needed. */
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (mark.value)
	{
		struct xfrm_mark *mrk;

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));

		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
		rthdr = XFRM_RTA_NEXT(rthdr);
	}

	if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
	{
		if (mark.value)
		{
			DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x  "
						  "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
		}
		else
		{
			DBG1(DBG_KNL, "unable to add SAD entry with SPI %.8x", ntohl(spi));
		}
		return FAILED;
	}
	return SUCCESS;
}
1169
1170 /**
1171 * Get the replay state (i.e. sequence numbers) of an SA.
1172 */
1173 static status_t get_replay_state(private_kernel_netlink_ipsec_t *this,
1174 u_int32_t spi, u_int8_t protocol, host_t *dst,
1175 struct xfrm_replay_state *replay)
1176 {
1177 netlink_buf_t request;
1178 struct nlmsghdr *hdr, *out = NULL;
1179 struct xfrm_aevent_id *out_aevent = NULL, *aevent_id;
1180 size_t len;
1181 struct rtattr *rta;
1182 size_t rtasize;
1183
1184 memset(&request, 0, sizeof(request));
1185
1186 DBG2(DBG_KNL, "querying replay state from SAD entry with SPI %.8x", ntohl(spi));
1187
1188 hdr = (struct nlmsghdr*)request;
1189 hdr->nlmsg_flags = NLM_F_REQUEST;
1190 hdr->nlmsg_type = XFRM_MSG_GETAE;
1191 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_aevent_id));
1192
1193 aevent_id = (struct xfrm_aevent_id*)NLMSG_DATA(hdr);
1194 aevent_id->flags = XFRM_AE_RVAL;
1195
1196 host2xfrm(dst, &aevent_id->sa_id.daddr);
1197 aevent_id->sa_id.spi = spi;
1198 aevent_id->sa_id.proto = protocol;
1199 aevent_id->sa_id.family = dst->get_family(dst);
1200
1201 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1202 {
1203 hdr = out;
1204 while (NLMSG_OK(hdr, len))
1205 {
1206 switch (hdr->nlmsg_type)
1207 {
1208 case XFRM_MSG_NEWAE:
1209 {
1210 out_aevent = NLMSG_DATA(hdr);
1211 break;
1212 }
1213 case NLMSG_ERROR:
1214 {
1215 struct nlmsgerr *err = NLMSG_DATA(hdr);
1216 DBG1(DBG_KNL, "querying replay state from SAD entry failed: %s (%d)",
1217 strerror(-err->error), -err->error);
1218 break;
1219 }
1220 default:
1221 hdr = NLMSG_NEXT(hdr, len);
1222 continue;
1223 case NLMSG_DONE:
1224 break;
1225 }
1226 break;
1227 }
1228 }
1229
1230 if (out_aevent == NULL)
1231 {
1232 DBG1(DBG_KNL, "unable to query replay state from SAD entry with SPI %.8x",
1233 ntohl(spi));
1234 free(out);
1235 return FAILED;
1236 }
1237
1238 rta = XFRM_RTA(out, struct xfrm_aevent_id);
1239 rtasize = XFRM_PAYLOAD(out, struct xfrm_aevent_id);
1240 while(RTA_OK(rta, rtasize))
1241 {
1242 if (rta->rta_type == XFRMA_REPLAY_VAL &&
1243 RTA_PAYLOAD(rta) == sizeof(struct xfrm_replay_state))
1244 {
1245 memcpy(replay, RTA_DATA(rta), RTA_PAYLOAD(rta));
1246 free(out);
1247 return SUCCESS;
1248 }
1249 rta = RTA_NEXT(rta, rtasize);
1250 }
1251
1252 DBG1(DBG_KNL, "unable to query replay state from SAD entry with SPI %.8x",
1253 ntohl(spi));
1254 free(out);
1255 return FAILED;
1256 }
1257
/**
 * Query the usage statistics (byte count) of an SA via XFRM_MSG_GETSA.
 * Implements kernel_ipsec_t.query_sa; 'src' is unused by the netlink query.
 */
METHOD(kernel_ipsec_t, query_sa, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	u_int32_t spi, u_int8_t protocol, mark_t mark, u_int64_t *bytes)
{
	netlink_buf_t request;
	struct nlmsghdr *out = NULL, *hdr;
	struct xfrm_usersa_id *sa_id;
	struct xfrm_usersa_info *sa = NULL;
	size_t len;

	memset(&request, 0, sizeof(request));

	if (mark.value)
	{
		DBG2(DBG_KNL, "querying SAD entry with SPI %.8x (mark %u/0x%8x)",
			 ntohl(spi), mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "querying SAD entry with SPI %.8x", ntohl(spi));
	}
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_type = XFRM_MSG_GETSA;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));

	/* the SA is identified by destination address, SPI, proto and family */
	sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
	host2xfrm(dst, &sa_id->daddr);
	sa_id->spi = spi;
	sa_id->proto = protocol;
	sa_id->family = dst->get_family(dst);

	if (mark.value)
	{
		/* append an XFRMA_MARK attribute to select the marked SA */
		struct xfrm_mark *mrk;
		struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	/* send() allocates the reply into 'out'; freed on all exits below */
	if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
	{
		hdr = out;
		while (NLMSG_OK(hdr, len))
		{
			switch (hdr->nlmsg_type)
			{
				case XFRM_MSG_NEWSA:
				{
					/* points into 'out', valid until free(out) */
					sa = (struct xfrm_usersa_info*)NLMSG_DATA(hdr);
					break;
				}
				case NLMSG_ERROR:
				{
					struct nlmsgerr *err = NLMSG_DATA(hdr);

					if (mark.value)
					{
						DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
							 "(mark %u/0x%8x) failed: %s (%d)",
							 ntohl(spi), mark.value, mark.mask,
							 strerror(-err->error), -err->error);
					}
					else
					{
						DBG1(DBG_KNL, "querying SAD entry with SPI %.8x "
							 "failed: %s (%d)", ntohl(spi),
							 strerror(-err->error), -err->error);
					}
					break;
				}
				default:
					/* skip unrelated messages */
					hdr = NLMSG_NEXT(hdr, len);
					continue;
				case NLMSG_DONE:
					break;
			}
			break;
		}
	}

	if (sa == NULL)
	{
		/* NOTE(review): logged at level 2 while other SAD failures here use
		 * DBG1 — possibly intentional (polling), verify before changing */
		DBG2(DBG_KNL, "unable to query SAD entry with SPI %.8x", ntohl(spi));
		free(out);
		return FAILED;
	}
	*bytes = sa->curlft.bytes;

	free(out);
	return SUCCESS;
}
1360
1361 METHOD(kernel_ipsec_t, del_sa, status_t,
1362 private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
1363 u_int32_t spi, u_int8_t protocol, u_int16_t cpi, mark_t mark)
1364 {
1365 netlink_buf_t request;
1366 struct nlmsghdr *hdr;
1367 struct xfrm_usersa_id *sa_id;
1368
1369 /* if IPComp was used, we first delete the additional IPComp SA */
1370 if (cpi)
1371 {
1372 del_sa(this, src, dst, htonl(ntohs(cpi)), IPPROTO_COMP, 0, mark);
1373 }
1374
1375 memset(&request, 0, sizeof(request));
1376
1377 if (mark.value)
1378 {
1379 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x (mark %u/0x%8x)",
1380 ntohl(spi), mark.value, mark.mask);
1381 }
1382 else
1383 {
1384 DBG2(DBG_KNL, "deleting SAD entry with SPI %.8x", ntohl(spi));
1385 }
1386 hdr = (struct nlmsghdr*)request;
1387 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1388 hdr->nlmsg_type = XFRM_MSG_DELSA;
1389 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1390
1391 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1392 host2xfrm(dst, &sa_id->daddr);
1393 sa_id->spi = spi;
1394 sa_id->proto = protocol;
1395 sa_id->family = dst->get_family(dst);
1396
1397 if (mark.value)
1398 {
1399 struct xfrm_mark *mrk;
1400 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_usersa_id);
1401
1402 rthdr->rta_type = XFRMA_MARK;
1403 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
1404 hdr->nlmsg_len += rthdr->rta_len;
1405 if (hdr->nlmsg_len > sizeof(request))
1406 {
1407 return FAILED;
1408 }
1409
1410 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
1411 mrk->v = mark.value;
1412 mrk->m = mark.mask;
1413 }
1414
1415 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1416 {
1417 if (mark.value)
1418 {
1419 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x "
1420 "(mark %u/0x%8x)", ntohl(spi), mark.value, mark.mask);
1421 }
1422 else
1423 {
1424 DBG1(DBG_KNL, "unable to delete SAD entry with SPI %.8x", ntohl(spi));
1425 }
1426 return FAILED;
1427 }
1428 if (mark.value)
1429 {
1430 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x (mark %u/0x%8x)",
1431 ntohl(spi), mark.value, mark.mask);
1432 }
1433 else
1434 {
1435 DBG2(DBG_KNL, "deleted SAD entry with SPI %.8x", ntohl(spi));
1436 }
1437 return SUCCESS;
1438 }
1439
1440 METHOD(kernel_ipsec_t, update_sa, status_t,
1441 private_kernel_netlink_ipsec_t *this, u_int32_t spi, u_int8_t protocol,
1442 u_int16_t cpi, host_t *src, host_t *dst, host_t *new_src, host_t *new_dst,
1443 bool old_encap, bool new_encap, mark_t mark)
1444 {
1445 netlink_buf_t request;
1446 u_char *pos;
1447 struct nlmsghdr *hdr, *out = NULL;
1448 struct xfrm_usersa_id *sa_id;
1449 struct xfrm_usersa_info *out_sa = NULL, *sa;
1450 size_t len;
1451 struct rtattr *rta;
1452 size_t rtasize;
1453 struct xfrm_encap_tmpl* tmpl = NULL;
1454 bool got_replay_state = FALSE;
1455 struct xfrm_replay_state replay;
1456
1457 /* if IPComp is used, we first update the IPComp SA */
1458 if (cpi)
1459 {
1460 update_sa(this, htonl(ntohs(cpi)), IPPROTO_COMP, 0,
1461 src, dst, new_src, new_dst, FALSE, FALSE, mark);
1462 }
1463
1464 memset(&request, 0, sizeof(request));
1465
1466 DBG2(DBG_KNL, "querying SAD entry with SPI %.8x for update", ntohl(spi));
1467
1468 /* query the existing SA first */
1469 hdr = (struct nlmsghdr*)request;
1470 hdr->nlmsg_flags = NLM_F_REQUEST;
1471 hdr->nlmsg_type = XFRM_MSG_GETSA;
1472 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_id));
1473
1474 sa_id = (struct xfrm_usersa_id*)NLMSG_DATA(hdr);
1475 host2xfrm(dst, &sa_id->daddr);
1476 sa_id->spi = spi;
1477 sa_id->proto = protocol;
1478 sa_id->family = dst->get_family(dst);
1479
1480 if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
1481 {
1482 hdr = out;
1483 while (NLMSG_OK(hdr, len))
1484 {
1485 switch (hdr->nlmsg_type)
1486 {
1487 case XFRM_MSG_NEWSA:
1488 {
1489 out_sa = NLMSG_DATA(hdr);
1490 break;
1491 }
1492 case NLMSG_ERROR:
1493 {
1494 struct nlmsgerr *err = NLMSG_DATA(hdr);
1495 DBG1(DBG_KNL, "querying SAD entry failed: %s (%d)",
1496 strerror(-err->error), -err->error);
1497 break;
1498 }
1499 default:
1500 hdr = NLMSG_NEXT(hdr, len);
1501 continue;
1502 case NLMSG_DONE:
1503 break;
1504 }
1505 break;
1506 }
1507 }
1508 if (out_sa == NULL)
1509 {
1510 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1511 free(out);
1512 return FAILED;
1513 }
1514
1515 /* try to get the replay state */
1516 if (get_replay_state(this, spi, protocol, dst, &replay) == SUCCESS)
1517 {
1518 got_replay_state = TRUE;
1519 }
1520
1521 /* delete the old SA (without affecting the IPComp SA) */
1522 if (del_sa(this, src, dst, spi, protocol, 0, mark) != SUCCESS)
1523 {
1524 DBG1(DBG_KNL, "unable to delete old SAD entry with SPI %.8x", ntohl(spi));
1525 free(out);
1526 return FAILED;
1527 }
1528
1529 DBG2(DBG_KNL, "updating SAD entry with SPI %.8x from %#H..%#H to %#H..%#H",
1530 ntohl(spi), src, dst, new_src, new_dst);
1531 /* copy over the SA from out to request */
1532 hdr = (struct nlmsghdr*)request;
1533 memcpy(hdr, out, min(out->nlmsg_len, sizeof(request)));
1534 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1535 hdr->nlmsg_type = XFRM_MSG_NEWSA;
1536 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_info));
1537 sa = NLMSG_DATA(hdr);
1538 sa->family = new_dst->get_family(new_dst);
1539
1540 if (!src->ip_equals(src, new_src))
1541 {
1542 host2xfrm(new_src, &sa->saddr);
1543 }
1544 if (!dst->ip_equals(dst, new_dst))
1545 {
1546 host2xfrm(new_dst, &sa->id.daddr);
1547 }
1548
1549 rta = XFRM_RTA(out, struct xfrm_usersa_info);
1550 rtasize = XFRM_PAYLOAD(out, struct xfrm_usersa_info);
1551 pos = (u_char*)XFRM_RTA(hdr, struct xfrm_usersa_info);
1552 while(RTA_OK(rta, rtasize))
1553 {
1554 /* copy all attributes, but not XFRMA_ENCAP if we are disabling it */
1555 if (rta->rta_type != XFRMA_ENCAP || new_encap)
1556 {
1557 if (rta->rta_type == XFRMA_ENCAP)
1558 { /* update encap tmpl */
1559 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1560 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1561 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1562 }
1563 memcpy(pos, rta, rta->rta_len);
1564 pos += RTA_ALIGN(rta->rta_len);
1565 hdr->nlmsg_len += RTA_ALIGN(rta->rta_len);
1566 }
1567 rta = RTA_NEXT(rta, rtasize);
1568 }
1569
1570 rta = (struct rtattr*)pos;
1571 if (tmpl == NULL && new_encap)
1572 { /* add tmpl if we are enabling it */
1573 rta->rta_type = XFRMA_ENCAP;
1574 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_encap_tmpl));
1575
1576 hdr->nlmsg_len += rta->rta_len;
1577 if (hdr->nlmsg_len > sizeof(request))
1578 {
1579 return FAILED;
1580 }
1581
1582 tmpl = (struct xfrm_encap_tmpl*)RTA_DATA(rta);
1583 tmpl->encap_type = UDP_ENCAP_ESPINUDP;
1584 tmpl->encap_sport = ntohs(new_src->get_port(new_src));
1585 tmpl->encap_dport = ntohs(new_dst->get_port(new_dst));
1586 memset(&tmpl->encap_oa, 0, sizeof (xfrm_address_t));
1587
1588 rta = XFRM_RTA_NEXT(rta);
1589 }
1590
1591 if (got_replay_state)
1592 { /* copy the replay data if available */
1593 rta->rta_type = XFRMA_REPLAY_VAL;
1594 rta->rta_len = RTA_LENGTH(sizeof(struct xfrm_replay_state));
1595
1596 hdr->nlmsg_len += rta->rta_len;
1597 if (hdr->nlmsg_len > sizeof(request))
1598 {
1599 return FAILED;
1600 }
1601 memcpy(RTA_DATA(rta), &replay, sizeof(replay));
1602
1603 rta = XFRM_RTA_NEXT(rta);
1604 }
1605
1606 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
1607 {
1608 DBG1(DBG_KNL, "unable to update SAD entry with SPI %.8x", ntohl(spi));
1609 free(out);
1610 return FAILED;
1611 }
1612 free(out);
1613
1614 return SUCCESS;
1615 }
1616
/**
 * Install (or reference-count) an XFRM policy, and optionally a source route.
 *
 * An exactly-matching cached policy only gets its refcount increased and is
 * re-sent as XFRM_MSG_UPDPOLICY; otherwise a new entry is cached and sent as
 * XFRM_MSG_NEWPOLICY. For forward policies in tunnel/BEET mode a source route
 * is installed via the kernel interface (unless disabled in strongswan.conf).
 */
METHOD(kernel_ipsec_t, add_policy, status_t,
	private_kernel_netlink_ipsec_t *this, host_t *src, host_t *dst,
	traffic_selector_t *src_ts, traffic_selector_t *dst_ts,
	policy_dir_t direction, u_int32_t spi, u_int8_t protocol,
	u_int32_t reqid, mark_t mark, ipsec_mode_t mode, u_int16_t ipcomp,
	u_int16_t cpi, bool routed)
{
	policy_entry_t *current, *policy;
	bool found = FALSE;
	netlink_buf_t request;
	struct xfrm_userpolicy_info *policy_info;
	struct nlmsghdr *hdr;

	/* create a policy */
	policy = malloc_thing(policy_entry_t);
	memset(policy, 0, sizeof(policy_entry_t));
	policy->sel = ts2selector(src_ts, dst_ts);
	policy->mark = mark.value & mark.mask;
	policy->direction = direction;

	/* find the policy, which matches EXACTLY */
	this->mutex->lock(this->mutex);
	current = this->policies->get(this->policies, policy);
	if (current)
	{
		/* use existing policy */
		current->refcount++;
		if (mark.value)
		{
			DBG2(DBG_KNL, "policy %R === %R %N (mark %u/0x%8x) "
				 "already exists, increasing refcount",
				 src_ts, dst_ts, policy_dir_names, direction,
				 mark.value, mark.mask);
		}
		else
		{
			DBG2(DBG_KNL, "policy %R === %R %N "
				 "already exists, increasing refcount",
				 src_ts, dst_ts, policy_dir_names, direction);
		}
		free(policy);
		policy = current;
		found = TRUE;
	}
	else
	{	/* apply the new one, if we have no such policy */
		this->policies->put(this->policies, policy, policy);
		policy->refcount = 1;
	}

	if (mark.value)
	{
		DBG2(DBG_KNL, "adding policy %R === %R %N (mark %u/0x%8x)",
			 src_ts, dst_ts, policy_dir_names, direction,
			 mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "adding policy %R === %R %N",
			 src_ts, dst_ts, policy_dir_names, direction);
	}

	memset(&request, 0, sizeof(request));
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	/* update instead of add if the policy was already installed by us */
	hdr->nlmsg_type = found ? XFRM_MSG_UPDPOLICY : XFRM_MSG_NEWPOLICY;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info));

	policy_info = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
	policy_info->sel = policy->sel;
	policy_info->dir = policy->direction;
	/* calculate priority based on source selector size, small size = high prio */
	policy_info->priority = routed ? PRIO_LOW : PRIO_HIGH;
	policy_info->priority -= policy->sel.prefixlen_s * 10;
	policy_info->priority -= policy->sel.proto ? 2 : 0;
	policy_info->priority -= policy->sel.sport_mask ? 1 : 0;
	policy_info->action = XFRM_POLICY_ALLOW;
	policy_info->share = XFRM_SHARE_ANY;
	this->mutex->unlock(this->mutex);

	/* policies don't expire */
	policy_info->lft.soft_byte_limit = XFRM_INF;
	policy_info->lft.soft_packet_limit = XFRM_INF;
	policy_info->lft.hard_byte_limit = XFRM_INF;
	policy_info->lft.hard_packet_limit = XFRM_INF;
	policy_info->lft.soft_add_expires_seconds = 0;
	policy_info->lft.hard_add_expires_seconds = 0;
	policy_info->lft.soft_use_expires_seconds = 0;
	policy_info->lft.hard_use_expires_seconds = 0;

	struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_info);
	rthdr->rta_type = XFRMA_TMPL;
	rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_user_tmpl));

	hdr->nlmsg_len += rthdr->rta_len;
	if (hdr->nlmsg_len > sizeof(request))
	{
		/* NOTE(review): the cached policy entry added above stays in the
		 * hashtable on this and the following FAILED paths — verify whether
		 * it should be removed/unreferenced here */
		return FAILED;
	}

	struct xfrm_user_tmpl *tmpl = (struct xfrm_user_tmpl*)RTA_DATA(rthdr);

	if (ipcomp != IPCOMP_NONE)
	{
		/* first template: the IPComp transform */
		tmpl->reqid = reqid;
		tmpl->id.proto = IPPROTO_COMP;
		tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
		tmpl->mode = mode2kernel(mode);
		/* IPComp is optional for inbound/forward traffic */
		tmpl->optional = direction != POLICY_OUT;
		tmpl->family = src->get_family(src);

		host2xfrm(src, &tmpl->saddr);
		host2xfrm(dst, &tmpl->id.daddr);

		/* add an additional xfrm_user_tmpl */
		rthdr->rta_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
		hdr->nlmsg_len += RTA_LENGTH(sizeof(struct xfrm_user_tmpl));
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		tmpl++;

		/* use transport mode for ESP if we have a tunnel mode IPcomp SA */
		mode = MODE_TRANSPORT;
	}
	else
	{
		/* when using IPcomp, only the IPcomp SA uses tmp src/dst addresses */
		host2xfrm(src, &tmpl->saddr);
		host2xfrm(dst, &tmpl->id.daddr);
	}

	/* (second) template: the actual ESP/AH transform */
	tmpl->reqid = reqid;
	tmpl->id.proto = protocol;
	tmpl->aalgos = tmpl->ealgos = tmpl->calgos = ~0;
	tmpl->mode = mode2kernel(mode);
	tmpl->family = src->get_family(src);
	rthdr = XFRM_RTA_NEXT(rthdr);

	if (mark.value)
	{
		/* restrict the policy to the given mark */
		struct xfrm_mark *mrk;

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));

		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
	{
		DBG1(DBG_KNL, "unable to add policy %R === %R %N", src_ts, dst_ts,
			 policy_dir_names, direction);
		return FAILED;
	}

	/* install a route, if:
	 * - we are NOT updating a policy
	 * - this is a forward policy (to just get one for each child)
	 * - we are in tunnel/BEET mode
	 * - routing is not disabled via strongswan.conf
	 */
	if (policy->route == NULL && direction == POLICY_FWD &&
		mode != MODE_TRANSPORT && this->install_routes)
	{
		route_entry_t *route = malloc_thing(route_entry_t);

		if (hydra->kernel_interface->get_address_by_ts(hydra->kernel_interface,
				dst_ts, &route->src_ip) == SUCCESS)
		{
			/* get the nexthop to src (src as we are in POLICY_FWD).*/
			route->gateway = hydra->kernel_interface->get_nexthop(
										hydra->kernel_interface, src);
			/* install route via outgoing interface */
			route->if_name = hydra->kernel_interface->get_interface(
										hydra->kernel_interface, dst);
			route->dst_net = chunk_alloc(policy->sel.family == AF_INET ? 4 : 16);
			memcpy(route->dst_net.ptr, &policy->sel.saddr, route->dst_net.len);
			route->prefixlen = policy->sel.prefixlen_s;

			if (route->if_name)
			{
				switch (hydra->kernel_interface->add_route(
									hydra->kernel_interface, route->dst_net,
									route->prefixlen, route->gateway,
									route->src_ip, route->if_name))
				{
					default:
						DBG1(DBG_KNL, "unable to install source route for %H",
							 route->src_ip);
						/* FALL */
					case ALREADY_DONE:
						/* route exists, do not uninstall */
						route_entry_destroy(route);
						break;
					case SUCCESS:
						/* cache the installed route */
						policy->route = route;
						break;
				}
			}
			else
			{
				route_entry_destroy(route);
			}
		}
		else
		{
			free(route);
		}
	}
	return SUCCESS;
}
1840
/**
 * Query the last-use time of a policy via XFRM_MSG_GETPOLICY.
 * The kernel's wall-clock use_time is converted to monotonic time.
 */
METHOD(kernel_ipsec_t, query_policy, status_t,
	private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
	traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
	u_int32_t *use_time)
{
	netlink_buf_t request;
	struct nlmsghdr *out = NULL, *hdr;
	struct xfrm_userpolicy_id *policy_id;
	struct xfrm_userpolicy_info *policy = NULL;
	size_t len;

	memset(&request, 0, sizeof(request));

	if (mark.value)
	{
		DBG2(DBG_KNL, "querying policy %R === %R %N (mark %u/0x%8x)",
			 src_ts, dst_ts, policy_dir_names, direction,
			 mark.value, mark.mask);
	}
	else
	{
		DBG2(DBG_KNL, "querying policy %R === %R %N", src_ts, dst_ts,
			 policy_dir_names, direction);
	}
	hdr = (struct nlmsghdr*)request;
	hdr->nlmsg_flags = NLM_F_REQUEST;
	hdr->nlmsg_type = XFRM_MSG_GETPOLICY;
	hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));

	/* the policy is identified by its selector and direction */
	policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
	policy_id->sel = ts2selector(src_ts, dst_ts);
	policy_id->dir = direction;

	if (mark.value)
	{
		/* select the marked policy with a trailing XFRMA_MARK attribute */
		struct xfrm_mark *mrk;
		struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);

		rthdr->rta_type = XFRMA_MARK;
		rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));

		hdr->nlmsg_len += rthdr->rta_len;
		if (hdr->nlmsg_len > sizeof(request))
		{
			return FAILED;
		}

		mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
		mrk->v = mark.value;
		mrk->m = mark.mask;
	}

	/* send() allocates the reply into 'out'; freed on all exits below */
	if (this->socket_xfrm->send(this->socket_xfrm, hdr, &out, &len) == SUCCESS)
	{
		hdr = out;
		while (NLMSG_OK(hdr, len))
		{
			switch (hdr->nlmsg_type)
			{
				case XFRM_MSG_NEWPOLICY:
				{
					/* points into 'out', valid until free(out) */
					policy = (struct xfrm_userpolicy_info*)NLMSG_DATA(hdr);
					break;
				}
				case NLMSG_ERROR:
				{
					struct nlmsgerr *err = NLMSG_DATA(hdr);
					DBG1(DBG_KNL, "querying policy failed: %s (%d)",
						 strerror(-err->error), -err->error);
					break;
				}
				default:
					/* skip unrelated messages */
					hdr = NLMSG_NEXT(hdr, len);
					continue;
				case NLMSG_DONE:
					break;
			}
			break;
		}
	}

	if (policy == NULL)
	{
		DBG2(DBG_KNL, "unable to query policy %R === %R %N", src_ts, dst_ts,
			 policy_dir_names, direction);
		free(out);
		return FAILED;
	}

	if (policy->curlft.use_time)
	{
		/* we need the monotonic time, but the kernel returns system time. */
		*use_time = time_monotonic(NULL) - (time(NULL) - policy->curlft.use_time);
	}
	else
	{
		*use_time = 0;
	}

	free(out);
	return SUCCESS;
}
1943
1944 METHOD(kernel_ipsec_t, del_policy, status_t,
1945 private_kernel_netlink_ipsec_t *this, traffic_selector_t *src_ts,
1946 traffic_selector_t *dst_ts, policy_dir_t direction, mark_t mark,
1947 bool unrouted)
1948 {
1949 policy_entry_t *current, policy, *to_delete = NULL;
1950 route_entry_t *route;
1951 netlink_buf_t request;
1952 struct nlmsghdr *hdr;
1953 struct xfrm_userpolicy_id *policy_id;
1954
1955 if (mark.value)
1956 {
1957 DBG2(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x)",
1958 src_ts, dst_ts, policy_dir_names, direction,
1959 mark.value, mark.mask);
1960 }
1961 else
1962 {
1963 DBG2(DBG_KNL, "deleting policy %R === %R %N",
1964 src_ts, dst_ts, policy_dir_names, direction);
1965 }
1966
1967 /* create a policy */
1968 memset(&policy, 0, sizeof(policy_entry_t));
1969 policy.sel = ts2selector(src_ts, dst_ts);
1970 policy.mark = mark.value & mark.mask;
1971 policy.direction = direction;
1972
1973 /* find the policy */
1974 this->mutex->lock(this->mutex);
1975 current = this->policies->get(this->policies, &policy);
1976 if (current)
1977 {
1978 to_delete = current;
1979 if (--to_delete->refcount > 0)
1980 {
1981 /* is used by more SAs, keep in kernel */
1982 DBG2(DBG_KNL, "policy still used by another CHILD_SA, not removed");
1983 this->mutex->unlock(this->mutex);
1984 return SUCCESS;
1985 }
1986 /* remove if last reference */
1987 this->policies->remove(this->policies, to_delete);
1988 }
1989 this->mutex->unlock(this->mutex);
1990 if (!to_delete)
1991 {
1992 if (mark.value)
1993 {
1994 DBG1(DBG_KNL, "deleting policy %R === %R %N (mark %u/0x%8x) "
1995 "failed, not found", src_ts, dst_ts, policy_dir_names,
1996 direction, mark.value, mark.mask);
1997 }
1998 else
1999 {
2000 DBG1(DBG_KNL, "deleting policy %R === %R %N failed, not found",
2001 src_ts, dst_ts, policy_dir_names, direction);
2002 }
2003 return NOT_FOUND;
2004 }
2005
2006 memset(&request, 0, sizeof(request));
2007
2008 hdr = (struct nlmsghdr*)request;
2009 hdr->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
2010 hdr->nlmsg_type = XFRM_MSG_DELPOLICY;
2011 hdr->nlmsg_len = NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id));
2012
2013 policy_id = (struct xfrm_userpolicy_id*)NLMSG_DATA(hdr);
2014 policy_id->sel = to_delete->sel;
2015 policy_id->dir = direction;
2016
2017 if (mark.value)
2018 {
2019 struct xfrm_mark *mrk;
2020 struct rtattr *rthdr = XFRM_RTA(hdr, struct xfrm_userpolicy_id);
2021
2022 rthdr->rta_type = XFRMA_MARK;
2023 rthdr->rta_len = RTA_LENGTH(sizeof(struct xfrm_mark));
2024 hdr->nlmsg_len += rthdr->rta_len;
2025 if (hdr->nlmsg_len > sizeof(request))
2026 {
2027 return FAILED;
2028 }
2029
2030 mrk = (struct xfrm_mark*)RTA_DATA(rthdr);
2031 mrk->v = mark.value;
2032 mrk->m = mark.mask;
2033 }
2034
2035 route = to_delete->route;
2036 free(to_delete);
2037
2038 if (this->socket_xfrm->send_ack(this->socket_xfrm, hdr) != SUCCESS)
2039 {
2040 if (mark.value)
2041 {
2042 DBG1(DBG_KNL, "unable to delete policy %R === %R %N "
2043 "(mark %u/0x%8x)", src_ts, dst_ts, policy_dir_names,
2044 direction, mark.value, mark.mask);
2045 }
2046 else
2047 {
2048 DBG1(DBG_KNL, "unable to delete policy %R === %R %N",
2049 src_ts, dst_ts, policy_dir_names, direction);
2050 }
2051 return FAILED;
2052 }
2053
2054 if (route)
2055 {
2056 if (hydra->kernel_interface->del_route(hydra->kernel_interface,
2057 route->dst_net, route->prefixlen, route->gateway,
2058 route->src_ip, route->if_name) != SUCCESS)
2059 {
2060 DBG1(DBG_KNL, "error uninstalling route installed with "
2061 "policy %R === %R %N", src_ts, dst_ts,
2062 policy_dir_names, direction);
2063 }
2064 route_entry_destroy(route);
2065 }
2066 return SUCCESS;
2067 }
2068
2069 METHOD(kernel_ipsec_t, bypass_socket, bool,
2070 private_kernel_netlink_ipsec_t *this, int fd, int family)
2071 {
2072 struct xfrm_userpolicy_info policy;
2073 u_int sol, ipsec_policy;
2074
2075 switch (family)
2076 {
2077 case AF_INET:
2078 sol = SOL_IP;
2079 ipsec_policy = IP_XFRM_POLICY;
2080 break;
2081 case AF_INET6:
2082 sol = SOL_IPV6;
2083 ipsec_policy = IPV6_XFRM_POLICY;
2084 break;
2085 default:
2086 return FALSE;
2087 }
2088
2089 memset(&policy, 0, sizeof(policy));
2090 policy.action = XFRM_POLICY_ALLOW;
2091 policy.sel.family = family;
2092
2093 policy.dir = XFRM_POLICY_OUT;
2094 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2095 {
2096 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2097 strerror(errno));
2098 return FALSE;
2099 }
2100 policy.dir = XFRM_POLICY_IN;
2101 if (setsockopt(fd, sol, ipsec_policy, &policy, sizeof(policy)) < 0)
2102 {
2103 DBG1(DBG_KNL, "unable to set IPSEC_POLICY on socket: %s",
2104 strerror(errno));
2105 return FALSE;
2106 }
2107 return TRUE;
2108 }
2109
/**
 * Tear down the plugin instance: stop the event job, close the netlink
 * sockets and release all cached policy entries. Order matters: the job and
 * event socket go first so no callback runs on freed state.
 */
METHOD(kernel_ipsec_t, destroy, void,
	private_kernel_netlink_ipsec_t *this)
{
	enumerator_t *enumerator;
	policy_entry_t *policy;

	if (this->job)
	{
		this->job->cancel(this->job);
	}
	if (this->socket_xfrm_events > 0)
	{
		close(this->socket_xfrm_events);
	}
	DESTROY_IF(this->socket_xfrm);
	/* entries are stored with the policy as both key and value, so a single
	 * free() per enumerated pair suffices */
	enumerator = this->policies->create_enumerator(this->policies);
	while (enumerator->enumerate(enumerator, &policy, &policy))
	{
		free(policy);
	}
	enumerator->destroy(enumerator);
	this->policies->destroy(this->policies);
	this->mutex->destroy(this->mutex);
	free(this);
}
2135
2136 /*
2137 * Described in header.
2138 */
2139 kernel_netlink_ipsec_t *kernel_netlink_ipsec_create()
2140 {
2141 private_kernel_netlink_ipsec_t *this;
2142 struct sockaddr_nl addr;
2143 int fd;
2144
2145 INIT(this,
2146 .public = {
2147 .interface = {
2148 .get_spi = _get_spi,
2149 .get_cpi = _get_cpi,
2150 .add_sa = _add_sa,
2151 .update_sa = _update_sa,
2152 .query_sa = _query_sa,
2153 .del_sa = _del_sa,
2154 .add_policy = _add_policy,
2155 .query_policy = _query_policy,
2156 .del_policy = _del_policy,
2157 .bypass_socket = _bypass_socket,
2158 .destroy = _destroy,
2159 },
2160 },
2161 .policies = hashtable_create((hashtable_hash_t)policy_hash,
2162 (hashtable_equals_t)policy_equals, 32),
2163 .mutex = mutex_create(MUTEX_TYPE_DEFAULT),
2164 .install_routes = lib->settings->get_bool(lib->settings,
2165 "charon.install_routes", TRUE),
2166 );
2167
2168 /* disable lifetimes for allocated SPIs in kernel */
2169 fd = open("/proc/sys/net/core/xfrm_acq_expires", O_WRONLY);
2170 if (fd)
2171 {
2172 ignore_result(write(fd, "165", 3));
2173 close(fd);
2174 }
2175
2176 this->socket_xfrm = netlink_socket_create(NETLINK_XFRM);
2177 if (!this->socket_xfrm)
2178 {
2179 destroy(this);
2180 return NULL;
2181 }
2182
2183 memset(&addr, 0, sizeof(addr));
2184 addr.nl_family = AF_NETLINK;
2185
2186 /* create and bind XFRM socket for ACQUIRE, EXPIRE, MIGRATE & MAPPING */
2187 this->socket_xfrm_events = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
2188 if (this->socket_xfrm_events <= 0)
2189 {
2190 DBG1(DBG_KNL, "unable to create XFRM event socket");
2191 destroy(this);
2192 return NULL;
2193 }
2194 addr.nl_groups = XFRMNLGRP(ACQUIRE) | XFRMNLGRP(EXPIRE) |
2195 XFRMNLGRP(MIGRATE) | XFRMNLGRP(MAPPING);
2196 if (bind(this->socket_xfrm_events, (struct sockaddr*)&addr, sizeof(addr)))
2197 {
2198 DBG1(DBG_KNL, "unable to bind XFRM event socket");
2199 destroy(this);
2200 return NULL;
2201 }
2202 this->job = callback_job_create((callback_job_cb_t)receive_events,
2203 this, NULL, NULL);
2204 hydra->processor->queue_job(hydra->processor, (job_t*)this->job);
2205
2206 return &this->public;
2207 }
2208