{
DBG1(DBG_NET, "using receive delay: %dms",
this->receive_delay);
- charon->scheduler->schedule_job_ms(charon->scheduler,
+ hydra->scheduler->schedule_job_ms(hydra->scheduler,
(job_t*)process_message_job_create(message),
this->receive_delay);
return JOB_REQUEUE_DIRECT;
* callback, but from a different thread. we also delay it to avoid
* a race condition during a regular shutdown */
job = callback_job_create(shutdown_callback, NULL, NULL, NULL);
- charon->scheduler->schedule_job(charon->scheduler, (job_t*)job, 1);
+ hydra->scheduler->schedule_job(hydra->scheduler, (job_t*)job, 1);
return FALSE;
}
}
message->destroy(message);
/* schedule next invocation */
- charon->scheduler->schedule_job_ms(charon->scheduler, (job_t*)
+ hydra->scheduler->schedule_job_ms(hydra->scheduler, (job_t*)
callback_job_create((callback_job_cb_t)
send_status, this, NULL, NULL),
this->heartbeat_delay);
expire->reqid = reqid;
expire->type = type;
job = callback_job_create((callback_job_cb_t)sa_expires, expire, free, NULL);
- charon->scheduler->schedule_job(charon->scheduler, (job_t*)job, time);
+ hydra->scheduler->schedule_job(hydra->scheduler, (job_t*)job, time);
}
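/* Illustrative sketch, not part of this patch: the scheduler exposes both a
 * second-granularity and a millisecond-granularity entry point, as used in
 * the hunks above; a job is scheduled exactly once, with a relative delay in
 * the respective unit. Hypothetical delays shown for clarity:
 *
 *   hydra->scheduler->schedule_job(hydra->scheduler, job, 5);      // 5 seconds
 *   hydra->scheduler->schedule_job_ms(hydra->scheduler, job, 500); // 500 ms
 */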
METHOD(kernel_ipsec_t, get_spi, status_t,
now.tv_usec -= 1000000;
}
this->last_roam = now;
- charon->scheduler->schedule_job_ms(charon->scheduler,
+ hydra->scheduler->schedule_job_ms(hydra->scheduler,
(job_t*)roam_job_create(address), ROAM_DELAY);
}
}
now.tv_usec -= 1000000;
}
this->last_roam = now;
- charon->scheduler->schedule_job_ms(charon->scheduler,
+ hydra->scheduler->schedule_job_ms(hydra->scheduler,
(job_t*)roam_job_create(address), ROAM_DELAY);
}
}
fprintf(out, " job queue load: %d,",
hydra->processor->get_job_load(hydra->processor));
fprintf(out, " scheduled events: %d\n",
- charon->scheduler->get_job_load(charon->scheduler));
+ hydra->scheduler->get_job_load(hydra->scheduler));
fprintf(out, " loaded plugins: ");
enumerator = lib->plugins->create_plugin_enumerator(lib->plugins);
while (enumerator->enumerate(enumerator, &plugin))
#include "inactivity_job.h"
+#include <hydra.h>
#include <daemon.h>
typedef struct private_inactivity_job_t private_inactivity_job_t;
}
else
{
- charon->scheduler->schedule_job(charon->scheduler,
+ hydra->scheduler->schedule_job(hydra->scheduler,
&this->public.job_interface, this->timeout - diff);
rescheduled = TRUE;
}
callback_data_t *data = callback_data_create(this, checklist->connect_id);
job_t *job = (job_t*)callback_job_create((callback_job_cb_t)initiator_finish,
				data, (callback_job_cleanup_t)callback_data_destroy, NULL);
- charon->scheduler->schedule_job_ms(charon->scheduler, job, ME_WAIT_TO_FINISH);
+ hydra->scheduler->schedule_job_ms(hydra->scheduler, job, ME_WAIT_TO_FINISH);
checklist->is_finishing = TRUE;
}
DBG2(DBG_IKE, "scheduling retransmission %d of pair '%d' in %dms",
retransmission, pair->id, rto);
- charon->scheduler->schedule_job_ms(charon->scheduler, (job_t*)job, rto);
+ hydra->scheduler->schedule_job_ms(hydra->scheduler, (job_t*)job, rto);
}
/**
{
callback_data_t *data = callback_data_create(this, checklist->connect_id);
checklist->sender = (job_t*)callback_job_create((callback_job_cb_t)sender,
				data, (callback_job_cleanup_t)callback_data_destroy, NULL);
- charon->scheduler->schedule_job_ms(charon->scheduler, checklist->sender, time);
+ hydra->scheduler->schedule_job_ms(hydra->scheduler, checklist->sender, time);
}
/**
diff = 0;
}
job = send_keepalive_job_create(this->ike_sa_id);
- charon->scheduler->schedule_job(charon->scheduler, (job_t*)job,
+ hydra->scheduler->schedule_job(hydra->scheduler, (job_t*)job,
this->keepalive_interval - diff);
}
}
/* recheck in "interval" seconds */
job = (job_t*)send_dpd_job_create(this->ike_sa_id);
- charon->scheduler->schedule_job(charon->scheduler, job, delay - diff);
+ hydra->scheduler->schedule_job(hydra->scheduler, job, delay - diff);
return SUCCESS;
}
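/* Illustrative note, not part of this patch: the delay above is computed
 * relative to the last observed traffic. With a hypothetical DPD delay of
 * 30s and the last exchange 12s ago (diff == 12), the job fires in 18s, so
 * the effective DPD period stays at "delay" seconds:
 *
 *   hydra->scheduler->schedule_job(hydra->scheduler, job, 30 - 12);
 */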
{
this->stats[STAT_REKEY] = t + this->stats[STAT_ESTABLISHED];
job = (job_t*)rekey_ike_sa_job_create(this->ike_sa_id, FALSE);
- charon->scheduler->schedule_job(charon->scheduler, job, t);
+ hydra->scheduler->schedule_job(hydra->scheduler, job, t);
DBG1(DBG_IKE, "scheduling rekeying in %ds", t);
}
t = this->peer_cfg->get_reauth_time(this->peer_cfg);
{
this->stats[STAT_REAUTH] = t + this->stats[STAT_ESTABLISHED];
job = (job_t*)rekey_ike_sa_job_create(this->ike_sa_id, TRUE);
- charon->scheduler->schedule_job(charon->scheduler, job, t);
+ hydra->scheduler->schedule_job(hydra->scheduler, job, t);
DBG1(DBG_IKE, "scheduling reauthentication in %ds", t);
}
t = this->peer_cfg->get_over_time(this->peer_cfg);
this->stats[STAT_DELETE] += t;
t = this->stats[STAT_DELETE] - this->stats[STAT_ESTABLISHED];
job = (job_t*)delete_ike_sa_job_create(this->ike_sa_id, TRUE);
- charon->scheduler->schedule_job(charon->scheduler, job, t);
+ hydra->scheduler->schedule_job(hydra->scheduler, job, t);
DBG1(DBG_IKE, "maximum IKE_SA lifetime %ds", t);
}
{
/* delete may fail if a packet gets lost, so set a timeout */
job_t *job = (job_t*)delete_ike_sa_job_create(this->ike_sa_id, TRUE);
- charon->scheduler->schedule_job(charon->scheduler, job,
- HALF_OPEN_IKE_SA_TIMEOUT);
+ hydra->scheduler->schedule_job(hydra->scheduler, job,
+ HALF_OPEN_IKE_SA_TIMEOUT);
break;
}
default:
}
/* add a timeout if peer does not establish it completely */
job = (job_t*)delete_ike_sa_job_create(this->ike_sa_id, FALSE);
- charon->scheduler->schedule_job(charon->scheduler, job,
- HALF_OPEN_IKE_SA_TIMEOUT);
+ hydra->scheduler->schedule_job(hydra->scheduler, job,
+ HALF_OPEN_IKE_SA_TIMEOUT);
}
this->stats[STAT_INBOUND] = time_monotonic(NULL);
/* check if message is trustworthy, and update host information */
this->stats[STAT_REAUTH] = reauth_time;
DBG1(DBG_IKE, "received AUTH_LIFETIME of %ds, scheduling reauthentication"
" in %ds", lifetime, lifetime - reduction);
- charon->scheduler->schedule_job(charon->scheduler,
+ hydra->scheduler->schedule_job(hydra->scheduler,
(job_t*)rekey_ike_sa_job_create(this->ike_sa_id, TRUE),
lifetime - reduction);
}
this->stats[STAT_DELETE] = this->stats[STAT_REAUTH] + delete;
DBG1(DBG_IKE, "rescheduling reauthentication in %ds after rekeying, "
"lifetime reduced to %ds", reauth, delete);
- charon->scheduler->schedule_job(charon->scheduler,
+ hydra->scheduler->schedule_job(hydra->scheduler,
(job_t*)rekey_ike_sa_job_create(this->ike_sa_id, TRUE), reauth);
- charon->scheduler->schedule_job(charon->scheduler,
+ hydra->scheduler->schedule_job(hydra->scheduler,
(job_t*)delete_ike_sa_job_create(this->ike_sa_id, TRUE), delete);
}
/* we have to initiate here, there may be new tasks to handle */
#include <math.h>
+#include <hydra.h>
#include <daemon.h>
#include <sa/tasks/ike_init.h>
#include <sa/tasks/ike_natd.h>
this->initiating.retransmitted++;
job = (job_t*)retransmit_job_create(this->initiating.mid,
this->ike_sa->get_id(this->ike_sa));
- charon->scheduler->schedule_job_ms(charon->scheduler, job, timeout);
+ hydra->scheduler->schedule_job_ms(hydra->scheduler, job, timeout);
}
return SUCCESS;
}
#include "child_create.h"
+#include <hydra.h>
#include <daemon.h>
#include <crypto/diffie_hellman.h>
#include <credentials/certificates/x509.h>
{
close_ike = lib->settings->get_bool(lib->settings,
"charon.inactivity_close_ike", FALSE);
- charon->scheduler->schedule_job(charon->scheduler, (job_t*)
+ hydra->scheduler->schedule_job(hydra->scheduler, (job_t*)
inactivity_job_create(this->child_sa->get_reqid(this->child_sa),
timeout, close_ike), timeout);
}
/* we delay the delete for 100ms, as the IKE_AUTH response must arrive
* first */
DBG1(DBG_IKE, "closing IKE_SA due CHILD_SA setup failure");
- charon->scheduler->schedule_job_ms(charon->scheduler, (job_t*)
+ hydra->scheduler->schedule_job_ms(hydra->scheduler, (job_t*)
delete_ike_sa_job_create(this->ike_sa->get_id(this->ike_sa), TRUE),
100);
}
DBG1(DBG_IKE, "CHILD_SA rekeying failed, "
"trying again in %d seconds", retry);
this->child_sa->set_state(this->child_sa, CHILD_INSTALLED);
- charon->scheduler->schedule_job(charon->scheduler, job, retry);
+ hydra->scheduler->schedule_job(hydra->scheduler, job, retry);
}
return SUCCESS;
}
DBG1(DBG_IKE, "IKE_SA rekeying failed, "
"trying again in %d seconds", retry);
this->ike_sa->set_state(this->ike_sa, IKE_ESTABLISHED);
- charon->scheduler->schedule_job(charon->scheduler, job, retry);
+ hydra->scheduler->schedule_job(hydra->scheduler, job, retry);
}
return SUCCESS;
case NEED_MORE:
/* peer should delete this SA. Add a timeout just in case. */
job_t *job = (job_t*)delete_ike_sa_job_create(
other->new_sa->get_id(other->new_sa), TRUE);
- charon->scheduler->schedule_job(charon->scheduler, job, 10);
+ hydra->scheduler->schedule_job(hydra->scheduler, job, 10);
DBG1(DBG_IKE, "IKE_SA rekey collision won, deleting rekeyed IKE_SA");
charon->ike_sa_manager->checkin(charon->ike_sa_manager, other->new_sa);
other->new_sa = NULL;