Commit 242a18d1 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
This pull request is intended for net-next and contains the following changes:

1) Remove a redundant check when initializing the xfrm replay functions,
   from Ulrich Weber.
2) Use a faster per-cpu helper when allocating ipcomp transforms,
   from Shan Wei.
3) Use a static gc threshold value for ipv6, similar to what we do
   for ipv4 now.
4) Remove a commented out function call.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f2fb4ab2 0afe21fd
@@ -327,21 +327,7 @@ static struct ctl_table_header *sysctl_hdr;
 int __init xfrm6_init(void)
 {
 	int ret;
-	unsigned int gc_thresh;
-	/*
-	 * We need a good default value for the xfrm6 gc threshold.
-	 * In ipv4 we set it to the route hash table size * 8, which
-	 * is half the size of the maximum route cache for ipv4. It
-	 * would be good to do the same thing for v6, except the table is
-	 * constructed differently here. Here each table for a net namespace
-	 * can have FIB6_TABLE_HASHSZ entries, so let's go with the same
-	 * computation that we used for ipv4 here. Also, let's keep the initial
-	 * gc_thresh to a minimum of 1024, since the ipv6 route cache defaults
-	 * to that as a minimum as well.
-	 */
-	gc_thresh = FIB6_TABLE_HASHSZ * 8;
-	xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
 
 	dst_entries_init(&xfrm6_dst_ops);
 
 	ret = xfrm6_policy_init();
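The added side of this hunk (the static threshold itself) is collapsed in the view above. The general pattern, as already used for ipv4, is to seed dst_ops.gc_thresh in the static xfrm6_dst_ops initializer instead of computing it at init time. A minimal illustrative sketch follows; the value and the shown fields are invented for illustration, not taken from the commit:

```c
/* Hypothetical sketch only -- not the actual lines added by this commit. */
static struct dst_ops xfrm6_dst_ops = {
	.family		= AF_INET6,
	.gc_thresh	= 1024,	/* fixed default instead of FIB6_TABLE_HASHSZ * 8 */
	/* remaining callbacks unchanged */
};
```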
@@ -370,7 +356,6 @@ void xfrm6_fini(void)
 	if (sysctl_hdr)
 		unregister_net_sysctl_table(sysctl_hdr);
 #endif
-	//xfrm6_input_fini();
 	xfrm6_policy_fini();
 	xfrm6_state_fini();
 	dst_entries_destroy(&xfrm6_dst_ops);
@@ -276,18 +276,16 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
 	struct crypto_comp * __percpu *tfms;
 	int cpu;
 
-	/* This can be any valid CPU ID so we don't need locking. */
-	cpu = raw_smp_processor_id();
-
 	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
 		struct crypto_comp *tfm;
 
-		tfms = pos->tfms;
-		tfm = *per_cpu_ptr(tfms, cpu);
+		/* This can be any valid CPU ID so we don't need locking. */
+		tfm = __this_cpu_read(*pos->tfms);
 
 		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
-			return tfms;
+			return pos->tfms;
 		}
 	}
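For context, the hunk above replaces a two-step lookup (fetch a CPU id, then index the per-cpu area with per_cpu_ptr()) with a single __this_cpu_read() of the current CPU's slot. A minimal sketch of the two access patterns; the per-cpu variable and helper names (demo_hits, demo_read_*) are made up for illustration rather than taken from the ipcomp code:

```c
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-cpu counter, only for illustrating the two styles. */
static DEFINE_PER_CPU(unsigned long, demo_hits);

static unsigned long demo_read_two_step(void)
{
	/* Old style: grab a CPU id, then dereference that CPU's pointer. */
	int cpu = raw_smp_processor_id();

	return *per_cpu_ptr(&demo_hits, cpu);
}

static unsigned long demo_read_this_cpu(void)
{
	/* New style: read this CPU's copy in one operation. */
	return __this_cpu_read(demo_hits);
}
```

As in ipcomp_alloc_tfms(), any CPU's value is acceptable here, so the lighter __this_cpu_read() variant is sufficient and avoids the explicit CPU-id fetch and pointer arithmetic.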
@@ -521,13 +521,12 @@ int xfrm_init_replay(struct xfrm_state *x)
 		    replay_esn->bmp_len * sizeof(__u32) * 8)
 			return -EINVAL;
 
-		if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0)
-			return -EINVAL;
-
-		if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn)
-			x->repl = &xfrm_replay_esn;
-		else
-			x->repl = &xfrm_replay_bmp;
+		if (x->props.flags & XFRM_STATE_ESN) {
+			if (replay_esn->replay_window == 0)
+				return -EINVAL;
+			x->repl = &xfrm_replay_esn;
+		} else
+			x->repl = &xfrm_replay_bmp;
 	} else
 		x->repl = &xfrm_replay_legacy;
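The check that went away is the "&& x->replay_esn" clause: inside the outer if (replay_esn) branch, replay_esn is simply x->replay_esn and is already known to be non-NULL, so that test could never change the outcome; only the XFRM_STATE_ESN flag decides between the ESN and bitmap replay implementations. A hedged sketch of the resulting selection logic as a standalone helper (pick_replay() is invented for illustration and leaves the replay_window validation aside):

```c
/*
 * Illustrative helper, not part of the kernel: pick the same replay
 * backend the reworked xfrm_init_replay() picks.
 */
static const struct xfrm_replay *pick_replay(struct xfrm_state *x)
{
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;

	if (!replay_esn)
		return &xfrm_replay_legacy;	/* classic 32-bit sequence numbers */

	/*
	 * Here replay_esn == x->replay_esn and is non-NULL, so the old
	 * "&& x->replay_esn" test was always true; only the ESN flag matters.
	 */
	return (x->props.flags & XFRM_STATE_ESN) ? &xfrm_replay_esn
						 : &xfrm_replay_bmp;
}
```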