Commit 4cb5e616 authored by Christoph Hellwig, committed by David S. Miller

[NET]: Fix coding style in net/core/filter.c

parent 8555e904
@@ -36,14 +36,13 @@
 #include <linux/filter.h>
 
 /* No hurry in this branch */
 static u8 *load_pointer(struct sk_buff *skb, int k)
 {
 	u8 *ptr = NULL;
 
-	if (k>=SKF_NET_OFF)
+	if (k >= SKF_NET_OFF)
 		ptr = skb->nh.raw + k - SKF_NET_OFF;
-	else if (k>=SKF_LL_OFF)
+	else if (k >= SKF_LL_OFF)
 		ptr = skb->mac.raw + k - SKF_LL_OFF;
 
 	if (ptr >= skb->head && ptr < skb->tail)
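For orientation (an illustrative sketch, not part of this commit): SKF_NET_OFF and SKF_LL_OFF are the special negative offsets from <linux/filter.h> that load_pointer() resolves against the skb's network-header and link-layer pointers. A filter instruction built with the standard BPF_STMT() macro could use one of them like this, reading the byte 9 past the network header (the protocol field, assuming IPv4):

	/* A = byte at (network header + 9), i.e. the IPv4 protocol field */
	struct sock_filter load_ip_proto =
		BPF_STMT(BPF_LD|BPF_B|BPF_ABS, SKF_NET_OFF + 9);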
@@ -80,268 +79,224 @@ int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 	/*
 	 * Process array of filter instructions.
 	 */
-	for(pc = 0; pc < flen; pc++)
-	{
+	for (pc = 0; pc < flen; pc++) {
 		fentry = &filter[pc];
-		switch(fentry->code)
-		{
-			case BPF_ALU|BPF_ADD|BPF_X:
-				A += X;
-				continue;
-
-			case BPF_ALU|BPF_ADD|BPF_K:
-				A += fentry->k;
-				continue;
-
-			case BPF_ALU|BPF_SUB|BPF_X:
-				A -= X;
-				continue;
-
-			case BPF_ALU|BPF_SUB|BPF_K:
-				A -= fentry->k;
-				continue;
-
-			case BPF_ALU|BPF_MUL|BPF_X:
-				A *= X;
-				continue;
-
-			case BPF_ALU|BPF_MUL|BPF_K:
-				A *= fentry->k;
-				continue;
-
-			case BPF_ALU|BPF_DIV|BPF_X:
-				if(X == 0)
-					return (0);
-				A /= X;
-				continue;
-
-			case BPF_ALU|BPF_DIV|BPF_K:
-				if(fentry->k == 0)
-					return (0);
-				A /= fentry->k;
-				continue;
-
-			case BPF_ALU|BPF_AND|BPF_X:
-				A &= X;
-				continue;
-
-			case BPF_ALU|BPF_AND|BPF_K:
-				A &= fentry->k;
-				continue;
-
-			case BPF_ALU|BPF_OR|BPF_X:
-				A |= X;
-				continue;
-
-			case BPF_ALU|BPF_OR|BPF_K:
-				A |= fentry->k;
-				continue;
-
-			case BPF_ALU|BPF_LSH|BPF_X:
-				A <<= X;
-				continue;
-
-			case BPF_ALU|BPF_LSH|BPF_K:
-				A <<= fentry->k;
-				continue;
-
-			case BPF_ALU|BPF_RSH|BPF_X:
-				A >>= X;
-				continue;
-
-			case BPF_ALU|BPF_RSH|BPF_K:
-				A >>= fentry->k;
-				continue;
-
-			case BPF_ALU|BPF_NEG:
-				A = -A;
-				continue;
-
+		switch (fentry->code) {
+		case BPF_ALU|BPF_ADD|BPF_X:
+			A += X;
+			continue;
+		case BPF_ALU|BPF_ADD|BPF_K:
+			A += fentry->k;
+			continue;
+		case BPF_ALU|BPF_SUB|BPF_X:
+			A -= X;
+			continue;
+		case BPF_ALU|BPF_SUB|BPF_K:
+			A -= fentry->k;
+			continue;
+		case BPF_ALU|BPF_MUL|BPF_X:
+			A *= X;
+			continue;
+		case BPF_ALU|BPF_MUL|BPF_K:
+			A *= fentry->k;
+			continue;
+		case BPF_ALU|BPF_DIV|BPF_X:
+			if (X == 0)
+				return 0;
+			A /= X;
+			continue;
+		case BPF_ALU|BPF_DIV|BPF_K:
+			if (fentry->k == 0)
+				return 0;
+			A /= fentry->k;
+			continue;
+		case BPF_ALU|BPF_AND|BPF_X:
+			A &= X;
+			continue;
+		case BPF_ALU|BPF_AND|BPF_K:
+			A &= fentry->k;
+			continue;
+		case BPF_ALU|BPF_OR|BPF_X:
+			A |= X;
+			continue;
+		case BPF_ALU|BPF_OR|BPF_K:
+			A |= fentry->k;
+			continue;
+		case BPF_ALU|BPF_LSH|BPF_X:
+			A <<= X;
+			continue;
+		case BPF_ALU|BPF_LSH|BPF_K:
+			A <<= fentry->k;
+			continue;
+		case BPF_ALU|BPF_RSH|BPF_X:
+			A >>= X;
+			continue;
+		case BPF_ALU|BPF_RSH|BPF_K:
+			A >>= fentry->k;
+			continue;
+		case BPF_ALU|BPF_NEG:
+			A = -A;
+			continue;
-			case BPF_JMP|BPF_JA:
-				pc += fentry->k;
-				continue;
-
-			case BPF_JMP|BPF_JGT|BPF_K:
-				pc += (A > fentry->k) ? fentry->jt : fentry->jf;
-				continue;
-
-			case BPF_JMP|BPF_JGE|BPF_K:
-				pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
-				continue;
-
-			case BPF_JMP|BPF_JEQ|BPF_K:
-				pc += (A == fentry->k) ? fentry->jt : fentry->jf;
-				continue;
-
-			case BPF_JMP|BPF_JSET|BPF_K:
-				pc += (A & fentry->k) ? fentry->jt : fentry->jf;
-				continue;
-
-			case BPF_JMP|BPF_JGT|BPF_X:
-				pc += (A > X) ? fentry->jt : fentry->jf;
-				continue;
-
-			case BPF_JMP|BPF_JGE|BPF_X:
-				pc += (A >= X) ? fentry->jt : fentry->jf;
-				continue;
-
-			case BPF_JMP|BPF_JEQ|BPF_X:
-				pc += (A == X) ? fentry->jt : fentry->jf;
-				continue;
-
-			case BPF_JMP|BPF_JSET|BPF_X:
-				pc += (A & X) ? fentry->jt : fentry->jf;
-				continue;
-
+		case BPF_JMP|BPF_JA:
+			pc += fentry->k;
+			continue;
+		case BPF_JMP|BPF_JGT|BPF_K:
+			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+			continue;
+		case BPF_JMP|BPF_JGE|BPF_K:
+			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+			continue;
+		case BPF_JMP|BPF_JEQ|BPF_K:
+			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+			continue;
+		case BPF_JMP|BPF_JSET|BPF_K:
+			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+			continue;
+		case BPF_JMP|BPF_JGT|BPF_X:
+			pc += (A > X) ? fentry->jt : fentry->jf;
+			continue;
+		case BPF_JMP|BPF_JGE|BPF_X:
+			pc += (A >= X) ? fentry->jt : fentry->jf;
+			continue;
+		case BPF_JMP|BPF_JEQ|BPF_X:
+			pc += (A == X) ? fentry->jt : fentry->jf;
+			continue;
+		case BPF_JMP|BPF_JSET|BPF_X:
+			pc += (A & X) ? fentry->jt : fentry->jf;
+			continue;
-			case BPF_LD|BPF_W|BPF_ABS:
-				k = fentry->k;
+		case BPF_LD|BPF_W|BPF_ABS:
+			k = fentry->k;
 load_w:
-				if(k >= 0 && (unsigned int)(k+sizeof(u32)) <= len) {
-					A = ntohl(*(u32*)&data[k]);
-					continue;
-				}
-				if (k<0) {
-					u8 *ptr;
-
-					if (k>=SKF_AD_OFF)
-						break;
-					if ((ptr = load_pointer(skb, k)) != NULL) {
-						A = ntohl(*(u32*)ptr);
-						continue;
-					}
-				} else {
-					u32 tmp;
-					if (!skb_copy_bits(skb, k, &tmp, 4)) {
-						A = ntohl(tmp);
-						continue;
-					}
-				}
-				return 0;
-
+			if (k >= 0 && (unsigned int)(k+sizeof(u32)) <= len) {
+				A = ntohl(*(u32*)&data[k]);
+				continue;
+			}
+			if (k < 0) {
+				u8 *ptr;
+
+				if (k >= SKF_AD_OFF)
+					break;
+				ptr = load_pointer(skb, k);
+				if (ptr) {
+					A = ntohl(*(u32*)ptr);
+					continue;
+				}
+			} else {
+				u32 tmp;
+				if (!skb_copy_bits(skb, k, &tmp, 4)) {
+					A = ntohl(tmp);
+					continue;
+				}
+			}
+			return 0;
-			case BPF_LD|BPF_H|BPF_ABS:
-				k = fentry->k;
+		case BPF_LD|BPF_H|BPF_ABS:
+			k = fentry->k;
 load_h:
-				if(k >= 0 && (unsigned int) (k + sizeof(u16)) <= len) {
-					A = ntohs(*(u16*)&data[k]);
-					continue;
-				}
-				if (k<0) {
-					u8 *ptr;
-
-					if (k>=SKF_AD_OFF)
-						break;
-					if ((ptr = load_pointer(skb, k)) != NULL) {
-						A = ntohs(*(u16*)ptr);
-						continue;
-					}
-				} else {
-					u16 tmp;
-					if (!skb_copy_bits(skb, k, &tmp, 2)) {
-						A = ntohs(tmp);
-						continue;
-					}
-				}
-				return 0;
-
+			if (k >= 0 && (unsigned int)(k + sizeof(u16)) <= len) {
+				A = ntohs(*(u16*)&data[k]);
+				continue;
+			}
+			if (k < 0) {
+				u8 *ptr;
+
+				if (k >= SKF_AD_OFF)
+					break;
+				ptr = load_pointer(skb, k);
+				if (ptr) {
+					A = ntohs(*(u16*)ptr);
+					continue;
+				}
+			} else {
+				u16 tmp;
+				if (!skb_copy_bits(skb, k, &tmp, 2)) {
+					A = ntohs(tmp);
+					continue;
+				}
+			}
+			return 0;
-			case BPF_LD|BPF_B|BPF_ABS:
-				k = fentry->k;
+		case BPF_LD|BPF_B|BPF_ABS:
+			k = fentry->k;
 load_b:
-				if(k >= 0 && (unsigned int)k < len) {
-					A = data[k];
-					continue;
-				}
-				if (k<0) {
-					u8 *ptr;
-
-					if (k>=SKF_AD_OFF)
-						break;
-					if ((ptr = load_pointer(skb, k)) != NULL) {
-						A = *ptr;
-						continue;
-					}
-				} else {
-					u8 tmp;
-					if (!skb_copy_bits(skb, k, &tmp, 1)) {
-						A = tmp;
-						continue;
-					}
-				}
-				return 0;
-
+			if (k >= 0 && (unsigned int)k < len) {
+				A = data[k];
+				continue;
+			}
+			if (k < 0) {
+				u8 *ptr;
+
+				if (k >= SKF_AD_OFF)
+					break;
+				ptr = load_pointer(skb, k);
+				if (ptr) {
+					A = *ptr;
+					continue;
+				}
+			} else {
+				u8 tmp;
+				if (!skb_copy_bits(skb, k, &tmp, 1)) {
+					A = tmp;
+					continue;
+				}
+			}
+			return 0;
-			case BPF_LD|BPF_W|BPF_LEN:
-				A = len;
-				continue;
-
-			case BPF_LDX|BPF_W|BPF_LEN:
-				X = len;
-				continue;
-
-			case BPF_LD|BPF_W|BPF_IND:
-				k = X + fentry->k;
-				goto load_w;
-
-			case BPF_LD|BPF_H|BPF_IND:
-				k = X + fentry->k;
-				goto load_h;
-
-			case BPF_LD|BPF_B|BPF_IND:
-				k = X + fentry->k;
-				goto load_b;
-
-			case BPF_LDX|BPF_B|BPF_MSH:
-				k = fentry->k;
-				if(k >= 0 && (unsigned int)k >= len)
-					return (0);
-				X = (data[k] & 0xf) << 2;
-				continue;
-
-			case BPF_LD|BPF_IMM:
-				A = fentry->k;
-				continue;
-
-			case BPF_LDX|BPF_IMM:
-				X = fentry->k;
-				continue;
-			case BPF_LD|BPF_MEM:
-				A = mem[fentry->k];
-				continue;
-			case BPF_LDX|BPF_MEM:
-				X = mem[fentry->k];
-				continue;
-			case BPF_MISC|BPF_TAX:
-				X = A;
-				continue;
-			case BPF_MISC|BPF_TXA:
-				A = X;
-				continue;
-			case BPF_RET|BPF_K:
-				return ((unsigned int)fentry->k);
-			case BPF_RET|BPF_A:
-				return ((unsigned int)A);
-			case BPF_ST:
-				mem[fentry->k] = A;
-				continue;
-			case BPF_STX:
-				mem[fentry->k] = X;
-				continue;
-			default:
-				/* Invalid instruction counts as RET */
-				return (0);
+		case BPF_LD|BPF_W|BPF_LEN:
+			A = len;
+			continue;
+		case BPF_LDX|BPF_W|BPF_LEN:
+			X = len;
+			continue;
+		case BPF_LD|BPF_W|BPF_IND:
+			k = X + fentry->k;
+			goto load_w;
+		case BPF_LD|BPF_H|BPF_IND:
+			k = X + fentry->k;
+			goto load_h;
+		case BPF_LD|BPF_B|BPF_IND:
+			k = X + fentry->k;
+			goto load_b;
+		case BPF_LDX|BPF_B|BPF_MSH:
+			k = fentry->k;
+			if (k >= 0 && (unsigned int)k >= len)
+				return 0;
+			X = (data[k] & 0xf) << 2;
+			continue;
+		case BPF_LD|BPF_IMM:
+			A = fentry->k;
+			continue;
+		case BPF_LDX|BPF_IMM:
+			X = fentry->k;
+			continue;
+		case BPF_LD|BPF_MEM:
+			A = mem[fentry->k];
+			continue;
+		case BPF_LDX|BPF_MEM:
+			X = mem[fentry->k];
+			continue;
+		case BPF_MISC|BPF_TAX:
+			X = A;
+			continue;
+		case BPF_MISC|BPF_TXA:
+			A = X;
+			continue;
+		case BPF_RET|BPF_K:
+			return ((unsigned int)fentry->k);
+		case BPF_RET|BPF_A:
+			return ((unsigned int)A);
+		case BPF_ST:
+			mem[fentry->k] = A;
+			continue;
+		case BPF_STX:
+			mem[fentry->k] = X;
+			continue;
+		default:
+			/* Invalid instruction counts as RET */
+			return 0;
 		}
 
-		/* Handle ancillary data, which are impossible
-		   (or very difficult) to get parsing packet contents.
+		/*
+		 * Handle ancillary data, which are impossible
+		 * (or very difficult) to get parsing packet contents.
 		 */
 		switch (k-SKF_AD_OFF) {
 		case SKF_AD_PROTOCOL:
@@ -358,7 +313,7 @@ int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
 		}
 	}
 
-	return (0);
+	return 0;
 }
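To make the interpreter's control flow concrete, here is a hedged example (not part of the commit) of a classic BPF program sk_run_filter() can execute, built with the standard BPF_STMT()/BPF_JUMP() macros; offset 12 and the 0x0800 EtherType assume an Ethernet frame:

	struct sock_filter ipv4_only[] = {
		/* A = halfword at offset 12 (the EtherType field) */
		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
		/* if (A == 0x0800) fall through, else skip one instruction */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
		/* accept: keep up to 0xffffffff bytes of the packet */
		BPF_STMT(BPF_RET|BPF_K, 0xffffffff),
		/* reject: keep 0 bytes, i.e. drop */
		BPF_STMT(BPF_RET|BPF_K, 0),
	};

The conditional jump is the `pc += (A == fentry->k) ? fentry->jt : fentry->jf;` case above: with jt = 0 execution falls through to the accepting BPF_RET, with jf = 1 it skips straight to the drop.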
 /**
@@ -373,75 +328,55 @@ int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
  *
  * Returns 0 if the rule set is legal or a negative errno code if not.
  */
 int sk_chk_filter(struct sock_filter *filter, int flen)
 {
 	struct sock_filter *ftest;
 	int pc;
 
-	if ((unsigned int) flen >= (~0U / sizeof(struct sock_filter)))
+	if ((unsigned int)flen >= (~0U / sizeof(struct sock_filter)))
 		return -EINVAL;
 
-	/*
-	 * Check the filter code now.
-	 */
-	for(pc = 0; pc < flen; pc++)
-	{
-		/*
-		 * All jumps are forward as they are not signed
-		 */
-
-		ftest = &filter[pc];
-		if(BPF_CLASS(ftest->code) == BPF_JMP)
-		{
-			/*
-			 * But they mustn't jump off the end.
-			 */
-			if(BPF_OP(ftest->code) == BPF_JA)
-			{
-				/* Note, the large ftest->k might cause
-				   loops. Compare this with conditional
-				   jumps below, where offsets are limited. --ANK (981016)
+	/* check the filter code now */
+	for (pc = 0; pc < flen; pc++) {
+		/* all jumps are forward as they are not signed */
+		ftest = &filter[pc];
+
+		if (BPF_CLASS(ftest->code) == BPF_JMP) {
+			/* but they mustn't jump off the end */
+			if (BPF_OP(ftest->code) == BPF_JA) {
+				/*
+				 * Note, the large ftest->k might cause loops.
+				 * Compare this with conditional jumps below,
+				 * where offsets are limited. --ANK (981016)
 				 */
 				if (ftest->k >= (unsigned)(flen-pc-1))
 					return -EINVAL;
-			}
-			else
-			{
-				/*
-				 * For conditionals both must be safe
-				 */
-				if(pc + ftest->jt +1 >= flen || pc + ftest->jf +1 >= flen)
+			} else {
+				/* for conditionals both must be safe */
+				if (pc + ftest->jt +1 >= flen ||
+				    pc + ftest->jf +1 >= flen)
 					return -EINVAL;
 			}
 		}
 
-		/*
-		 * Check that memory operations use valid addresses.
-		 */
-		if (ftest->k >= BPF_MEMWORDS)
-		{
-			/*
-			 * But it might not be a memory operation...
-			 */
+		/* check that memory operations use valid addresses. */
+		if (ftest->k >= BPF_MEMWORDS) {
+			/* but it might not be a memory operation... */
 			switch (ftest->code) {
 			case BPF_ST:
 			case BPF_STX:
 			case BPF_LD|BPF_MEM:
 			case BPF_LDX|BPF_MEM:
 				return -EINVAL;
 			}
 		}
 	}
 
 	/*
 	 * The program must end with a return. We don't care where they
-	 * jumped within the script (its always forwards) but in the
-	 * end they _will_ hit this.
+	 * jumped within the script (its always forwards) but in the end
+	 * they _will_ hit this.
 	 */
-	return (BPF_CLASS(filter[flen - 1].code) == BPF_RET)?0:-EINVAL;
+	return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
 }
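A hedged illustration (not from the commit) of the two checks above: the first program's conditional offsets jump past the end, tripping the `pc + ftest->jt + 1 >= flen` test, and the second does not end in a BPF_RET class instruction, so the final check fails; sk_chk_filter() returns -EINVAL for both:

	/* rejected: jt/jf point beyond the last instruction */
	struct sock_filter bad_jump[] = {
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0, 5, 5),
		BPF_STMT(BPF_RET|BPF_K, 0),
	};

	/* rejected: the program does not end in a return */
	struct sock_filter no_ret[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_LEN, 0),
	};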
 /**
@@ -454,7 +389,6 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
  * occurs or there is insufficient memory for the filter a negative
  * errno code is returned. On success the return is zero.
  */
-
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	struct sk_filter *fp;
@@ -463,12 +397,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	/* Make sure new filter is there and in the right amounts. */
 	if (fprog->filter == NULL || fprog->len > BPF_MAXINSNS)
-		return (-EINVAL);
+		return -EINVAL;
 
-	fp = (struct sk_filter *)sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
-	if(fp == NULL)
-		return (-ENOMEM);
+	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+	if (!fp)
+		return -ENOMEM;
 
 	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
 		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
 		return -EFAULT;
@@ -477,7 +410,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	atomic_set(&fp->refcnt, 1);
 	fp->len = fprog->len;
 
-	if ((err = sk_chk_filter(fp->insns, fp->len))==0) {
+	err = sk_chk_filter(fp->insns, fp->len);
+	if (!err) {
 		struct sk_filter *old_fp;
 
 		spin_lock_bh(&sk->lock.slock);
@@ -489,6 +423,5 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	if (fp)
 		sk_filter_release(sk, fp);
-
-	return (err);
+	return err;
 }
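From userspace, sk_attach_filter() is reached through the SO_ATTACH_FILTER socket option, which hands in the struct sock_fprog that fp->insns is copied from. A minimal sketch (illustrative, not part of the commit; it reuses the IPv4-only program shown earlier):

	#include <linux/filter.h>
	#include <sys/socket.h>

	static int attach_ipv4_only(int sock)
	{
		struct sock_filter insns[] = {
			BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
			BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
			BPF_STMT(BPF_RET|BPF_K, 0xffffffff),
			BPF_STMT(BPF_RET|BPF_K, 0),
		};
		struct sock_fprog fprog = {
			.len    = sizeof(insns) / sizeof(insns[0]),
			.filter = insns,
		};

		/* EINVAL here means sk_chk_filter() rejected the program */
		return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
				  &fprog, sizeof(fprog));
	}

If sk_chk_filter() passes, the new filter is swapped in under the socket spinlock and the old one, if any, is dropped via sk_filter_release().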