#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>	/* reciprocal_divide() below */
#include <linux/ratelimit.h>		/* WARN_RATELIMIT() below */
#include <linux/seccomp.h>
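/* The bounds check below is the tail of the negative-offset load
 * helper (bpf_internal_load_pointer_neg_helper() in this file): a
 * SKF_NET_OFF/SKF_LL_OFF load is only honoured if the resulting
 * pointer stays inside the linear skb data.
 */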
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
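/* sk_filter(): run a socket's attached program against an skb.  The
 * LSM hook gets a veto first; then a filter verdict of 0 drops the
 * packet (-EPERM), while a nonzero verdict trims it to that length.
 */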
	err = security_sock_rcv_skb(sk, skb);
		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
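/* sk_run_filter(): the classic BPF interpreter.  On x86-32 the
 * immediate operand K is a macro reading fentry->k directly, which
 * (presumably) eases register pressure; elsewhere it is cached in a
 * local u32.
 */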
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_DIV_K:
			A = reciprocal_divide(A, K);
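/* Constant division avoids a hardware divide: sk_chk_filter() has
 * already rewritten k to reciprocal_value(k), so reciprocal_divide()
 * is a multiply and shift.  The conditional jumps below add jt or jf
 * (both forward displacements) to the instruction pointer.
 */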
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
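/* Packet loads.  load_pointer() returns a pointer into the linear
 * data, or copies into the on-stack tmp buffer for nonlinear skbs;
 * get_unaligned_be32/be16 cope with arbitrary offsets.  The
 * "ptr != NULL" checks are elided in this excerpt; a failed load
 * makes the program return 0 (drop).
 */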
			ptr = load_pointer(skb, k, 4, &tmp);
			ptr = load_pointer(skb, k, 2, &tmp);
			A = get_unaligned_be16(ptr);
			ptr = load_pointer(skb, k, 1, &tmp);
			ptr = load_pointer(skb, K, 1, &tmp);
			X = (*(u8 *)ptr & 0xf) << 2;
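/* BPF_S_LDX_B_MSH: take the low nibble of the byte at offset K and
 * multiply by four, turning an IPv4 IHL field into the header length
 * in bytes.
 */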
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
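/* Ancillary loads (SKF_AD_* pseudo-offsets) read packet metadata
 * rather than payload; above, A receives the incoming device index.
 * The NLATTR variants below scan the linear data as a netlink
 * attribute stream and leave the match offset (or 0) in A.
 */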
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;
			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;
			nla = (struct nlattr *)&skb->data[A];
			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
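/* With CONFIG_SECCOMP_FILTER the same interpreter is reused for
 * syscall filtering: seccomp_bpf_load() materializes the fields of
 * struct seccomp_data for offset k.
 */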
#ifdef CONFIG_SECCOMP_FILTER
		case BPF_S_ANC_SECCOMP_LD_W:
			A = seccomp_bpf_load(fentry->k);
			continue;
#endif
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
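/* check_load_and_stores(): refuse programs that could read one of
 * the 16 scratch cells (mem[] in sk_run_filter()) before writing it,
 * so mem[] never has to be cleared per packet.  masks[pc] tracks,
 * one bit per cell, which cells are provably written on every path
 * reaching pc.
 */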
	u16 *masks, memvalid = 0;
	memset(masks, 0xff, flen * sizeof(*masks));
	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];
		switch (filter[pc].code) {
			memvalid |= (1 << filter[pc].k);
			if (!(memvalid & (1 << filter[pc].k))) {
			masks[pc + 1 + filter[pc].k] &= memvalid;
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
	/* non-zero entries are valid opcodes, translated to BPF_S_* */
	static const u8 codes[] = {
	for (pc = 0; pc < flen; pc++) {
			if (ftest->k >= (unsigned int)(flen-pc-1))
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
			code = BPF_S_ANC_##CODE;		\
			break
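/* For illustration: ANCILLARY(IFINDEX) expands (modulo whitespace) to
 *
 *	case SKF_AD_OFF + SKF_AD_IFINDEX:
 *		code = BPF_S_ANC_IFINDEX;
 *		break
 *
 * with the trailing semicolon supplied by the invocation site.
 */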
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
static int __sk_prepare_filter(struct sk_filter *fp)
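/* A minimal sketch of this helper's body in this kernel series,
 * assuming the usual layout (interpreter as fallback, validation,
 * then optional JIT):
 *
 *	fp->bpf_func = sk_run_filter;
 *	err = sk_chk_filter(fp->insns, fp->len);
 *	if (err)
 *		return err;
 *	bpf_jit_compile(fp);
 *	return 0;
 */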
	/* sk_unattached_filter_create() */
	err = __sk_prepare_filter(fp);
	/* sk_unattached_filter_destroy() */
	sk_filter_release(fp);
	/* sk_attach_filter() */
	err = __sk_prepare_filter(fp);
	if (err)
		sk_filter_uncharge(sk, fp);
	/* ...replacing a previously attached program */
	sk_filter_uncharge(sk, old_fp);
	/* sk_detach_filter() */
	sk_filter_uncharge(sk, filter);
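/* Usage sketch (userspace side, for illustration): attach a trivial
 * "accept everything" classic BPF program to a socket fd "fd".
 * sk_attach_filter() above is the kernel half of this setsockopt.
 *
 *	#include <linux/filter.h>
 *	#include <sys/socket.h>
 *
 *	struct sock_filter prog[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	// keep packet
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(prog) / sizeof(prog[0]),
 *		.filter	= prog,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *	...
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0);
 */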