Commit f1608920 authored by Alexander Aring's avatar Alexander Aring Committed by Marcel Holtmann

mac802154: use put and get unaligned functions

This patch removes the swap pointer and memmove functionality. Instead
we use the well known put/get unaligned access with specific byte order
handling.
Signed-off-by: Alexander Aring <aar@pengutronix.de>
Suggested-by: Marc Kleine-Budde <mkl@pengutronix.de>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
parent 75c6aca4
...@@ -16,10 +16,10 @@ ...@@ -16,10 +16,10 @@
#ifndef NET_MAC802154_H
#define NET_MAC802154_H

#include <asm/unaligned.h>
#include <net/af_ieee802154.h>
#include <linux/ieee802154.h>
#include <linux/skbuff.h>
#include <net/cfg802154.h>
...@@ -254,7 +254,7 @@ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb) ...@@ -254,7 +254,7 @@ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
return cpu_to_le16(0); return cpu_to_le16(0);
} }
return (__force __le16)__get_unaligned_memmove16(skb_mac_header(skb)); return get_unaligned_le16(skb_mac_header(skb));
} }
/** /**
...@@ -264,7 +264,7 @@ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb) ...@@ -264,7 +264,7 @@ static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
*/ */
/**
 * ieee802154_be64_to_le64 - copy a possibly unaligned big-endian 64-bit
 *	value to a possibly unaligned little-endian destination
 * @le64_dst: destination buffer for the little-endian value
 * @be64_src: source buffer holding the big-endian value
 *
 * Both pointers may be unaligned; the unaligned accessors handle that,
 * and the be64->le64 conversion byte-swaps regardless of host endianness.
 */
static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
{
	put_unaligned_le64(get_unaligned_be64(be64_src), le64_dst);
}
/** /**
...@@ -274,7 +274,7 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src) ...@@ -274,7 +274,7 @@ static inline void ieee802154_be64_to_le64(void *le64_dst, const void *be64_src)
*/ */
/**
 * ieee802154_le64_to_be64 - copy a possibly unaligned little-endian 64-bit
 *	value to a possibly unaligned big-endian destination
 * @be64_dst: destination buffer for the big-endian value
 * @le64_src: source buffer holding the little-endian value
 *
 * Both pointers may be unaligned; the unaligned accessors handle that,
 * and the le64->be64 conversion byte-swaps regardless of host endianness.
 */
static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
{
	put_unaligned_be64(get_unaligned_le64(le64_src), be64_dst);
}
/** /**
...@@ -284,7 +284,7 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) ...@@ -284,7 +284,7 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
*/ */
/**
 * ieee802154_le16_to_be16 - copy a possibly unaligned little-endian 16-bit
 *	value to a possibly unaligned big-endian destination
 * @be16_dst: destination buffer for the big-endian value
 * @le16_src: source buffer holding the little-endian value
 *
 * Both pointers may be unaligned; the unaligned accessors handle that,
 * and the le16->be16 conversion byte-swaps regardless of host endianness.
 */
static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
{
	put_unaligned_be16(get_unaligned_le16(le16_src), be16_dst);
}
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment