Commit e804315d authored by Jan Kara's avatar Jan Kara Committed by Ross Zwisler

dax: Define DAX lock bit for radix tree exceptional entry

We will use the lowest available bit in the radix tree exceptional entry for
locking of the entry. Define it. Also clean up the definitions of DAX entry
type bits in DAX exceptional entries to use defined constants instead of
hardcoding numbers, and clean up the checking of these bits so it does not
rely on how other bits in the entry are set.
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
parent 348e967a
...@@ -32,14 +32,19 @@ ...@@ -32,14 +32,19 @@
#include <linux/pfn_t.h> #include <linux/pfn_t.h>
#include <linux/sizes.h> #include <linux/sizes.h>
#define RADIX_DAX_MASK 0xf /*
#define RADIX_DAX_SHIFT 4 * We use lowest available bit in exceptional entry for locking, other two
#define RADIX_DAX_PTE (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY) * bits to determine entry type. In total 3 special bits.
#define RADIX_DAX_PMD (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY) */
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK) #define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT)) #define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \ #define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE))) RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
RADIX_TREE_EXCEPTIONAL_ENTRY))
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax) static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{ {
......
...@@ -5,6 +5,9 @@ ...@@ -5,6 +5,9 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
/* We use lowest available exceptional entry bit for locking */
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t, ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
get_block_t, dio_iodone_t, int flags); get_block_t, dio_iodone_t, int flags);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment