Commit effa04cc authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-4.4/lightnvm' of git://git.kernel.dk/linux-block

Pull lightnvm support from Jens Axboe:
 "This adds support for lightnvm, and adds support to NVMe as well.
  This is pretty exciting, in that it enables new and interesting use
  cases for compatible flash devices.  There's a LWN writeup about an
  earlier posting here:

      https://lwn.net/Articles/641247/

  This has been underway for a while, and should be ready for merging at
  this point"

* 'for-4.4/lightnvm' of git://git.kernel.dk/linux-block:
  nvme: lightnvm: clean up a data type
  lightnvm: refactor phys addrs type to u64
  nvme: LightNVM support
  rrpc: Round-robin sector target with cost-based gc
  gennvm: Generic NVM manager
  lightnvm: Support for Open-Channel SSDs
parents a9aa31cd 5f436e5e
......@@ -149,6 +149,7 @@ Code Seq#(hex) Include File Comments
'K' all linux/kd.h
'L' 00-1F linux/loop.h conflict!
'L' 10-1F drivers/scsi/mpt2sas/mpt2sas_ctl.h conflict!
'L' 20-2F linux/lightnvm.h
'L' E0-FF linux/ppdd.h encrypted disk device driver
<http://linux01.gwdg.de/~alatham/ppdd.html>
'M' all linux/soundcard.h conflict!
......
......@@ -6279,6 +6279,14 @@ F: drivers/nvdimm/pmem.c
F: include/linux/pmem.h
F: arch/*/include/asm/pmem.h
LIGHTNVM PLATFORM SUPPORT
M: Matias Bjorling <mb@lightnvm.io>
W:	http://github.com/OpenChannelSSD
S: Maintained
F: drivers/lightnvm/
F: include/linux/lightnvm.h
F: include/uapi/linux/lightnvm.h
LINUX FOR IBM pSERIES (RS/6000)
M: Paul Mackerras <paulus@au.ibm.com>
W: http://www.ibm.com/linux/ltc/projects/ppc
......
......@@ -44,6 +44,8 @@ source "drivers/net/Kconfig"
source "drivers/isdn/Kconfig"
source "drivers/lightnvm/Kconfig"
# input before char - char/joystick depends on it. As does USB.
source "drivers/input/Kconfig"
......
......@@ -70,6 +70,7 @@ obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_NVM) += lightnvm/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
......
#
# Open-Channel SSD NVM configuration
#
menuconfig NVM
bool "Open-Channel SSD target support"
depends on BLOCK
help
Say Y here to enable Open-Channel SSDs.
Open-Channel SSDs implement a set of extensions to SSDs, which
expose direct access to the underlying non-volatile memory.
If you say N, all options in this submenu will be skipped and
disabled; only do this if you know what you are doing.
if NVM
config NVM_DEBUG
bool "Open-Channel SSD debugging support"
---help---
Exposes a debug management interface to create/remove targets at:
/sys/module/lnvm/parameters/configure_debug
It is required to create/remove targets without IOCTLs.
config NVM_GENNVM
tristate "Generic NVM manager for Open-Channel SSDs"
---help---
NVM media manager for Open-Channel SSDs that offload management
functionality to device, while keeping data placement and garbage
collection decisions on the host.
config NVM_RRPC
tristate "Round-robin Hybrid Open-Channel SSD target"
---help---
Allows an open-channel SSD to be exposed as a block device to the
host. The target is implemented using a linear mapping table and
cost-based garbage collection. It is optimized for 4K IO sizes.
endif # NVM
#
# Makefile for Open-Channel SSDs.
#
obj-$(CONFIG_NVM) := core.o
obj-$(CONFIG_NVM_GENNVM) += gennvm.o
obj-$(CONFIG_NVM_RRPC) += rrpc.o
This diff is collapsed.
This diff is collapsed.
/*
* Copyright: Matias Bjorling <mb@bjorling.me>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
*/
#ifndef GENNVM_H_
#define GENNVM_H_
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/lightnvm.h>
/* Per-LUN state kept by the generic media manager, wrapping the core
 * nvm_lun and tracking which blocks are used, free, or bad.
 */
struct gen_lun {
struct nvm_lun vlun;
int reserved_blocks; /* blocks withheld from normal allocation */
/* lun block lists */
struct list_head used_list;	/* In-use blocks */
struct list_head free_list;	/* Not used blocks i.e. released
				 * and ready for use
				 */
struct list_head bb_list;	/* Bad blocks. Mutually exclusive with
				 * free_list and used_list
				 */
};
/* Top-level generic media-manager instance: an array of gen_luns. */
struct gen_nvm {
int nr_luns;
struct gen_lun *luns;
};
/* Iterate over all LUNs of @bm, with @lun the cursor and @i the index.
 * NOTE(review): after the final iteration, lun is set to &luns[nr_luns]
 * (one past the end) before the condition stops the loop; the address is
 * computed but never dereferenced, which is legal C.
 */
#define gennvm_for_each_lun(bm, lun, i) \
		for ((i) = 0, lun = &(bm)->luns[0]; \
			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
#endif /* GENNVM_H_ */
This diff is collapsed.
/*
* Copyright (C) 2015 IT University of Copenhagen
* Initial release: Matias Bjorling <m@bjorling.me>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
*/
#ifndef RRPC_H_
#define RRPC_H_
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/lightnvm.h>
/* Run only GC if less than 1/X blocks are free */
#define GC_LIMIT_INVERSE 10
/* GC timer period — presumably seconds, per the name; confirm against the
 * gc_timer setup in rrpc.c
 */
#define GC_TIME_SECS 100
/* Device sector size in bytes */
#define RRPC_SECTOR (512)
/* Logical page size exposed to the block layer, in bytes */
#define RRPC_EXPOSED_PAGE_SIZE (4096)
/* Number of 512B sectors per exposed 4K logical page (= 8) */
#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
/* Registry of all in-flight (locked) logical address ranges. */
struct rrpc_inflight {
struct list_head reqs; /* list of struct rrpc_inflight_rq */
spinlock_t lock; /* protects reqs */
};
/* One locked logical range [l_start, l_end]; linked into
 * rrpc_inflight.reqs while the owning request is outstanding.
 */
struct rrpc_inflight_rq {
struct list_head list;
sector_t l_start;
sector_t l_end;
};
/* Per-request private data attached to each nvm_rq (see nvm_rq_to_pdu). */
struct rrpc_rq {
struct rrpc_inflight_rq inflight_rq;
struct rrpc_addr *addr; /* L2P entry this request targets */
unsigned long flags;
};
/* Runtime state for one physical flash block. */
struct rrpc_block {
struct nvm_block *parent; /* underlying core block */
struct list_head prio; /* links the block into a GC priority list */
#define MAX_INVALID_PAGES_STORAGE 8
/* Bitmap for invalid page entries */
unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
/* points to the next writable page within a block */
unsigned int next_page;
/* number of pages that are invalid, wrt host page size */
unsigned int nr_invalid_pages;
spinlock_t lock;
atomic_t data_cmnt_size; /* data pages committed to stable storage */
};
/* Per-LUN state: write frontiers, block array, and GC candidates. */
struct rrpc_lun {
struct rrpc *rrpc; /* back-pointer to owning instance */
struct nvm_lun *parent; /* underlying core LUN */
struct rrpc_block *cur, *gc_cur; /* presumably the current user and GC
				  * write blocks — confirm in rrpc.c
				  */
struct rrpc_block *blocks;	/* Reference to block allocation */
struct list_head prio_list;	/* Blocks that may be GC'ed */
struct work_struct ws_gc; /* per-LUN garbage-collection work item */
spinlock_t lock;
};
/* Main state of one rrpc target instance, exposed as a block device. */
struct rrpc {
/* instance must be kept in top to resolve rrpc in unprep */
struct nvm_tgt_instance instance;
struct nvm_dev *dev; /* underlying open-channel device */
struct gendisk *disk; /* block device exposed to the host */
u64 poffset; /* physical page offset */
int lun_offset; /* first LUN index used by this target */
int nr_luns;
struct rrpc_lun *luns;
/* calculated values */
unsigned long long nr_pages;
unsigned long total_blocks;
/* Write strategy variables. Move these into each for structure for each
 * strategy
 */
atomic_t next_lun; /* Whenever a page is written, this is updated
		    * to point to the next write lun
		    */
spinlock_t bio_lock; /* protects requeue_bios */
struct bio_list requeue_bios; /* bios deferred for later resubmission */
struct work_struct ws_requeue;
/* Simple translation map of logical addresses to physical addresses.
 * The logical addresses is known by the host system, while the physical
 * addresses are used when writing to the disk block device.
 */
struct rrpc_addr *trans_map;
/* also store a reverse map for garbage collection */
struct rrpc_rev_addr *rev_trans_map;
spinlock_t rev_lock; /* protects rev_trans_map */
struct rrpc_inflight inflights; /* in-flight logical range locks */
/* mempools for per-request allocations */
mempool_t *addr_pool;
mempool_t *page_pool;
mempool_t *gcb_pool;
mempool_t *rq_pool;
struct timer_list gc_timer; /* periodic GC trigger */
struct workqueue_struct *krqd_wq; /* request work queue */
struct workqueue_struct *kgc_wq; /* garbage-collection work queue */
};
/* Work-item payload for garbage-collecting a single block. */
struct rrpc_block_gc {
struct rrpc *rrpc;
struct rrpc_block *rblk;
struct work_struct ws_gc;
};
/* Logical to physical mapping */
struct rrpc_addr {
u64 addr; /* physical page address */
struct rrpc_block *rblk; /* block containing the physical page */
};
/* Physical to logical mapping */
struct rrpc_rev_addr {
u64 addr; /* logical address */
};
/* Convert a bio's starting 512B sector into rrpc's 4K logical address. */
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	sector_t start_sect = bio->bi_iter.bi_sector;

	return start_sect / NR_PHY_IN_LOG;
}
/* Number of exposed 4K logical pages covered by the bio's payload. */
static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	unsigned int nr_bytes = bio->bi_iter.bi_size;

	return nr_bytes / RRPC_EXPOSED_PAGE_SIZE;
}
/* Convert a 4K logical address back into its first 512B sector. */
static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return NR_PHY_IN_LOG * laddr;
}
/* Return non-zero when [laddr_start, laddr_end] overlaps the in-flight
 * range [r->l_start, r->l_end] in any way.
 *
 * BUG FIX: the original required BOTH endpoints of the new range to fall
 * inside the existing range, so it only detected the "new range fully
 * contained in existing" case. Partial overlaps and the case where the
 * new range encloses the existing one went undetected, allowing two
 * in-flight requests to the same logical pages and defeating the lock in
 * __rrpc_lock_laddr(). Two closed intervals [a1,b1] and [a2,b2] overlap
 * iff b1 >= a2 && a1 <= b2 — use exactly that test.
 */
static inline int request_intersects(struct rrpc_inflight_rq *r,
				sector_t laddr_start, sector_t laddr_end)
{
	return laddr_end >= r->l_start && laddr_start <= r->l_end;
}
/* Try to lock the logical range [laddr, laddr + pages - 1].
 *
 * Returns 0 on success, with @r initialized and linked into the in-flight
 * list; returns 1 when an overlapping request is already outstanding, in
 * which case the caller must retry later.
 */
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				unsigned pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *it;
	int busy = 0;

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(it, &rrpc->inflights.reqs, list) {
		/* existing, overlapping request — come back later */
		if (unlikely(request_intersects(it, laddr, laddr_end))) {
			busy = 1;
			break;
		}
	}

	if (!busy) {
		r->l_start = laddr;
		r->l_end = laddr_end;
		list_add_tail(&r->list, &rrpc->inflights.reqs);
	}
	spin_unlock_irq(&rrpc->inflights.lock);

	return busy;
}
/* Bounds-checked wrapper around __rrpc_lock_laddr(): the requested range
 * must lie entirely within the target's logical address space.
 */
static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				unsigned pages,
				struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_pages);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}
/* Fetch the in-flight range descriptor stored in the request's pdu. */
static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *pdu = nvm_rq_to_pdu(rqd);

	return &pdu->inflight_rq;
}
/* Lock the logical range covered by @bio on behalf of request @rqd.
 * Returns 0 on success, 1 if an overlapping request is in flight.
 */
static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, rrpc_get_laddr(bio),
					rrpc_get_pages(bio), inf);
}
/* Release a previously locked logical range by unlinking it from the
 * in-flight list. irqsave variant: callers may hold interrupts disabled.
 */
static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
					struct rrpc_inflight_rq *r)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&rrpc->inflights.lock, irq_flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, irq_flags);
}
/* Release the logical range locked for request @rqd.
 * NOTE(review): nr_pages is narrowed to uint8_t here, matching the
 * original code; ranges longer than 255 pages would be truncated in the
 * BUG_ON bound check — confirm rqd->nr_pages can never exceed that.
 */
static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_pages;

	BUG_ON((inf->l_start + pages) > rrpc->nr_pages);

	rrpc_unlock_laddr(rrpc, inf);
}
#endif /* RRPC_H_ */
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
nvme-y += pci.o scsi.o
nvme-y += pci.o scsi.o lightnvm.o
This diff is collapsed.
......@@ -22,6 +22,11 @@
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
enum {
NVME_NS_LBA = 0,
NVME_NS_LIGHTNVM = 1,
};
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
......@@ -84,6 +89,7 @@ struct nvme_ns {
u16 ms;
bool ext;
u8 pi_type;
int type;
u64 mode_select_num_blocks;
u32 mode_select_block_len;
};
......@@ -130,4 +136,8 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#endif /* _NVME_H */
......@@ -1952,6 +1952,9 @@ static void nvme_free_ns(struct kref *kref)
{
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
if (ns->type == NVME_NS_LIGHTNVM)
nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
spin_lock(&dev_list_lock);
ns->disk->private_data = NULL;
spin_unlock(&dev_list_lock);
......@@ -2021,6 +2024,16 @@ static int nvme_revalidate_disk(struct gendisk *disk)
return -ENODEV;
}
if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
if (nvme_nvm_register(ns->queue, disk->disk_name)) {
dev_warn(dev->dev,
"%s: LightNVM init failure\n", __func__);
kfree(id);
return -ENODEV;
}
ns->type = NVME_NS_LIGHTNVM;
}
old_ms = ns->ms;
lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
ns->lba_shift = id->lbaf[lbaf].ds;
......@@ -2052,7 +2065,9 @@ static int nvme_revalidate_disk(struct gendisk *disk)
!ns->ext)
nvme_init_integrity(ns);
if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
if ((ns->ms && !(ns->ms == 8 && ns->pi_type) &&
!blk_get_integrity(disk)) ||
ns->type == NVME_NS_LIGHTNVM)
set_capacity(disk, 0);
else
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
......@@ -2175,6 +2190,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
goto out_free_disk;
kref_get(&dev->kref);
if (ns->type != NVME_NS_LIGHTNVM) {
add_disk(ns->disk);
if (ns->ms) {
struct block_device *bd = bdget_disk(ns->disk, 0);
......@@ -2187,6 +2203,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
blkdev_reread_part(bd);
blkdev_put(bd, FMODE_READ);
}
}
return;
out_free_disk:
kfree(disk);
......
This diff is collapsed.
/*
* Copyright (C) 2015 CNEX Labs. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
* USA.
*/
#ifndef _UAPI_LINUX_LIGHTNVM_H
#define _UAPI_LINUX_LIGHTNVM_H
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/ioctl.h>
#else /* __KERNEL__ */
#include <stdio.h>
#include <sys/ioctl.h>
#define DISK_NAME_LEN 32
#endif /* __KERNEL__ */
#include <linux/types.h>
#include <linux/ioctl.h>
/* Maximum length of a target-type name, including NUL */
#define NVM_TTYPE_NAME_MAX 48
/* Maximum number of target types reported by NVM_INFO */
#define NVM_TTYPE_MAX 63
/* Control device node through which the ioctls below are issued */
#define NVM_CTRL_FILE "/dev/lightnvm/control"
/* Description of one registered target type. */
struct nvm_ioctl_info_tgt {
__u32 version[3];
__u32 reserved;
char tgtname[NVM_TTYPE_NAME_MAX];
};
/* NVM_INFO payload: subsystem version plus the registered target types. */
struct nvm_ioctl_info {
__u32 version[3];	/* in/out - major, minor, patch */
__u16 tgtsize;		/* number of targets */
__u16 reserved16;	/* pad to 4K page */
__u32 reserved[12];
struct nvm_ioctl_info_tgt tgts[NVM_TTYPE_MAX];
};
/* Flag bits for nvm_ioctl_device_info.flags */
enum {
NVM_DEVICE_ACTIVE = 1 << 0,
};
/* Description of one open-channel device. */
struct nvm_ioctl_device_info {
char devname[DISK_NAME_LEN];
char bmname[NVM_TTYPE_NAME_MAX]; /* NOTE(review): presumably the media
				  * manager name (cf. gennvm) — confirm
				  */
__u32 bmversion[3];
__u32 flags;
__u32 reserved[8];
};
/* NVM_GET_DEVICES payload: up to 31 device descriptions. */
struct nvm_ioctl_get_devices {
__u32 nr_devices;
__u32 reserved[31];
struct nvm_ioctl_device_info info[31];
};
/* Simple target configuration: a contiguous range of LUNs. */
struct nvm_ioctl_create_simple {
__u32 lun_begin;
__u32 lun_end;
};
/* Values for nvm_ioctl_create_conf.type */
enum {
NVM_CONFIG_TYPE_SIMPLE = 0,
};
/* Target configuration container, discriminated by .type */
struct nvm_ioctl_create_conf {
__u32 type;
union {
struct nvm_ioctl_create_simple s;
};
};
/* NVM_DEV_CREATE payload. */
struct nvm_ioctl_create {
char dev[DISK_NAME_LEN];		/* open-channel SSD device */
char tgttype[NVM_TTYPE_NAME_MAX];	/* target type name */
char tgtname[DISK_NAME_LEN];		/* dev to expose target as */
__u32 flags;
struct nvm_ioctl_create_conf conf;
};
/* NVM_DEV_REMOVE payload. */
struct nvm_ioctl_remove {
char tgtname[DISK_NAME_LEN];
__u32 flags;
};
/* The ioctl type, 'L', 0x20 - 0x2F documented in ioctl-number.txt */
enum {
/* top level cmds */
NVM_INFO_CMD = 0x20,
NVM_GET_DEVICES_CMD,
/* device level cmds */
NVM_DEV_CREATE_CMD,
NVM_DEV_REMOVE_CMD,
};
#define NVM_IOCTL 'L' /* 0x4c */
#define NVM_INFO		_IOWR(NVM_IOCTL, NVM_INFO_CMD, \
						struct nvm_ioctl_info)
#define NVM_GET_DEVICES		_IOR(NVM_IOCTL, NVM_GET_DEVICES_CMD, \
						struct nvm_ioctl_get_devices)
#define NVM_DEV_CREATE		_IOW(NVM_IOCTL, NVM_DEV_CREATE_CMD, \
						struct nvm_ioctl_create)
#define NVM_DEV_REMOVE		_IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
						struct nvm_ioctl_remove)
/* Interface version implemented by this header */
#define NVM_VERSION_MAJOR	1
#define NVM_VERSION_MINOR	0
#define NVM_VERSION_PATCHLEVEL	0
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment