Commit 488f6682 authored by Satya Tangirala, committed by Jens Axboe

block: blk-crypto-fallback for Inline Encryption

Blk-crypto delegates crypto operations to inline encryption hardware
when available. The separately configurable blk-crypto-fallback contains
a software fallback to the kernel crypto API - when enabled, blk-crypto
will use this fallback for en/decryption when inline encryption hardware
is not available.

This means upper layers don't have to worry about whether the
underlying device supports inline encryption before deciding to
specify an encryption context for a bio. It also allows for testing
without actual inline encryption hardware - in particular, it makes it
possible to test the inline encryption code in ext4 and f2fs simply by
running xfstests with the inlinecrypt mount option, which in turn allows
for things like the regular upstream regression testing of ext4 to cover
the inline encryption code paths.

For more details, refer to Documentation/block/inline-encryption.rst.
Signed-off-by: Satya Tangirala <satyat@google.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d145dc23
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -193,6 +193,16 @@ config BLK_INLINE_ENCRYPTION
 	  block layer handle encryption, so users can take
 	  advantage of inline encryption hardware if present.
 
+config BLK_INLINE_ENCRYPTION_FALLBACK
+	bool "Enable crypto API fallback for blk-crypto"
+	depends on BLK_INLINE_ENCRYPTION
+	select CRYPTO
+	select CRYPTO_SKCIPHER
+	help
+	  Enabling this lets the block layer handle inline encryption
+	  by falling back to the kernel crypto API when inline
+	  encryption hardware is not present.
+
 menu "Partition Types"
 
 source "block/partitions/Kconfig"
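To exercise these paths (e.g. the xfstests runs with the inlinecrypt
mount option mentioned in the commit message), both options need to be
enabled in the kernel config:

    CONFIG_BLK_INLINE_ENCRYPTION=y
    CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y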
--- a/block/Makefile
+++ b/block/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_BLK_DEBUG_FS_ZONED)	+= blk-mq-debugfs-zoned.o
 obj-$(CONFIG_BLK_SED_OPAL)	+= sed-opal.o
 obj-$(CONFIG_BLK_PM)		+= blk-pm.o
 obj-$(CONFIG_BLK_INLINE_ENCRYPTION)	+= keyslot-manager.o blk-crypto.o
+obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK)	+= blk-crypto-fallback.o
(The diff for the new file block/blk-crypto-fallback.c is collapsed in this view.)
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -11,10 +11,13 @@
 /* Represents a crypto mode supported by blk-crypto */
 struct blk_crypto_mode {
+	const char *cipher_str; /* crypto API name (for fallback case) */
 	unsigned int keysize; /* key size in bytes */
 	unsigned int ivsize; /* iv size in bytes */
 };
 
+extern const struct blk_crypto_mode blk_crypto_modes[];
+
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
@@ -163,4 +166,36 @@ static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
 	return BLK_STS_OK;
 }
 
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK
+
+int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);
+
+bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);
+
+int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
+
+static inline int
+blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
+{
+	pr_warn_once("crypto API fallback is disabled\n");
+	return -ENOPKG;
+}
+
+static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+{
+	pr_warn_once("crypto API fallback disabled; failing request.\n");
+	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
+	return false;
+}
+
+static inline int
+blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
+{
+	return 0;
+}
+
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
+
 #endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -19,14 +19,17 @@
 const struct blk_crypto_mode blk_crypto_modes[] = {
 	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
+		.cipher_str = "xts(aes)",
 		.keysize = 64,
 		.ivsize = 16,
 	},
 	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
+		.cipher_str = "essiv(cbc(aes),sha256)",
 		.keysize = 16,
 		.ivsize = 16,
 	},
 	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
+		.cipher_str = "adiantum(xchacha12,aes)",
 		.keysize = 32,
 		.ivsize = 32,
 	},
@@ -229,9 +232,16 @@ void __blk_crypto_free_request(struct request *rq)
  *
  * @bio_ptr: pointer to original bio pointer
  *
- * Succeeds if the bio doesn't have inline encryption enabled or if the bio
- * crypt context provided for the bio is supported by the underlying device's
- * inline encryption hardware. Ends the bio with error otherwise.
+ * If the bio crypt context provided for the bio is supported by the underlying
+ * device's inline encryption hardware, do nothing.
+ *
+ * Otherwise, try to perform en/decryption for this bio by falling back to the
+ * kernel crypto API. When the crypto API fallback is used for encryption,
+ * blk-crypto may choose to split the bio into 2 - the first one that will
+ * continue to be processed and the second one that will be resubmitted via
+ * generic_make_request. A bounce bio will be allocated to encrypt the contents
+ * of the aforementioned "first one", and *bio_ptr will be updated to this
+ * bounce bio.
  *
  * Caller must ensure bio has bio_crypt_ctx.
  *
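The split described above caps the bio at what one bounce bio can hold.
A simplified sketch of such a split, modelled on the collapsed
blk-crypto-fallback.c and not guaranteed to match it exactly
(BIO_MAX_PAGES is the bounce bio's segment limit):

    static int blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
    {
            struct bio *bio = *bio_ptr;
            unsigned int i = 0, num_sectors = 0;
            struct bio_vec bv;
            struct bvec_iter iter;

            /* Count how many sectors fit in one bounce bio. */
            bio_for_each_segment(bv, bio, iter) {
                    num_sectors += bv.bv_len >> SECTOR_SHIFT;
                    if (++i == BIO_MAX_PAGES)
                            break;
            }
            if (num_sectors < bio_sectors(bio)) {
                    struct bio *split_bio;

                    split_bio = bio_split(bio, num_sectors, GFP_NOIO, NULL);
                    if (!split_bio) {
                            bio->bi_status = BLK_STS_RESOURCE;
                            return -ENOMEM;
                    }
                    bio_chain(split_bio, bio);
                    /* Resubmit the remainder; encrypt only the first part. */
                    generic_make_request(bio);
                    *bio_ptr = split_bio;
            }
            return 0;
    }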
@@ -243,27 +253,29 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
 {
 	struct bio *bio = *bio_ptr;
 	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
-	blk_status_t blk_st = BLK_STS_IOERR;
 
 	/* Error if bio has no data. */
-	if (WARN_ON_ONCE(!bio_has_data(bio)))
+	if (WARN_ON_ONCE(!bio_has_data(bio))) {
+		bio->bi_status = BLK_STS_IOERR;
 		goto fail;
+	}
 
-	if (!bio_crypt_check_alignment(bio))
+	if (!bio_crypt_check_alignment(bio)) {
+		bio->bi_status = BLK_STS_IOERR;
 		goto fail;
+	}
 
 	/*
-	 * Success if device supports the encryption context.
+	 * Success if device supports the encryption context, or if we succeeded
+	 * in falling back to the crypto API.
 	 */
-	if (!blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
-					  &bc_key->crypto_cfg)) {
-		blk_st = BLK_STS_NOTSUPP;
-		goto fail;
-	}
+	if (blk_ksm_crypto_cfg_supported(bio->bi_disk->queue->ksm,
+					 &bc_key->crypto_cfg))
+		return true;
 
-	return true;
+	if (blk_crypto_fallback_bio_prep(bio_ptr))
+		return true;
 fail:
-	(*bio_ptr)->bi_status = blk_st;
 	bio_endio(*bio_ptr);
 	return false;
 }
@@ -329,10 +341,16 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
 	return 0;
 }
 
+/*
+ * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
+ * request queue it's submitted to supports inline crypto, or the
+ * blk-crypto-fallback is enabled and supports the cfg).
+ */
 bool blk_crypto_config_supported(struct request_queue *q,
 				 const struct blk_crypto_config *cfg)
 {
-	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
+	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
+	       blk_ksm_crypto_cfg_supported(q->ksm, cfg);
 }
 
 /**
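Note that with the fallback compiled in, this now reports support for
any cfg regardless of the hardware, so a caller-side capability check
reduces to something like this (hypothetical snippet; bdev and
crypto_cfg are assumptions):

    /* Hypothetical caller-side check before attaching a crypt context: */
    if (!blk_crypto_config_supported(bdev_get_queue(bdev), &crypto_cfg))
            return -EOPNOTSUPP; /* neither hardware nor fallback handles cfg */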
@@ -340,17 +358,22 @@ bool blk_crypto_config_supported(struct request_queue *q,
  * @key: A key to use on the device
  * @q: the request queue for the device
  *
- * Upper layers must call this function to ensure that the hardware supports
- * the key's crypto settings.
+ * Upper layers must call this function to ensure that either the hardware
+ * supports the key's crypto settings, or the crypto API fallback has transforms
+ * for the needed mode allocated and ready to go. This function may allocate
+ * an skcipher, and *should not* be called from the data path, since that might
+ * cause a deadlock
  *
- * Return: 0 on success; -ENOPKG if the hardware doesn't support the key
+ * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
+ *	   blk-crypto-fallback is either disabled or the needed algorithm
+ *	   is disabled in the crypto API; or another -errno code.
  */
 int blk_crypto_start_using_key(const struct blk_crypto_key *key,
 			       struct request_queue *q)
 {
 	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
 		return 0;
-	return -ENOPKG;
+	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
 }
 
 /**
@@ -372,5 +395,10 @@ int blk_crypto_evict_key(struct request_queue *q,
 	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
 		return blk_ksm_evict_key(q->ksm, key);
 
-	return 0;
+	/*
+	 * If the request queue's associated inline encryption hardware didn't
+	 * have support for the key, then the key might have been programmed
+	 * into the fallback keyslot manager, so try to evict from there.
+	 */
+	return blk_crypto_fallback_evict_key(key);
 }
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -61,7 +61,7 @@ struct blk_crypto_key {
  *
  * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for
  * write requests) or decrypted (for read requests) inline by the storage device
- * or controller.
+ * or controller, or by the crypto API fallback.
  */
 struct bio_crypt_ctx {
 	const struct blk_crypto_key *bc_key;