Skip to content

Commit a892c8d

Browse files
Satya Tangirala authored and Jens Axboe committed
block: Inline encryption support for blk-mq
We must have some way of letting a storage device driver know what encryption context it should use for en/decrypting a request. However, it's the upper layers (like the filesystem/fscrypt) that know about and manage encryption contexts. As such, when the upper layer submits a bio to the block layer, and this bio eventually reaches a device driver with support for inline encryption, the device driver will need to have been told the encryption context for that bio. We want to communicate the encryption context from the upper layer to the storage device along with the bio, when the bio is submitted to the block layer. To do this, we add a struct bio_crypt_ctx to struct bio, which can represent an encryption context (note that we can't use the bi_private field in struct bio to do this because that field does not function to pass information across layers in the storage stack). We also introduce various functions to manipulate the bio_crypt_ctx and make the bio/request merging logic aware of the bio_crypt_ctx. We also make changes to blk-mq to make it handle bios with encryption contexts. blk-mq can merge many bios into the same request. These bios need to have contiguous data unit numbers (the necessary changes to blk-merge are also made to ensure this) - as such, it suffices to keep the data unit number of just the first bio, since that's all a storage driver needs to infer the data unit number to use for each data block in each bio in a request. blk-mq keeps track of the encryption context to be used for all the bios in a request with the request's rq_crypt_ctx. When the first bio is added to an empty request, blk-mq will program the encryption context of that bio into the request_queue's keyslot manager, and store the returned keyslot in the request's rq_crypt_ctx. All the functions to operate on encryption contexts are in blk-crypto.c.
Upper layers only need to call bio_crypt_set_ctx with the encryption key, algorithm and data_unit_num; they don't have to worry about getting a keyslot for each encryption context, as blk-mq/blk-crypto handles that. Blk-crypto also makes it possible for request-based layered devices like dm-rq to make use of inline encryption hardware by cloning the rq_crypt_ctx and programming a keyslot in the new request_queue when necessary. Note that any user of the block layer can submit bios with an encryption context, such as filesystems, device-mapper targets, etc. Signed-off-by: Satya Tangirala <[email protected]> Reviewed-by: Eric Biggers <[email protected]> Reviewed-by: Christoph Hellwig <[email protected]> Signed-off-by: Jens Axboe <[email protected]>
1 parent 1b26283 commit a892c8d

File tree

14 files changed

+684
-7
lines changed

14 files changed

+684
-7
lines changed

block/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,4 +36,4 @@ obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
3636
obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
3737
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
3838
obj-$(CONFIG_BLK_PM) += blk-pm.o
39-
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o
39+
obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += keyslot-manager.o blk-crypto.o

block/bio.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
#include <linux/blk-cgroup.h>
1919
#include <linux/highmem.h>
2020
#include <linux/sched/sysctl.h>
21+
#include <linux/blk-crypto.h>
2122

2223
#include <trace/events/block.h>
2324
#include "blk.h"
@@ -237,6 +238,8 @@ void bio_uninit(struct bio *bio)
237238

238239
if (bio_integrity(bio))
239240
bio_integrity_free(bio);
241+
242+
bio_crypt_free_ctx(bio);
240243
}
241244
EXPORT_SYMBOL(bio_uninit);
242245

@@ -708,6 +711,8 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
708711

709712
__bio_clone_fast(b, bio);
710713

714+
bio_crypt_clone(b, bio, gfp_mask);
715+
711716
if (bio_integrity(bio)) {
712717
int ret;
713718

@@ -1172,6 +1177,7 @@ void bio_advance(struct bio *bio, unsigned bytes)
11721177
if (bio_integrity(bio))
11731178
bio_integrity_advance(bio, bytes);
11741179

1180+
bio_crypt_advance(bio, bytes);
11751181
bio_advance_iter(bio, &bio->bi_iter, bytes);
11761182
}
11771183
EXPORT_SYMBOL(bio_advance);

block/blk-core.c

Lines changed: 21 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
#include <linux/bpf.h>
4040
#include <linux/psi.h>
4141
#include <linux/sched/sysctl.h>
42+
#include <linux/blk-crypto.h>
4243

4344
#define CREATE_TRACE_POINTS
4445
#include <trace/events/block.h>
@@ -121,6 +122,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
121122
rq->start_time_ns = ktime_get_ns();
122123
rq->part = NULL;
123124
refcount_set(&rq->ref, 1);
125+
blk_crypto_rq_set_defaults(rq);
124126
}
125127
EXPORT_SYMBOL(blk_rq_init);
126128

@@ -652,6 +654,8 @@ bool bio_attempt_back_merge(struct request *req, struct bio *bio,
652654
req->biotail = bio;
653655
req->__data_len += bio->bi_iter.bi_size;
654656

657+
bio_crypt_free_ctx(bio);
658+
655659
blk_account_io_start(req, false);
656660
return true;
657661
}
@@ -676,6 +680,8 @@ bool bio_attempt_front_merge(struct request *req, struct bio *bio,
676680
req->__sector = bio->bi_iter.bi_sector;
677681
req->__data_len += bio->bi_iter.bi_size;
678682

683+
bio_crypt_do_front_merge(req, bio);
684+
679685
blk_account_io_start(req, false);
680686
return true;
681687
}
@@ -1125,10 +1131,12 @@ blk_qc_t generic_make_request(struct bio *bio)
11251131
/* Create a fresh bio_list for all subordinate requests */
11261132
bio_list_on_stack[1] = bio_list_on_stack[0];
11271133
bio_list_init(&bio_list_on_stack[0]);
1128-
if (q->make_request_fn)
1129-
ret = q->make_request_fn(q, bio);
1130-
else
1131-
ret = blk_mq_make_request(q, bio);
1134+
if (blk_crypto_bio_prep(&bio)) {
1135+
if (q->make_request_fn)
1136+
ret = q->make_request_fn(q, bio);
1137+
else
1138+
ret = blk_mq_make_request(q, bio);
1139+
}
11321140

11331141
blk_queue_exit(q);
11341142

@@ -1167,7 +1175,7 @@ EXPORT_SYMBOL(generic_make_request);
11671175
blk_qc_t direct_make_request(struct bio *bio)
11681176
{
11691177
struct request_queue *q = bio->bi_disk->queue;
1170-
blk_qc_t ret;
1178+
blk_qc_t ret = BLK_QC_T_NONE;
11711179

11721180
if (WARN_ON_ONCE(q->make_request_fn)) {
11731181
bio_io_error(bio);
@@ -1177,7 +1185,8 @@ blk_qc_t direct_make_request(struct bio *bio)
11771185
return BLK_QC_T_NONE;
11781186
if (unlikely(bio_queue_enter(bio)))
11791187
return BLK_QC_T_NONE;
1180-
ret = blk_mq_make_request(q, bio);
1188+
if (blk_crypto_bio_prep(&bio))
1189+
ret = blk_mq_make_request(q, bio);
11811190
blk_queue_exit(q);
11821191
return ret;
11831192
}
@@ -1309,6 +1318,9 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
13091318
should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
13101319
return BLK_STS_IOERR;
13111320

1321+
if (blk_crypto_insert_cloned_request(rq))
1322+
return BLK_STS_IOERR;
1323+
13121324
if (blk_queue_io_stat(q))
13131325
blk_account_io_start(rq, true);
13141326

@@ -1673,6 +1685,9 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
16731685
rq->nr_phys_segments = rq_src->nr_phys_segments;
16741686
rq->ioprio = rq_src->ioprio;
16751687

1688+
if (rq->bio)
1689+
blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask);
1690+
16761691
return 0;
16771692

16781693
free_and_out:

block/blk-crypto-internal.h

Lines changed: 166 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,166 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
/*
3+
* Copyright 2019 Google LLC
4+
*/
5+
6+
#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
7+
#define __LINUX_BLK_CRYPTO_INTERNAL_H
8+
9+
#include <linux/bio.h>
10+
#include <linux/blkdev.h>
11+
12+
/*
 * Represents a crypto mode supported by blk-crypto; used to size keys and
 * IVs for that mode.
 */
struct blk_crypto_mode {
	unsigned int keysize;	/* key size in bytes */
	unsigned int ivsize;	/* iv size in bytes */
};
17+
18+
#ifdef CONFIG_BLK_INLINE_ENCRYPTION

/* Advance the multi-word data unit number @dun by @inc data units. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

/* Can @bio be merged into @rq without violating the request's crypto context? */
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

/*
 * Can data covered by context @bc2 be merged directly after @bc1_bytes of
 * data covered by context @bc1?
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

/* Back merge: @bio would be appended after the data already in @req. */
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

/* Front merge: @bio would be inserted before the data already in @req. */
static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

/* Request merge: @next would be appended after all the data in @req. */
static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

/* Reset the inline-encryption fields of a freshly initialized request. */
static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

/* A request is encrypted iff it carries a crypto context. */
static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	/* Make the pointer-to-bool conversion explicit. */
	return rq->crypt_ctx != NULL;
}
59+
60+
#else /* CONFIG_BLK_INLINE_ENCRYPTION */

/*
 * Stubs for builds without inline encryption: merging is never restricted
 * by crypto contexts, and no request ever carries one.
 */

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
94+
95+
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
/*
 * Advance @bio's crypto context by @bytes.  Inline fast path: bios without
 * a crypto context (the common case) never leave this wrapper.
 */
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (!bio_has_crypt_ctx(bio))
		return;

	__bio_crypt_advance(bio, bytes);
}
101+
102+
void __bio_crypt_free_ctx(struct bio *bio);
/* Release @bio's crypto context, if it has one; no-op otherwise. */
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (!bio_has_crypt_ctx(bio))
		return;

	__bio_crypt_free_ctx(bio);
}
108+
109+
/*
 * After a front merge, @bio is the new head of @rq, so the request's data
 * unit number must be taken from @bio's context.
 */
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (!bio_has_crypt_ctx(bio))
		return;

	memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
	       sizeof(rq->crypt_ctx->bc_dun));
#endif
}
118+
119+
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
120+
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
121+
{
122+
if (bio_has_crypt_ctx(*bio_ptr))
123+
return __blk_crypto_bio_prep(bio_ptr);
124+
return true;
125+
}
126+
127+
blk_status_t __blk_crypto_init_request(struct request *rq);
128+
static inline blk_status_t blk_crypto_init_request(struct request *rq)
129+
{
130+
if (blk_crypto_rq_is_encrypted(rq))
131+
return __blk_crypto_init_request(rq);
132+
return BLK_STS_OK;
133+
}
134+
135+
void __blk_crypto_free_request(struct request *rq);
/* Tear down any inline-encryption state attached to @rq. */
static inline void blk_crypto_free_request(struct request *rq)
{
	if (!blk_crypto_rq_is_encrypted(rq))
		return;

	__blk_crypto_free_request(rq);
}
141+
142+
void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
143+
gfp_t gfp_mask);
144+
static inline void blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
145+
gfp_t gfp_mask)
146+
{
147+
if (bio_has_crypt_ctx(bio))
148+
__blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
149+
}
150+
151+
/**
152+
* blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
153+
* into a request queue.
154+
* @rq: the request being queued
155+
*
156+
* Return: BLK_STS_OK on success, nonzero on error.
157+
*/
158+
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
159+
{
160+
161+
if (blk_crypto_rq_is_encrypted(rq))
162+
return blk_crypto_init_request(rq);
163+
return BLK_STS_OK;
164+
}
165+
166+
#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */

0 commit comments

Comments
 (0)