@@ -35,20 +35,32 @@
 #define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
 #define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
 
-#define SGES_PER_PAGE	(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
-
 /* Optimisation for I/Os between 4k and 128k */
 #define NVME_SMALL_POOL_SIZE	256
 
 /*
  * These can be higher, but we need to ensure that any command doesn't
  * require an sg allocation that needs more than a page of data.
  */
 #define NVME_MAX_KB_SZ	8192
-#define NVME_MAX_SEGS	128
-#define NVME_MAX_META_SEGS	15
 #define NVME_MAX_NR_DESCRIPTORS	5
 
+/*
+ * For data SGLs we support a single descriptor's worth of SGL entries, but for
+ * now we also limit it to avoid an allocation larger than PAGE_SIZE for the
+ * scatterlist.
+ */
+#define NVME_MAX_SEGS \
+	min(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc), \
+	    (PAGE_SIZE / sizeof(struct scatterlist)))
+
+/*
+ * For metadata SGLs, only the small descriptor is supported, and the first
+ * entry is the segment descriptor, which for the data pointer sits in the SQE.
+ */
+#define NVME_MAX_META_SEGS \
+	((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1)
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0444);
 
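As a sanity check, the derived limits reduce to exactly the hard-coded values this hunk removes. The sketch below redoes the arithmetic in userspace under assumed sizes for a typical 64-bit configuration (4 KiB NVME_CTRL_PAGE_SIZE and host page size, a 16-byte struct nvme_sgl_desc per the NVMe spec, a 32-byte struct scatterlist); the SGL_DESC_SIZE and SCATTERLIST_SIZE stand-ins and the MIN() macro are illustrative, not part of the patch:

#include <stdio.h>

/* Assumed sizes, not taken from the patch; adjust for your config. */
#define NVME_CTRL_PAGE_SIZE	4096UL	/* controller page size */
#define PAGE_SIZE_ASSUMED	4096UL	/* host PAGE_SIZE */
#define NVME_SMALL_POOL_SIZE	256UL	/* small descriptor pool, as in the driver */
#define SGL_DESC_SIZE		16UL	/* stand-in for sizeof(struct nvme_sgl_desc) */
#define SCATTERLIST_SIZE	32UL	/* stand-in for sizeof(struct scatterlist) */

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	/* Data SGLs: one descriptor page worth of entries, further capped so
	 * the scatterlist allocation stays within a single host page. */
	unsigned long max_segs = MIN(NVME_CTRL_PAGE_SIZE / SGL_DESC_SIZE,
				     PAGE_SIZE_ASSUMED / SCATTERLIST_SIZE);

	/* Metadata SGLs: entries in one small-pool allocation, minus the
	 * leading segment descriptor. */
	unsigned long max_meta_segs = NVME_SMALL_POOL_SIZE / SGL_DESC_SIZE - 1;

	printf("NVME_MAX_SEGS      = %lu\n", max_segs);      /* min(256, 128) = 128 */
	printf("NVME_MAX_META_SEGS = %lu\n", max_meta_segs); /* 256 / 16 - 1  =  15 */
	return 0;
}

Under those assumptions, min(256, 128) = 128 and 256 / 16 - 1 = 15, matching the NVME_MAX_SEGS 128 and NVME_MAX_META_SEGS 15 literals deleted above: on such configurations the patch changes how the limits are expressed, not their values.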
|
@@ -3829,8 +3841,6 @@ static int __init nvme_init(void)
 	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
 	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
-	BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE);
-	BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE);
 	BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_DESCRIPTORS);
 
 	return pci_register_driver(&nvme_driver);
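The two dropped BUILD_BUG_ON() checks are not lost coverage: NVME_MAX_SEGS is now defined as the min() of exactly the two bounds they asserted, so both hold by construction. A minimal compile-time sketch of that argument, reusing the assumed stand-in sizes from the example above (again hypothetical, not from the patch):

#include <assert.h>

#define NVME_CTRL_PAGE_SIZE	4096UL	/* assumed controller page size */
#define PAGE_SIZE_ASSUMED	4096UL	/* assumed host PAGE_SIZE */
#define SGL_DESC_SIZE		16UL	/* stand-in for sizeof(struct nvme_sgl_desc) */
#define SCATTERLIST_SIZE	32UL	/* stand-in for sizeof(struct scatterlist) */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Mirrors the shape of the new NVME_MAX_SEGS definition. */
#define MAX_SEGS \
	MIN(NVME_CTRL_PAGE_SIZE / SGL_DESC_SIZE, \
	    PAGE_SIZE_ASSUMED / SCATTERLIST_SIZE)

/* Was: BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE); */
static_assert(MAX_SEGS <= NVME_CTRL_PAGE_SIZE / SGL_DESC_SIZE,
	      "data SGL entries fit in one controller page");

/* Was: BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE); */
static_assert(MAX_SEGS * SCATTERLIST_SIZE <= PAGE_SIZE_ASSUMED,
	      "the scatterlist allocation fits in one page");

Since min(a, b) is never larger than either operand, both assertions hold for any positive sizes, not just the assumed ones, which is why the compile-time checks became redundant.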