@@ -68,6 +68,7 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
  * drm_mm_node
  * @node: The drm_mm_node.
  * @region_start: An offset to add to the dma addresses of the sg list.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
  *
  * Create a struct sg_table, initializing it from a struct drm_mm_node,
  * taking a maximum segment length into account, splitting into segments
@@ -77,15 +78,18 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
  * error code cast to an error pointer on failure.
  */
 struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
-					      u64 region_start)
+					      u64 region_start,
+					      u64 page_alignment)
 {
-	const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
+	const u64 max_segment = round_down(UINT_MAX, page_alignment);
 	u64 segment_pages = max_segment >> PAGE_SHIFT;
 	u64 block_size, offset, prev_end;
 	struct i915_refct_sgt *rsgt;
 	struct sg_table *st;
 	struct scatterlist *sg;
 
+	GEM_BUG_ON(!max_segment);
+
 	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
 	if (!rsgt)
 		return ERR_PTR(-ENOMEM);
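
The hunk above drops the hard-coded SZ_1G guess in favour of the largest multiple of page_alignment that still fits in UINT_MAX, i.e. in a scatterlist entry's unsigned int length field, and the new GEM_BUG_ON() catches an alignment that would collapse this cap to zero. A minimal userspace sketch of that arithmetic, assuming a power-of-two alignment as the new kernel-doc requires (names are illustrative, not the kernel API):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's round_down(): valid only for power-of-two align. */
static uint64_t round_down_pow2(uint64_t x, uint64_t align)
{
	return x & ~(align - 1);
}

int main(void)
{
	const uint64_t page_alignment = UINT64_C(1) << 16;	/* e.g. 64K pages (assumed value) */
	const uint64_t max_segment = round_down_pow2(UINT32_MAX, page_alignment);

	/* Mirrors GEM_BUG_ON(!max_segment): a zero result means the alignment is unusable. */
	assert(max_segment != 0);
	printf("max_segment = %#llx\n", (unsigned long long)max_segment);
	return 0;
}
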
@@ -112,6 +116,8 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
 				sg = __sg_next(sg);
 
 			sg_dma_address(sg) = region_start + offset;
+			GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
+					       page_alignment));
 			sg_dma_len(sg) = 0;
 			sg->length = 0;
 			st->nents++;
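
The assertion added here checks that every new segment starts on a page_alignment boundary. That holds as long as region_start, the node's start and size, and max_segment are themselves multiples of page_alignment (an assumption about the callers, not something this diff enforces), since each cut point in the splitting loop then advances by an aligned amount. A rough userspace model of that splitting logic, with illustrative names and values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED_POW2(x, a) (((x) & ((a) - 1)) == 0)	/* like the kernel's IS_ALIGNED() */

int main(void)
{
	const uint64_t page_alignment = UINT64_C(1) << 16;
	const uint64_t max_segment = UINT32_MAX & ~(page_alignment - 1);
	const uint64_t region_start = UINT64_C(0x100000000);	/* assumed, aligned */
	uint64_t offset = UINT64_C(64) << 16;			/* node start, aligned */
	uint64_t block_size = UINT64_C(6) << 30;		/* 6G worth of pages   */

	/* Split the run into sg-sized segments, roughly as the kernel loop does. */
	while (block_size) {
		uint64_t len = block_size < max_segment ? block_size : max_segment;

		/* Mirrors GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg), page_alignment)). */
		assert(IS_ALIGNED_POW2(region_start + offset, page_alignment));
		printf("segment at %#llx, len %#llx\n",
		       (unsigned long long)(region_start + offset),
		       (unsigned long long)len);

		offset += len;
		block_size -= len;
	}
	return 0;
}
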
@@ -138,6 +144,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
  * i915_buddy_block list
  * @res: The struct i915_ttm_buddy_resource.
  * @region_start: An offset to add to the dma addresses of the sg list.
+ * @page_alignment: Required page alignment for each sg entry. Power of two.
  *
  * Create a struct sg_table, initializing it from struct i915_buddy_block list,
  * taking a maximum segment length into account, splitting into segments
@@ -147,11 +154,12 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
  * error code cast to an error pointer on failure.
  */
 struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
-						      u64 region_start)
+						      u64 region_start,
+						      u64 page_alignment)
 {
 	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
 	const u64 size = res->num_pages << PAGE_SHIFT;
-	const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
+	const u64 max_segment = round_down(UINT_MAX, page_alignment);
 	struct drm_buddy *mm = bman_res->mm;
 	struct list_head *blocks = &bman_res->blocks;
 	struct drm_buddy_block *block;
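
Note the switch from rounddown() to round_down() on the max_segment line above: rounddown() divides and so accepts any divisor, while round_down() is a pure bitmask and requires a power-of-two alignment, which is exactly what the new @page_alignment kernel-doc demands. A small sketch of the two behaviours (simplified reimplementations for illustration, not the kernel macros themselves):

#include <assert.h>
#include <stdint.h>

static uint64_t rounddown_any(uint64_t x, uint64_t y)	/* any non-zero divisor */
{
	return x - (x % y);
}

static uint64_t round_down_pow2(uint64_t x, uint64_t y)	/* y must be a power of two */
{
	return x & ~(y - 1);
}

int main(void)
{
	/* For a power-of-two alignment the two agree, so passing a page-sized
	 * alignment reproduces the old rounddown(UINT_MAX, PAGE_SIZE) cap. */
	assert(rounddown_any(UINT32_MAX, UINT64_C(1) << 12) ==
	       round_down_pow2(UINT32_MAX, UINT64_C(1) << 12));

	/* Only rounddown() makes sense for a non-power-of-two divisor. */
	assert(rounddown_any(100, 24) == 96);
	return 0;
}
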
@@ -161,6 +169,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 	resource_size_t prev_end;
 
 	GEM_BUG_ON(list_empty(blocks));
+	GEM_BUG_ON(!max_segment);
 
 	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
 	if (!rsgt)
@@ -191,6 +200,8 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 					sg = __sg_next(sg);
 
 				sg_dma_address(sg) = region_start + offset;
+				GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
+						       page_alignment));
 				sg_dma_len(sg) = 0;
 				sg->length = 0;
 				st->nents++;