 #include "core.h"
 #include "card.h"
 
-#define MMC_QUEUE_BOUNCESZ	65536
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
-	if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
-		return 0;
-
-	if (bouncesz > host->max_req_size)
-		bouncesz = host->max_req_size;
-	if (bouncesz > host->max_seg_size)
-		bouncesz = host->max_seg_size;
-	if (bouncesz > host->max_blk_count * 512)
-		bouncesz = host->max_blk_count * 512;
-
-	if (bouncesz <= 512)
-		return 0;
-
-	return bouncesz;
-}
-
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
  * @q: the request queue
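
For reference, a worked pass through the deleted mmc_queue_calc_bouncesz() with made-up host capabilities (values are illustrative, not from this patch):

/*
 * Hypothetical host: max_segs = 1 (so bouncing applied), no
 * MMC_CAP_NO_BOUNCE_BUFF, max_req_size = 32768, max_seg_size = 65536,
 * max_blk_count = 32.
 *
 * bouncesz = MMC_QUEUE_BOUNCESZ          -> 65536
 * clamp to max_req_size (32768)          -> 32768
 * clamp to max_seg_size (65536)          -> 32768 (no change)
 * clamp to max_blk_count * 512 (16384)   -> 16384
 *
 * 16384 > 512, so this host would have bounced through a 16 KiB buffer.
 */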
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 
-	if (card->bouncesz) {
-		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
-		if (!mq_rq->bounce_buf)
-			return -ENOMEM;
-		if (card->bouncesz > 512) {
-			mq_rq->sg = mmc_alloc_sg(1, gfp);
-			if (!mq_rq->sg)
-				return -ENOMEM;
-			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
-							gfp);
-			if (!mq_rq->bounce_sg)
-				return -ENOMEM;
-		}
-	} else {
-		mq_rq->bounce_buf = NULL;
-		mq_rq->bounce_sg = NULL;
-		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
-		if (!mq_rq->sg)
-			return -ENOMEM;
-	}
+	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+	if (!mq_rq->sg)
+		return -ENOMEM;
 
 	return 0;
 }
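
The mmc_alloc_sg() helper is defined earlier in queue.c and is untouched by this patch; a sketch of its shape, for context (not part of the diff):

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Allocate and initialize a scatterlist table of sg_len entries. */
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}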
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-	/* It is OK to kfree(NULL) so this will be smooth */
-	kfree(mq_rq->bounce_sg);
-	mq_rq->bounce_sg = NULL;
-
-	kfree(mq_rq->bounce_buf);
-	mq_rq->bounce_buf = NULL;
-
 	kfree(mq_rq->sg);
 	mq_rq->sg = NULL;
 }
@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
-	/*
-	 * mmc_init_request() depends on card->bouncesz so it must be calculated
-	 * before blk_init_allocated_queue() starts allocating requests.
-	 */
-	card->bouncesz = mmc_queue_calc_bouncesz(host);
-
 	mq->card = card;
 	mq->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!mq->queue)
@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-	if (card->bouncesz) {
-		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segment_size(mq->queue, card->bouncesz);
-	} else {
-		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_hw_sectors(mq->queue,
-			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_segments(mq->queue, host->max_segs);
-		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-	}
+	blk_queue_bounce_limit(mq->queue, limit);
+	blk_queue_max_hw_sectors(mq->queue,
+		min(host->max_blk_count, host->max_req_size / 512));
+	blk_queue_max_segments(mq->queue, host->max_segs);
+	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 	sema_init(&mq->thread_sem, 1);
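
With the bounce path gone, the queue limits always come straight from the host capabilities. A worked example with assumed values (illustrative only):

/*
 * Hypothetical host: max_blk_count = 65535, max_req_size = 524288,
 * max_segs = 128, max_seg_size = 65536.
 *
 * max_hw_sectors = min(65535, 524288 / 512) = min(65535, 1024) = 1024
 *
 * so the block layer caps requests at 1024 sectors (512 KiB), split
 * across at most 128 segments of at most 64 KiB each.
 */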
@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
  */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
-	unsigned int sg_len;
-	size_t buflen;
-	struct scatterlist *sg;
 	struct request *req = mmc_queue_req_to_req(mqrq);
-	int i;
-
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
-	sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
-	mqrq->bounce_sg_len = sg_len;
-
-	buflen = 0;
-	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-		buflen += sg->length;
-
-	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-	return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
-		return;
-
-	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-		mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
-		return;
 
-	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-		mqrq->bounce_buf, mqrq->sg[0].length);
+	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
 }
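
mmc_queue_map_sg() is now a thin wrapper around blk_rq_map_sg(), and host drivers always see the real request scatterlist. A hedged sketch of how a host driver typically consumes it (function and variable names here are illustrative, not from this patch):

#include <linux/dma-mapping.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static int example_host_map_data(struct mmc_host *mmc, struct mmc_data *data)
{
	/* Direction follows the request: writes go to the device. */
	enum dma_data_direction dir = (data->flags & MMC_DATA_WRITE) ?
				      DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int count;

	/* Map the segments filled in via mmc_queue_map_sg() for DMA. */
	count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, dir);

	return count ? 0 : -EINVAL;
}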