@@ -219,52 +219,34 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
219219}
220220
221221static int pblk_setup_w_rq (struct pblk * pblk , struct nvm_rq * rqd ,
222- struct pblk_c_ctx * c_ctx )
222+ struct pblk_c_ctx * c_ctx , struct ppa_addr * erase_ppa )
223223{
224224 struct pblk_line_meta * lm = & pblk -> lm ;
225- struct pblk_line * e_line = pblk_line_get_data_next (pblk );
226- struct ppa_addr erase_ppa ;
225+ struct pblk_line * e_line = pblk_line_get_erase (pblk );
227226 unsigned int valid = c_ctx -> nr_valid ;
228227 unsigned int padded = c_ctx -> nr_padded ;
229228 unsigned int nr_secs = valid + padded ;
230229 unsigned long * lun_bitmap ;
231230 int ret = 0 ;
232231
233232 lun_bitmap = kzalloc (lm -> lun_bitmap_len , GFP_KERNEL );
234- if (!lun_bitmap ) {
235- ret = - ENOMEM ;
236- goto out ;
237- }
233+ if (!lun_bitmap )
234+ return - ENOMEM ;
238235 c_ctx -> lun_bitmap = lun_bitmap ;
239236
240237 ret = pblk_alloc_w_rq (pblk , rqd , nr_secs );
241238 if (ret ) {
242239 kfree (lun_bitmap );
243- goto out ;
240+ return ret ;
244241 }
245242
246- ppa_set_empty (& erase_ppa );
247- if (likely (!e_line || !atomic_read (& e_line -> left_eblks )))
243+ if (likely (!e_line || !atomic_read (& e_line -> left_eblks )))
248244 pblk_map_rq (pblk , rqd , c_ctx -> sentry , lun_bitmap , valid , 0 );
249245 else
250246 pblk_map_erase_rq (pblk , rqd , c_ctx -> sentry , lun_bitmap ,
251- valid , & erase_ppa );
247+ valid , erase_ppa );
252248
253- out :
254- if (unlikely (e_line && !ppa_empty (erase_ppa ))) {
255- if (pblk_blk_erase_async (pblk , erase_ppa )) {
256- struct nvm_tgt_dev * dev = pblk -> dev ;
257- struct nvm_geo * geo = & dev -> geo ;
258- int bit ;
259-
260- atomic_inc (& e_line -> left_eblks );
261- bit = erase_ppa .g .lun * geo -> nr_chnls + erase_ppa .g .ch ;
262- WARN_ON (!test_and_clear_bit (bit , e_line -> erase_bitmap ));
263- up (& pblk -> erase_sem );
264- }
265- }
266-
267- return ret ;
249+ return 0 ;
268250}
269251
270252int pblk_setup_w_rec_rq (struct pblk * pblk , struct nvm_rq * rqd ,
@@ -311,16 +293,60 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
311293 return secs_to_sync ;
312294}
313295
296+ static int pblk_submit_io_set (struct pblk * pblk , struct nvm_rq * rqd )
297+ {
298+ struct pblk_c_ctx * c_ctx = nvm_rq_to_pdu (rqd );
299+ struct ppa_addr erase_ppa ;
300+ int err ;
301+
302+ ppa_set_empty (& erase_ppa );
303+
304+ /* Assign lbas to ppas and populate request structure */
305+ err = pblk_setup_w_rq (pblk , rqd , c_ctx , & erase_ppa );
306+ if (err ) {
307+ pr_err ("pblk: could not setup write request: %d\n" , err );
308+ return NVM_IO_ERR ;
309+ }
310+
311+ /* Submit write for current data line */
312+ err = pblk_submit_io (pblk , rqd );
313+ if (err ) {
314+ pr_err ("pblk: I/O submission failed: %d\n" , err );
315+ return NVM_IO_ERR ;
316+ }
317+
318+ /* Submit available erase for next data line */
319+ if (unlikely (!ppa_empty (erase_ppa )) &&
320+ pblk_blk_erase_async (pblk , erase_ppa )) {
321+ struct pblk_line * e_line = pblk_line_get_erase (pblk );
322+ struct nvm_tgt_dev * dev = pblk -> dev ;
323+ struct nvm_geo * geo = & dev -> geo ;
324+ int bit ;
325+
326+ atomic_inc (& e_line -> left_eblks );
327+ bit = erase_ppa .g .lun * geo -> nr_chnls + erase_ppa .g .ch ;
328+ WARN_ON (!test_and_clear_bit (bit , e_line -> erase_bitmap ));
329+ }
330+
331+ return NVM_IO_OK ;
332+ }
333+
334+ static void pblk_free_write_rqd (struct pblk * pblk , struct nvm_rq * rqd )
335+ {
336+ struct pblk_c_ctx * c_ctx = nvm_rq_to_pdu (rqd );
337+ struct bio * bio = rqd -> bio ;
338+
339+ if (c_ctx -> nr_padded )
340+ pblk_bio_free_pages (pblk , bio , rqd -> nr_ppas , c_ctx -> nr_padded );
341+ }
342+
314343static int pblk_submit_write (struct pblk * pblk )
315344{
316345 struct bio * bio ;
317346 struct nvm_rq * rqd ;
318- struct pblk_c_ctx * c_ctx ;
319- unsigned int pgs_read ;
320347 unsigned int secs_avail , secs_to_sync , secs_to_com ;
321348 unsigned int secs_to_flush ;
322349 unsigned long pos ;
323- int err ;
324350
325351 /* If there are no sectors in the cache, flushes (bios without data)
326352 * will be cleared on the cache threads
@@ -338,7 +364,6 @@ static int pblk_submit_write(struct pblk *pblk)
338364 pr_err ("pblk: cannot allocate write req.\n" );
339365 return 1 ;
340366 }
341- c_ctx = nvm_rq_to_pdu (rqd );
342367
343368 bio = bio_alloc (GFP_KERNEL , pblk -> max_write_pgs );
344369 if (!bio ) {
@@ -358,29 +383,14 @@ static int pblk_submit_write(struct pblk *pblk)
358383 secs_to_com = (secs_to_sync > secs_avail ) ? secs_avail : secs_to_sync ;
359384 pos = pblk_rb_read_commit (& pblk -> rwb , secs_to_com );
360385
361- pgs_read = pblk_rb_read_to_bio (& pblk -> rwb , bio , c_ctx , pos ,
362- secs_to_sync , secs_avail );
363- if (!pgs_read ) {
386+ if (pblk_rb_read_to_bio (& pblk -> rwb , rqd , bio , pos , secs_to_sync ,
387+ secs_avail )) {
364388 pr_err ("pblk: corrupted write bio\n" );
365389 goto fail_put_bio ;
366390 }
367391
368- if (c_ctx -> nr_padded )
369- if (pblk_bio_add_pages (pblk , bio , GFP_KERNEL , c_ctx -> nr_padded ))
370- goto fail_put_bio ;
371-
372- /* Assign lbas to ppas and populate request structure */
373- err = pblk_setup_w_rq (pblk , rqd , c_ctx );
374- if (err ) {
375- pr_err ("pblk: could not setup write request\n" );
392+ if (pblk_submit_io_set (pblk , rqd ))
376393 goto fail_free_bio ;
377- }
378-
379- err = pblk_submit_io (pblk , rqd );
380- if (err ) {
381- pr_err ("pblk: I/O submission failed: %d\n" , err );
382- goto fail_free_bio ;
383- }
384394
385395#ifdef CONFIG_NVM_DEBUG
386396 atomic_long_add (secs_to_sync , & pblk -> sub_writes );
@@ -389,8 +399,7 @@ static int pblk_submit_write(struct pblk *pblk)
389399 return 0 ;
390400
391401fail_free_bio :
392- if (c_ctx -> nr_padded )
393- pblk_bio_free_pages (pblk , bio , secs_to_sync , c_ctx -> nr_padded );
402+ pblk_free_write_rqd (pblk , rqd );
394403fail_put_bio :
395404 bio_put (bio );
396405fail_free_rqd :
0 commit comments