1515
1616#include <linux/clk.h>
1717#include <linux/delay.h>
18+ #include <linux/dmaengine.h>
19+ #include <linux/dma-mapping.h>
1820#include <linux/err.h>
1921#include <linux/errno.h>
2022#include <linux/interrupt.h>
4042#define TRAN_STATE_WORD_ODD_NUM 0x04
4143
4244#define DSPI_FIFO_SIZE 4
45+ #define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
4346
4447#define SPI_MCR 0x00
4548#define SPI_MCR_MASTER (1 << 31)
7174#define SPI_SR_EOQF 0x10000000
7275#define SPI_SR_TCFQF 0x80000000
7376
77+ #define SPI_RSER_TFFFE BIT(25)
78+ #define SPI_RSER_TFFFD BIT(24)
79+ #define SPI_RSER_RFDFE BIT(17)
80+ #define SPI_RSER_RFDFD BIT(16)
81+
7482#define SPI_RSER 0x30
7583#define SPI_RSER_EOQFE 0x10000000
7684#define SPI_RSER_TCFQE 0x80000000
108116
109117#define SPI_TCR_TCNT_MAX 0x10000
110118
119+ #define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
120+
111121struct chip_data {
112122 u32 mcr_val ;
113123 u32 ctar_val ;
@@ -117,6 +127,7 @@ struct chip_data {
/*
 * Transfer strategies supported by the controller:
 * EOQ and TCFQ are interrupt/PIO driven; DMA mode streams PUSHR/POPR
 * words through coherent bounce buffers (see dspi_dma_xfer()).
 */
enum dspi_trans_mode {
	DSPI_EOQ_MODE = 0,
	DSPI_TCFQ_MODE,
	DSPI_DMA_MODE,
};
121132
122133struct fsl_dspi_devtype_data {
@@ -125,7 +136,7 @@ struct fsl_dspi_devtype_data {
125136};
126137
/* Vybrid (vf610): DMA-driven transfers; max SPI clock = input clk / 2. */
static const struct fsl_dspi_devtype_data vf610_data = {
	.trans_mode = DSPI_DMA_MODE,
	.max_clock_factor = 2,
};
131142
@@ -139,6 +150,22 @@ static const struct fsl_dspi_devtype_data ls2085a_data = {
139150 .max_clock_factor = 8 ,
140151};
141152
/* Per-controller DMA state: channels, coherent bounce buffers, completions. */
struct fsl_dspi_dma {
	/* Bytes of caller data covered by the chunk currently in flight. */
	u32 curr_xfer_len;

	u32 *tx_dma_buf;	/* coherent buffer of 32-bit PUSHR command words */
	struct dma_chan *chan_tx;
	dma_addr_t tx_dma_phys;	/* bus address of tx_dma_buf */
	struct completion cmd_tx_complete;
	struct dma_async_tx_descriptor *tx_desc;

	u32 *rx_dma_buf;	/* coherent buffer of 32-bit POPR read-outs */
	struct dma_chan *chan_rx;
	dma_addr_t rx_dma_phys;	/* bus address of rx_dma_buf */
	struct completion cmd_rx_complete;
	struct dma_async_tx_descriptor *rx_desc;
};
168+
142169struct fsl_dspi {
143170 struct spi_master * master ;
144171 struct platform_device * pdev ;
@@ -165,6 +192,7 @@ struct fsl_dspi {
165192 u32 waitflags ;
166193
167194 u32 spi_tcnt ;
195+ struct fsl_dspi_dma * dma ;
168196};
169197
170198static inline int is_double_byte_mode (struct fsl_dspi * dspi )
@@ -176,6 +204,263 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
176204 return ((val & SPI_FRAME_BITS_MASK ) == SPI_FRAME_BITS (8 )) ? 0 : 1 ;
177205}
178206
207+ static void dspi_tx_dma_callback (void * arg )
208+ {
209+ struct fsl_dspi * dspi = arg ;
210+ struct fsl_dspi_dma * dma = dspi -> dma ;
211+
212+ complete (& dma -> cmd_tx_complete );
213+ }
214+
215+ static void dspi_rx_dma_callback (void * arg )
216+ {
217+ struct fsl_dspi * dspi = arg ;
218+ struct fsl_dspi_dma * dma = dspi -> dma ;
219+ int rx_word ;
220+ int i , len ;
221+ u16 d ;
222+
223+ rx_word = is_double_byte_mode (dspi );
224+
225+ len = rx_word ? (dma -> curr_xfer_len / 2 ) : dma -> curr_xfer_len ;
226+
227+ if (!(dspi -> dataflags & TRAN_STATE_RX_VOID )) {
228+ for (i = 0 ; i < len ; i ++ ) {
229+ d = dspi -> dma -> rx_dma_buf [i ];
230+ rx_word ? (* (u16 * )dspi -> rx = d ) :
231+ (* (u8 * )dspi -> rx = d );
232+ dspi -> rx += rx_word + 1 ;
233+ }
234+ }
235+
236+ complete (& dma -> cmd_rx_complete );
237+ }
238+
239+ static int dspi_next_xfer_dma_submit (struct fsl_dspi * dspi )
240+ {
241+ struct fsl_dspi_dma * dma = dspi -> dma ;
242+ struct device * dev = & dspi -> pdev -> dev ;
243+ int time_left ;
244+ int tx_word ;
245+ int i , len ;
246+ u16 val ;
247+
248+ tx_word = is_double_byte_mode (dspi );
249+
250+ len = tx_word ? (dma -> curr_xfer_len / 2 ) : dma -> curr_xfer_len ;
251+
252+ for (i = 0 ; i < len - 1 ; i ++ ) {
253+ val = tx_word ? * (u16 * ) dspi -> tx : * (u8 * ) dspi -> tx ;
254+ dspi -> dma -> tx_dma_buf [i ] =
255+ SPI_PUSHR_TXDATA (val ) | SPI_PUSHR_PCS (dspi -> cs ) |
256+ SPI_PUSHR_CTAS (0 ) | SPI_PUSHR_CONT ;
257+ dspi -> tx += tx_word + 1 ;
258+ }
259+
260+ val = tx_word ? * (u16 * ) dspi -> tx : * (u8 * ) dspi -> tx ;
261+ dspi -> dma -> tx_dma_buf [i ] = SPI_PUSHR_TXDATA (val ) |
262+ SPI_PUSHR_PCS (dspi -> cs ) |
263+ SPI_PUSHR_CTAS (0 );
264+ dspi -> tx += tx_word + 1 ;
265+
266+ dma -> tx_desc = dmaengine_prep_slave_single (dma -> chan_tx ,
267+ dma -> tx_dma_phys ,
268+ DSPI_DMA_BUFSIZE , DMA_MEM_TO_DEV ,
269+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK );
270+ if (!dma -> tx_desc ) {
271+ dev_err (dev , "Not able to get desc for DMA xfer\n" );
272+ return - EIO ;
273+ }
274+
275+ dma -> tx_desc -> callback = dspi_tx_dma_callback ;
276+ dma -> tx_desc -> callback_param = dspi ;
277+ if (dma_submit_error (dmaengine_submit (dma -> tx_desc ))) {
278+ dev_err (dev , "DMA submit failed\n" );
279+ return - EINVAL ;
280+ }
281+
282+ dma -> rx_desc = dmaengine_prep_slave_single (dma -> chan_rx ,
283+ dma -> rx_dma_phys ,
284+ DSPI_DMA_BUFSIZE , DMA_DEV_TO_MEM ,
285+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK );
286+ if (!dma -> rx_desc ) {
287+ dev_err (dev , "Not able to get desc for DMA xfer\n" );
288+ return - EIO ;
289+ }
290+
291+ dma -> rx_desc -> callback = dspi_rx_dma_callback ;
292+ dma -> rx_desc -> callback_param = dspi ;
293+ if (dma_submit_error (dmaengine_submit (dma -> rx_desc ))) {
294+ dev_err (dev , "DMA submit failed\n" );
295+ return - EINVAL ;
296+ }
297+
298+ reinit_completion (& dspi -> dma -> cmd_rx_complete );
299+ reinit_completion (& dspi -> dma -> cmd_tx_complete );
300+
301+ dma_async_issue_pending (dma -> chan_rx );
302+ dma_async_issue_pending (dma -> chan_tx );
303+
304+ time_left = wait_for_completion_timeout (& dspi -> dma -> cmd_tx_complete ,
305+ DMA_COMPLETION_TIMEOUT );
306+ if (time_left == 0 ) {
307+ dev_err (dev , "DMA tx timeout\n" );
308+ dmaengine_terminate_all (dma -> chan_tx );
309+ dmaengine_terminate_all (dma -> chan_rx );
310+ return - ETIMEDOUT ;
311+ }
312+
313+ time_left = wait_for_completion_timeout (& dspi -> dma -> cmd_rx_complete ,
314+ DMA_COMPLETION_TIMEOUT );
315+ if (time_left == 0 ) {
316+ dev_err (dev , "DMA rx timeout\n" );
317+ dmaengine_terminate_all (dma -> chan_tx );
318+ dmaengine_terminate_all (dma -> chan_rx );
319+ return - ETIMEDOUT ;
320+ }
321+
322+ return 0 ;
323+ }
324+
325+ static int dspi_dma_xfer (struct fsl_dspi * dspi )
326+ {
327+ struct fsl_dspi_dma * dma = dspi -> dma ;
328+ struct device * dev = & dspi -> pdev -> dev ;
329+ int curr_remaining_bytes ;
330+ int bytes_per_buffer ;
331+ int tx_word ;
332+ int ret = 0 ;
333+
334+ tx_word = is_double_byte_mode (dspi );
335+ curr_remaining_bytes = dspi -> len ;
336+ while (curr_remaining_bytes ) {
337+ /* Check if current transfer fits the DMA buffer */
338+ dma -> curr_xfer_len = curr_remaining_bytes ;
339+ bytes_per_buffer = DSPI_DMA_BUFSIZE /
340+ (DSPI_FIFO_SIZE / (tx_word ? 2 : 1 ));
341+ if (curr_remaining_bytes > bytes_per_buffer )
342+ dma -> curr_xfer_len = bytes_per_buffer ;
343+
344+ ret = dspi_next_xfer_dma_submit (dspi );
345+ if (ret ) {
346+ dev_err (dev , "DMA transfer failed\n" );
347+ goto exit ;
348+
349+ } else {
350+ curr_remaining_bytes -= dma -> curr_xfer_len ;
351+ if (curr_remaining_bytes < 0 )
352+ curr_remaining_bytes = 0 ;
353+ dspi -> len = curr_remaining_bytes ;
354+ }
355+ }
356+
357+ exit :
358+ return ret ;
359+ }
360+
361+ static int dspi_request_dma (struct fsl_dspi * dspi , phys_addr_t phy_addr )
362+ {
363+ struct fsl_dspi_dma * dma ;
364+ struct dma_slave_config cfg ;
365+ struct device * dev = & dspi -> pdev -> dev ;
366+ int ret ;
367+
368+ dma = devm_kzalloc (dev , sizeof (* dma ), GFP_KERNEL );
369+ if (!dma )
370+ return - ENOMEM ;
371+
372+ dma -> chan_rx = dma_request_slave_channel (dev , "rx" );
373+ if (!dma -> chan_rx ) {
374+ dev_err (dev , "rx dma channel not available\n" );
375+ ret = - ENODEV ;
376+ return ret ;
377+ }
378+
379+ dma -> chan_tx = dma_request_slave_channel (dev , "tx" );
380+ if (!dma -> chan_tx ) {
381+ dev_err (dev , "tx dma channel not available\n" );
382+ ret = - ENODEV ;
383+ goto err_tx_channel ;
384+ }
385+
386+ dma -> tx_dma_buf = dma_alloc_coherent (dev , DSPI_DMA_BUFSIZE ,
387+ & dma -> tx_dma_phys , GFP_KERNEL );
388+ if (!dma -> tx_dma_buf ) {
389+ ret = - ENOMEM ;
390+ goto err_tx_dma_buf ;
391+ }
392+
393+ dma -> rx_dma_buf = dma_alloc_coherent (dev , DSPI_DMA_BUFSIZE ,
394+ & dma -> rx_dma_phys , GFP_KERNEL );
395+ if (!dma -> rx_dma_buf ) {
396+ ret = - ENOMEM ;
397+ goto err_rx_dma_buf ;
398+ }
399+
400+ cfg .src_addr = phy_addr + SPI_POPR ;
401+ cfg .dst_addr = phy_addr + SPI_PUSHR ;
402+ cfg .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES ;
403+ cfg .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES ;
404+ cfg .src_maxburst = 1 ;
405+ cfg .dst_maxburst = 1 ;
406+
407+ cfg .direction = DMA_DEV_TO_MEM ;
408+ ret = dmaengine_slave_config (dma -> chan_rx , & cfg );
409+ if (ret ) {
410+ dev_err (dev , "can't configure rx dma channel\n" );
411+ ret = - EINVAL ;
412+ goto err_slave_config ;
413+ }
414+
415+ cfg .direction = DMA_MEM_TO_DEV ;
416+ ret = dmaengine_slave_config (dma -> chan_tx , & cfg );
417+ if (ret ) {
418+ dev_err (dev , "can't configure tx dma channel\n" );
419+ ret = - EINVAL ;
420+ goto err_slave_config ;
421+ }
422+
423+ dspi -> dma = dma ;
424+ init_completion (& dma -> cmd_tx_complete );
425+ init_completion (& dma -> cmd_rx_complete );
426+
427+ return 0 ;
428+
429+ err_slave_config :
430+ devm_kfree (dev , dma -> rx_dma_buf );
431+ err_rx_dma_buf :
432+ devm_kfree (dev , dma -> tx_dma_buf );
433+ err_tx_dma_buf :
434+ dma_release_channel (dma -> chan_tx );
435+ err_tx_channel :
436+ dma_release_channel (dma -> chan_rx );
437+
438+ devm_kfree (dev , dma );
439+ dspi -> dma = NULL ;
440+
441+ return ret ;
442+ }
443+
444+ static void dspi_release_dma (struct fsl_dspi * dspi )
445+ {
446+ struct fsl_dspi_dma * dma = dspi -> dma ;
447+ struct device * dev = & dspi -> pdev -> dev ;
448+
449+ if (dma ) {
450+ if (dma -> chan_tx ) {
451+ dma_unmap_single (dev , dma -> tx_dma_phys ,
452+ DSPI_DMA_BUFSIZE , DMA_TO_DEVICE );
453+ dma_release_channel (dma -> chan_tx );
454+ }
455+
456+ if (dma -> chan_rx ) {
457+ dma_unmap_single (dev , dma -> rx_dma_phys ,
458+ DSPI_DMA_BUFSIZE , DMA_FROM_DEVICE );
459+ dma_release_channel (dma -> chan_rx );
460+ }
461+ }
462+ }
463+
179464static void hz_to_spi_baud (char * pbr , char * br , int speed_hz ,
180465 unsigned long clkrate )
181466{
@@ -424,6 +709,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
424709 regmap_write (dspi -> regmap , SPI_RSER , SPI_RSER_TCFQE );
425710 dspi_tcfq_write (dspi );
426711 break ;
712+ case DSPI_DMA_MODE :
713+ regmap_write (dspi -> regmap , SPI_RSER ,
714+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
715+ SPI_RSER_RFDFE | SPI_RSER_RFDFD );
716+ status = dspi_dma_xfer (dspi );
717+ goto out ;
427718 default :
428719 dev_err (& dspi -> pdev -> dev , "unsupported trans_mode %u\n" ,
429720 trans_mode );
@@ -733,6 +1024,13 @@ static int dspi_probe(struct platform_device *pdev)
7331024 if (ret )
7341025 goto out_master_put ;
7351026
1027+ if (dspi -> devtype_data -> trans_mode == DSPI_DMA_MODE ) {
1028+ if (dspi_request_dma (dspi , res -> start )) {
1029+ dev_err (& pdev -> dev , "can't get dma channels\n" );
1030+ goto out_clk_put ;
1031+ }
1032+ }
1033+
7361034 master -> max_speed_hz =
7371035 clk_get_rate (dspi -> clk ) / dspi -> devtype_data -> max_clock_factor ;
7381036
@@ -761,6 +1059,7 @@ static int dspi_remove(struct platform_device *pdev)
7611059 struct fsl_dspi * dspi = spi_master_get_devdata (master );
7621060
7631061 /* Disconnect from the SPI framework */
1062+ dspi_release_dma (dspi );
7641063 clk_disable_unprepare (dspi -> clk );
7651064 spi_unregister_master (dspi -> master );
7661065
0 commit comments