1414#include <linux/clk.h>
1515#include <linux/delay.h>
1616#include <linux/device.h>
17+ #include <linux/dmaengine.h>
18+ #include <linux/dma-mapping.h>
1719#include <linux/interrupt.h>
1820#include <linux/io.h>
1921#include <linux/module.h>
5557
#define SUN6I_FIFO_CTL_REG			0x18
#define SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_MASK	0xff
#define SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_POS	0
#define SUN6I_FIFO_CTL_RF_DRQ_EN		BIT(8)
#define SUN6I_FIFO_CTL_RF_RST			BIT(15)
#define SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_MASK	0xff
#define SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_POS	16
#define SUN6I_FIFO_CTL_TF_DRQ_EN		BIT(24)
#define SUN6I_FIFO_CTL_TF_RST			BIT(31)
/* Parenthesized: a multi-term macro must expand safely in any expression */
#define SUN6I_FIFO_CTL_DMA_DEDICATE		(BIT(9) | BIT(25))

#define SUN6I_FIFO_STA_REG			0x1c
#define SUN6I_FIFO_STA_RF_CNT_MASK		0x7f
@@ -177,6 +182,15 @@ static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi, int len)
177182 }
178183}
179184
/*
 * spi_master ->can_dma callback: choose DMA only when the transfer does
 * not fit entirely in the FIFO; shorter transfers go through PIO.
 */
static bool sun6i_spi_can_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *tfr)
{
	struct sun6i_spi *sspi = spi_master_get_devdata(master);

	return tfr->len > sspi->fifo_depth;
}
193+
180194static void sun6i_spi_set_cs (struct spi_device * spi , bool enable )
181195{
182196 struct sun6i_spi * sspi = spi_master_get_devdata (spi -> master );
@@ -208,6 +222,9 @@ static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
208222 struct spi_master * master = spi -> master ;
209223 struct sun6i_spi * sspi = spi_master_get_devdata (master );
210224
225+ if (master -> can_dma )
226+ return SUN6I_MAX_XFER_SIZE ;
227+
211228 return sspi -> fifo_depth ;
212229}
213230
@@ -268,26 +285,190 @@ static int sun6i_spi_wait_for_transfer(struct spi_device *spi,
268285 return 0 ;
269286}
270287
/*
 * DMA descriptor completion callback: tells the SPI core the current
 * transfer is finished.
 *
 * NOTE(review): this callback is installed on both the TX and the RX
 * descriptors (see sun6i_spi_dmap_prep_tx/rx); for a bidirectional
 * transfer it can therefore fire twice, and the first invocation may
 * finalize while the other channel is still running — confirm intended.
 */
static void sun6i_spi_dma_callback(void *param)
{
	struct spi_master *master = param;

	dev_dbg(&master->dev, "DMA transfer complete\n");
	spi_finalize_current_transfer(master);
}
295+
296+ static int sun6i_spi_dmap_prep_tx (struct spi_master * master ,
297+ struct spi_transfer * tfr ,
298+ dma_cookie_t * cookie )
299+ {
300+ struct dma_async_tx_descriptor * chan_desc = NULL ;
301+
302+ chan_desc = dmaengine_prep_slave_sg (master -> dma_tx ,
303+ tfr -> tx_sg .sgl , tfr -> tx_sg .nents ,
304+ DMA_TO_DEVICE ,
305+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK );
306+ if (!chan_desc ) {
307+ dev_err (& master -> dev ,
308+ "Couldn't prepare TX DMA slave\n" );
309+ return - EIO ;
310+ }
311+
312+ chan_desc -> callback = sun6i_spi_dma_callback ;
313+ chan_desc -> callback_param = master ;
314+
315+ * cookie = dmaengine_submit (chan_desc );
316+ dma_async_issue_pending (master -> dma_tx );
317+
318+ return 0 ;
319+ }
320+
321+ static int sun6i_spi_dmap_prep_rx (struct spi_master * master ,
322+ struct spi_transfer * tfr ,
323+ dma_cookie_t * cookie )
324+ {
325+ struct dma_async_tx_descriptor * chan_desc = NULL ;
326+
327+ chan_desc = dmaengine_prep_slave_sg (master -> dma_rx ,
328+ tfr -> rx_sg .sgl , tfr -> rx_sg .nents ,
329+ DMA_FROM_DEVICE ,
330+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK );
331+ if (!chan_desc ) {
332+ dev_err (& master -> dev ,
333+ "Couldn't prepare RX DMA slave\n" );
334+ return - EIO ;
335+ }
336+
337+ chan_desc -> callback = sun6i_spi_dma_callback ;
338+ chan_desc -> callback_param = master ;
339+
340+ * cookie = dmaengine_submit (chan_desc );
341+ dma_async_issue_pending (master -> dma_rx );
342+
343+ return 0 ;
344+ }
345+
346+ static int sun6i_spi_transfer_one_dma (struct spi_device * spi ,
347+ struct spi_transfer * tfr )
348+ {
349+ struct spi_master * master = spi -> master ;
350+ struct sun6i_spi * sspi = spi_master_get_devdata (master );
351+ dma_cookie_t tx_cookie = 0 ,rx_cookie = 0 ;
352+ enum dma_status status ;
353+ int ret ;
354+ u32 reg , trig_level = 0 ;
355+
356+ dev_dbg (& master -> dev , "Using DMA mode for transfer\n" );
357+
358+ reg = sun6i_spi_read (sspi , SUN6I_FIFO_CTL_REG );
359+
360+ if (sspi -> tx_buf ) {
361+ ret = sun6i_spi_dmap_prep_tx (master , tfr , & tx_cookie );
362+ if (ret )
363+ goto out ;
364+
365+ reg |= SUN6I_FIFO_CTL_TF_DRQ_EN ;
366+
367+ trig_level = sspi -> fifo_depth ;
368+ reg &= ~SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_MASK ;
369+ reg |= (trig_level << SUN6I_FIFO_CTL_TF_ERQ_TRIG_LEVEL_POS );
370+ }
371+
372+ if (sspi -> rx_buf ) {
373+ ret = sun6i_spi_dmap_prep_rx (master , tfr , & rx_cookie );
374+ if (ret )
375+ goto out ;
376+
377+ reg |= SUN6I_FIFO_CTL_RF_DRQ_EN ;
378+
379+ trig_level = 1 ;
380+ reg &= ~SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_MASK ;
381+ reg |= (trig_level << SUN6I_FIFO_CTL_RF_RDY_TRIG_LEVEL_POS );
382+ }
383+
384+ /* Enable Dedicated DMA requests */
385+ sun6i_spi_write (sspi , SUN6I_FIFO_CTL_REG ,
386+ reg | SUN6I_FIFO_CTL_DMA_DEDICATE );
387+
388+ /* Start transfer */
389+ sun6i_spi_set (sspi , SUN6I_TFR_CTL_REG , SUN6I_TFR_CTL_XCH );
390+
391+ ret = sun6i_spi_wait_for_transfer (spi , tfr );
392+ if (ret )
393+ goto out ;
394+
395+ if (sspi -> tx_buf && (status = dma_async_is_tx_complete (master -> dma_tx ,
396+ tx_cookie , NULL , NULL ))) {
397+ dev_warn (& master -> dev ,
398+ "DMA returned completion status of: %s\n" ,
399+ status == DMA_ERROR ? "error" : "in progress" );
400+ }
401+ if (sspi -> rx_buf && (status = dma_async_is_tx_complete (master -> dma_rx ,
402+ rx_cookie , NULL , NULL ))) {
403+ dev_warn (& master -> dev ,
404+ "DMA returned completion status of: %s\n" ,
405+ status == DMA_ERROR ? "error" : "in progress" );
406+ }
407+
408+ out :
409+ if (ret ) {
410+ dev_dbg (& master -> dev , "DMA channel teardown\n" );
411+ if (sspi -> tx_buf )
412+ dmaengine_terminate_sync (master -> dma_tx );
413+ if (sspi -> rx_buf )
414+ dmaengine_terminate_sync (master -> dma_rx );
415+ }
416+
417+ sun6i_spi_drain_fifo (sspi , sspi -> fifo_depth );
418+
419+ sun6i_spi_write (sspi , SUN6I_INT_CTL_REG , 0 );
420+
421+ return ret ;
422+ }
423+
/*
 * Execute one SPI transfer in PIO mode: disable DMA requests, preload
 * the TX FIFO, arm the transfer-complete interrupt, start the transfer
 * and wait for it to finish. Interrupts are masked again on exit.
 * Returns 0 on success or a negative errno from the wait.
 */
static int sun6i_spi_transfer_one_pio(struct spi_device *spi,
				      struct spi_transfer *tfr)
{
	struct spi_master *master = spi->master;
	struct sun6i_spi *sspi = spi_master_get_devdata(master);
	int ret;

	/* Disable DMA requests (also clears any trigger-level setup) */
	sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG, 0);

	sun6i_spi_fill_fifo(sspi, sspi->fifo_depth);

	/* Enable transfer complete IRQ */
	sun6i_spi_set(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);

	/* Start transfer */
	sun6i_spi_set(sspi, SUN6I_TFR_CTL_REG, SUN6I_TFR_CTL_XCH);

	ret = sun6i_spi_wait_for_transfer(spi, tfr);

	/* Mask interrupts again regardless of the outcome */
	sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);

	return ret;
}
448+
271449static int sun6i_spi_transfer_one (struct spi_master * master ,
272450 struct spi_device * spi ,
273451 struct spi_transfer * tfr )
274452{
275453 struct sun6i_spi * sspi = spi_master_get_devdata (master );
276- unsigned int mclk_rate , div , timeout ;
277- unsigned int start , end , tx_time ;
454+ unsigned int mclk_rate , div ;
278455 unsigned int tx_len = 0 ;
279- int ret = 0 ;
280456 u32 reg ;
281457
282458 /* A zero length transfer never finishes if programmed
283459 in the hardware */
284460 if (!tfr -> len )
285461 return 0 ;
286462
287- /* Don't support transfer larger than the FIFO */
288- if (tfr -> len > sspi -> fifo_depth )
463+ if (tfr -> len > SUN6I_MAX_XFER_SIZE )
289464 return - EMSGSIZE ;
290465
466+ if (!master -> can_dma ) {
467+ /* Don't support transfer larger than the FIFO */
468+ if (tfr -> len > sspi -> fifo_depth )
469+ return - EMSGSIZE ;
470+ }
471+
291472 sspi -> tx_buf = tfr -> tx_buf ;
292473 sspi -> rx_buf = tfr -> rx_buf ;
293474 sspi -> len = tfr -> len ;
@@ -353,21 +534,10 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
353534 sun6i_spi_write (sspi , SUN6I_BURST_CTL_CNT_REG ,
354535 SUN6I_BURST_CTL_CNT_STC (tx_len ));
355536
356- /* Fill the TX FIFO */
357- sun6i_spi_fill_fifo (sspi , sspi -> fifo_depth );
358-
359- /* Enable transfer complete interrupt */
360- sun6i_spi_set (sspi , SUN6I_INT_CTL_REG , SUN6I_INT_CTL_TC );
361-
362- /* Start the transfer */
363- sun6i_spi_set (sspi , SUN6I_TFR_CTL_REG , SUN6I_TFR_CTL_XCH );
364-
365- /* Wait for completion */
366- ret = sun6i_spi_wait_for_transfer (spi , tfr );
367-
368- sun6i_spi_write (sspi , SUN6I_INT_CTL_REG , 0 );
537+ if (sun6i_spi_can_dma (master , spi , tfr ))
538+ return sun6i_spi_transfer_one_dma (spi , tfr );
369539
370- return ret ;
540+ return sun6i_spi_transfer_one_pio ( spi , tfr ) ;
371541}
372542
373543static irqreturn_t sun6i_spi_handler (int irq , void * dev_id )
@@ -389,6 +559,76 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
389559 return IRQ_NONE ;
390560}
391561
562+ static int sun6i_spi_dma_setup (struct platform_device * pdev ,
563+ struct resource * res )
564+ {
565+ struct spi_master * master = platform_get_drvdata (pdev );
566+ struct dma_slave_config dma_sconf ;
567+ int ret ;
568+
569+ master -> dma_tx = dma_request_slave_channel_reason (& pdev -> dev , "tx" );
570+ if (IS_ERR (master -> dma_tx )) {
571+ dev_err (& pdev -> dev , "Unable to acquire DMA TX channel\n" );
572+ ret = PTR_ERR (master -> dma_tx );
573+ goto out ;
574+ }
575+
576+ dma_sconf .direction = DMA_MEM_TO_DEV ;
577+ dma_sconf .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE ;
578+ dma_sconf .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE ;
579+ dma_sconf .dst_addr = res -> start + SUN6I_TXDATA_REG ;
580+ dma_sconf .src_maxburst = 1 ;
581+ dma_sconf .dst_maxburst = 1 ;
582+
583+ ret = dmaengine_slave_config (master -> dma_tx , & dma_sconf );
584+ if (ret ) {
585+ dev_err (& pdev -> dev , "Unable to configure DMA TX slave\n" );
586+ goto err_rel_tx ;
587+ }
588+
589+ master -> dma_rx = dma_request_slave_channel_reason (& pdev -> dev , "rx" );
590+ if (IS_ERR (master -> dma_rx )) {
591+ dev_err (& pdev -> dev , "Unable to acquire DMA RX channel\n" );
592+ ret = PTR_ERR (master -> dma_rx );
593+ goto err_rel_tx ;
594+ }
595+
596+ dma_sconf .direction = DMA_DEV_TO_MEM ;
597+ dma_sconf .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE ;
598+ dma_sconf .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE ;
599+ dma_sconf .src_addr = res -> start + SUN6I_RXDATA_REG ;
600+ dma_sconf .src_maxburst = 1 ;
601+ dma_sconf .dst_maxburst = 1 ;
602+
603+ ret = dmaengine_slave_config (master -> dma_rx , & dma_sconf );
604+ if (ret ) {
605+ dev_err (& pdev -> dev , "Unable to configure DMA RX slave\n" );
606+ goto err_rel_rx ;
607+ }
608+
609+ /* don't set can_dma unless both channels are valid*/
610+ master -> can_dma = sun6i_spi_can_dma ;
611+
612+ return 0 ;
613+
614+ err_rel_rx :
615+ dma_release_channel (master -> dma_rx );
616+ err_rel_tx :
617+ dma_release_channel (master -> dma_tx );
618+ out :
619+ master -> dma_tx = NULL ;
620+ master -> dma_rx = NULL ;
621+ return ret ;
622+ }
623+
624+ static void sun6i_spi_dma_release (struct spi_master * master )
625+ {
626+ if (master -> can_dma ) {
627+ dma_release_channel (master -> dma_rx );
628+ dma_release_channel (master -> dma_tx );
629+ }
630+ }
631+
392632static int sun6i_spi_runtime_resume (struct device * dev )
393633{
394634 struct spi_master * master = dev_get_drvdata (dev );
@@ -510,6 +750,15 @@ static int sun6i_spi_probe(struct platform_device *pdev)
510750 goto err_free_master ;
511751 }
512752
753+ ret = sun6i_spi_dma_setup (pdev , res );
754+ if (ret ) {
755+ if (ret == - EPROBE_DEFER ) {
756+ /* wait for the dma driver to load */
757+ goto err_free_master ;
758+ }
759+ dev_warn (& pdev -> dev , "DMA transfer not supported\n" );
760+ }
761+
513762 /*
514763 * This wake-up/shutdown pattern is to be able to have the
515764 * device woken up, even if runtime_pm is disabled
@@ -536,14 +785,19 @@ static int sun6i_spi_probe(struct platform_device *pdev)
536785 pm_runtime_disable (& pdev -> dev );
537786 sun6i_spi_runtime_suspend (& pdev -> dev );
538787err_free_master :
788+ sun6i_spi_dma_release (master );
539789 spi_master_put (master );
540790 return ret ;
541791}
542792
/* Platform driver teardown: stop runtime PM and free the DMA channels. */
static int sun6i_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);

	sun6i_spi_dma_release(master);

	return 0;
}
549803
0 commit comments