@@ -338,7 +338,7 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
 /**
  * struct at_dma - internal representation of an Atmel HDMA Controller
- * @dma_common: common dmaengine dma_device object members
+ * @dma_device: dmaengine dma_device object members
  * @atdma_devtype: identifier of DMA controller compatibility
  * @ch_regs: memory mapped register base
  * @clk: dma controller clock
@@ -348,7 +348,7 @@ static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
  * @chan: channels table to store at_dma_chan structures
  */
 struct at_dma {
-	struct dma_device	dma_common;
+	struct dma_device	dma_device;
 	void __iomem		*regs;
 	struct clk		*clk;
 	u32			save_imr;
@@ -368,7 +368,7 @@ struct at_dma {
 
 static inline struct at_dma *to_at_dma(struct dma_device *ddev)
 {
-	return container_of(ddev, struct at_dma, dma_common);
+	return container_of(ddev, struct at_dma, dma_device);
 }
 
 
@@ -1069,11 +1069,11 @@ static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
 		if (!pending)
 			break;
 
-		dev_vdbg(atdma->dma_common.dev,
+		dev_vdbg(atdma->dma_device.dev,
 			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
 			status, imr, pending);
 
-		for (i = 0; i < atdma->dma_common.chancnt; i++) {
+		for (i = 0; i < atdma->dma_device.chancnt; i++) {
 			atchan = &atdma->chan[i];
 			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
 				if (pending & AT_DMA_ERR(i)) {
@@ -2000,7 +2000,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 		 * We need controller-specific data to set up slave
 		 * transfers.
 		 */
-		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
+		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_device.dev);
 
 		/* if cfg configuration specified take it instead of default */
 		if (atslave->cfg)
@@ -2011,7 +2011,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 	for (i = 0; i < init_nr_desc_per_channel; i++) {
 		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
 		if (!desc) {
-			dev_err(atdma->dma_common.dev,
+			dev_err(atdma->dma_device.dev,
 				"Only %d initial descriptors\n", i);
 			break;
 		}
@@ -2255,7 +2255,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		return irq;
 
 	/* discover transaction capabilities */
-	atdma->dma_common.cap_mask = plat_dat->cap_mask;
+	atdma->dma_device.cap_mask = plat_dat->cap_mask;
 	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
 
 	atdma->clk = devm_clk_get(&pdev->dev, "dma_clk");
@@ -2299,16 +2299,16 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		cpu_relax();
 
 	/* initialize channels related values */
-	INIT_LIST_HEAD(&atdma->dma_common.channels);
+	INIT_LIST_HEAD(&atdma->dma_device.channels);
 	for (i = 0; i < plat_dat->nr_channels; i++) {
 		struct at_dma_chan	*atchan = &atdma->chan[i];
 
 		atchan->mem_if = AT_DMA_MEM_IF;
 		atchan->per_if = AT_DMA_PER_IF;
-		atchan->chan_common.device = &atdma->dma_common;
+		atchan->chan_common.device = &atdma->dma_device;
 		dma_cookie_init(&atchan->chan_common);
 		list_add_tail(&atchan->chan_common.device_node,
-				&atdma->dma_common.channels);
+				&atdma->dma_device.channels);
 
 		atchan->ch_regs = atdma->regs + ch_regs(i);
 		spin_lock_init(&atchan->lock);
@@ -2323,49 +2323,49 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	}
 
 	/* set base routines */
-	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
-	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
-	atdma->dma_common.device_tx_status = atc_tx_status;
-	atdma->dma_common.device_issue_pending = atc_issue_pending;
-	atdma->dma_common.dev = &pdev->dev;
+	atdma->dma_device.device_alloc_chan_resources = atc_alloc_chan_resources;
+	atdma->dma_device.device_free_chan_resources = atc_free_chan_resources;
+	atdma->dma_device.device_tx_status = atc_tx_status;
+	atdma->dma_device.device_issue_pending = atc_issue_pending;
+	atdma->dma_device.dev = &pdev->dev;
 
 	/* set prep routines based on capability */
-	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
-		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
+	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_device.cap_mask))
+		atdma->dma_device.device_prep_interleaved_dma = atc_prep_dma_interleaved;
 
-	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
-		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
+	if (dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask))
+		atdma->dma_device.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
-	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
-		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
-		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
-		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
+	if (dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask)) {
+		atdma->dma_device.device_prep_dma_memset = atc_prep_dma_memset;
+		atdma->dma_device.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
+		atdma->dma_device.fill_align = DMAENGINE_ALIGN_4_BYTES;
 	}
 
-	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
-		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
+	if (dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask)) {
+		atdma->dma_device.device_prep_slave_sg = atc_prep_slave_sg;
 		/* controller can do slave DMA: can trigger cyclic transfers */
-		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
-		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-		atdma->dma_common.device_config = atc_config;
-		atdma->dma_common.device_pause = atc_pause;
-		atdma->dma_common.device_resume = atc_resume;
-		atdma->dma_common.device_terminate_all = atc_terminate_all;
-		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
-		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
-		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+		dma_cap_set(DMA_CYCLIC, atdma->dma_device.cap_mask);
+		atdma->dma_device.device_prep_dma_cyclic = atc_prep_dma_cyclic;
+		atdma->dma_device.device_config = atc_config;
+		atdma->dma_device.device_pause = atc_pause;
+		atdma->dma_device.device_resume = atc_resume;
+		atdma->dma_device.device_terminate_all = atc_terminate_all;
+		atdma->dma_device.src_addr_widths = ATC_DMA_BUSWIDTHS;
+		atdma->dma_device.dst_addr_widths = ATC_DMA_BUSWIDTHS;
+		atdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+		atdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	}
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
 	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
-		dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
-		dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
-		dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
+		dma_has_cap(DMA_MEMCPY, atdma->dma_device.cap_mask) ? "cpy " : "",
+		dma_has_cap(DMA_MEMSET, atdma->dma_device.cap_mask) ? "set " : "",
+		dma_has_cap(DMA_SLAVE, atdma->dma_device.cap_mask) ? "slave " : "",
 		plat_dat->nr_channels);
 
-	err = dma_async_device_register(&atdma->dma_common);
+	err = dma_async_device_register(&atdma->dma_device);
 	if (err) {
 		dev_err(&pdev->dev, "Unable to register: %d.\n", err);
 		goto err_dma_async_device_register;
@@ -2388,7 +2388,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	return 0;
 
 err_of_dma_controller_register:
-	dma_async_device_unregister(&atdma->dma_common);
+	dma_async_device_unregister(&atdma->dma_device);
 err_dma_async_device_register:
 	dma_pool_destroy(atdma->memset_pool);
 err_memset_pool_create:
@@ -2408,13 +2408,13 @@ static int at_dma_remove(struct platform_device *pdev)
 	at_dma_off(atdma);
 	if (pdev->dev.of_node)
 		of_dma_controller_free(pdev->dev.of_node);
-	dma_async_device_unregister(&atdma->dma_common);
+	dma_async_device_unregister(&atdma->dma_device);
 
 	dma_pool_destroy(atdma->memset_pool);
 	dma_pool_destroy(atdma->dma_desc_pool);
 	free_irq(platform_get_irq(pdev, 0), atdma);
 
-	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
 			device_node) {
 		struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 
@@ -2443,7 +2443,7 @@ static int at_dma_prepare(struct device *dev)
 	struct at_dma *atdma = dev_get_drvdata(dev);
 	struct dma_chan *chan, *_chan;
 
-	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
 			device_node) {
 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
 		/* wait for transaction completion (except in cyclic case) */
@@ -2478,7 +2478,7 @@ static int at_dma_suspend_noirq(struct device *dev)
 	struct dma_chan *chan, *_chan;
 
 	/* preserve data */
-	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
 			device_node) {
 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
 
@@ -2528,7 +2528,7 @@ static int at_dma_resume_noirq(struct device *dev)
 
 	/* restore saved data */
 	dma_writel(atdma, EBCIER, atdma->save_imr);
-	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+	list_for_each_entry_safe(chan, _chan, &atdma->dma_device.channels,
 			device_node) {
 		struct at_dma_chan *atchan = to_at_dma_chan(chan);
 
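Note on the pattern this rename preserves: to_at_dma() works because struct dma_device is embedded by value in struct at_dma, so container_of() can recover the enclosing structure from a pointer to the member by subtracting the member's offset. Below is a minimal, self-contained userspace sketch of the same idiom; the at_dma_like/private_state names are illustrative stand-ins, not taken from the driver.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the kernel's container_of(): subtract the member's
 * offset from the member's address to get the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_device { int id; };

/* Mirrors the at_hdmac layout: the generic dmaengine object is
 * embedded (not pointed to) in the driver-private structure. */
struct at_dma_like {
	struct dma_device dma_device;
	int private_state;
};

static struct at_dma_like *to_at_dma_like(struct dma_device *ddev)
{
	return container_of(ddev, struct at_dma_like, dma_device);
}

int main(void)
{
	struct at_dma_like atdma = {
		.dma_device = { .id = 7 },
		.private_state = 42,
	};
	struct dma_device *ddev = &atdma.dma_device;

	/* Recover the wrapper from the embedded member. */
	printf("%d\n", to_at_dma_like(ddev)->private_state); /* prints 42 */
	return 0;
}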