@@ -114,7 +114,7 @@
 #define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))
 
 /* HW specific definitions */
-#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x2
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20
 
 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
 		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -165,6 +165,18 @@
 #define XILINX_DMA_COALESCE_MAX		255
 #define XILINX_DMA_NUM_APP_WORDS	5
 
+/* Multi-Channel DMA Descriptor offsets */
+#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
+#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)
+
+/* Multi-Channel DMA Masks/Shifts */
+#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
+#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
+#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
+#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
+#define XILINX_DMA_BD_STRIDE_SHIFT	0
+#define XILINX_DMA_BD_VSIZE_SHIFT	19
+
 /* AXI CDMA Specific Registers/Offsets */
 #define XILINX_CDMA_REG_SRCADDR		0x18
 #define XILINX_CDMA_REG_DSTADDR		0x20
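A quick sanity check of the new per-channel offset arithmetic (illustrative only, not part of the patch): each additional S2MM channel gets its own current/tail descriptor register pair on a 0x20-byte stride starting at 0x40, while channel 0 keeps the legacy CURDESC/TAILDESC registers. Assuming a C11 toolchain, the macros can be verified at compile time:

/* Hypothetical compile-time check of the MCRX offset macros. */
_Static_assert(XILINX_DMA_MCRX_CDESC(1) == 0x40, "tdest 1 current desc");
_Static_assert(XILINX_DMA_MCRX_TDESC(1) == 0x48, "tdest 1 tail desc");
_Static_assert(XILINX_DMA_MCRX_CDESC(2) == 0x60, "tdest 2 current desc");
_Static_assert(XILINX_DMA_MCRX_TDESC(2) == 0x68, "tdest 2 tail desc");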
@@ -210,8 +222,8 @@ struct xilinx_axidma_desc_hw {
 	u32 next_desc_msb;
 	u32 buf_addr;
 	u32 buf_addr_msb;
-	u32 pad1;
-	u32 pad2;
+	u32 mcdma_control;
+	u32 vsize_stride;
 	u32 control;
 	u32 status;
 	u32 app[XILINX_DMA_NUM_APP_WORDS];
@@ -349,6 +361,7 @@ struct xilinx_dma_chan {
 	struct xilinx_axidma_tx_segment *seg_v;
 	struct xilinx_axidma_tx_segment *cyclic_seg_v;
 	void (*start_transfer)(struct xilinx_dma_chan *chan);
+	u16 tdest;
 };
 
 struct xilinx_dma_config {
@@ -365,6 +378,7 @@ struct xilinx_dma_config {
  * @common: DMA device structure
  * @chan: Driver specific DMA channel
  * @has_sg: Specifies whether Scatter-Gather is present or not
+ * @mcdma: Specifies whether Multi-Channel is present or not
  * @flush_on_fsync: Flush on frame sync
  * @ext_addr: Indicates 64 bit addressing is supported by dma device
  * @pdev: Platform device structure pointer
@@ -374,13 +388,16 @@ struct xilinx_dma_config {
  * @txs_clk: DMA mm2s stream clock
  * @rx_clk: DMA s2mm clock
  * @rxs_clk: DMA s2mm stream clock
+ * @nr_channels: Number of channels DMA device supports
+ * @chan_id: DMA channel identifier
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
 	struct device *dev;
 	struct dma_device common;
 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
 	bool has_sg;
+	bool mcdma;
 	u32 flush_on_fsync;
 	bool ext_addr;
 	struct platform_device *pdev;
@@ -390,6 +407,8 @@ struct xilinx_dma_device {
 	struct clk *txs_clk;
 	struct clk *rx_clk;
 	struct clk *rxs_clk;
+	u32 nr_channels;
+	u32 chan_id;
 };
 
 /* Macros */
@@ -1196,18 +1215,20 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_axidma_tx_segment, node);
 
-	old_head = list_first_entry(&head_desc->segments,
-				    struct xilinx_axidma_tx_segment, node);
-	new_head = chan->seg_v;
-	/* Copy Buffer Descriptor fields. */
-	new_head->hw = old_head->hw;
+	if (chan->has_sg && !chan->xdev->mcdma) {
+		old_head = list_first_entry(&head_desc->segments,
+					struct xilinx_axidma_tx_segment, node);
+		new_head = chan->seg_v;
+		/* Copy Buffer Descriptor fields. */
+		new_head->hw = old_head->hw;
 
-	/* Swap and save new reserve */
-	list_replace_init(&old_head->node, &new_head->node);
-	chan->seg_v = old_head;
+		/* Swap and save new reserve */
+		list_replace_init(&old_head->node, &new_head->node);
+		chan->seg_v = old_head;
 
-	tail_segment->hw.next_desc = chan->seg_v->phys;
-	head_desc->async_tx.phys = new_head->phys;
+		tail_segment->hw.next_desc = chan->seg_v->phys;
+		head_desc->async_tx.phys = new_head->phys;
+	}
 
 	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 
@@ -1218,23 +1239,53 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 	}
 
-	if (chan->has_sg)
+	if (chan->has_sg && !chan->xdev->mcdma)
 		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
 			     head_desc->async_tx.phys);
 
+	if (chan->has_sg && chan->xdev->mcdma) {
+		if (chan->direction == DMA_MEM_TO_DEV) {
+			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+				       head_desc->async_tx.phys);
+		} else {
+			if (!chan->tdest) {
+				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+					       head_desc->async_tx.phys);
+			} else {
+				dma_ctrl_write(chan,
+					XILINX_DMA_MCRX_CDESC(chan->tdest),
+					head_desc->async_tx.phys);
+			}
+		}
+	}
+
 	xilinx_dma_start(chan);
 
 	if (chan->err)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
+	if (chan->has_sg && !chan->xdev->mcdma) {
 		if (chan->cyclic)
 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
 				     chan->cyclic_seg_v->phys);
 		else
 			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
 				     tail_segment->phys);
+	} else if (chan->has_sg && chan->xdev->mcdma) {
+		if (chan->direction == DMA_MEM_TO_DEV) {
+			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+				       tail_segment->phys);
+		} else {
+			if (!chan->tdest) {
+				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+					       tail_segment->phys);
+			} else {
+				dma_ctrl_write(chan,
+					XILINX_DMA_MCRX_TDESC(chan->tdest),
+					tail_segment->phys);
+			}
+		}
 	} else {
 		struct xilinx_axidma_tx_segment *segment;
 		struct xilinx_axidma_desc_hw *hw;
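Both mcdma branches in this function apply the same selection rule: MM2S channels and S2MM channel 0 program the legacy CURDESC/TAILDESC registers, and only S2MM channels with a non-zero tdest fall through to the per-channel MCRX bank. A condensed restatement as helpers (a sketch for clarity, not code from the patch):

/* Hypothetical helpers mirroring the S2MM register selection above. */
static inline u32 mcdma_s2mm_cdesc_reg(u16 tdest)
{
	return tdest ? XILINX_DMA_MCRX_CDESC(tdest) : XILINX_DMA_REG_CURDESC;
}

static inline u32 mcdma_s2mm_tdesc_reg(u16 tdest)
{
	return tdest ? XILINX_DMA_MCRX_TDESC(tdest) : XILINX_DMA_REG_TAILDESC;
}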
@@ -1861,6 +1912,90 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 	return NULL;
 }
 
+/**
+ * xilinx_dma_prep_interleaved - prepare a descriptor for a
+ *	DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @xt: Interleaved template pointer
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_dma_prep_interleaved(struct dma_chan *dchan,
+			    struct dma_interleaved_template *xt,
+			    unsigned long flags)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_axidma_tx_segment *segment;
+	struct xilinx_axidma_desc_hw *hw;
+
+	if (!is_slave_direction(xt->dir))
+		return NULL;
+
+	if (!xt->numf || !xt->sgl[0].size)
+		return NULL;
+
+	if (xt->frame_size != 1)
+		return NULL;
+
+	/* Allocate a transaction descriptor. */
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	chan->direction = xt->dir;
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	/* Get a free segment */
+	segment = xilinx_axidma_alloc_tx_segment(chan);
+	if (!segment)
+		goto error;
+
+	hw = &segment->hw;
+
+	/* Fill in the descriptor */
+	if (xt->dir != DMA_MEM_TO_DEV)
+		hw->buf_addr = xt->dst_start;
+	else
+		hw->buf_addr = xt->src_start;
+
+	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
+	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
+			    XILINX_DMA_BD_VSIZE_MASK;
+	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
+			    XILINX_DMA_BD_STRIDE_MASK;
+	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
+
+	/*
+	 * Insert the segment into the descriptor segments
+	 * list.
+	 */
+	list_add_tail(&segment->node, &desc->segments);
+
+	segment = list_first_entry(&desc->segments,
+				   struct xilinx_axidma_tx_segment, node);
+	desc->async_tx.phys = segment->phys;
+
+	/* For DMA_MEM_TO_DEV, set SOP on the first BD and EOP on the last */
+	if (xt->dir == DMA_MEM_TO_DEV) {
+		segment->hw.control |= XILINX_DMA_BD_SOP;
+		segment = list_last_entry(&desc->segments,
+					  struct xilinx_axidma_tx_segment,
+					  node);
+		segment->hw.control |= XILINX_DMA_BD_EOP;
+	}
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
 /**
  * xilinx_dma_terminate_all - Halt the channel and free descriptors
  * @chan: Driver specific DMA Channel pointer
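To see what the new callback packs into the hardware descriptor, consider a client submitting a 2-D transfer through the standard dmaengine interleaved API. With numf = 480 lines of sgl[0].size = 640 bytes and an inter-line gap of sgl[0].icg = 384, the code above yields hw->control = 640 (HSIZE) and hw->vsize_stride = (480 << 19) | 1024 (VSIZE and STRIDE). A minimal client-side sketch follows (demo_submit() and its sizes are invented for illustration; the dmaengine calls themselves are standard):

/* Hypothetical client; assumes chan was obtained via dma_request_chan(). */
static int demo_submit(struct dma_chan *chan, dma_addr_t buf)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *txd;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_DEV;
	xt->src_start = buf;
	xt->numf = 480;		/* lines per frame -> VSIZE */
	xt->frame_size = 1;	/* the prep callback rejects anything else */
	xt->sgl[0].size = 640;	/* bytes per line -> HSIZE */
	xt->sgl[0].icg = 384;	/* gap; icg + size -> 1024-byte STRIDE */

	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}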
@@ -2176,7 +2311,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
  * Return: '0' on success and failure value on error
  */
 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
-				  struct device_node *node)
+				  struct device_node *node, int chan_id)
 {
 	struct xilinx_dma_chan *chan;
 	bool has_dre = false;
@@ -2220,7 +2355,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 
 	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
 		chan->direction = DMA_MEM_TO_DEV;
-		chan->id = 0;
+		chan->id = chan_id;
+		chan->tdest = chan_id;
 
 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2233,7 +2369,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	} else if (of_device_is_compatible(node,
 					   "xlnx,axi-vdma-s2mm-channel")) {
 		chan->direction = DMA_DEV_TO_MEM;
-		chan->id = 1;
+		chan->id = chan_id;
+		chan->tdest = chan_id - xdev->nr_channels;
 
 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
 		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2287,6 +2424,32 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	return 0;
 }
 
+/**
+ * xilinx_dma_child_probe - Per child node probe
+ * It gets the number of dma-channels per child node from
+ * the device tree and initializes all the channels.
+ *
+ * @xdev: Driver specific device structure
+ * @node: Device node
+ *
+ * Return: 0 always.
+ */
+static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
+				  struct device_node *node)
+{
+	int ret, i, nr_channels = 1;
+
+	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
+	if ((ret < 0) && xdev->mcdma)
+		dev_warn(xdev->dev, "missing dma-channels property\n");
+
+	for (i = 0; i < nr_channels; i++)
+		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
+
+	xdev->nr_channels += nr_channels;
+
+	return 0;
+}
+
 /**
  * of_dma_xilinx_xlate - Translation function
  * @dma_spec: Pointer to DMA specifier as found in the device tree
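The interplay of chan_id, tdest, and nr_channels is easiest to see with concrete numbers. The standalone sketch below (plain userspace C, assuming a hypothetical layout of one MM2S child node followed by one S2MM child node, each with dma-channels = <2>) reproduces the numbering the two probe functions assign: ids 0,1 with tdest 0,1 for MM2S, then ids 2,3 with tdest 0,1 for S2MM, because xdev->nr_channels is bumped only after each node's loop finishes.

#include <stdio.h>

int main(void)
{
	unsigned int chan_id = 0;	/* stands in for xdev->chan_id */
	unsigned int nr_channels = 0;	/* stands in for xdev->nr_channels */
	unsigned int per_node = 2, node, i;

	for (node = 0; node < 2; node++) {	/* node 0 = mm2s, node 1 = s2mm */
		for (i = 0; i < per_node; i++, chan_id++)
			printf("%s: id=%u tdest=%u\n",
			       node ? "s2mm" : "mm2s", chan_id,
			       node ? chan_id - nr_channels : chan_id);
		nr_channels += per_node;	/* updated after the node's channels */
	}
	return 0;
}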
@@ -2300,7 +2463,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
 	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
 	int chan_id = dma_spec->args[0];
 
-	if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
+	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
 		return NULL;
 
 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
@@ -2376,6 +2539,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
 	/* Retrieve the DMA engine properties from the device tree */
 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		err = of_property_read_u32(node, "xlnx,num-fstores",
@@ -2426,6 +2591,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
 		xdev->common.device_prep_dma_cyclic =
 					  xilinx_dma_prep_dma_cyclic;
+		xdev->common.device_prep_interleaved_dma =
+					  xilinx_dma_prep_interleaved;
 		/* Residue calculation is supported by only AXI DMA */
 		xdev->common.residue_granularity =
 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
@@ -2441,13 +2608,13 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 
 	/* Initialize the channels */
 	for_each_child_of_node(node, child) {
-		err = xilinx_dma_chan_probe(xdev, child);
+		err = xilinx_dma_child_probe(xdev, child);
 		if (err < 0)
 			goto disable_clks;
 	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
-		for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
+		for (i = 0; i < xdev->nr_channels; i++)
 			if (xdev->chan[i])
 				xdev->chan[i]->num_frms = num_frames;
 	}
@@ -2470,7 +2637,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 disable_clks:
 	xdma_disable_allclks(xdev);
 error:
-	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
+	for (i = 0; i < xdev->nr_channels; i++)
 		if (xdev->chan[i])
 			xilinx_dma_chan_remove(xdev->chan[i]);
@@ -2492,7 +2659,7 @@ static int xilinx_dma_remove(struct platform_device *pdev)
 
 	dma_async_device_unregister(&xdev->common);
 
-	for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
+	for (i = 0; i < xdev->nr_channels; i++)
 		if (xdev->chan[i])
 			xilinx_dma_chan_remove(xdev->chan[i]);
 