 #include "ionic_lif.h"
 #include "ionic_txrx.h"
 
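+/* Forward declarations: the XDP_TX path reuses the driver's
+ * existing Tx mapping, flow-control, and completion helpers
+ * defined later in this file.
+ */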
+static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs);
+
+static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+				      void *data, size_t len);
+
+static void ionic_tx_clean(struct ionic_queue *q,
+			   struct ionic_desc_info *desc_info,
+			   struct ionic_cq_info *cq_info,
+			   void *cb_arg);
+
 static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
 				  ionic_desc_cb cb_func, void *cb_arg)
 {
@@ -297,6 +307,75 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 	return skb;
 }
 
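+/* Undo ionic_xdp_post_frame(): unmap the Tx buffer, free the page
+ * that was recycled from the Rx ring, and clear the descriptor's
+ * XDP state for reuse.
+ */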
+static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
+				    struct ionic_desc_info *desc_info)
+{
+	unsigned int nbufs = desc_info->nbufs;
+	struct ionic_buf_info *buf_info;
+	struct device *dev = q->dev;
+
+	if (!nbufs)
+		return;
+
+	buf_info = desc_info->bufs;
+	dma_unmap_single(dev, buf_info->dma_addr,
+			 buf_info->len, DMA_TO_DEVICE);
+	__free_pages(buf_info->page, 0);
+	buf_info->page = NULL;
+
+	desc_info->nbufs = 0;
+	desc_info->xdpf = NULL;
+	desc_info->act = 0;
+}
+
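+/* Map an XDP frame and post it on the Tx queue as a single
+ * checksum-none descriptor; ring_doorbell lets the caller decide
+ * when the hardware is notified.  Called with the netdev Tx queue
+ * lock held.
+ */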
+static int ionic_xdp_post_frame(struct net_device *netdev,
+				struct ionic_queue *q, struct xdp_frame *frame,
+				enum xdp_action act, struct page *page, int off,
+				bool ring_doorbell)
+{
+	struct ionic_desc_info *desc_info;
+	struct ionic_buf_info *buf_info;
+	struct ionic_tx_stats *stats;
+	struct ionic_txq_desc *desc;
+	size_t len = frame->len;
+	dma_addr_t dma_addr;
+	u64 cmd;
+
+	desc_info = &q->info[q->head_idx];
+	desc = desc_info->txq_desc;
+	buf_info = desc_info->bufs;
+	stats = q_to_tx_stats(q);
+
+	dma_addr = ionic_tx_map_single(q, frame->data, len);
+	if (dma_mapping_error(q->dev, dma_addr)) {
+		stats->dma_map_err++;
+		return -EIO;
+	}
+	buf_info->dma_addr = dma_addr;
+	buf_info->len = len;
+	buf_info->page = page;
+	buf_info->page_offset = off;
+
+	desc_info->nbufs = 1;
+	desc_info->xdpf = frame;
+	desc_info->act = act;
+
+	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
+				  0, 0, buf_info->dma_addr);
+	desc->cmd = cpu_to_le64(cmd);
+	desc->len = cpu_to_le16(len);
+	desc->csum_start = 0;
+	desc->csum_offset = 0;
+
+	stats->xdp_frames++;
+	stats->pkts++;
+	stats->bytes += len;
+
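+	/* Post with a NULL cb_arg: for XDP descriptors ionic_tx_clean()
+	 * keys off desc_info->xdpf rather than an skb.
+	 */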
+	ionic_txq_post(q, ring_doorbell, ionic_tx_clean, NULL);
+
+	return 0;
+}
+
 static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 			  struct net_device *netdev,
 			  struct bpf_prog *xdp_prog,
@@ -306,6 +385,10 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 {
 	u32 xdp_action = XDP_ABORTED;
 	struct xdp_buff xdp_buf;
+	struct ionic_queue *txq;
+	struct netdev_queue *nq;
+	struct xdp_frame *xdpf;
+	int err = 0;
 
 	xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
 	xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
@@ -330,14 +413,51 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 		break;
 
 	case XDP_TX:
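+		/* Hand the frame to the Rx queue's partner Tx queue,
+		 * under the netdev Tx queue lock, bailing out if the
+		 * Tx ring has no room for another descriptor.
+		 */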
+		xdpf = xdp_convert_buff_to_frame(&xdp_buf);
+		if (!xdpf)
+			goto out_xdp_abort;
+
+		txq = rxq->partner;
+		nq = netdev_get_tx_queue(netdev, txq->index);
+		__netif_tx_lock(nq, smp_processor_id());
+		txq_trans_cond_update(nq);
+
+		if (netif_tx_queue_stopped(nq) ||
+		    unlikely(ionic_maybe_stop_tx(txq, 1))) {
+			__netif_tx_unlock(nq);
+			goto out_xdp_abort;
+		}
+
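+		/* Unmap the Rx page from the Rx ring: it now travels
+		 * with the Tx descriptor and is freed at Tx completion.
+		 */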
+		dma_unmap_page(rxq->dev, buf_info->dma_addr,
+			       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+
+		err = ionic_xdp_post_frame(netdev, txq, xdpf, XDP_TX,
+					   buf_info->page,
+					   buf_info->page_offset,
+					   true);
+		__netif_tx_unlock(nq);
+		if (err) {
+			netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
+			goto out_xdp_abort;
+		}
+		stats->xdp_tx++;
+
+		/* the Tx completion will free the buffers */
+		break;
+
 	case XDP_REDIRECT:
 	case XDP_ABORTED:
 	default:
-		trace_xdp_exception(netdev, xdp_prog, xdp_action);
-		ionic_rx_page_free(rxq, buf_info);
-		stats->xdp_aborted++;
+		goto out_xdp_abort;
 	}
 
+	return true;
+
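+	/* All abort paths funnel here: record the XDP exception and
+	 * free the Rx page, since no Tx completion will do it.
+	 */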
+out_xdp_abort:
+	trace_xdp_exception(netdev, xdp_prog, xdp_action);
+	ionic_rx_page_free(rxq, buf_info);
+	stats->xdp_aborted++;
+
 	return true;
 }
 
@@ -893,6 +1013,16 @@ static void ionic_tx_clean(struct ionic_queue *q,
 	struct sk_buff *skb = cb_arg;
 	u16 qi;
 
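+	/* XDP_TX descriptors carry an xdp_frame rather than an skb:
+	 * clean them with the XDP helper and wake the subqueue if it
+	 * was stopped while the ring was full.
+	 */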
+	if (desc_info->xdpf) {
+		ionic_xdp_tx_desc_clean(q->partner, desc_info);
+		stats->clean++;
+
+		if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index)))
+			netif_wake_subqueue(q->lif->netdev, q->index);
+
+		return;
+	}
+
 	ionic_tx_desc_unmap_bufs(q, desc_info);
 
 	if (!skb)