@@ -98,6 +98,7 @@ static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
  */
 static void xhci_link_segments(struct xhci_segment *prev,
 			       struct xhci_segment *next,
+			       unsigned int trbs_per_seg,
 			       enum xhci_ring_type type, bool chain_links)
 {
 	u32 val;
@@ -106,16 +107,16 @@ static void xhci_link_segments(struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (type != TYPE_EVENT) {
-		prev->trbs[TRBS_PER_SEGMENT - 1].link.segment_ptr =
+		prev->trbs[trbs_per_seg - 1].link.segment_ptr =
 			cpu_to_le64(next->dma);
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
-		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT - 1].link.control);
+		val = le32_to_cpu(prev->trbs[trbs_per_seg - 1].link.control);
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		if (chain_links)
 			val |= TRB_CHAIN;
-		prev->trbs[TRBS_PER_SEGMENT - 1].link.control = cpu_to_le32(val);
+		prev->trbs[trbs_per_seg - 1].link.control = cpu_to_le32(val);
 	}
 }
 
@@ -139,15 +140,17 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
 			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
 
 	next = ring->enq_seg->next;
-	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
-	xhci_link_segments(last, next, ring->type, chain_links);
+	xhci_link_segments(ring->enq_seg, first, ring->trbs_per_seg,
+			   ring->type, chain_links);
+	xhci_link_segments(last, next, ring->trbs_per_seg,
+			   ring->type, chain_links);
 	ring->num_segs += num_segs;
-	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
+	ring->num_trbs_free += (ring->trbs_per_seg - 1) * num_segs;
 
 	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
-		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control
+		ring->last_seg->trbs[ring->trbs_per_seg - 1].link.control
 			&= ~cpu_to_le32(LINK_TOGGLE);
-		last->trbs[TRBS_PER_SEGMENT - 1].link.control
+		last->trbs[ring->trbs_per_seg - 1].link.control
 			|= cpu_to_le32(LINK_TOGGLE);
 		ring->last_seg = last;
 	}
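
The ring->trbs_per_seg dereferences above rely on a field that xhci-mem.c does not declare, so the patch assumes a companion change to struct xhci_ring in drivers/usb/host/xhci.h. A minimal sketch of that assumed declaration (neighboring members abridged; placement is illustrative, not the actual layout):

struct xhci_segment;

struct xhci_ring {
	struct xhci_segment	*first_seg;
	struct xhci_segment	*last_seg;
	unsigned int		num_segs;
	unsigned int		num_trbs_free;
	unsigned int		trbs_per_seg;	/* new: TRBs per segment for this ring */
	/* ... remaining members as in xhci.h ... */
};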
@@ -314,14 +317,15 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
 	 * Each segment has a link TRB, and leave an extra TRB for SW
 	 * accounting purpose
 	 */
-	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+	ring->num_trbs_free = ring->num_segs * (ring->trbs_per_seg - 1) - 1;
 }
 
 /* Allocate segments and link them for a ring */
 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 		struct xhci_segment **first, struct xhci_segment **last,
-		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+		unsigned int num_segs, unsigned int trbs_per_seg,
+		unsigned int cycle_state, enum xhci_ring_type type,
+		unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_segment *prev;
 	bool chain_links;
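
Worked example for the initialization above: with the mainline default of 256 TRBs per segment, a freshly initialized two-segment ring gets 2 * (256 - 1) - 1 = 509 free TRBs, since the last TRB of each segment is a Link TRB and one extra TRB is held back for the software accounting the comment mentions.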
@@ -350,12 +354,12 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 			}
 			return -ENOMEM;
 		}
-		xhci_link_segments(prev, next, type, chain_links);
+		xhci_link_segments(prev, next, trbs_per_seg, type, chain_links);
 
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(prev, *first, type, chain_links);
+	xhci_link_segments(prev, *first, trbs_per_seg, type, chain_links);
 	*last = prev;
 
 	return 0;
@@ -387,16 +391,17 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	if (num_segs == 0)
 		return ring;
 
+	ring->trbs_per_seg = TRBS_PER_SEGMENT;
 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
-			&ring->last_seg, num_segs, cycle_state, type,
-			max_packet, flags);
+			&ring->last_seg, num_segs, ring->trbs_per_seg,
+			cycle_state, type, max_packet, flags);
 	if (ret)
 		goto fail;
 
 	/* Only event ring does not use link TRB */
 	if (type != TYPE_EVENT) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
-		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+		ring->last_seg->trbs[ring->trbs_per_seg - 1].link.control |=
 			cpu_to_le32(LINK_TOGGLE);
 	}
 	xhci_initialize_ring_info(ring, cycle_state);
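
Note that xhci_ring_alloc() still seeds every ring with the compile-time TRBS_PER_SEGMENT, so this patch is behavior-neutral on its own; the per-ring field is the hook that lets later code give particular rings a different segment size.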
@@ -429,16 +434,15 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	unsigned int		num_segs_needed;
 	int			ret;
 
-	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
-				(TRBS_PER_SEGMENT - 1);
-
+	num_segs_needed = (num_trbs + (ring->trbs_per_seg - 1) - 1) /
+				(ring->trbs_per_seg - 1);
 	/* Allocate number of segments we needed, or double the ring size */
 	num_segs = ring->num_segs > num_segs_needed ?
 			ring->num_segs : num_segs_needed;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
-			num_segs, ring->cycle_state, ring->type,
-			ring->bounce_buf_len, flags);
+			num_segs, ring->trbs_per_seg, ring->cycle_state,
+			ring->type, ring->bounce_buf_len, flags);
 	if (ret)
 		return -ENOMEM;
 
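
A self-contained sketch of the ceiling division that xhci_ring_expansion() now performs per ring (userspace C with hypothetical values; mainline defines TRBS_PER_SEGMENT as 256):

#include <assert.h>
#include <stdio.h>

/* Segments needed to hold num_trbs, written out long-hand as in the
 * patch: each segment contributes trbs_per_seg - 1 usable TRBs because
 * its last TRB is a Link TRB. */
static unsigned int segs_needed(unsigned int num_trbs, unsigned int trbs_per_seg)
{
	return (num_trbs + (trbs_per_seg - 1) - 1) / (trbs_per_seg - 1);
}

int main(void)
{
	/* 600 TRBs at the default segment size: 255 usable TRBs per
	 * segment, so three segments are required. */
	assert(segs_needed(600, 256) == 3);

	/* A hypothetical ring shrunk to 32-TRB segments needs many more
	 * segments for the same request. */
	assert(segs_needed(600, 32) == 20);

	printf("ok\n");
	return 0;
}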
@@ -1825,7 +1829,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
 	for (val = 0; val < evt_ring->num_segs; val++) {
 		entry = &erst->entries[val];
 		entry->seg_addr = cpu_to_le64(seg->dma);
-		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+		entry->seg_size = cpu_to_le32(evt_ring->trbs_per_seg);
 		entry->rsvd = 0;
 		seg = seg->next;
 	}
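
Finally, each ERST entry now advertises the event ring's actual segment size rather than the compile-time constant; the controller walks an event ring segment for exactly seg_size TRBs, so the value must match what the ring was allocated with.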