 #define CMDR_SIZE (MB_CMDR_SIZE - CMDR_OFF)
 
 /*
- * For data area, the block size is PAGE_SIZE and
- * the total size is 256K * PAGE_SIZE.
+ * For data area, the default block size is PAGE_SIZE and
+ * the default total size is 256K * PAGE_SIZE.
  */
-#define DATA_BLOCK_SIZE PAGE_SIZE
 #define DATA_PAGES_PER_BLK 1
-#define DATA_BLOCK_BITS_DEF (256 * 1024)
+#define DATA_BLOCK_SIZE (DATA_PAGES_PER_BLK * PAGE_SIZE)
+#define DATA_AREA_PAGES_DEF (256 * 1024)
 
-#define TCMU_MBS_TO_PAGES(_mbs)	(_mbs << (20 - PAGE_SHIFT))
+#define TCMU_MBS_TO_PAGES(_mbs)	((size_t)_mbs << (20 - PAGE_SHIFT))
 #define TCMU_PAGES_TO_MBS(_pages)	(_pages >> (20 - PAGE_SHIFT))
 
 /*
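Note: the shift count (20 - PAGE_SHIFT) converts between MiB and pages, and the new (size_t) cast in TCMU_MBS_TO_PAGES presumably keeps later byte-size arithmetic (such as TCMU_MBS_TO_PAGES(...) << PAGE_SHIFT) out of 32-bit int overflow. A quick sanity check of the arithmetic, as an illustration only (not part of the patch) and assuming the common 4 KiB PAGE_SIZE (PAGE_SHIFT == 12):

	static_assert(TCMU_MBS_TO_PAGES(1024) == 256 * 1024);		/* 1024 MiB is 256K pages */
	static_assert(TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF) == 1024);	/* default data area is 1 GiB */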
@@ -138,7 +138,7 @@ struct tcmu_dev {
 	/* Offset of data area from start of mb */
 	/* Must add data_off and mb_addr to get the address */
 	size_t data_off;
-	size_t data_size;
+	int data_area_mb;
 	uint32_t max_blocks;
 	size_t mmap_pages;
 
@@ -501,31 +501,39 @@ static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
 
 static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
 				       struct tcmu_cmd *tcmu_cmd,
-				       int prev_dbi, int *iov_cnt)
+				       int prev_dbi, int length, int *iov_cnt)
 {
+	XA_STATE(xas, &udev->data_pages, 0);
 	struct page *page;
-	int dbi;
+	int i, cnt, dbi;
+	int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);
 
 	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
 	if (dbi == udev->dbi_thresh)
 		return -1;
 
-	page = xa_load(&udev->data_pages, dbi);
-	if (!page) {
-		if (atomic_add_return(1, &global_page_count) >
-				      tcmu_global_max_pages)
-			schedule_delayed_work(&tcmu_unmap_work, 0);
+	/* Count the number of already allocated pages */
+	xas_set(&xas, dbi * DATA_PAGES_PER_BLK);
+	for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
+		cnt++;
 
+	for (i = cnt; i < page_cnt; i++) {
 		/* try to get new page from the mm */
 		page = alloc_page(GFP_NOIO);
 		if (!page)
-			goto err_alloc;
+			break;
 
-		if (xa_store(&udev->data_pages, dbi, page, GFP_NOIO))
-			goto err_insert;
+		if (xa_store(&udev->data_pages, dbi * DATA_PAGES_PER_BLK + i,
+			     page, GFP_NOIO)) {
+			__free_page(page);
+			break;
+		}
 	}
+	if (atomic_add_return(i - cnt, &global_page_count) >
+			      tcmu_global_max_pages)
+		schedule_delayed_work(&tcmu_unmap_work, 0);
 
-	if (dbi > udev->dbi_max)
+	if (i && dbi > udev->dbi_max)
 		udev->dbi_max = dbi;
 
 	set_bit(dbi, udev->data_bitmap);
@@ -534,23 +542,19 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
 	if (dbi != prev_dbi + 1)
 		*iov_cnt += 1;
 
-	return dbi;
-err_insert:
-	__free_page(page);
-err_alloc:
-	atomic_dec(&global_page_count);
-	return -1;
+	return i == page_cnt ? dbi : -1;
 }
 
 static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
-				 struct tcmu_cmd *tcmu_cmd, int dbi_cnt)
+				 struct tcmu_cmd *tcmu_cmd, int length)
 {
 	/* start value of dbi + 1 must not be a valid dbi */
 	int dbi = -2;
-	int i, iov_cnt = 0;
+	int blk_len, iov_cnt = 0;
 
-	for (i = 0; i < dbi_cnt; i++) {
-		dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt);
+	for (; length > 0; length -= DATA_BLOCK_SIZE) {
+		blk_len = min_t(int, length, DATA_BLOCK_SIZE);
+		dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_len, &iov_cnt);
 		if (dbi < 0)
 			return -1;
 	}
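With multi-page blocks, data block dbi owns DATA_PAGES_PER_BLK consecutive slots in udev->data_pages, and tcmu_get_empty_block() only reports success when every page needed for the requested length could be allocated (partially filled blocks stay in the xarray for a later retry or for cleanup). A minimal sketch of that index mapping, using a hypothetical helper that is not part of the patch:

	/* Hypothetical helper (illustration only): xarray slot of page 'i' within data block 'dbi'. */
	static inline unsigned long tcmu_dbi_to_page_index(unsigned long dbi, unsigned int i)
	{
		return dbi * DATA_PAGES_PER_BLK + i;
	}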
@@ -698,9 +702,11 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
 				  struct scatterlist *sg, unsigned int sg_nents,
 				  struct iovec **iov, size_t data_len)
 {
+	XA_STATE(xas, &udev->data_pages, 0);
 	/* start value of dbi + 1 must not be a valid dbi */
 	int dbi = -2;
-	size_t block_remaining, cp_len;
+	size_t page_remaining, cp_len;
+	int page_cnt, page_inx;
 	struct sg_mapping_iter sg_iter;
 	unsigned int sg_flags;
 	struct page *page;
@@ -718,37 +724,48 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
 					       data_len);
 		else
 			dbi = tcmu_cmd_get_dbi(tcmu_cmd);
-		page = tcmu_get_block_page(udev, dbi);
-		if (direction == TCMU_DATA_AREA_TO_SG)
-			flush_dcache_page(page);
-		data_page_start = kmap_atomic(page);
-		block_remaining = DATA_BLOCK_SIZE;
-
-		while (block_remaining && data_len) {
-			if (!sg_miter_next(&sg_iter)) {
-				/* set length to 0 to abort outer loop */
-				data_len = 0;
-				pr_debug("tcmu_move_data: aborting data copy due to exhausted sg_list\n");
-				break;
+
+		page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
+		if (page_cnt > DATA_PAGES_PER_BLK)
+			page_cnt = DATA_PAGES_PER_BLK;
+
+		xas_set(&xas, dbi * DATA_PAGES_PER_BLK);
+		for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) {
+			page = xas_next(&xas);
+
+			if (direction == TCMU_DATA_AREA_TO_SG)
+				flush_dcache_page(page);
+			data_page_start = kmap_atomic(page);
+			page_remaining = PAGE_SIZE;
+
+			while (page_remaining && data_len) {
+				if (!sg_miter_next(&sg_iter)) {
+					/* set length to 0 to abort outer loop */
+					data_len = 0;
+					pr_debug("%s: aborting data copy due to exhausted sg_list\n",
+						 __func__);
+					break;
+				}
+				cp_len = min3(sg_iter.length, page_remaining,
+					      data_len);
+
+				data_addr = data_page_start +
+					    PAGE_SIZE - page_remaining;
+				if (direction == TCMU_SG_TO_DATA_AREA)
+					memcpy(data_addr, sg_iter.addr, cp_len);
+				else
+					memcpy(sg_iter.addr, data_addr, cp_len);
+
+				data_len -= cp_len;
+				page_remaining -= cp_len;
+				sg_iter.consumed = cp_len;
 			}
-			cp_len = min3(sg_iter.length, block_remaining, data_len);
+			sg_miter_stop(&sg_iter);
 
-			data_addr = data_page_start +
-					DATA_BLOCK_SIZE - block_remaining;
+			kunmap_atomic(data_page_start);
 			if (direction == TCMU_SG_TO_DATA_AREA)
-				memcpy(data_addr, sg_iter.addr, cp_len);
-			else
-				memcpy(sg_iter.addr, data_addr, cp_len);
-
-			data_len -= cp_len;
-			block_remaining -= cp_len;
-			sg_iter.consumed = cp_len;
+				flush_dcache_page(page);
 		}
-		sg_miter_stop(&sg_iter);
-
-		kunmap_atomic(data_page_start);
-		if (direction == TCMU_SG_TO_DATA_AREA)
-			flush_dcache_page(page);
 	}
 }
 
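The copy loop now kmaps one page at a time instead of treating a whole block as a single page. A worked example of the per-block page count, as an illustration only and assuming 4 KiB pages with DATA_PAGES_PER_BLK == 4:

	/*
	 * data_len == 9 KiB: page_cnt = DIV_ROUND_UP(9 KiB, 4 KiB) = 3 (capped at DATA_PAGES_PER_BLK),
	 * so the loop maps pages 0..2 of the block and copies 4 KiB + 4 KiB + 1 KiB before moving
	 * on to the next data block.
	 */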
@@ -858,13 +875,12 @@ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 		udev->dbi_thresh = udev->max_blocks;
 	}
 
-	iov_cnt = tcmu_get_empty_blocks(udev, cmd,
-					cmd->dbi_cnt - cmd->dbi_bidi_cnt);
+	iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
 	if (iov_cnt < 0)
 		return -1;
 
 	if (cmd->dbi_bidi_cnt) {
-		ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt);
+		ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
 		if (ret < 0)
 			return -1;
 	}
@@ -1020,9 +1036,9 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
 	if (!list_empty(&udev->qfull_queue))
 		goto queue;
 
-	if (data_length > udev->data_size) {
+	if (data_length > udev->max_blocks * DATA_BLOCK_SIZE) {
 		pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
-			data_length, udev->data_size);
+			data_length, udev->max_blocks * DATA_BLOCK_SIZE);
 		*scsi_err = TCM_INVALID_CDB_FIELD;
 		return -1;
 	}
@@ -1570,7 +1586,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	udev->cmd_time_out = TCMU_TIME_OUT;
 	udev->qfull_time_out = -1;
 
-	udev->max_blocks = DATA_BLOCK_BITS_DEF;
+	udev->max_blocks = DATA_AREA_PAGES_DEF / DATA_PAGES_PER_BLK;
+	udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
 	mutex_init(&udev->cmdr_lock);
 
 	INIT_LIST_HEAD(&udev->node);
@@ -1607,19 +1624,24 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 	return -EINVAL;
 }
 
-static void tcmu_blocks_release(struct xarray *blocks, unsigned long first,
+static u32 tcmu_blocks_release(struct xarray *blocks, unsigned long first,
 				unsigned long last)
 {
-	XA_STATE(xas, blocks, first);
+	XA_STATE(xas, blocks, first * DATA_PAGES_PER_BLK);
 	struct page *page;
+	u32 pages_freed = 0;
 
 	xas_lock(&xas);
-	xas_for_each(&xas, page, last) {
+	xas_for_each(&xas, page, (last + 1) * DATA_PAGES_PER_BLK - 1) {
 		xas_store(&xas, NULL);
 		__free_page(page);
-		atomic_dec(&global_page_count);
+		pages_freed++;
 	}
 	xas_unlock(&xas);
+
+	atomic_sub(pages_freed, &global_page_count);
+
+	return pages_freed;
 }
 
 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
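tcmu_blocks_release() now frees all pages belonging to a range of blocks and reports how many it actually freed, so the caller can drop global_page_count with a single atomic_sub(). A worked example of the page range, as an illustration only:

	/*
	 * With DATA_PAGES_PER_BLK == 4, releasing blocks first == 2 .. last == 3 walks
	 * xarray slots 2 * 4 = 8 through (3 + 1) * 4 - 1 = 15, freeing at most 8 pages.
	 */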
@@ -2086,6 +2108,7 @@ static int tcmu_configure_device(struct se_device *dev)
 	struct tcmu_dev *udev = TCMU_DEV(dev);
 	struct uio_info *info;
 	struct tcmu_mailbox *mb;
+	size_t data_size;
 	int ret = 0;
 
 	ret = tcmu_update_uio_info(udev);
@@ -2113,8 +2136,8 @@ static int tcmu_configure_device(struct se_device *dev)
 	udev->cmdr = (void *)mb + CMDR_OFF;
 	udev->cmdr_size = CMDR_SIZE;
 	udev->data_off = MB_CMDR_SIZE;
-	udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
-	udev->mmap_pages = (udev->data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
+	data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
+	udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
 	udev->dbi_thresh = 0; /* Default in Idle state */
 
 	/* Initialise the mailbox of the ring buffer */
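The mapped size is now derived from the configured data_area_mb rather than from max_blocks. A worked example, as an illustration only and assuming the 1024 MiB default with 4 KiB pages:

	/*
	 * data_size  = TCMU_MBS_TO_PAGES(1024) << 12 = 262144 << 12 = 1 GiB;
	 * mmap_pages = (data_size + MB_CMDR_SIZE) >> 12, i.e. the mailbox/command-ring
	 *              pages followed by the 1 GiB data area.
	 */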
@@ -2126,14 +2149,13 @@ static int tcmu_configure_device(struct se_device *dev)
 	mb->cmdr_size = udev->cmdr_size;
 
 	WARN_ON(!PAGE_ALIGNED(udev->data_off));
-	WARN_ON(udev->data_size % PAGE_SIZE);
-	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
+	WARN_ON(data_size % PAGE_SIZE);
 
 	info->version = __stringify(TCMU_MAILBOX_VERSION);
 
 	info->mem[0].name = "tcm-user command & data buffer";
 	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
-	info->mem[0].size = udev->data_size + MB_CMDR_SIZE;
+	info->mem[0].size = data_size + MB_CMDR_SIZE;
 	info->mem[0].memtype = UIO_MEM_NONE;
 
 	info->irqcontrol = tcmu_irqcontrol;
@@ -2343,20 +2365,28 @@ static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
 
 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
 {
-	int val, ret, blks;
+	int val, ret;
 
 	ret = match_int(arg, &val);
 	if (ret < 0) {
 		pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
 		       ret);
 		return ret;
 	}
-
-	blks = TCMU_MBS_TO_PAGES(val) / DATA_PAGES_PER_BLK;
-	if (blks <= 0) {
+	if (val <= 0) {
 		pr_err("Invalid max_data_area %d.\n", val);
 		return -EINVAL;
 	}
+	if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
+		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+		       val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
+		val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
+	}
+	if (TCMU_MBS_TO_PAGES(val) < DATA_PAGES_PER_BLK) {
+		pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%d pages).\n",
+		       val, TCMU_MBS_TO_PAGES(val), DATA_PAGES_PER_BLK);
+		return -EINVAL;
+	}
 
 	mutex_lock(&udev->cmdr_lock);
 	if (udev->data_bitmap) {
@@ -2365,12 +2395,8 @@ static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
 		goto unlock;
 	}
 
-	udev->max_blocks = blks;
-	if (udev->max_blocks * DATA_PAGES_PER_BLK > tcmu_global_max_pages) {
-		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
-		       val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
-		udev->max_blocks = tcmu_global_max_pages / DATA_PAGES_PER_BLK;
-	}
+	udev->data_area_mb = val;
+	udev->max_blocks = TCMU_MBS_TO_PAGES(val) / DATA_PAGES_PER_BLK;
 
 unlock:
 	mutex_unlock(&udev->cmdr_lock);
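The max_data_area_mb parameter is now validated in MiB before any block math: non-positive values are rejected, oversized values are clamped to the global page limit, and values smaller than one data block are rejected. A worked example, as an illustration only and assuming 4 KiB pages with DATA_PAGES_PER_BLK == 1:

	/*
	 * max_data_area_mb=8: TCMU_MBS_TO_PAGES(8) = 2048 pages, so udev->data_area_mb = 8
	 * and udev->max_blocks = 2048 / 1 = 2048 blocks; a value above
	 * TCMU_PAGES_TO_MBS(tcmu_global_max_pages) would be clamped to that limit.
	 */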
@@ -2448,8 +2474,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
 	bl = sprintf(b + bl, "Config: %s ",
 		     udev->dev_config[0] ? udev->dev_config : "NULL");
 	bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
-	bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
-		      TCMU_PAGES_TO_MBS(udev->max_blocks * DATA_PAGES_PER_BLK));
+	bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", udev->data_area_mb);
 
 	return bl;
 }
@@ -2543,8 +2568,7 @@ static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
 					    struct se_dev_attrib, da_group);
 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
 
-	return snprintf(page, PAGE_SIZE, "%u\n",
-			TCMU_PAGES_TO_MBS(udev->max_blocks * DATA_PAGES_PER_BLK));
+	return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
 }
 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
 
@@ -2902,7 +2926,8 @@ static void find_free_blocks(void)
 {
 	struct tcmu_dev *udev;
 	loff_t off;
-	u32 start, end, block, total_freed = 0;
+	u32 pages_freed, total_pages_freed = 0;
+	u32 start, end, block, total_blocks_freed = 0;
 
 	if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
 		return;
@@ -2949,12 +2974,14 @@ static void find_free_blocks(void)
 		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
 
 		/* Release the block pages */
-		tcmu_blocks_release(&udev->data_pages, start, end - 1);
+		pages_freed = tcmu_blocks_release(&udev->data_pages, start, end - 1);
 		mutex_unlock(&udev->cmdr_lock);
 
-		total_freed += end - start;
-		pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
-			 total_freed, udev->name);
+		total_pages_freed += pages_freed;
+		total_blocks_freed += end - start;
+		pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
+			 pages_freed, total_pages_freed, end - start,
+			 total_blocks_freed, udev->name);
 	}
 	mutex_unlock(&root_udev_mutex);
 }