@@ -1851,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1851
1851
depth = ext_depth (inode );
1852
1852
if (!path [depth ].p_ext )
1853
1853
goto out ;
1854
- b2 = le32_to_cpu (path [depth ].p_ext -> ee_block );
1855
- b2 &= ~(sbi -> s_cluster_ratio - 1 );
1854
+ b2 = EXT4_LBLK_CMASK (sbi , le32_to_cpu (path [depth ].p_ext -> ee_block ));
1856
1855
1857
1856
/*
1858
1857
* get the next allocated block if the extent in the path
@@ -1862,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1862
1861
b2 = ext4_ext_next_allocated_block (path );
1863
1862
if (b2 == EXT_MAX_BLOCKS )
1864
1863
goto out ;
1865
- b2 &= ~ (sbi -> s_cluster_ratio - 1 );
1864
+ b2 = EXT4_LBLK_CMASK (sbi , b2 );
1866
1865
}
1867
1866
1868
1867
/* check for wrap through zero on extent logical start block*/
@@ -2521,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2521
2520
* extent, we have to mark the cluster as used (store negative
2522
2521
* cluster number in partial_cluster).
2523
2522
*/
2524
- unaligned = pblk & (sbi -> s_cluster_ratio - 1 );
2523
+ unaligned = EXT4_PBLK_COFF (sbi , pblk );
2525
2524
if (unaligned && (ee_len == num ) &&
2526
2525
(* partial_cluster != - ((long long )EXT4_B2C (sbi , pblk ))))
2527
2526
* partial_cluster = EXT4_B2C (sbi , pblk );
@@ -2615,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2615
2614
* accidentally freeing it later on
2616
2615
*/
2617
2616
pblk = ext4_ext_pblock (ex );
2618
- if (pblk & (sbi -> s_cluster_ratio - 1 ))
2617
+ if (EXT4_PBLK_COFF (sbi , pblk ))
2619
2618
* partial_cluster =
2620
2619
- ((long long )EXT4_B2C (sbi , pblk ));
2621
2620
ex -- ;
@@ -3770,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3770
3769
{
3771
3770
struct ext4_sb_info * sbi = EXT4_SB (inode -> i_sb );
3772
3771
ext4_lblk_t lblk_start , lblk_end ;
3773
- lblk_start = lblk & (~( sbi -> s_cluster_ratio - 1 ) );
3772
+ lblk_start = EXT4_LBLK_CMASK ( sbi , lblk );
3774
3773
lblk_end = lblk_start + sbi -> s_cluster_ratio - 1 ;
3775
3774
3776
3775
return ext4_find_delalloc_range (inode , lblk_start , lblk_end );
@@ -3829,17 +3828,17 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3829
3828
trace_ext4_get_reserved_cluster_alloc (inode , lblk_start , num_blks );
3830
3829
3831
3830
/* Check towards left side */
3832
- c_offset = lblk_start & (sbi -> s_cluster_ratio - 1 );
3831
+ c_offset = EXT4_LBLK_COFF (sbi , lblk_start );
3833
3832
if (c_offset ) {
3834
- lblk_from = lblk_start & (~( sbi -> s_cluster_ratio - 1 ) );
3833
+ lblk_from = EXT4_LBLK_CMASK ( sbi , lblk_start );
3835
3834
lblk_to = lblk_from + c_offset - 1 ;
3836
3835
3837
3836
if (ext4_find_delalloc_range (inode , lblk_from , lblk_to ))
3838
3837
allocated_clusters -- ;
3839
3838
}
3840
3839
3841
3840
/* Now check towards right. */
3842
- c_offset = ( lblk_start + num_blks ) & ( sbi -> s_cluster_ratio - 1 );
3841
+ c_offset = EXT4_LBLK_COFF ( sbi , lblk_start + num_blks );
3843
3842
if (allocated_clusters && c_offset ) {
3844
3843
lblk_from = lblk_start + num_blks ;
3845
3844
lblk_to = lblk_from + (sbi -> s_cluster_ratio - c_offset ) - 1 ;
@@ -4047,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
4047
4046
struct ext4_ext_path * path )
4048
4047
{
4049
4048
struct ext4_sb_info * sbi = EXT4_SB (sb );
4050
- ext4_lblk_t c_offset = map -> m_lblk & (sbi -> s_cluster_ratio - 1 );
4049
+ ext4_lblk_t c_offset = EXT4_LBLK_COFF (sbi , map -> m_lblk );
4051
4050
ext4_lblk_t ex_cluster_start , ex_cluster_end ;
4052
4051
ext4_lblk_t rr_cluster_start ;
4053
4052
ext4_lblk_t ee_block = le32_to_cpu (ex -> ee_block );
@@ -4065,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
4065
4064
(rr_cluster_start == ex_cluster_start )) {
4066
4065
if (rr_cluster_start == ex_cluster_end )
4067
4066
ee_start += ee_len - 1 ;
4068
- map -> m_pblk = (ee_start & ~(sbi -> s_cluster_ratio - 1 )) +
4069
- c_offset ;
4067
+ map -> m_pblk = EXT4_PBLK_CMASK (sbi , ee_start ) + c_offset ;
4070
4068
map -> m_len = min (map -> m_len ,
4071
4069
(unsigned ) sbi -> s_cluster_ratio - c_offset );
4072
4070
/*
@@ -4220,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4220
4218
*/
4221
4219
map -> m_flags &= ~EXT4_MAP_FROM_CLUSTER ;
4222
4220
newex .ee_block = cpu_to_le32 (map -> m_lblk );
4223
- cluster_offset = map -> m_lblk & (sbi -> s_cluster_ratio - 1 );
4221
+ cluster_offset = EXT4_LBLK_COFF (sbi , map -> m_lblk );
4224
4222
4225
4223
/*
4226
4224
* If we are doing bigalloc, check to see if the extent returned
@@ -4288,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4288
4286
* needed so that future calls to get_implied_cluster_alloc()
4289
4287
* work correctly.
4290
4288
*/
4291
- offset = map -> m_lblk & (sbi -> s_cluster_ratio - 1 );
4289
+ offset = EXT4_LBLK_COFF (sbi , map -> m_lblk );
4292
4290
ar .len = EXT4_NUM_B2C (sbi , offset + allocated );
4293
4291
ar .goal -= offset ;
4294
4292
ar .logical -= offset ;
0 commit comments