@@ -109,7 +109,11 @@ static inline void spin_unlock_bucket(unsigned int hash)
109
109
/*
 * RCU callback that frees a struct gfs2_quota_data after its grace period.
 *
 * Ordering is deliberate: the qd object is freed *before* sd_quota_count is
 * dropped, so that when the count reaches zero no quota-data memory is still
 * outstanding.  gfs2_quota_cleanup() waits on sd_kill_wait for exactly that
 * condition, which is why the last decrement must issue the wake_up.
 */
static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	struct gfs2_sbd *sdp = qd->qd_sbd;	/* grab sdp before qd is freed */

	kmem_cache_free(gfs2_quotad_cachep, qd);
	/* Last qd gone: let gfs2_quota_cleanup() finish tearing down. */
	if (atomic_dec_and_test(&sdp->sd_quota_count))
		wake_up(&sdp->sd_kill_wait);
}
114
118
115
119
static void gfs2_qd_dispose (struct gfs2_quota_data * qd )
@@ -143,7 +147,6 @@ static void gfs2_qd_list_dispose(struct list_head *list)
143
147
list_del (& qd -> qd_lru );
144
148
145
149
gfs2_qd_dispose (qd );
146
- atomic_dec (& sdp -> sd_quota_count );
147
150
}
148
151
}
149
152
@@ -317,13 +320,24 @@ static void qd_hold(struct gfs2_quota_data *qd)
317
320
318
321
/*
 * Drop a reference on a quota-data object.
 *
 * Fast path: lockref_put_or_lock() decrements the count and returns true
 * when the reference did not hit zero; nothing more to do.  Otherwise it
 * returns false with qd_lockref.lock held and the count still at 1.
 *
 * Slow path (last reference, lock held):
 *  - If the journal is no longer live (filesystem being withdrawn/killed),
 *    mark the lockref dead and dispose of the object immediately rather
 *    than parking it on the LRU, since nothing will reclaim it later.
 *  - Otherwise, zero the count and hand the object to the global
 *    gfs2_qd_lru shrinker list for lazy disposal.
 *
 * NOTE(review): the BUG_ON relies on the invariant that a dead lockref can
 * never reach qd_put() — dead qds are only produced after their last
 * reference is gone (see gfs2_quota_cleanup()'s count != 0 skip).
 */
static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp;

	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	/* Here with qd_lockref.lock held and the last reference. */
	BUG_ON(__lockref_is_dead(&qd->qd_lockref));
	sdp = qd->qd_sbd;
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		/* Filesystem is going away: bypass the LRU, free now. */
		lockref_mark_dead(&qd->qd_lockref);
		spin_unlock(&qd->qd_lockref.lock);

		gfs2_qd_dispose(qd);
		return;
	}

	/* Park the now-unreferenced qd on the shrinker LRU. */
	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
328
342
329
343
static int slot_get (struct gfs2_quota_data * qd )
@@ -1465,16 +1479,33 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1465
1479
{
1466
1480
struct gfs2_quota_data * qd ;
1467
1481
LIST_HEAD (dispose );
1482
+ int count ;
1483
+
1484
+ BUG_ON (test_bit (SDF_JOURNAL_LIVE , & sdp -> sd_flags ));
1468
1485
1469
1486
spin_lock (& qd_lock );
1470
1487
list_for_each_entry (qd , & sdp -> sd_quota_list , qd_list ) {
1488
+ spin_lock (& qd -> qd_lockref .lock );
1489
+ if (qd -> qd_lockref .count != 0 ) {
1490
+ spin_unlock (& qd -> qd_lockref .lock );
1491
+ continue ;
1492
+ }
1493
+ lockref_mark_dead (& qd -> qd_lockref );
1494
+ spin_unlock (& qd -> qd_lockref .lock );
1495
+
1471
1496
list_lru_del (& gfs2_qd_lru , & qd -> qd_lru );
1472
1497
list_add (& qd -> qd_lru , & dispose );
1473
1498
}
1474
1499
spin_unlock (& qd_lock );
1475
1500
1476
1501
gfs2_qd_list_dispose (& dispose );
1477
- gfs2_assert_warn (sdp , !atomic_read (& sdp -> sd_quota_count ));
1502
+
1503
+ wait_event_timeout (sdp -> sd_kill_wait ,
1504
+ (count = atomic_read (& sdp -> sd_quota_count )) == 0 ,
1505
+ HZ * 60 );
1506
+
1507
+ if (count != 0 )
1508
+ fs_err (sdp , "%d left-over quota data objects\n" , count );
1478
1509
1479
1510
kvfree (sdp -> sd_quota_bitmap );
1480
1511
sdp -> sd_quota_bitmap = NULL ;
0 commit comments