@@ -131,13 +131,14 @@ static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
 	put_net(net);
 }
 
-static int device_cmp(struct nf_conn *i, void *ifindex)
+static int device_cmp(struct nf_conn *i, void *arg)
 {
 	const struct nf_conn_nat *nat = nfct_nat(i);
+	const struct masq_dev_work *w = arg;
 
 	if (!nat)
 		return 0;
-	return nat->masq_index == (int)(long)ifindex;
+	return nat->masq_index == w->ifindex;
 }
 
 static int masq_device_event(struct notifier_block *this,
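
The hunk above changes device_cmp() to read the interface index from a masq_dev_work descriptor instead of a pointer-casted integer. That struct is defined earlier in nf_nat_masquerade.c and is not part of this diff; a plausible layout, inferred from the fields the patch touches (ifindex, addr, iter) and the put_net() in the surrounding context, is sketched below. Treat it as an approximation, not the verbatim definition.

/* Sketch of the deferred-work descriptor assumed by device_cmp() and
 * inet_cmp(); the real definition sits earlier in the file and may
 * differ in detail.
 */
struct masq_dev_work {
	struct work_struct work;   /* queued on the system workqueue */
	struct net *net;           /* pinned at schedule time, dropped via put_net() */
	union nf_inet_addr addr;   /* address filter; left all-zero when unused */
	int ifindex;               /* interface filter checked by device_cmp() */
	int (*iter)(struct nf_conn *i, void *data); /* device_cmp or inet_cmp */
};
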
@@ -153,44 +154,54 @@ static int masq_device_event(struct notifier_block *this,
 		 * and forget them.
 		 */
 
-		nf_ct_iterate_cleanup_net(net, device_cmp,
-					  (void *)(long)dev->ifindex, 0, 0);
+		nf_nat_masq_schedule(net, NULL, dev->ifindex,
+				     device_cmp, GFP_KERNEL);
 	}
 
 	return NOTIFY_DONE;
 }
 
 static int inet_cmp(struct nf_conn *ct, void *ptr)
 {
-	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
-	struct net_device *dev = ifa->ifa_dev->dev;
 	struct nf_conntrack_tuple *tuple;
+	struct masq_dev_work *w = ptr;
 
-	if (!device_cmp(ct, (void *)(long)dev->ifindex))
+	if (!device_cmp(ct, ptr))
 		return 0;
 
 	tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-	return ifa->ifa_address == tuple->dst.u3.ip;
+	return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
 }
 
 static int masq_inet_event(struct notifier_block *this,
 			   unsigned long event,
 			   void *ptr)
 {
-	struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
-	struct net *net = dev_net(idev->dev);
+	const struct in_ifaddr *ifa = ptr;
+	const struct in_device *idev;
+	const struct net_device *dev;
+	union nf_inet_addr addr;
+
+	if (event != NETDEV_DOWN)
+		return NOTIFY_DONE;
 
 	/* The masq_dev_notifier will catch the case of the device going
 	 * down. So if the inetdev is dead and being destroyed we have
 	 * no work to do. Otherwise this is an individual address removal
 	 * and we have to perform the flush.
 	 */
+	idev = ifa->ifa_dev;
 	if (idev->dead)
 		return NOTIFY_DONE;
 
-	if (event == NETDEV_DOWN)
-		nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
+	memset(&addr, 0, sizeof(addr));
+
+	addr.ip = ifa->ifa_address;
+
+	dev = idev->dev;
+	nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
+			     inet_cmp, GFP_KERNEL);
 
 	return NOTIFY_DONE;
 }
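
After this hunk, both IPv4 notifier paths funnel into nf_nat_masq_schedule(), which allocates a masq_dev_work item and queues it instead of walking the conntrack table inline. The queued handler itself is outside the diff; the sketch below shows what it has to do, with the function name and details assumed rather than taken from the patch. Mainline additionally bounds the number of pending work items and holds a module reference across the deferral; those parts are omitted here.

/* Sketch of the queued handler; it runs in process context, where the
 * sleeping conntrack walk is legal. Names are illustrative.
 */
static void iterate_cleanup_work(struct work_struct *work)
{
	struct masq_dev_work *w;

	w = container_of(work, struct masq_dev_work, work);

	/* Walk every conntrack entry in w->net; w->iter is device_cmp()
	 * or inet_cmp() and decides which entries get zapped.
	 */
	nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);

	put_net(w->net);   /* balances the reference taken at schedule time */
	kfree(w);
}
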
@@ -253,19 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
 
-static int inet6_cmp(struct nf_conn *ct, void *work)
-{
-	struct masq_dev_work *w = (struct masq_dev_work *)work;
-	struct nf_conntrack_tuple *tuple;
-
-	if (!device_cmp(ct, (void *)(long)w->ifindex))
-		return 0;
-
-	tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-
-	return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
-}
-
 /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
  *
  * Defer it to the system workqueue.
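
inet6_cmp() can be deleted because inet_cmp() now compares the whole union nf_inet_addr. The helper, shown below as it appears in include/linux/netfilter.h, is a 128-bit compare; that is why masq_inet_event() memsets the union before filling in only addr.ip for IPv4, and it is safe because the unused words of an IPv4 conntrack tuple are likewise zero.

static inline bool nf_inet_addr_cmp(const union nf_inet_addr *a1,
				    const union nf_inet_addr *a2)
{
	/* Compares all 128 bits, so IPv4 callers must zero-pad. */
	return a1->all[0] == a2->all[0] &&
	       a1->all[1] == a2->all[1] &&
	       a1->all[2] == a2->all[2] &&
	       a1->all[3] == a2->all[3];
}
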
@@ -289,7 +287,7 @@ static int masq_inet6_event(struct notifier_block *this,
 
 	addr.in6 = ifa->addr;
 
-	nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet6_cmp,
+	nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
 			     GFP_ATOMIC);
 	return NOTIFY_DONE;
 }
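
Only one line of the IPv6 notifier changes, so most of the function is not visible here. Reconstructed from the hunk context and the pre-patch shape of the code (an approximation, not part of the diff), the function after this change reads roughly as follows; note the GFP_ATOMIC, since this runs from an atomic notifier chain where the allocation inside nf_nat_masq_schedule() must not sleep, unlike the blocking IPv4 notifiers above that pass GFP_KERNEL.

static int masq_inet6_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = ptr;
	const struct net_device *dev;
	union nf_inet_addr addr;

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	dev = ifa->idev->dev;
	memset(&addr, 0, sizeof(addr));
	addr.in6 = ifa->addr;

	/* atomic context: defer the conntrack walk, allocation must not sleep */
	nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
			     GFP_ATOMIC);
	return NOTIFY_DONE;
}
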