@@ -2918,6 +2918,12 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
29182918 return 0 ;
29192919}
29202920
2921+ static void dev_change_rx_flags (struct net_device * dev , int flags )
2922+ {
2923+ if (dev -> flags & IFF_UP && dev -> change_rx_flags )
2924+ dev -> change_rx_flags (dev , flags );
2925+ }
2926+
29212927static int __dev_set_promiscuity (struct net_device * dev , int inc )
29222928{
29232929 unsigned short old_flags = dev -> flags ;
@@ -2955,8 +2961,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
29552961 current -> uid , current -> gid ,
29562962 audit_get_sessionid (current ));
29572963
2958- if (dev -> change_rx_flags )
2959- dev -> change_rx_flags (dev , IFF_PROMISC );
2964+ dev_change_rx_flags (dev , IFF_PROMISC );
29602965 }
29612966 return 0 ;
29622967}
@@ -3022,8 +3027,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
30223027 }
30233028 }
30243029 if (dev -> flags ^ old_flags ) {
3025- if (dev -> change_rx_flags )
3026- dev -> change_rx_flags (dev , IFF_ALLMULTI );
3030+ dev_change_rx_flags (dev , IFF_ALLMULTI );
30273031 dev_set_rx_mode (dev );
30283032 }
30293033 return 0 ;
@@ -3347,8 +3351,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
33473351 * Load in the correct multicast list now the flags have changed.
33483352 */
33493353
3350- if (dev -> change_rx_flags && (old_flags ^ flags ) & IFF_MULTICAST )
3351- dev -> change_rx_flags (dev , IFF_MULTICAST );
3354+ if ((old_flags ^ flags ) & IFF_MULTICAST )
3355+ dev_change_rx_flags (dev , IFF_MULTICAST );
33523356
33533357 dev_set_rx_mode (dev );
33543358
@@ -3808,14 +3812,11 @@ static int dev_new_index(struct net *net)
38083812}
38093813
38103814/* Delayed registration/unregisteration */
3811- static DEFINE_SPINLOCK (net_todo_list_lock );
38123815static LIST_HEAD (net_todo_list );
38133816
38143817static void net_set_todo (struct net_device * dev )
38153818{
3816- spin_lock (& net_todo_list_lock );
38173819 list_add_tail (& dev -> todo_list , & net_todo_list );
3818- spin_unlock (& net_todo_list_lock );
38193820}
38203821
38213822static void rollback_registered (struct net_device * dev )
@@ -4142,33 +4143,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
41424143 * free_netdev(y1);
41434144 * free_netdev(y2);
41444145 *
4145- * We are invoked by rtnl_unlock() after it drops the semaphore .
4146+ * We are invoked by rtnl_unlock().
41464147 * This allows us to deal with problems:
41474148 * 1) We can delete sysfs objects which invoke hotplug
41484149 * without deadlocking with linkwatch via keventd.
41494150 * 2) Since we run with the RTNL semaphore not held, we can sleep
41504151 * safely in order to wait for the netdev refcnt to drop to zero.
4152+ *
4153+ * We must not return until all unregister events added during
4154+ * the interval the lock was held have been completed.
41514155 */
4152- static DEFINE_MUTEX (net_todo_run_mutex );
41534156void netdev_run_todo (void )
41544157{
41554158 struct list_head list ;
41564159
4157- /* Need to guard against multiple cpu's getting out of order. */
4158- mutex_lock (& net_todo_run_mutex );
4159-
4160- /* Not safe to do outside the semaphore. We must not return
4161- * until all unregister events invoked by the local processor
4162- * have been completed (either by this todo run, or one on
4163- * another cpu).
4164- */
4165- if (list_empty (& net_todo_list ))
4166- goto out ;
4167-
41684160 /* Snapshot list, allow later requests */
4169- spin_lock (& net_todo_list_lock );
41704161 list_replace_init (& net_todo_list , & list );
4171- spin_unlock (& net_todo_list_lock );
4162+
4163+ __rtnl_unlock ();
41724164
41734165 while (!list_empty (& list )) {
41744166 struct net_device * dev
@@ -4200,9 +4192,6 @@ void netdev_run_todo(void)
42004192 /* Free network device */
42014193 kobject_put (& dev -> dev .kobj );
42024194 }
4203-
4204- out :
4205- mutex_unlock (& net_todo_run_mutex );
42064195}
42074196
42084197static struct net_device_stats * internal_stats (struct net_device * dev )