@@ -631,15 +631,11 @@ EXPORT_TRACEPOINT_SYMBOL(module_get);
631631/* Init the unload section of the module. */
632632static int module_unload_init (struct module * mod )
633633{
634- mod -> refptr = alloc_percpu (struct module_ref );
635- if (!mod -> refptr )
636- return - ENOMEM ;
637-
638634 INIT_LIST_HEAD (& mod -> source_list );
639635 INIT_LIST_HEAD (& mod -> target_list );
640636
641637 /* Hold reference count during initialization. */
642- raw_cpu_write ( mod -> refptr -> incs , 1 );
638+ atomic_set ( & mod -> refcnt , 1 );
643639
644640 return 0 ;
645641}
@@ -721,8 +717,6 @@ static void module_unload_free(struct module *mod)
721717 kfree (use );
722718 }
723719 mutex_unlock (& module_mutex );
724-
725- free_percpu (mod -> refptr );
726720}
727721
728722#ifdef CONFIG_MODULE_FORCE_UNLOAD
@@ -772,28 +766,7 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
772766
773767unsigned long module_refcount (struct module * mod )
774768{
775- unsigned long incs = 0 , decs = 0 ;
776- int cpu ;
777-
778- for_each_possible_cpu (cpu )
779- decs += per_cpu_ptr (mod -> refptr , cpu )-> decs ;
780- /*
781- * ensure the incs are added up after the decs.
782- * module_put ensures incs are visible before decs with smp_wmb.
783- *
784- * This 2-count scheme avoids the situation where the refcount
785- * for CPU0 is read, then CPU0 increments the module refcount,
786- * then CPU1 drops that refcount, then the refcount for CPU1 is
787- * read. We would record a decrement but not its corresponding
788- * increment so we would see a low count (disaster).
789- *
790- * Rare situation? But module_refcount can be preempted, and we
791- * might be tallying up 4096+ CPUs. So it is not impossible.
792- */
793- smp_rmb ();
794- for_each_possible_cpu (cpu )
795- incs += per_cpu_ptr (mod -> refptr , cpu )-> incs ;
796- return incs - decs ;
769+ return (unsigned long )atomic_read (& mod -> refcnt );
797770}
798771EXPORT_SYMBOL (module_refcount );
799772
@@ -935,7 +908,7 @@ void __module_get(struct module *module)
935908{
936909 if (module ) {
937910 preempt_disable ();
938- __this_cpu_inc ( module -> refptr -> incs );
911+ atomic_inc ( & module -> refcnt );
939912 trace_module_get (module , _RET_IP_ );
940913 preempt_enable ();
941914 }
@@ -950,7 +923,7 @@ bool try_module_get(struct module *module)
950923 preempt_disable ();
951924
952925 if (likely (module_is_live (module ))) {
953- __this_cpu_inc ( module -> refptr -> incs );
926+ atomic_inc ( & module -> refcnt );
954927 trace_module_get (module , _RET_IP_ );
955928 } else
956929 ret = false;
@@ -965,9 +938,7 @@ void module_put(struct module *module)
965938{
966939 if (module ) {
967940 preempt_disable ();
968- smp_wmb (); /* see comment in module_refcount */
969- __this_cpu_inc (module -> refptr -> decs );
970-
941+ atomic_dec (& module -> refcnt );
971942 trace_module_put (module , _RET_IP_ );
972943 preempt_enable ();
973944 }