Commit d330076

Merge tag 'slab-for-6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab fix from Vlastimil Babka:

 - A fix from Waiman Long to avoid a theoretical deadlock reported by
   lockdep.

* tag 'slab-for-6.0-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab_common: Deleting kobject in kmem_cache_destroy() without holding slab_mutex/cpu_hotplug_lock
2 parents 2880e1a + 0495e33 commit d330076

1 file changed

mm/slab_common.c

Lines changed: 29 additions & 16 deletions
@@ -392,6 +392,28 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+#ifdef SLAB_SUPPORTS_SYSFS
+/*
+ * For a given kmem_cache, kmem_cache_destroy() should only be called
+ * once or there will be a use-after-free problem. The actual deletion
+ * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
+ * protection. So they are now done without holding those locks.
+ *
+ * Note that there will be a slight delay in the deletion of sysfs files
+ * if kmem_cache_release() is called indrectly from a work function.
+ */
+static void kmem_cache_release(struct kmem_cache *s)
+{
+	sysfs_slab_unlink(s);
+	sysfs_slab_release(s);
+}
+#else
+static void kmem_cache_release(struct kmem_cache *s)
+{
+	slab_kmem_cache_release(s);
+}
+#endif
+
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 {
 	LIST_HEAD(to_destroy);
@@ -418,11 +440,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 	list_for_each_entry_safe(s, s2, &to_destroy, list) {
 		debugfs_slab_release(s);
 		kfence_shutdown_cache(s);
-#ifdef SLAB_SUPPORTS_SYSFS
-		sysfs_slab_release(s);
-#else
-		slab_kmem_cache_release(s);
-#endif
+		kmem_cache_release(s);
 	}
 }
 
@@ -437,20 +455,11 @@ static int shutdown_cache(struct kmem_cache *s)
 	list_del(&s->list);
 
 	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
-#ifdef SLAB_SUPPORTS_SYSFS
-		sysfs_slab_unlink(s);
-#endif
 		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
 		schedule_work(&slab_caches_to_rcu_destroy_work);
 	} else {
 		kfence_shutdown_cache(s);
 		debugfs_slab_release(s);
-#ifdef SLAB_SUPPORTS_SYSFS
-		sysfs_slab_unlink(s);
-		sysfs_slab_release(s);
-#else
-		slab_kmem_cache_release(s);
-#endif
 	}
 
 	return 0;
@@ -465,14 +474,16 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
+	int refcnt;
+
 	if (unlikely(!s) || !kasan_check_byte(s))
 		return;
 
 	cpus_read_lock();
 	mutex_lock(&slab_mutex);
 
-	s->refcount--;
-	if (s->refcount)
+	refcnt = --s->refcount;
+	if (refcnt)
 		goto out_unlock;
 
 	WARN(shutdown_cache(s),
@@ -481,6 +492,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 out_unlock:
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
+	if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
+		kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
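
The shape of the fix is easier to see outside diff form: the refcount drop and the cache shutdown still happen under cpu_hotplug_lock and slab_mutex, but the sysfs kobject unlink/release now runs only after both locks are dropped, and is skipped for SLAB_TYPESAFE_BY_RCU caches, whose release stays deferred to the RCU work function. Below is a minimal user-space sketch of that ordering, assuming pthread mutexes as stand-ins for the two kernel locks; every name here (fake_cache, fake_cache_destroy, and so on) is illustrative, not the real kernel implementation.

/*
 * Illustrative sketch only -- not kernel code. fake_cpu_hotplug_lock,
 * fake_slab_mutex and struct fake_cache are made-up stand-ins for the
 * real kernel locks and kmem_cache.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t fake_cpu_hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fake_slab_mutex = PTHREAD_MUTEX_INITIALIZER;

struct fake_cache {
	const char *name;
	int refcount;
	bool typesafe_by_rcu;	/* models the SLAB_TYPESAFE_BY_RCU flag */
};

/* Models kmem_cache_release(): kobject/sysfs teardown, needs no locks. */
static void fake_cache_release(struct fake_cache *s)
{
	printf("releasing %s with no locks held\n", s->name);
	free(s);
}

/* Models the reworked kmem_cache_destroy(). */
static void fake_cache_destroy(struct fake_cache *s)
{
	int refcnt;

	if (!s)
		return;

	pthread_mutex_lock(&fake_cpu_hotplug_lock);
	pthread_mutex_lock(&fake_slab_mutex);

	/* Drop the reference and remember the result so the release
	 * decision can be made after the locks are gone. */
	refcnt = --s->refcount;
	if (!refcnt)
		printf("shutting down %s under both locks\n", s->name);

	pthread_mutex_unlock(&fake_slab_mutex);
	pthread_mutex_unlock(&fake_cpu_hotplug_lock);

	/*
	 * The kobject-style release happens here, outside both locks.
	 * RCU-typesafe caches are skipped: in the kernel their release is
	 * handled later by the slab_caches_to_rcu_destroy work function.
	 */
	if (!refcnt && !s->typesafe_by_rcu)
		fake_cache_release(s);
}

int main(void)
{
	struct fake_cache *s = calloc(1, sizeof(*s));

	if (!s)
		return 1;
	s->name = "demo_cache";
	s->refcount = 1;
	fake_cache_destroy(s);
	return 0;
}

Built as an ordinary program (for example, cc -pthread sketch.c), this only demonstrates the lock/release ordering; in the kernel, that same ordering is what keeps sysfs/kernfs teardown from nesting inside slab_mutex and cpu_hotplug_lock, which is the lock chain lockdep complained about.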