@@ -140,6 +140,13 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 {
 	void *shadow_start, *shadow_end;
 
+	/*
+	 * Perform shadow offset calculation based on untagged address, as
+	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
+	 * addresses to this function.
+	 */
+	address = reset_tag(address);
+
 	shadow_start = kasan_mem_to_shadow(address);
 	shadow_end = kasan_mem_to_shadow(address + size);
 
@@ -148,11 +155,24 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
 
 void kasan_unpoison_shadow(const void *address, size_t size)
 {
-	kasan_poison_shadow(address, size, 0);
+	u8 tag = get_tag(address);
+
+	/*
+	 * Perform shadow offset calculation based on untagged address, as
+	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+	 * addresses to this function.
+	 */
+	address = reset_tag(address);
+
+	kasan_poison_shadow(address, size, tag);
 
 	if (size & KASAN_SHADOW_MASK) {
 		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
-		*shadow = size & KASAN_SHADOW_MASK;
+
+		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+			*shadow = tag;
+		else
+			*shadow = size & KASAN_SHADOW_MASK;
 	}
 }
 
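These hunks lean on the get_tag(), set_tag() and reset_tag() helpers, which are defined elsewhere in the series (in the KASAN headers) rather than in this file. As a rough, stand-alone sketch only, assuming the tag is carried in the pointer's top byte (arm64 Top Byte Ignore), that pointers are 64-bit, and that 0xff is the native kernel tag restored by reset_tag(), the helpers could look like this:

#include <stdint.h>

/* Sketch, not kernel code: the constants and tag placement are assumptions. */
#define KASAN_PTR_TAG_SHIFT	56
#define KASAN_PTR_TAG_MASK	(0xffULL << KASAN_PTR_TAG_SHIFT)
#define KASAN_TAG_KERNEL	0xff	/* assumed native tag of untagged kernel pointers */

static inline void *set_tag(const void *addr, uint8_t tag)
{
	uint64_t a = (uint64_t)(uintptr_t)addr;

	a &= ~KASAN_PTR_TAG_MASK;
	a |= ((uint64_t)tag << KASAN_PTR_TAG_SHIFT);
	return (void *)(uintptr_t)a;
}

static inline uint8_t get_tag(const void *addr)
{
	return (uint8_t)((uint64_t)(uintptr_t)addr >> KASAN_PTR_TAG_SHIFT);
}

static inline void *reset_tag(const void *addr)
{
	return set_tag(addr, KASAN_TAG_KERNEL);
}

With that model, kasan_unpoison_shadow() above records the pointer's tag in every shadow byte it unpoisons, so a later access through a pointer carrying a different tag can be flagged.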
@@ -200,8 +220,9 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark)
 
 void kasan_alloc_pages(struct page *page, unsigned int order)
 {
-	if (likely(!PageHighMem(page)))
-		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+	if (unlikely(PageHighMem(page)))
+		return;
+	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 }
 
 void kasan_free_pages(struct page *page, unsigned int order)
@@ -218,6 +239,9 @@ void kasan_free_pages(struct page *page, unsigned int order)
  */
 static inline unsigned int optimal_redzone(unsigned int object_size)
 {
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		return 0;
+
 	return
 		object_size <= 64 - 16 ? 16 :
 		object_size <= 128 - 32 ? 32 :
@@ -232,27 +256,28 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
 	unsigned int orig_size = *size;
+	unsigned int redzone_size;
 	int redzone_adjust;
 
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
 	*size += sizeof(struct kasan_alloc_meta);
 
 	/* Add free meta. */
-	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
-	    cache->object_size < sizeof(struct kasan_free_meta)) {
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
+	     cache->object_size < sizeof(struct kasan_free_meta))) {
 		cache->kasan_info.free_meta_offset = *size;
 		*size += sizeof(struct kasan_free_meta);
 	}
-	redzone_adjust = optimal_redzone(cache->object_size) -
-		(*size - cache->object_size);
 
+	redzone_size = optimal_redzone(cache->object_size);
+	redzone_adjust = redzone_size - (*size - cache->object_size);
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
 
 	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
-			max(*size, cache->object_size +
-					optimal_redzone(cache->object_size)));
+			max(*size, cache->object_size + redzone_size));
 
 	/*
 	 * If the metadata doesn't fit, don't enable KASAN at all.
@@ -265,6 +290,8 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 		return;
 	}
 
+	cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
+
 	*flags |= SLAB_KASAN;
 }
 
@@ -309,6 +336,32 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 			KASAN_KMALLOC_REDZONE);
 }
 
+/*
+ * Since it's desirable to only call object constructors once during slab
+ * allocation, we preassign tags to all such objects. Also preassign tags for
+ * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
+ * For the SLAB allocator we can't preassign tags randomly since the freelist
+ * is stored as an array of indexes instead of a linked list. Assign tags
+ * based on object indexes, so that objects that are next to each other get
+ * different tags.
+ * After a tag is assigned, the object always gets allocated with the same tag.
+ * The reason is that we can't change tags for objects with constructors on
+ * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
+ * code can save the pointer to the object somewhere (e.g. in the object
+ * itself). Then if we retag it, the old saved pointer will become invalid.
+ */
+static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
+{
+	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
+		return new ? KASAN_TAG_KERNEL : random_tag();
+
+#ifdef CONFIG_SLAB
+	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
+#else
+	return new ? random_tag() : get_tag(object);
+#endif
+}
+
 void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 {
 	struct kasan_alloc_meta *alloc_info;
@@ -319,6 +372,9 @@ void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
 	alloc_info = get_alloc_info(cache, object);
 	__memset(alloc_info, 0, sizeof(*alloc_info));
 
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		object = set_tag(object, assign_tag(cache, object, true));
+
 	return (void *)object;
 }
 
@@ -327,15 +383,30 @@ void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
 	return kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
+static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
+{
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+		return shadow_byte < 0 ||
+			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+	else
+		return tag != (u8)shadow_byte;
+}
+
 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 			      unsigned long ip, bool quarantine)
 {
 	s8 shadow_byte;
+	u8 tag;
+	void *tagged_object;
 	unsigned long rounded_up_size;
 
+	tag = get_tag(object);
+	tagged_object = object;
+	object = reset_tag(object);
+
 	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
 	    object)) {
-		kasan_report_invalid_free(object, ip);
+		kasan_report_invalid_free(tagged_object, ip);
 		return true;
 	}
 
@@ -344,20 +415,22 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 		return false;
 
 	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
-	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
-		kasan_report_invalid_free(object, ip);
+	if (shadow_invalid(tag, shadow_byte)) {
+		kasan_report_invalid_free(tagged_object, ip);
 		return true;
 	}
 
 	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
 	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 
-	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
+	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
+			unlikely(!(cache->flags & SLAB_KASAN)))
 		return false;
 
 	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
 	quarantine_put(get_free_info(cache, object), cache);
-	return true;
+
+	return IS_ENABLED(CONFIG_KASAN_GENERIC);
 }
 
 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
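The shadow_invalid() helper introduced above is the core of the tag-based free check: under generic KASAN the shadow byte encodes how much of the granule is accessible, while under software tags it stores the tag of the object that owns the granule, so a free is valid only when the pointer's tag matches it. A small stand-alone illustration of that comparison (user-space C with made-up values, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool shadow_invalid(uint8_t tag, int8_t shadow_byte)
{
	/* Tag-based mode: the pointer tag must equal the shadow byte. */
	return tag != (uint8_t)shadow_byte;
}

int main(void)
{
	int8_t shadow = (int8_t)0x5a;	/* granule currently owned by an object tagged 0x5a */

	/* Pointer carrying the matching tag: the free is accepted. */
	printf("tag 0x5a -> %s\n",
	       shadow_invalid(0x5a, shadow) ? "invalid-free report" : "ok");
	/* Stale pointer from an earlier allocation, different tag: reported. */
	printf("tag 0x33 -> %s\n",
	       shadow_invalid(0x33, shadow) ? "invalid-free report" : "ok");
	return 0;
}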
@@ -370,6 +443,7 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
+	u8 tag;
 
 	if (gfpflags_allow_blocking(flags))
 		quarantine_reduce();
@@ -382,14 +456,18 @@ void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
 	redzone_end = round_up((unsigned long)object + cache->object_size,
 				KASAN_SHADOW_SCALE_SIZE);
 
-	kasan_unpoison_shadow(object, size);
+	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+		tag = assign_tag(cache, object, false);
+
+	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
+	kasan_unpoison_shadow(set_tag(object, tag), size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 		KASAN_KMALLOC_REDZONE);
 
 	if (cache->flags & SLAB_KASAN)
 		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
 
-	return (void *)object;
+	return set_tag(object, tag);
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
@@ -439,7 +517,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 	page = virt_to_head_page(ptr);
 
 	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
+		if (reset_tag(ptr) != page_address(page)) {
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
@@ -452,7 +530,7 @@ void kasan_poison_kfree(void *ptr, unsigned long ip)
 
 void kasan_kfree_large(void *ptr, unsigned long ip)
 {
-	if (ptr != page_address(virt_to_head_page(ptr)))
+	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
 		kasan_report_invalid_free(ptr, ip);
 	/* The object will be poisoned by page_alloc. */
 }
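The last two hunks untag the pointer before comparing it with page_address(): with software tags the pointer returned to the caller carries a tag in its top byte (under the assumptions sketched earlier), while page_address() yields the plain linear-map address, so the old direct comparison would misfire on every legitimately tagged pointer. A stand-alone sketch of that failure mode, reusing the hypothetical tag placement from the earlier sketch:

#include <stdint.h>
#include <stdio.h>

#define TAG_SHIFT 56

int main(void)
{
	/* Hypothetical linear-map address, as page_address() would return it. */
	uint64_t linear   = 0xffff000012345000ULL;
	/* Same address as handed out by the allocator, carrying tag 0x5a. */
	uint64_t tagged   = (linear & ~(0xffULL << TAG_SHIFT)) | (0x5aULL << TAG_SHIFT);
	/* reset_tag() equivalent: restore the native 0xff top byte. */
	uint64_t untagged = (tagged & ~(0xffULL << TAG_SHIFT)) | (0xffULL << TAG_SHIFT);

	printf("tagged   == linear: %d\n", tagged == linear);	/* 0: spurious mismatch */
	printf("untagged == linear: %d\n", untagged == linear);	/* 1: comparison works */
	return 0;
}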