@@ -15,17 +15,25 @@
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	spin_lock_init(&mm->context.list_lock);
+	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.attach_count, 0);
 	mm->context.flush_mm = 0;
-	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
-	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste;
 	mm->context.has_pgste = 0;
 	mm->context.use_skey = 0;
 #endif
-	mm->context.asce_limit = STACK_TOP_MAX;
+	if (mm->context.asce_limit == 0) {
+		/* context created by exec, set asce limit to 4TB */
+		mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+		mm->context.asce_limit = STACK_TOP_MAX;
+	} else if (mm->context.asce_limit == (1UL << 31)) {
+		mm_inc_nr_pmds(mm);
+	}
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
 }
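Taken together, the hunk above changes init_new_context() from unconditionally forcing a 4TB region-third ASCE to dispatching on the inherited asce_limit: an mm created by exec starts with asce_limit == 0 and gets the fresh 4TB setup, while a forked mm keeps the parent's limit, with only the pmd accounting bumped for a downgraded 2GB compat parent (whose pgd doubles as the segment table). A minimal user-space sketch of that dispatch, assuming simplified stand-ins for the kernel pieces (the _ASCE_* values, STACK_TOP_MAX, the mm_ctx struct, and setup_asce() below are illustrative placeholders, not the real definitions, and a 64-bit unsigned long is assumed, as on s390):

#include <assert.h>
#include <stdio.h>

/* Stand-in values for illustration only; not the kernel's definitions. */
#define _ASCE_TABLE_LENGTH	0x03UL
#define _ASCE_USER_BITS		0x40UL
#define _ASCE_TYPE_REGION3	0x04UL
#define STACK_TOP_MAX		(1UL << 42)	/* 4TB */

struct mm_ctx {
	unsigned long asce_bits;
	unsigned long asce_limit;
	unsigned long nr_pmds;
};

/* Mirrors the asce_limit dispatch added to init_new_context() above. */
static void setup_asce(struct mm_ctx *ctx)
{
	if (ctx->asce_limit == 0) {
		/* fresh context from exec: 3-level (region-third) table, 4TB */
		ctx->asce_bits = _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
		ctx->asce_limit = STACK_TOP_MAX;
	} else if (ctx->asce_limit == (1UL << 31)) {
		/*
		 * forked child of a downgraded 2GB parent: the pgd doubles
		 * as the segment table, so bump the pmd accounting
		 */
		ctx->nr_pmds++;
	}
	/* any other inherited limit is kept as-is */
}

int main(void)
{
	struct mm_ctx execd = { 0, 0, 0 };		/* fresh from exec */
	struct mm_ctx forked2g = { 0, 1UL << 31, 0 };	/* inherited 2GB limit */

	setup_asce(&execd);
	setup_asce(&forked2g);
	assert(execd.asce_limit == STACK_TOP_MAX);
	assert(forked2g.asce_limit == (1UL << 31) && forked2g.nr_pmds == 1);
	printf("exec limit=%#lx, forked child keeps limit=%#lx\n",
	       execd.asce_limit, forked2g.asce_limit);
	return 0;
}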
@@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev, |
 static inline void arch_dup_mmap(struct mm_struct *oldmm,
 				 struct mm_struct *mm)
 {
-	if (oldmm->context.asce_limit < mm->context.asce_limit)
-		crst_table_downgrade(mm, oldmm->context.asce_limit);
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
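With init_new_context() now preserving a nonzero inherited asce_limit, a forked child already matches its parent's address-space limit by the time arch_dup_mmap() runs, so the crst_table_downgrade() call removed above appears to have become redundant; the hook body is emptied rather than the function dropped, since the common fork path still invokes arch_dup_mmap().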