@@ -114,23 +114,23 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 #ifdef CONFIG_NUMA_BALANCING
 static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-				       pmd_t *pmd)
+		pmd_t *pmd)
 {
 	spin_lock(&mm->page_table_lock);
 	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
 	spin_unlock(&mm->page_table_lock);
 }
 #else
 static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
-				       pmd_t *pmd)
+		pmd_t *pmd)
 {
 	BUG();
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
-static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa)
+static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+		pud_t *pud, unsigned long addr, unsigned long end,
+		pgprot_t newprot, int dirty_accountable, int prot_numa)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -143,7 +143,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
 				split_huge_page_pmd(vma, addr, pmd);
-			else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) {
+			else if (change_huge_pmd(vma, pmd, addr, newprot,
+					prot_numa)) {
 				pages += HPAGE_PMD_NR;
 				continue;
 			}
@@ -167,9 +168,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *
 	return pages;
 }
 
-static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
-		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa)
+static inline unsigned long change_pud_range(struct vm_area_struct *vma,
+		pgd_t *pgd, unsigned long addr, unsigned long end,
+		pgprot_t newprot, int dirty_accountable, int prot_numa)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -304,7 +305,8 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 		dirty_accountable = 1;
 	}
 
-	change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0);
+	change_protection(vma, start, end, vma->vm_page_prot,
+			dirty_accountable, 0);
 
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
@@ -361,8 +363,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 		error = -EINVAL;
 		if (!(vma->vm_flags & VM_GROWSDOWN))
 			goto out;
-	}
-	else {
+	} else {
 		if (vma->vm_start > start)
 			goto out;
 		if (unlikely(grows & PROT_GROWSUP)) {
@@ -378,9 +379,10 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 	for (nstart = start ; ; ) {
 		unsigned long newflags;
 
-		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
+		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
 
-		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
+		newflags = vm_flags;
+		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
 
 		/* newflags >> 4 shift VM_MAY% in place of VM_% */
 		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {