/*
 * Software-defined PTE bits.  AArch64 hardware descriptors leave bits
 * 55-58 to software; PTE_FILE additionally reuses hardware bit 2, which
 * is only meaningful while the pte is not present.
 */
#define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
/* Software write permission; hardware write is granted separately via !PTE_RDONLY. */
#define PTE_WRITE		(_AT(pteval_t, 1) << 57)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
@@ -67,29 +67,29 @@ extern pgprot_t pgprot_default;
67
67
68
68
#define _MOD_PROT (p , b ) __pgprot_modify(p, 0, b)
69
69
70
- #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
71
- #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
72
- #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
73
- #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY )
74
- #define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY )
75
- #define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY )
76
- #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY )
77
- #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
78
- #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
70
+ #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
71
+ #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE )
72
+ #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE )
73
+ #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
74
+ #define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
75
+ #define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
76
+ #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
77
+ #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE )
78
+ #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE )
79
79
80
80
#define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP)
81
81
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
82
82
83
83
#define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
84
84
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
85
85
86
- #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
87
- #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
88
- #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
89
- #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY )
90
- #define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY )
91
- #define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY )
92
- #define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY )
86
+ #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
87
+ #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE )
88
+ #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE )
89
+ #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
90
+ #define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
91
+ #define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
92
+ #define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
93
93
94
94
#endif /* __ASSEMBLY__ */
95
95
/*
 * PTE state predicates.  Each evaluates a single descriptor bit; note
 * that pte_write() now tests the software PTE_WRITE bit rather than
 * inferring writability from !PTE_RDONLY, and pte_exec() is inverted
 * because the hardware bit (PTE_UXN) is an execute-*never* flag.
 */
#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & PTE_AF)
#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))

/* True only for ptes that are both valid and mapped at user privilege. */
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
149
- #define PTE_BIT_FUNC (fn ,op ) \
150
- static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
149
+ static inline pte_t pte_wrprotect (pte_t pte )
150
+ {
151
+ pte_val (pte ) &= ~PTE_WRITE ;
152
+ return pte ;
153
+ }
154
+
155
+ static inline pte_t pte_mkwrite (pte_t pte )
156
+ {
157
+ pte_val (pte ) |= PTE_WRITE ;
158
+ return pte ;
159
+ }
160
+
161
+ static inline pte_t pte_mkclean (pte_t pte )
162
+ {
163
+ pte_val (pte ) &= ~PTE_DIRTY ;
164
+ return pte ;
165
+ }
166
+
167
+ static inline pte_t pte_mkdirty (pte_t pte )
168
+ {
169
+ pte_val (pte ) |= PTE_DIRTY ;
170
+ return pte ;
171
+ }
151
172
152
- PTE_BIT_FUNC (wrprotect , |= PTE_RDONLY );
153
- PTE_BIT_FUNC (mkwrite , & = ~PTE_RDONLY );
154
- PTE_BIT_FUNC (mkclean , & = ~PTE_DIRTY );
155
- PTE_BIT_FUNC (mkdirty , |= PTE_DIRTY );
156
- PTE_BIT_FUNC (mkold , & = ~PTE_AF );
157
- PTE_BIT_FUNC (mkyoung , |= PTE_AF );
158
- PTE_BIT_FUNC (mkspecial , |= PTE_SPECIAL );
173
+ static inline pte_t pte_mkold (pte_t pte )
174
+ {
175
+ pte_val (pte ) &= ~PTE_AF ;
176
+ return pte ;
177
+ }
178
+
179
+ static inline pte_t pte_mkyoung (pte_t pte )
180
+ {
181
+ pte_val (pte ) |= PTE_AF ;
182
+ return pte ;
183
+ }
184
+
185
+ static inline pte_t pte_mkspecial (pte_t pte )
186
+ {
187
+ pte_val (pte ) |= PTE_SPECIAL ;
188
+ return pte ;
189
+ }
159
190
160
191
static inline void set_pte (pte_t * ptep , pte_t pte )
161
192
{
@@ -170,8 +201,10 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
170
201
if (pte_valid_user (pte )) {
171
202
if (pte_exec (pte ))
172
203
__sync_icache_dcache (pte , addr );
173
- if (!pte_dirty (pte ))
174
- pte = pte_wrprotect (pte );
204
+ if (pte_dirty (pte ) && pte_write (pte ))
205
+ pte_val (pte ) &= ~PTE_RDONLY ;
206
+ else
207
+ pte_val (pte ) |= PTE_RDONLY ;
175
208
}
176
209
177
210
set_pte (ptep , pte );
@@ -345,7 +378,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
345
378
static inline pte_t pte_modify (pte_t pte , pgprot_t newprot )
346
379
{
347
380
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
348
- PTE_PROT_NONE | PTE_VALID ;
381
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE ;
349
382
pte_val (pte ) = (pte_val (pte ) & ~mask ) | (pgprot_val (newprot ) & mask );
350
383
return pte ;
351
384
}
0 commit comments