@@ -108,54 +108,66 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
108
108
return 0 ;
109
109
}
110
110
111
+ static inline void __meminit print_mapping (unsigned long start ,
112
+ unsigned long end ,
113
+ unsigned long size )
114
+ {
115
+ if (end <= start )
116
+ return ;
117
+
118
+ pr_info ("Mapped range 0x%lx - 0x%lx with 0x%lx\n" , start , end , size );
119
+ }
120
+
121
/*
 * Map the physical range [start, end) into the kernel linear mapping,
 * greedily using the largest page size whose alignment and remaining gap
 * allow it at each address: 1G (PUD) if the MMU supports MMU_PAGE_1G,
 * else 2M (PMD) if MMU_PAGE_2M is supported, else base PAGE_SIZE.
 *
 * Returns 0 on success, or the non-zero error from
 * radix__map_kernel_page() on the first failed mapping.
 *
 * NOTE(review): start is rounded up to PAGE_SIZE but end is used as-is;
 * this assumes memblock region boundaries are at least page-aligned —
 * confirm against the caller.
 */
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long addr, mapping_size = 0;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		/* Bytes left to map; bounds the page size we may pick. */
		gap = end - addr;
		previous_size = mapping_size;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		/*
		 * Page size changed: log the chunk mapped so far with the
		 * old size (no-op on the first pass, where previous_size is
		 * 0 and start == addr) and begin a new chunk here.
		 */
		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		rc = radix__map_kernel_page((unsigned long)__va(addr), addr,
					    PAGE_KERNEL_X, mapping_size);
		if (rc)
			return rc;
	}

	/* Log the final chunk, which ends at the loop's exit address. */
	print_mapping(start, addr, mapping_size);
	return 0;
}
157
+
111
158
static void __init radix_init_pgtable (void )
112
159
{
113
- int loop_count ;
114
- u64 base , end , start_addr ;
115
160
unsigned long rts_field ;
116
161
struct memblock_region * reg ;
117
- unsigned long linear_page_size ;
118
162
119
163
/* We don't support slb for radix */
120
164
mmu_slb_size = 0 ;
121
165
/*
122
166
* Create the linear mapping, using standard page size for now
123
167
*/
124
- loop_count = 0 ;
125
- for_each_memblock (memory , reg ) {
126
-
127
- start_addr = reg -> base ;
128
-
129
- redo :
130
- if (loop_count < 1 && mmu_psize_defs [MMU_PAGE_1G ].shift )
131
- linear_page_size = PUD_SIZE ;
132
- else if (loop_count < 2 && mmu_psize_defs [MMU_PAGE_2M ].shift )
133
- linear_page_size = PMD_SIZE ;
134
- else
135
- linear_page_size = PAGE_SIZE ;
136
-
137
- base = _ALIGN_UP (start_addr , linear_page_size );
138
- end = _ALIGN_DOWN (reg -> base + reg -> size , linear_page_size );
139
-
140
- pr_info ("Mapping range 0x%lx - 0x%lx with 0x%lx\n" ,
141
- (unsigned long )base , (unsigned long )end ,
142
- linear_page_size );
143
-
144
- while (base < end ) {
145
- radix__map_kernel_page ((unsigned long )__va (base ),
146
- base , PAGE_KERNEL_X ,
147
- linear_page_size );
148
- base += linear_page_size ;
149
- }
150
- /*
151
- * map the rest using lower page size
152
- */
153
- if (end < reg -> base + reg -> size ) {
154
- start_addr = end ;
155
- loop_count ++ ;
156
- goto redo ;
157
- }
158
- }
168
+ for_each_memblock (memory , reg )
169
+ WARN_ON (create_physical_mapping (reg -> base ,
170
+ reg -> base + reg -> size ));
159
171
/*
160
172
* Allocate Partition table and process table for the
161
173
* host.
0 commit comments