@@ -328,22 +328,30 @@ void __init cleanup_highmap(void)
 	}
 }
 
+/*
+ * Create PTE level page table mapping for physical addresses.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
 	      pgprot_t prot)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
+	pte_t *pte;
 	int i;
 
-	pte_t *pte = pte_page + pte_index(addr);
+	pte = pte_page + pte_index(paddr);
+	i = pte_index(paddr);
 
-	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
-		next = (addr & PAGE_MASK) + PAGE_SIZE;
-		if (addr >= end) {
+	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
+		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pte(pte, __pte(0));
 			continue;
 		}
@@ -361,37 +369,44 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		}
 
 		if (0)
-			printk("   pte=%p addr=%lx pte=%016lx\n",
-				pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
+			pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
+				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
 		pages++;
-		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
-		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
+		set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
 	}
 
 	update_page_count(PG_LEVEL_4K, pages);
 
-	return last_map_addr;
+	return paddr_last;
 }
 
+/*
+ * Create PMD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
 	      unsigned long page_size_mask, pgprot_t prot)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
 
-	int i = pmd_index(address);
+	int i = pmd_index(paddr);
 
-	for (; i < PTRS_PER_PMD; i++, address = next) {
-		pmd_t *pmd = pmd_page + pmd_index(address);
+	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
+		pmd_t *pmd = pmd_page + pmd_index(paddr);
 		pte_t *pte;
 		pgprot_t new_prot = prot;
 
-		next = (address & PMD_MASK) + PMD_SIZE;
-		if (address >= end) {
+		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pmd(pmd, __pmd(0));
 			continue;
 		}
@@ -400,8 +415,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			if (!pmd_large(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
 				pte = (pte_t *)pmd_page_vaddr(*pmd);
-				last_map_addr = phys_pte_init(pte, address,
-								end, prot);
+				paddr_last = phys_pte_init(pte, paddr,
+							   paddr_end, prot);
 				spin_unlock(&init_mm.page_table_lock);
 				continue;
 			}
@@ -420,7 +435,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			if (page_size_mask & (1 << PG_LEVEL_2M)) {
 				if (!after_bootmem)
 					pages++;
-				last_map_addr = next;
+				paddr_last = paddr_next;
 				continue;
 			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -430,51 +445,60 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
-				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
+				pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
 					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 
 		pte = alloc_low_page();
-		last_map_addr = phys_pte_init(pte, address, end, new_prot);
+		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
 
 		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, pte);
 		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
-	return last_map_addr;
+	return paddr_last;
 }
 
+/*
+ * Create PUD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
-			 unsigned long page_size_mask)
+phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+	      unsigned long page_size_mask)
 {
-	unsigned long pages = 0, next;
-	unsigned long last_map_addr = end;
-	int i = pud_index(addr);
+	unsigned long pages = 0, paddr_next;
+	unsigned long paddr_last = paddr_end;
+	int i = pud_index(paddr);
 
-	for (; i < PTRS_PER_PUD; i++, addr = next) {
-		pud_t *pud = pud_page + pud_index(addr);
+	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+		pud_t *pud = pud_page + pud_index(paddr);
 		pmd_t *pmd;
 		pgprot_t prot = PAGE_KERNEL;
 
-		next = (addr & PUD_MASK) + PUD_SIZE;
-		if (addr >= end) {
+		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+		if (paddr >= paddr_end) {
 			if (!after_bootmem &&
-			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
-			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
+			    !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+					     E820_RAM) &&
+			    !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+					     E820_RESERVED_KERN))
 				set_pud(pud, __pud(0));
 			continue;
 		}
 
 		if (pud_val(*pud)) {
 			if (!pud_large(*pud)) {
 				pmd = pmd_offset(pud, 0);
-				last_map_addr = phys_pmd_init(pmd, addr, end,
-							 page_size_mask, prot);
+				paddr_last = phys_pmd_init(pmd, paddr,
+							   paddr_end,
+							   page_size_mask,
+							   prot);
 				__flush_tlb_all();
 				continue;
 			}
@@ -493,7 +517,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			if (page_size_mask & (1 << PG_LEVEL_1G)) {
 				if (!after_bootmem)
 					pages++;
-				last_map_addr = next;
+				paddr_last = paddr_next;
 				continue;
 			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -503,16 +527,16 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			pages++;
 			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
-				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
+				pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
 					PAGE_KERNEL_LARGE));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = next;
+			paddr_last = paddr_next;
 			continue;
 		}
 
 		pmd = alloc_low_page();
-		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
-						 prot);
+		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
+					   page_size_mask, prot);
 
 		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, pmd);
@@ -522,38 +546,44 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
 	update_page_count(PG_LEVEL_1G, pages);
 
-	return last_map_addr;
+	return paddr_last;
 }
 
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. The virtual and physical addresses have to be aligned on PUD level
+ * down. It returns the last physical address mapped.
+ */
 unsigned long __meminit
-kernel_physical_mapping_init(unsigned long start,
-			     unsigned long end,
+kernel_physical_mapping_init(unsigned long paddr_start,
+			     unsigned long paddr_end,
 			     unsigned long page_size_mask)
 {
 	bool pgd_changed = false;
-	unsigned long next, last_map_addr = end;
-	unsigned long addr;
+	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
 
-	start = (unsigned long)__va(start);
-	end = (unsigned long)__va(end);
-	addr = start;
+	paddr_last = paddr_end;
+	vaddr = (unsigned long)__va(paddr_start);
+	vaddr_end = (unsigned long)__va(paddr_end);
+	vaddr_start = vaddr;
 
-	for (; start < end; start = next) {
-		pgd_t *pgd = pgd_offset_k(start);
+	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+		pgd_t *pgd = pgd_offset_k(vaddr);
 		pud_t *pud;
 
-		next = (start & PGDIR_MASK) + PGDIR_SIZE;
+		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 
 		if (pgd_val(*pgd)) {
 			pud = (pud_t *)pgd_page_vaddr(*pgd);
-			last_map_addr = phys_pud_init(pud, __pa(start),
-						 __pa(end), page_size_mask);
+			paddr_last = phys_pud_init(pud, __pa(vaddr),
+						   __pa(vaddr_end),
+						   page_size_mask);
 			continue;
 		}
 
 		pud = alloc_low_page();
-		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
-						 page_size_mask);
+		paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+					   page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
 		pgd_populate(&init_mm, pgd, pud);
@@ -562,11 +592,11 @@ kernel_physical_mapping_init(unsigned long start,
 	}
 
 	if (pgd_changed)
-		sync_global_pgds(addr, end - 1, 0);
+		sync_global_pgds(vaddr_start, vaddr_end - 1, 0);
 
 	__flush_tlb_all();
 
-	return last_map_addr;
+	return paddr_last;
 }
 
 #ifndef CONFIG_NUMA
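
The renamed variables make the pattern shared by these functions easier to see: each level walks its entries, computes the physical address of the next entry boundary with a mask-and-add, and returns the last physical address it mapped. Below is a minimal standalone sketch of that boundary step in ordinary user-space C, not kernel code; the helper name next_boundary(), the LEVEL_* macros and the example address are made up for illustration only.

/*
 * Standalone sketch: the "paddr_next = (paddr & *_MASK) + *_SIZE" step
 * from phys_pte_init()/phys_pmd_init()/phys_pud_init(), generalised over
 * the level shift (4K, 2M, 1G on x86-64). Hypothetical names throughout.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30

#define LEVEL_SIZE(shift)	(1UL << (shift))
#define LEVEL_MASK(shift)	(~(LEVEL_SIZE(shift) - 1))

/* Round paddr down to this level's boundary, then add one entry's span. */
static unsigned long next_boundary(unsigned long paddr, int shift)
{
	return (paddr & LEVEL_MASK(shift)) + LEVEL_SIZE(shift);
}

int main(void)
{
	unsigned long paddr = 0x1234567UL;	/* arbitrary example address */

	printf("addr %#lx: next 4K boundary %#lx\n", paddr, next_boundary(paddr, PAGE_SHIFT));
	printf("addr %#lx: next 2M boundary %#lx\n", paddr, next_boundary(paddr, PMD_SHIFT));
	printf("addr %#lx: next 1G boundary %#lx\n", paddr, next_boundary(paddr, PUD_SHIFT));
	return 0;
}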