@@ -496,283 +496,6 @@ do { \
 
 extern void __put_user_unknown(void);
 
-/*
- * ul{b,h,w} are macros and there are no equivalent macros for EVA.
- * EVA unaligned access is handled in the ADE exception handler.
- */
-#ifndef CONFIG_EVA
-/*
- * put_user_unaligned: - Write a simple value into user space.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define put_user_unaligned(x,ptr)	\
-	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * get_user_unaligned: - Get a simple variable from user space.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define get_user_unaligned(x,ptr)	\
-	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
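For context on what is being deleted: the kerneldoc above fully specifies the checked interfaces, and a caller would have used them roughly as in the sketch below. This is an illustration only; struct wire_hdr, bump_seq(), and the __user buffer are hypothetical, not code from this tree, and it assumes normal kernel headers (linux/types.h, asm/uaccess.h).

struct wire_hdr {
	u16 len;
	u32 seq;
} __attribute__((packed));	/* 'seq' sits at offset 2: misaligned */

static int bump_seq(struct wire_hdr __user *hdr)
{
	u32 seq;

	/* Checked variant: does its own access_ok(); returns 0 or
	 * -EFAULT, zeroing 'seq' on fault. */
	if (get_user_unaligned(seq, &hdr->seq))
		return -EFAULT;

	/* Mirror image for the store path. */
	return put_user_unaligned(seq + 1, &hdr->seq);
}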
-/*
- * __put_user_unaligned: - Write a simple value into user space, with less checking.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define __put_user_unaligned(x,ptr)	\
-	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * __get_user_unaligned: - Get a simple variable from user space, with less checking.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define __get_user_unaligned(x,ptr)	\
-	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
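The __-prefixed forms above drop the access_ok() check, so the point of using them was to validate a range once and then issue several transfers. A minimal sketch under that assumption (read_pair() and its arguments are hypothetical; VERIFY_READ matches the access_ok() signature of this kernel era):

static int read_pair(u32 __user *ubuf, u32 *a, u32 *b)
{
	/* One range check covers both unchecked accesses below. */
	if (!access_ok(VERIFY_READ, ubuf, 2 * sizeof(u32)))
		return -EFAULT;

	/* Each call can still return -EFAULT, e.g. if the mapping
	 * disappears after the check. */
	if (__get_user_unaligned(*a, &ubuf[0]))
		return -EFAULT;
	return __get_user_unaligned(*b, &ubuf[1]);
}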
-/*
- * Yuck. We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr)	\
-	__get_user_unaligned_asm_ll32(val, ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr)	\
-	__get_user_unaligned_asm(val, "uld", ptr)
-#endif
-
-extern void __get_user_unaligned_unknown(void);
-
-#define __get_user_unaligned_common(val, size, ptr)			\
-do {									\
-	switch (size) {							\
-	case 1: __get_data_asm(val, "lb", ptr); break;			\
-	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
-	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
-	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
-	default: __get_user_unaligned_unknown(); break;			\
-	}								\
-} while (0)
-
-#define __get_user_unaligned_nocheck(x,ptr,size)			\
-({									\
-	int __gu_err;							\
-									\
-	__get_user_unaligned_common((x), size, ptr);			\
-	__gu_err;							\
-})
-
-#define __get_user_unaligned_check(x,ptr,size)				\
-({									\
-	int __gu_err = -EFAULT;						\
-	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
-									\
-	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
-		__get_user_unaligned_common((x), size, __gu_ptr);	\
-									\
-	__gu_err;							\
-})
-
-#define __get_data_unaligned_asm(val, insn, addr)			\
-{									\
-	long __gu_tmp;							\
-									\
-	__asm__ __volatile__(						\
-	"1:	" insn "	%1, %3				\n"	\
-	"2:							\n"	\
-	"	.insn						\n"	\
-	"	.section .fixup,\"ax\"				\n"	\
-	"3:	li	%0, %4					\n"	\
-	"	move	%1, $0					\n"	\
-	"	j	2b					\n"	\
-	"	.previous					\n"	\
-	"	.section __ex_table,\"a\"			\n"	\
-	"	"__UA_ADDR "\t1b, 3b				\n"	\
-	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
-	"	.previous					\n"	\
-	: "=r" (__gu_err), "=r" (__gu_tmp)				\
-	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
-									\
-	(val) = (__typeof__(*(addr))) __gu_tmp;				\
-}
-
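The .fixup/__ex_table pairing in __get_data_unaligned_asm() is the kernel's standard exception-table pattern: each table entry maps the address of a possibly-faulting instruction (label 1:) to a landing pad (label 3:) that loads -EFAULT and zeroes the destination; the extra "1b + 4" entry covers the second machine instruction that the ulh/ulw macro-ops expand to. Conceptually an entry is just an address pair; the struct below is an illustration, not the tree's actual extable layout:

/* Conceptual shape of one exception-table entry. On a fault at
 * 'insn', the trap handler resumes execution at 'fixup' instead
 * of treating the fault as fatal. */
struct extable_entry_sketch {
	unsigned long insn;	/* address that may fault (label 1:) */
	unsigned long fixup;	/* recovery code address (label 3:) */
};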
-/*
- * Get a long long 64 using 32 bit registers.
- */
-#define __get_user_unaligned_asm_ll32(val, addr)			\
-{									\
-	unsigned long long __gu_tmp;					\
-									\
-	__asm__ __volatile__(						\
-	"1:	ulw	%1, (%3)				\n"	\
-	"2:	ulw	%D1, 4(%3)				\n"	\
-	"	move	%0, $0					\n"	\
-	"3:							\n"	\
-	"	.insn						\n"	\
-	"	.section	.fixup,\"ax\"			\n"	\
-	"4:	li	%0, %4					\n"	\
-	"	move	%1, $0					\n"	\
-	"	move	%D1, $0					\n"	\
-	"	j	3b					\n"	\
-	"	.previous					\n"	\
-	"	.section	__ex_table,\"a\"		\n"	\
-	"	" __UA_ADDR "	1b, 4b				\n"	\
-	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
-	"	" __UA_ADDR "	2b, 4b				\n"	\
-	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
-	"	.previous					\n"	\
-	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
-	: "0" (0), "r" (addr), "i" (-EFAULT));				\
-	(val) = (__typeof__(*(addr))) __gu_tmp;			\
-}
-
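In the ll32 variant above, the 64-bit destination lives in a 32-bit register pair: %1 names one half and %D1 its companion register. Written out in plain C, the composition amounts to the sketch below (join_words() is illustrative only; which word is the high half depends on endianness, which the register-pair constraint handles for the asm):

/* Illustration: combine the two 32-bit 'ulw' results into the
 * 64-bit value the caller asked for. */
static inline unsigned long long join_words(unsigned int hi,
					    unsigned int lo)
{
	return ((unsigned long long)hi << 32) | lo;
}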
-/*
- * Yuck. We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
-#endif
-
-#define __put_user_unaligned_common(ptr, size)				\
-do {									\
-	switch (size) {							\
-	case 1: __put_data_asm("sb", ptr); break;			\
-	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
-	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
-	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
-	default: __put_user_unaligned_unknown(); break;			\
-	}								\
-} while (0)
-
-#define __put_user_unaligned_nocheck(x,ptr,size)			\
-({									\
-	__typeof__(*(ptr)) __pu_val;					\
-	int __pu_err = 0;						\
-									\
-	__pu_val = (x);							\
-	__put_user_unaligned_common(ptr, size);				\
-	__pu_err;							\
-})
-
-#define __put_user_unaligned_check(x,ptr,size)				\
-({									\
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
-	__typeof__(*(ptr)) __pu_val = (x);				\
-	int __pu_err = -EFAULT;						\
-									\
-	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size)))		\
-		__put_user_unaligned_common(__pu_addr, size);		\
-									\
-	__pu_err;							\
-})
-
-#define __put_user_unaligned_asm(insn, ptr)				\
-{									\
-	__asm__ __volatile__(						\
-	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm\n"	\
-	"2:							\n"	\
-	"	.insn						\n"	\
-	"	.section	.fixup,\"ax\"			\n"	\
-	"3:	li	%0, %4					\n"	\
-	"	j	2b					\n"	\
-	"	.previous					\n"	\
-	"	.section	__ex_table,\"a\"		\n"	\
-	"	" __UA_ADDR "	1b, 3b				\n"	\
-	"	.previous					\n"	\
-	: "=r" (__pu_err)						\
-	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
-	  "i" (-EFAULT));						\
-}
-
-#define __put_user_unaligned_asm_ll32(ptr)				\
-{									\
-	__asm__ __volatile__(						\
-	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
-	"2:	sw	%D2, 4(%3)				\n"	\
-	"3:							\n"	\
-	"	.insn						\n"	\
-	"	.section	.fixup,\"ax\"			\n"	\
-	"4:	li	%0, %4					\n"	\
-	"	j	3b					\n"	\
-	"	.previous					\n"	\
-	"	.section	__ex_table,\"a\"		\n"	\
-	"	" __UA_ADDR "	1b, 4b				\n"	\
-	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
-	"	" __UA_ADDR "	2b, 4b				\n"	\
-	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
-	"	.previous"						\
-	: "=r" (__pu_err)						\
-	: "0" (0), "r" (__pu_val), "r" (ptr),				\
-	  "i" (-EFAULT));						\
-}
-
-extern void __put_user_unaligned_unknown(void);
-#endif
-
 /*
  * We're generating jump to subroutines which will be outside the range of
  * jump instructions