@@ -44,6 +44,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
+#include <linux/compat.h>
 #include <linux/slab.h>
 #include <linux/poll.h>
 #include <linux/fs.h>
@@ -173,8 +174,10 @@
  * double_lock_hb() and double_unlock_hb(), respectively.
  */

-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
-int __read_mostly futex_cmpxchg_enabled;
+#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+#define futex_cmpxchg_enabled 1
+#else
+static int __read_mostly futex_cmpxchg_enabled;
 #endif

 /*
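
On architectures that select CONFIG_HAVE_FUTEX_CMPXCHG, futex_cmpxchg_enabled is now a compile-time constant 1 rather than a runtime-probed variable, so every "if (!futex_cmpxchg_enabled)" guard in the compat code below becomes dead code the compiler drops; other architectures keep the static flag, filled in by futex_detect_cmpxchg() at the end of this patch. A minimal sketch of the pattern, with hypothetical names:

    /* Sketch of the constant-folding pattern (hypothetical names): */
    #ifdef HAVE_NATIVE_CAS
    #define cas_enabled 1                   /* compile-time constant       */
    #else
    static int cas_enabled;                 /* probed once at startup      */
    #endif

    int robust_op(void)
    {
            if (!cas_enabled)               /* folded away when constant 1 */
                    return -1;
            return 0;                       /* ... real work here ...      */
    }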
@@ -3360,7 +3363,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
  * Process a futex-list entry, check whether it's owned by the
  * dying task, and do notification if so:
  */
-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
+static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
 {
        u32 uval, uninitialized_var(nval), mval;

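Only the linkage of handle_futex_death() changes here: it can be static now that the compat list-walker lives in the same file. For context, its job under the robust-futex ABI is roughly the following. This is a conceptual sketch, not the kernel code: futex_cmpxchg() and futex_wake_one() are hypothetical stand-ins, and the fault handling and cmpxchg retry loop are omitted.

    #include <linux/futex.h>  /* FUTEX_TID_MASK, FUTEX_WAITERS, FUTEX_OWNER_DIED */
    #include <stdint.h>

    static void mark_owner_dead(uint32_t *uaddr, uint32_t dying_tid)
    {
            uint32_t uval = *uaddr;           /* get_user() in the kernel    */

            if ((uval & FUTEX_TID_MASK) != dying_tid)
                    return;                   /* not held by the dying task  */

            /* Keep the waiter bit, drop the TID, set "owner died". */
            futex_cmpxchg(uaddr, uval, (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED);

            if (uval & FUTEX_WAITERS)
                    futex_wake_one(uaddr);    /* let one waiter recover it   */
    }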
@@ -3589,6 +3592,192 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
        return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
 }

+#ifdef CONFIG_COMPAT
+/*
+ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
+ */
+static inline int
+compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
+                          compat_uptr_t __user *head, unsigned int *pi)
+{
+        if (get_user(*uentry, head))
+                return -EFAULT;
+
+        *entry = compat_ptr((*uentry) & ~1);
+        *pi = (unsigned int)(*uentry) & 1;
+
+        return 0;
+}
+
+static void __user *futex_uaddr(struct robust_list __user *entry,
+                                compat_long_t futex_offset)
+{
+        compat_uptr_t base = ptr_to_compat(entry);
+        void __user *uaddr = compat_ptr(base + futex_offset);
+
+        return uaddr;
+}
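
futex_uaddr() does the offset arithmetic in 32-bit compat_uptr_t space on purpose, so it wraps exactly as the 32-bit task's own pointer arithmetic would. The offset itself is supplied by userspace at registration time; a hypothetical 32-bit lock library would compute it like this (a sketch using the UAPI types from linux/futex.h):

    #include <linux/futex.h>   /* struct robust_list, struct robust_list_head */
    #include <stddef.h>        /* offsetof */

    /* Hypothetical lock layout: the kernel only ever sees the list node
     * and recovers the futex word by adding futex_offset to its address. */
    struct my_mutex {
            struct robust_list list;        /* linked into the robust list */
            int futex;                      /* the lock word itself        */
    };

    static struct robust_list_head head = {
            .futex_offset = offsetof(struct my_mutex, futex)
                          - offsetof(struct my_mutex, list),
    };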
+
+/*
+ * Walk curr->robust_list (very carefully, it's a userspace list!)
+ * and mark any locks found there dead, and notify any waiters.
+ *
+ * We silently return on any sign of list-walking problem.
+ */
+void compat_exit_robust_list(struct task_struct *curr)
+{
+        struct compat_robust_list_head __user *head = curr->compat_robust_list;
+        struct robust_list __user *entry, *next_entry, *pending;
+        unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+        unsigned int uninitialized_var(next_pi);
+        compat_uptr_t uentry, next_uentry, upending;
+        compat_long_t futex_offset;
+        int rc;
+
+        if (!futex_cmpxchg_enabled)
+                return;
+
+        /*
+         * Fetch the list head (which was registered earlier, via
+         * sys_set_robust_list()):
+         */
+        if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
+                return;
+        /*
+         * Fetch the relative futex offset:
+         */
+        if (get_user(futex_offset, &head->futex_offset))
+                return;
+        /*
+         * Fetch any possibly pending lock-add first, and handle it
+         * if it exists:
+         */
+        if (compat_fetch_robust_entry(&upending, &pending,
+                                      &head->list_op_pending, &pip))
+                return;
+
+        next_entry = NULL;      /* avoid warning with gcc */
+        while (entry != (struct robust_list __user *) &head->list) {
+                /*
+                 * Fetch the next entry in the list before calling
+                 * handle_futex_death:
+                 */
+                rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
+                        (compat_uptr_t __user *)&entry->next, &next_pi);
+                /*
+                 * A pending lock might already be on the list, so
+                 * dont process it twice:
+                 */
+                if (entry != pending) {
+                        void __user *uaddr = futex_uaddr(entry, futex_offset);
+
+                        if (handle_futex_death(uaddr, curr, pi))
+                                return;
+                }
+                if (rc)
+                        return;
+                uentry = next_uentry;
+                entry = next_entry;
+                pi = next_pi;
+                /*
+                 * Avoid excessively long or circular lists:
+                 */
+                if (!--limit)
+                        break;
+
+                cond_resched();
+        }
+        if (pending) {
+                void __user *uaddr = futex_uaddr(pending, futex_offset);
+
+                handle_futex_death(uaddr, curr, pip);
+        }
+}
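
Three defensive details in the walk above: the next entry is fetched before handle_futex_death() is called (that call can fault or change the list), the ROBUST_LIST_LIMIT counter defeats circular lists, and list_op_pending covers the window in which a lock has been acquired but not yet linked in. The userspace half of that last protocol looks roughly like this, continuing the struct my_mutex sketch above (lock_acquire() and enqueue() are hypothetical stand-ins):

    static void robust_lock(struct robust_list_head *head, struct my_mutex *m)
    {
            head->list_op_pending = &m->list;  /* 1. announce the pending lock */
            lock_acquire(&m->futex);           /* 2. actually take the futex   */
            enqueue(&head->list, &m->list);    /* 3. link into the robust list */
            head->list_op_pending = NULL;      /* 4. retract the announcement  */
    }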
+
+COMPAT_SYSCALL_DEFINE2(set_robust_list,
+                struct compat_robust_list_head __user *, head,
+                compat_size_t, len)
+{
+        if (!futex_cmpxchg_enabled)
+                return -ENOSYS;
+
+        if (unlikely(len != sizeof(*head)))
+                return -EINVAL;
+
+        current->compat_robust_list = head;
+
+        return 0;
+}
+
+COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
+                        compat_uptr_t __user *, head_ptr,
+                        compat_size_t __user *, len_ptr)
+{
+        struct compat_robust_list_head __user *head;
+        unsigned long ret;
+        struct task_struct *p;
+
+        if (!futex_cmpxchg_enabled)
+                return -ENOSYS;
+
+        rcu_read_lock();
+
+        ret = -ESRCH;
+        if (!pid)
+                p = current;
+        else {
+                p = find_task_by_vpid(pid);
+                if (!p)
+                        goto err_unlock;
+        }
+
+        ret = -EPERM;
+        if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
+                goto err_unlock;
+
+        head = p->compat_robust_list;
+        rcu_read_unlock();
+
+        if (put_user(sizeof(*head), len_ptr))
+                return -EFAULT;
+        return put_user(ptr_to_compat(head), head_ptr);
+
+err_unlock:
+        rcu_read_unlock();
+
+        return ret;
+}
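
As in the native syscall, pid 0 names the calling task and anything else requires ptrace-read permission on the target. There is no glibc wrapper, so callers go through the raw syscall; a minimal sketch:

    #include <linux/futex.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct robust_list_head *rhead;
            size_t len;

            /* pid 0 == current task; other pids need ptrace-read rights. */
            if (syscall(SYS_get_robust_list, 0, &rhead, &len) == 0)
                    printf("robust list head %p, len %zu\n", (void *)rhead, len);
            return 0;
    }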
+
+COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+                struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
+                u32, val3)
+{
+        struct timespec ts;
+        ktime_t t, *tp = NULL;
+        int val2 = 0;
+        int cmd = op & FUTEX_CMD_MASK;
+
+        if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
+                      cmd == FUTEX_WAIT_BITSET ||
+                      cmd == FUTEX_WAIT_REQUEUE_PI)) {
+                if (compat_get_timespec(&ts, utime))
+                        return -EFAULT;
+                if (!timespec_valid(&ts))
+                        return -EINVAL;
+
+                t = timespec_to_ktime(ts);
+                if (cmd == FUTEX_WAIT)
+                        t = ktime_add_safe(ktime_get(), t);
+                tp = &t;
+        }
+        if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
+            cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
+                val2 = (int) (unsigned long) utime;
+
+        return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+}
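
Note the two uses of the utime slot: the wait-style commands pass a real 32-bit timespec (with FUTEX_WAIT's relative timeout made absolute via ktime_add_safe(), which saturates rather than overflowing), while the requeue and wake-op commands reuse the same argument register to carry the integer val2. From a 32-bit caller's point of view, a wait with timeout is simply (a sketch):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
            int word = 0;  /* the futex word; FUTEX_WAIT blocks while it reads 0 */
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

            /* Relative 100ms timeout: returns -1 with errno == ETIMEDOUT. */
            return syscall(SYS_futex, &word, FUTEX_WAIT, 0, &ts, NULL, 0);
    }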
+#endif /* CONFIG_COMPAT */
+
 static void __init futex_detect_cmpxchg(void)
 {
 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG