// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>

#include "xdp_umem.h"

#define XDP_UMEM_MIN_FRAME_SIZE 2048

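/* Allocate a zero-initialized umem. Returns 0 on success, -ENOMEM otherwise. */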
int xdp_umem_create(struct xdp_umem **umem)
{
	*umem = kzalloc(sizeof(**umem), GFP_KERNEL);

	if (!(*umem))
		return -ENOMEM;

	return 0;
}

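/* Unpin the pages backing the umem: mark each page dirty, drop the
 * reference taken by get_user_pages() and free the page array.
 */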
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	if (umem->pgs) {
		for (i = 0; i < umem->npgs; i++) {
			struct page *page = umem->pgs[i];

			set_page_dirty_lock(page);
			put_page(page);
		}

		kfree(umem->pgs);
		umem->pgs = NULL;
	}
}

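/* Undo the RLIMIT_MEMLOCK accounting done in xdp_umem_account_pages(). */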
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

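/* Final teardown: unpin the pages, drop the pid reference taken at
 * registration time, undo the memlock accounting and free the umem.
 */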
static void xdp_umem_release(struct xdp_umem *umem)
{
	struct task_struct *task;
	struct mm_struct *mm;

	if (umem->pgs) {
		xdp_umem_unpin_pages(umem);

		task = get_pid_task(umem->pid, PIDTYPE_PID);
		put_pid(umem->pid);
		if (!task)
			goto out;
		mm = get_task_mm(task);
		put_task_struct(task);
		if (!mm)
			goto out;

		mmput(mm);
		umem->pgs = NULL;
	}

	xdp_umem_unaccount_pages(umem);
out:
	kfree(umem);
}

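/* Work callback so that the final release runs from a workqueue. */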
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

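/* Grab a reference to the umem. */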
void xdp_get_umem(struct xdp_umem *umem)
{
	atomic_inc(&umem->users);
}

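/* Drop a reference; the last put schedules the deferred release. */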
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (atomic_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

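/* Pin the user memory with get_user_pages() so the kernel can access
 * it. A partial pin is unwound and reported as -ENOMEM.
 */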
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL);
	if (!umem->pgs)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags, &umem->pgs[0], NULL);
	up_write(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

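/* Charge the pages against the user's RLIMIT_MEMLOCK, unless the caller
 * has CAP_IPC_LOCK. Fails with -ENOBUFS if the limit would be exceeded.
 */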
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

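/* Validate an XDP_UMEM_REG request and initialize the umem: check the
 * frame size and alignment constraints, then account and pin the user
 * memory described by mr.
 */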
int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 frame_size = mr->frame_size, frame_headroom = mr->frame_headroom;
	u64 addr = mr->addr, size = mr->len;
	unsigned int nframes, nfpp;
	int size_chk, err;

	if (!umem)
		return -EINVAL;

	if (frame_size < XDP_UMEM_MIN_FRAME_SIZE || frame_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(frame_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* For simplicity, the memory area has to be page size
		 * aligned for now. This might change in the future.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	nframes = size / frame_size;
	if (nframes == 0 || nframes > UINT_MAX)
		return -EINVAL;

	nfpp = PAGE_SIZE / frame_size;
	if (nframes < nfpp || nframes % nfpp)
		return -EINVAL;

	frame_headroom = ALIGN(frame_headroom, 64);

	size_chk = frame_size - frame_headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->pid = get_task_pid(current, PIDTYPE_PID);
	umem->size = (size_t)size;
	umem->address = (unsigned long)addr;
	umem->props.frame_size = frame_size;
	umem->props.nframes = nframes;
	umem->frame_headroom = frame_headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;

	umem->frame_size_log2 = ilog2(frame_size);
	umem->nfpp_mask = nfpp - 1;
	umem->nfpplog2 = ilog2(nfpp);
	atomic_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		goto out;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;
	return 0;

out_account:
	xdp_umem_unaccount_pages(umem);
out:
	put_pid(umem->pid);
	return err;
}