/*
 * The MORUS-1280 Authenticated-Encryption Algorithm
 * Common glue skeleton
 *
 * Copyright (c) 2016-2018 Ondrej Mosnacek <omosnacek@gmail.com>
 * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus1280_glue.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/fpu/api.h>

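/*
 * MORUS-1280 operates on a state of MORUS_STATE_BLOCKS (five) 256-bit
 * blocks; MORUS1280_BLOCK_SIZE is 32 bytes. Both constants come from
 * <crypto/morus1280_glue.h> and <crypto/morus_common.h>.
 */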
struct morus1280_state {
	struct morus1280_block s[MORUS_STATE_BLOCKS];
};

struct morus1280_ops {
	int (*skcipher_walk_init)(struct skcipher_walk *walk,
				  struct aead_request *req, bool atomic);

	/* Processes only the whole 32-byte blocks of the input. */
	void (*crypt_blocks)(void *state, const void *src, void *dst,
			     unsigned int length);
	/* Processes the final partial block, if any. */
	void (*crypt_tail)(void *state, const void *src, void *dst,
			   unsigned int length);
};

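/*
 * Feed the associated data to the MORUS state one 32-byte block at a
 * time. Data arriving in scatterlist fragments smaller than a block is
 * accumulated in a bounce buffer; a final partial block is zero-padded
 * before absorption.
 */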
static void crypto_morus1280_glue_process_ad(
		struct morus1280_state *state,
		const struct morus1280_glue_ops *ops,
		struct scatterlist *sg_src, unsigned int assoclen)
{
	struct scatter_walk walk;
	struct morus1280_block buf;
	unsigned int pos = 0;

	scatterwalk_start(&walk, sg_src);
	while (assoclen != 0) {
		unsigned int size = scatterwalk_clamp(&walk, assoclen);
		unsigned int left = size;
		void *mapped = scatterwalk_map(&walk);
		const u8 *src = (const u8 *)mapped;

		if (pos + size >= MORUS1280_BLOCK_SIZE) {
			if (pos > 0) {
				/* Complete and absorb the buffered block. */
				unsigned int fill = MORUS1280_BLOCK_SIZE - pos;
				memcpy(buf.bytes + pos, src, fill);
				ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
				pos = 0;
				left -= fill;
				src += fill;
			}

			/* Absorb whole blocks directly from the fragment. */
			ops->ad(state, src, left);
			src += left & ~(MORUS1280_BLOCK_SIZE - 1);
			left &= MORUS1280_BLOCK_SIZE - 1;
		}

		/* Buffer the remainder for the next iteration. */
		memcpy(buf.bytes + pos, src, left);

		pos += left;
		assoclen -= size;
		scatterwalk_unmap(mapped);
		scatterwalk_advance(&walk, size);
		scatterwalk_done(&walk, 0, assoclen);
	}

	if (pos > 0) {
		/* Zero-pad and absorb the final partial block. */
		memset(buf.bytes + pos, 0, MORUS1280_BLOCK_SIZE - pos);
		ops->ad(state, buf.bytes, MORUS1280_BLOCK_SIZE);
	}
}

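/*
 * Walk the plaintext/ciphertext via the skcipher walk API and hand each
 * mapped chunk to the implementation: crypt_blocks consumes the whole
 * 32-byte blocks, crypt_tail the remainder.
 */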
static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
						struct morus1280_ops ops,
						struct aead_request *req)
{
	struct skcipher_walk walk;
	u8 *cursor_src, *cursor_dst;
	unsigned int chunksize, base;

	ops.skcipher_walk_init(&walk, req, false);

	while (walk.nbytes) {
		cursor_src = walk.src.virt.addr;
		cursor_dst = walk.dst.virt.addr;
		chunksize = walk.nbytes;

		ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);

		base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
		cursor_src += base;
		cursor_dst += base;
		chunksize &= MORUS1280_BLOCK_SIZE - 1;

		if (chunksize > 0)
			ops.crypt_tail(state, cursor_src, cursor_dst,
				       chunksize);

		skcipher_walk_done(&walk, 0);
	}
}

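/*
 * MORUS-1280 accepts either a 256-bit or a 128-bit key; per the MORUS
 * specification, a 128-bit key is repeated twice to fill the 256-bit
 * key register.
 */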
int crypto_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int keylen)
{
	struct morus1280_ctx *ctx = crypto_aead_ctx(aead);

	if (keylen == MORUS1280_BLOCK_SIZE) {
		memcpy(ctx->key.bytes, key, MORUS1280_BLOCK_SIZE);
	} else if (keylen == MORUS1280_BLOCK_SIZE / 2) {
		memcpy(ctx->key.bytes, key, keylen);
		memcpy(ctx->key.bytes + keylen, key, keylen);
	} else {
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setkey);

int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	return (authsize <= MORUS_MAX_AUTH_SIZE) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_setauthsize);

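/*
 * Common encrypt/decrypt path: initialize the state from key and IV,
 * absorb the associated data, transform the payload, then compute the
 * tag. The implementations touch FPU/vector registers, so the whole
 * sequence runs between kernel_fpu_begin() and kernel_fpu_end().
 */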
static void crypto_morus1280_glue_crypt(struct aead_request *req,
					struct morus1280_ops ops,
					unsigned int cryptlen,
					struct morus1280_block *tag_xor)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus1280_state state;

	kernel_fpu_begin();

	ctx->ops->init(&state, &ctx->key, req->iv);
	crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src,
					 req->assoclen);
	crypto_morus1280_glue_process_crypt(&state, ops, req);
	ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);

	kernel_fpu_end();
}

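/*
 * Encryption starts from an all-zero tag_xor, so ->final() leaves the
 * plain tag in it; the tag is then appended to the ciphertext, right
 * after the associated data.
 */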
int crypto_morus1280_glue_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus1280_ops ops = {
		.skcipher_walk_init = skcipher_walk_aead_encrypt,
		.crypt_blocks = ctx->ops->enc,
		.crypt_tail = ctx->ops->enc_tail,
	};

	struct morus1280_block tag = {};
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen;

	crypto_morus1280_glue_crypt(req, ops, cryptlen, &tag);

	scatterwalk_map_and_copy(tag.bytes, req->dst,
				 req->assoclen + cryptlen, authsize, 1);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_encrypt);

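/*
 * Decryption pre-loads tag_xor with the expected tag from the request,
 * so ->final() XORs the computed tag into it: the result is all zeroes
 * exactly when the tags match. crypto_memneq() does the comparison in
 * constant time.
 */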
int crypto_morus1280_glue_decrypt(struct aead_request *req)
{
	static const u8 zeros[MORUS1280_BLOCK_SIZE] = {};

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
	struct morus1280_ops ops = {
		.skcipher_walk_init = skcipher_walk_aead_decrypt,
		.crypt_blocks = ctx->ops->dec,
		.crypt_tail = ctx->ops->dec_tail,
	};

	struct morus1280_block tag;
	unsigned int authsize = crypto_aead_authsize(tfm);
	unsigned int cryptlen = req->cryptlen - authsize;

	scatterwalk_map_and_copy(tag.bytes, req->src,
				 req->assoclen + cryptlen, authsize, 0);

	crypto_morus1280_glue_crypt(req, ops, cryptlen, &tag);

	return crypto_memneq(tag.bytes, zeros, authsize) ? -EBADMSG : 0;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_decrypt);

void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
				    const struct morus1280_glue_ops *ops)
{
	struct morus1280_ctx *ctx = crypto_aead_ctx(aead);

	ctx->ops = ops;
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);

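/*
 * The cryptd wrappers below let the SIMD implementation be used from
 * contexts where the FPU is not available: requests are forwarded to
 * the internal ("__"-prefixed) algorithm via cryptd, which runs them
 * in process context.
 */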
int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
				 unsigned int keylen)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey);

int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
				      unsigned int authsize)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize);

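/*
 * Fast path: if the FPU is usable here and no requests are already
 * queued on cryptd (which would otherwise be reordered), call the
 * internal algorithm synchronously; otherwise defer to the cryptd
 * queue.
 */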
int cryptd_morus1280_glue_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		aead = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, aead);

	return crypto_aead_encrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt);

int cryptd_morus1280_glue_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	struct cryptd_aead *cryptd_tfm = *ctx;

	aead = &cryptd_tfm->base;
	if (irq_fpu_usable() && (!in_atomic() ||
				 !cryptd_aead_queued(cryptd_tfm)))
		aead = cryptd_aead_child(cryptd_tfm);

	aead_request_set_tfm(req, aead);

	return crypto_aead_decrypt(req);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt);

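/*
 * Allocate the cryptd handle at tfm init time, binding to the internal
 * implementation by prefixing its driver name with "__" (the
 * CRYPTO_ALG_INTERNAL convention).
 */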
int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead)
{
	struct cryptd_aead *cryptd_tfm;
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);
	const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
	char internal_name[CRYPTO_MAX_ALG_NAME];

	if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
			>= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	*ctx = cryptd_tfm;
	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
	return 0;
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm);

void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead)
{
	struct cryptd_aead **ctx = crypto_aead_ctx(aead);

	cryptd_free_aead(*ctx);
}
EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for optimizations");