@@ -13,6 +13,7 @@
 #include <linux/dmaengine.h>
 #include <linux/omap-dma.h>
 #include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 #include <crypto/aes.h>
 #include <crypto/gcm.h>
 #include <crypto/scatterwalk.h>
@@ -29,11 +30,13 @@ static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
 {
 	struct aead_request *req = dd->aead_req;
 
-	dd->flags &= ~FLAGS_BUSY;
 	dd->in_sg = NULL;
 	dd->out_sg = NULL;
 
-	req->base.complete(&req->base, ret);
+	crypto_finalize_aead_request(dd->engine, req, ret);
+
+	pm_runtime_mark_last_busy(dd->dev);
+	pm_runtime_put_autosuspend(dd->dev);
 }
 
 static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
@@ -81,7 +84,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
 	}
 
 	omap_aes_gcm_finish_req(dd, ret);
-	omap_aes_gcm_handle_queue(dd, NULL);
 }
 
 static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
@@ -127,6 +129,9 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
 	if (cryptlen) {
 		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
 
+		if (nsg)
+			sg_unmark_end(dd->in_sgl);
+
 		ret = omap_crypto_align_sg(&tmp, cryptlen,
 					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
 					   OMAP_CRYPTO_COPY_DATA |
@@ -146,7 +151,7 @@ static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
 	dd->out_sg = req->dst;
 	dd->orig_out = req->dst;
 
-	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
+	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);
 
 	flags = 0;
 	if (req->src == req->dst || dd->out_sg == sg_arr)
@@ -202,37 +207,21 @@ void omap_aes_gcm_dma_out_callback(void *data)
 static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
 				     struct aead_request *req)
 {
-	struct omap_aes_gcm_ctx *ctx;
-	struct aead_request *backlog;
-	struct omap_aes_reqctx *rctx;
-	unsigned long flags;
-	int err, ret = 0;
-
-	spin_lock_irqsave(&dd->lock, flags);
-	if (req)
-		ret = aead_enqueue_request(&dd->aead_queue, req);
-	if (dd->flags & FLAGS_BUSY) {
-		spin_unlock_irqrestore(&dd->lock, flags);
-		return ret;
-	}
-
-	backlog = aead_get_backlog(&dd->aead_queue);
-	req = aead_dequeue_request(&dd->aead_queue);
 	if (req)
-		dd->flags |= FLAGS_BUSY;
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	if (!req)
-		return ret;
+		return crypto_transfer_aead_request_to_engine(dd->engine, req);
 
-	if (backlog)
-		backlog->base.complete(&backlog->base, -EINPROGRESS);
+	return 0;
+}
 
-	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
-	rctx = aead_request_ctx(req);
+static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+{
+	struct aead_request *req = container_of(areq, struct aead_request,
+						base);
+	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+	struct omap_aes_dev *dd = rctx->dd;
+	struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	int err;
 
-	dd->ctx = &ctx->octx;
-	rctx->dd = dd;
 	dd->aead_req = req;
 
 	rctx->mode &= FLAGS_MODE_MASK;
@@ -242,20 +231,9 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
 	if (err)
 		return err;
 
-	err = omap_aes_write_ctrl(dd);
-	if (!err) {
-		if (dd->in_sg_len)
-			err = omap_aes_crypt_dma_start(dd);
-		else
-			omap_aes_gcm_dma_out_callback(dd);
-	}
-
-	if (err) {
-		omap_aes_gcm_finish_req(dd, err);
-		omap_aes_gcm_handle_queue(dd, NULL);
-	}
+	dd->ctx = &ctx->octx;
 
-	return ret;
+	return omap_aes_write_ctrl(dd);
 }
 
 static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
@@ -378,3 +356,35 @@ int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
 {
 	return crypto_rfc4106_check_authsize(authsize);
 }
+
+static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+{
+	struct aead_request *req = container_of(areq, struct aead_request,
+						base);
+	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
+	struct omap_aes_dev *dd = rctx->dd;
+	int ret = 0;
+
+	if (!dd)
+		return -ENODEV;
+
+	if (dd->in_sg_len)
+		ret = omap_aes_crypt_dma_start(dd);
+	else
+		omap_aes_gcm_dma_out_callback(dd);
+
+	return ret;
+}
+
+int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+	ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
+	ctx->enginectx.op.unprepare_request = NULL;
+	ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
+
+	return 0;
+}
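
Note: the hunks above assume the driver's probe path already allocates and starts the crypto_engine that dd->engine points to; that setup is not part of this diff. A minimal sketch of such setup is shown below, using only the generic crypto engine API (crypto_engine_alloc_init / crypto_engine_start); the helper name omap_aes_gcm_engine_init is hypothetical and error unwinding is abbreviated.

#include <crypto/engine.h>

/*
 * Hypothetical illustration, not taken from this commit: allocate and
 * start the engine that omap_aes_gcm_handle_queue() transfers requests to.
 */
static int omap_aes_gcm_engine_init(struct omap_aes_dev *dd)
{
	/* Bind an engine to the device; false = run its queue from a
	 * normal (non-realtime) kthread. */
	dd->engine = crypto_engine_alloc_init(dd->dev, false);
	if (!dd->engine)
		return -ENOMEM;

	/*
	 * Start the engine kthread so requests handed over with
	 * crypto_transfer_aead_request_to_engine() are pushed through the
	 * per-tfm prepare_request/do_one_request callbacks and completed
	 * via crypto_finalize_aead_request().
	 */
	return crypto_engine_start(dd->engine);
}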