
Commit a983633

Merge branch 'tls-Fix-issues-in-tls_device'
Boris Pismenny says:

====================
tls: Fix issues in tls_device

This series fixes issues encountered in the tls_device code paths, which
were introduced recently. Additionally, this series includes a fix for the
TLS software-only receive flow, which causes corruption of the payload
received by user space applications.

This series was tested using the OpenSSL integration of KTLS -
https://github.com/mellan
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents 7d82737 + d069b78 commit a983633
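For context, the code paths fixed below are exercised once an application hands a negotiated TLS session to the kernel. The following is a minimal sketch of that setup, based on the standard kTLS setsockopt API; it is not part of this commit, the enable_ktls() helper name is a placeholder, and the key material is left zeroed for brevity (a real application copies iv/key/salt/rec_seq from its TLS library after the handshake).

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <linux/tls.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282             /* socket level for kTLS options */
    #endif
    #ifndef TCP_ULP
    #define TCP_ULP 31              /* attach an upper layer protocol to TCP */
    #endif

    /* Install the "tls" ULP and push the negotiated AES-GCM-128 keys into
     * the kernel for both directions; send()/recv() then go through the
     * tls_device or tls_sw paths touched by this series. */
    static int enable_ktls(int sock)
    {
            struct tls12_crypto_info_aes_gcm_128 crypto_info;

            memset(&crypto_info, 0, sizeof(crypto_info));
            crypto_info.info.version = TLS_1_2_VERSION;
            crypto_info.info.cipher_type = TLS_CIPHER_AES_GCM_128;
            /* copy .iv, .key, .salt, .rec_seq from the user-space handshake */

            if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
                    return -1;
            if (setsockopt(sock, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info)))
                    return -1;
            return setsockopt(sock, SOL_TLS, TLS_RX, &crypto_info, sizeof(crypto_info));
    }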

4 files changed, 71 insertions(+), 59 deletions(-)


include/net/tls.h

Lines changed: 7 additions & 16 deletions
@@ -199,10 +199,6 @@ struct tls_offload_context_tx {
 	(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
 	 TLS_DRIVER_STATE_SIZE)
 
-enum {
-	TLS_PENDING_CLOSED_RECORD
-};
-
 struct cipher_context {
 	char *iv;
 	char *rec_seq;
@@ -335,17 +331,14 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 			    int flags);
 
-int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
-				   int flags, long *timeo);
-
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
 	return (struct tls_msg *)strp_msg(skb);
 }
 
-static inline bool tls_is_pending_closed_record(struct tls_context *ctx)
+static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
 {
-	return test_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
+	return !!ctx->partially_sent_record;
 }
 
 static inline int tls_complete_pending_work(struct sock *sk,
@@ -357,17 +350,12 @@ static inline int tls_complete_pending_work(struct sock *sk,
 	if (unlikely(sk->sk_write_pending))
 		rc = wait_on_pending_writer(sk, timeo);
 
-	if (!rc && tls_is_pending_closed_record(ctx))
-		rc = tls_push_pending_closed_record(sk, ctx, flags, timeo);
+	if (!rc && tls_is_partially_sent_record(ctx))
+		rc = tls_push_partial_record(sk, ctx, flags);
 
 	return rc;
 }
 
-static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
-{
-	return !!ctx->partially_sent_record;
-}
-
 static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
 {
 	return tls_ctx->pending_open_record_frags;
@@ -531,6 +519,9 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
 	return !!tls_sw_ctx_tx(ctx);
 }
 
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
+
 static inline struct tls_offload_context_rx *
 tls_offload_ctx_rx(const struct tls_context *tls_ctx)
 {

net/tls/tls_device.c

Lines changed: 22 additions & 4 deletions
@@ -271,7 +271,6 @@ static int tls_push_record(struct sock *sk,
 	list_add_tail(&record->list, &offload_ctx->records_list);
 	spin_unlock_irq(&offload_ctx->lock);
 	offload_ctx->open_record = NULL;
-	set_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
 	tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);
 
 	for (i = 0; i < record->num_frags; i++) {
@@ -368,9 +367,11 @@ static int tls_push_data(struct sock *sk,
 		return -sk->sk_err;
 
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-	rc = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
-	if (rc < 0)
-		return rc;
+	if (tls_is_partially_sent_record(tls_ctx)) {
+		rc = tls_push_partial_record(sk, tls_ctx, flags);
+		if (rc < 0)
+			return rc;
+	}
 
 	pfrag = sk_page_frag(sk);
 
@@ -545,6 +546,23 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
 	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
 }
 
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
+{
+	int rc = 0;
+
+	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
+		gfp_t sk_allocation = sk->sk_allocation;
+
+		sk->sk_allocation = GFP_ATOMIC;
+		rc = tls_push_partial_record(sk, ctx,
+					     MSG_DONTWAIT | MSG_NOSIGNAL);
+		sk->sk_allocation = sk_allocation;
+	}
+
+	if (!rc)
+		ctx->sk_write_space(sk);
+}
+
 void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);

net/tls/tls_main.c

Lines changed: 6 additions & 22 deletions
@@ -209,23 +209,9 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 	return tls_push_sg(sk, ctx, sg, offset, flags);
 }
 
-int tls_push_pending_closed_record(struct sock *sk,
-				   struct tls_context *tls_ctx,
-				   int flags, long *timeo)
-{
-	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
-
-	if (tls_is_partially_sent_record(tls_ctx) ||
-	    !list_empty(&ctx->tx_list))
-		return tls_tx_records(sk, flags);
-	else
-		return tls_ctx->push_pending_record(sk, flags);
-}
-
 static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
-	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
 
 	/* If in_tcp_sendpages call lower protocol write space handler
 	 * to ensure we wake up any waiting operations there. For example
@@ -236,14 +222,12 @@ static void tls_write_space(struct sock *sk)
 		return;
 	}
 
-	/* Schedule the transmission if tx list is ready */
-	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
-		/* Schedule the transmission */
-		if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
-			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
-	}
-
-	ctx->sk_write_space(sk);
+#ifdef CONFIG_TLS_DEVICE
+	if (ctx->tx_conf == TLS_HW)
+		tls_device_write_space(sk, ctx);
+	else
+#endif
+		tls_sw_write_space(sk, ctx);
 }
 
 static void tls_ctx_free(struct tls_context *ctx)

net/tls/tls_sw.c

Lines changed: 36 additions & 17 deletions
@@ -1467,23 +1467,26 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 	struct strp_msg *rxm = strp_msg(skb);
 	int err = 0;
 
+	if (!ctx->decrypted) {
 #ifdef CONFIG_TLS_DEVICE
-	err = tls_device_decrypted(sk, skb);
-	if (err < 0)
-		return err;
+		err = tls_device_decrypted(sk, skb);
+		if (err < 0)
+			return err;
 #endif
-	if (!ctx->decrypted) {
-		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, async);
-		if (err < 0) {
-			if (err == -EINPROGRESS)
-				tls_advance_record_sn(sk, &tls_ctx->rx,
-						      version);
+		/* Still not decrypted after tls_device */
+		if (!ctx->decrypted) {
+			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
+					       async);
+			if (err < 0) {
+				if (err == -EINPROGRESS)
+					tls_advance_record_sn(sk, &tls_ctx->rx,
+							      version);
 
-			return err;
+				return err;
+			}
 		}
 
 		rxm->full_len -= padding_length(ctx, tls_ctx, skb);
-
 		rxm->offset += prot->prepend_size;
 		rxm->full_len -= prot->overhead_size;
 		tls_advance_record_sn(sk, &tls_ctx->rx, version);
@@ -1693,7 +1696,8 @@ int tls_sw_recvmsg(struct sock *sk,
 		bool zc = false;
 		int to_decrypt;
 		int chunk = 0;
-		bool async;
+		bool async_capable;
+		bool async = false;
 
 		skb = tls_wait_data(sk, psock, flags, timeo, &err);
 		if (!skb) {
@@ -1727,21 +1731,23 @@ int tls_sw_recvmsg(struct sock *sk,
 
 		/* Do not use async mode if record is non-data */
 		if (ctx->control == TLS_RECORD_TYPE_DATA)
-			async = ctx->async_capable;
+			async_capable = ctx->async_capable;
 		else
-			async = false;
+			async_capable = false;
 
 		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
-					 &chunk, &zc, async);
+					 &chunk, &zc, async_capable);
 		if (err < 0 && err != -EINPROGRESS) {
 			tls_err_abort(sk, EBADMSG);
 			goto recv_end;
 		}
 
-		if (err == -EINPROGRESS)
+		if (err == -EINPROGRESS) {
+			async = true;
 			num_async++;
-		else if (prot->version == TLS_1_3_VERSION)
+		} else if (prot->version == TLS_1_3_VERSION) {
 			tlm->control = ctx->control;
+		}
 
 		/* If the type of records being processed is not known yet,
 		 * set it to record type just dequeued. If it is already known,
@@ -2126,6 +2132,19 @@ static void tx_work_handler(struct work_struct *work)
 	release_sock(sk);
 }
 
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
+{
+	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
+
+	/* Schedule the transmission if tx list is ready */
+	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
+		/* Schedule the transmission */
+		if (!test_and_set_bit(BIT_TX_SCHEDULED,
+				      &tx_ctx->tx_bitmask))
+			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
+	}
+}
+
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
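The tls_sw.c changes above are in the flow that hands decrypted records to recvmsg() callers, i.e. the software receive path where the payload corruption was observed. For context only, below is a minimal sketch of the standard kTLS read pattern that exercises tls_sw_recvmsg(); it is not part of this commit, the ktls_recv() helper name is a placeholder, and it assumes TLS_RX was installed as in the earlier setup sketch.

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/tls.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282
    #endif

    /* Read plaintext from a socket with TLS_RX installed. The kernel
     * decrypts the record (software or device-offloaded path) and
     * reports the record type via a control message. */
    static ssize_t ktls_recv(int sock, void *buf, size_t len)
    {
            char cbuf[CMSG_SPACE(sizeof(unsigned char))];
            struct iovec iov = { .iov_base = buf, .iov_len = len };
            struct msghdr msg = {
                    .msg_iov = &iov,
                    .msg_iovlen = 1,
                    .msg_control = cbuf,
                    .msg_controllen = sizeof(cbuf),
            };
            ssize_t n = recvmsg(sock, &msg, 0);

            if (n > 0) {
                    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

                    /* Non-data records (alerts, handshake) are flagged here. */
                    if (cmsg && cmsg->cmsg_level == SOL_TLS &&
                        cmsg->cmsg_type == TLS_GET_RECORD_TYPE) {
                            unsigned char record_type = *CMSG_DATA(cmsg);
                            (void)record_type;      /* handle non-data records */
                    }
            }
            return n;
    }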
