@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =
 
 #define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
 
-/* macros to set/get socket control buffer at correct offset */
-#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
-#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
-#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
-#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
-
 #define __iucv_sock_wait(sk, condition, timeo, ret)			\
 do {									\
 	DEFINE_WAIT(__wait);						\
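The CB_* macros removed above hand-computed byte offsets into skb->cb; the + lines in the hunks below replace them with a typed accessor, IUCV_SKB_CB(). Its definition is not part of the hunks shown here; a minimal sketch, assuming it lives in the AF_IUCV header and noting that the exact field order is an assumption:

	struct iucv_skb_cb {
		u32	class;	/* target class of the IUCV message */
		u32	tag;	/* message tag for send-completion matching */
		u32	offset;	/* bytes of this skb already handed to recvmsg() */
	};

	#define IUCV_SKB_CB(__skb)	((struct iucv_skb_cb *)&((__skb)->cb[0]))

All three fields together must still fit into the 48-byte skb->cb area, just as the old macro layout had to.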
@@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	/* increment and save iucv message tag for msg_completion cbk */
 	txmsg.tag = iucv->send_tag++;
-	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+	IUCV_SKB_CB(skb)->tag = txmsg.tag;
 
 	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
 		atomic_inc(&iucv->msg_sent);
@@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
 			return -ENOMEM;
 
 		/* copy target class to control buffer of new skb */
-		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);
+		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;
 
 		/* copy data fragment */
 		memcpy(nskb->data, skb->data + copied, size);
@@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 
 	/* store msg target class in the second 4 bytes of skb ctrl buffer */
 	/* Note: the first 4 bytes are reserved for msg tag */
-	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);
+	IUCV_SKB_CB(skb)->class = msg->class;
 
 	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
 	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
@@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 		}
 	}
 
+	IUCV_SKB_CB(skb)->offset = 0;
 	if (sock_queue_rcv_skb(sk, skb))
 		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
 }
@@ -1327,6 +1322,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	unsigned int copied, rlen;
 	struct sk_buff *skb, *rskb, *cskb;
 	int err = 0;
+	u32 offset;
 
 	msg->msg_namelen = 0;
 
@@ -1348,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		return err;
 	}
 
-	rlen  = skb->len;		/* real length of skb */
+	offset = IUCV_SKB_CB(skb)->offset;
+	rlen   = skb->len - offset;	/* real length of skb */
 	copied = min_t(unsigned int, rlen, len);
 	if (!rlen)
 		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
 
 	cskb = skb;
-	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
+	if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
 		if (!(flags & MSG_PEEK))
 			skb_queue_head(&sk->sk_receive_queue, skb);
 		return -EFAULT;
@@ -1372,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		 * get the trgcls from the control buffer of the skb due to
 		 * fragmentation of original iucv message. */
 		err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
-			       CB_TRGCLS_LEN, CB_TRGCLS(skb));
+			       sizeof(IUCV_SKB_CB(skb)->class),
+			       (void *)&IUCV_SKB_CB(skb)->class);
 		if (err) {
 			if (!(flags & MSG_PEEK))
 				skb_queue_head(&sk->sk_receive_queue, skb);
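The put_cmsg() call above surfaces the 4-byte target class to userspace as ancillary data on recvmsg(). For context, a hedged userspace sketch of reading it; recv_with_trgcls() is a hypothetical helper, and the fallback constant values mirror the kernel sources of this era:

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/types.h>
	#include <linux/types.h>

	#ifndef SOL_IUCV
	#define SOL_IUCV	277	/* as in linux/socket.h */
	#endif
	#ifndef SCM_IUCV_TRGCLS
	#define SCM_IUCV_TRGCLS	0x0001	/* as in the kernel's af_iucv.h */
	#endif

	/* Receive one message and report its IUCV target class via *trgcls. */
	static ssize_t recv_with_trgcls(int fd, void *buf, size_t len, __u32 *trgcls)
	{
		char cbuf[CMSG_SPACE(sizeof(__u32))];
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr mh = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		ssize_t n = recvmsg(fd, &mh, 0);
		struct cmsghdr *c;

		for (c = CMSG_FIRSTHDR(&mh); n >= 0 && c; c = CMSG_NXTHDR(&mh, c))
			if (c->cmsg_level == SOL_IUCV && c->cmsg_type == SCM_IUCV_TRGCLS)
				memcpy(trgcls, CMSG_DATA(c), sizeof(*trgcls));
		return n;
	}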
@@ -1384,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
 		if (sk->sk_type == SOCK_STREAM) {
-			skb_pull(skb, copied);
-			if (skb->len) {
-				skb_queue_head(&sk->sk_receive_queue, skb);
+			if (copied < rlen) {
+				IUCV_SKB_CB(skb)->offset = offset + copied;
 				goto done;
 			}
 		}
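The rewritten SOCK_STREAM branch is the core of the change: instead of shrinking the queued skb with skb_pull(), the number of bytes already delivered is recorded in IUCV_SKB_CB(skb)->offset, so the skb data stays intact while the skb waits on the receive queue. The visible effect is simply that a large message can be drained in small reads; a minimal sketch, with drain_stream() and its consume callback being illustrative stand-ins:

	#include <sys/socket.h>
	#include <sys/types.h>

	/* Read a queued IUCV message in 1 KiB chunks; each recv() after the
	 * first resumes from the per-skb offset kept by the kernel. */
	static void drain_stream(int fd, void (*consume)(const char *, size_t))
	{
		char chunk[1024];
		ssize_t n;

		while ((n = recv(fd, chunk, sizeof(chunk), 0)) > 0)
			consume(chunk, (size_t)n);
	}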
@@ -1405,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 		spin_lock_bh(&iucv->message_q.lock);
 		rskb = skb_dequeue(&iucv->backlog_skb_q);
 		while (rskb) {
+			IUCV_SKB_CB(rskb)->offset = 0;
 			if (sock_queue_rcv_skb(sk, rskb)) {
 				skb_queue_head(&iucv->backlog_skb_q,
 						rskb);
@@ -1832,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	spin_lock_irqsave(&list->lock, flags);
 
 	while (list_skb != (struct sk_buff *)list) {
-		if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
+		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
 			this = list_skb;
 			break;
 		}
@@ -2093,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
+	IUCV_SKB_CB(skb)->offset = 0;
 	spin_lock(&iucv->message_q.lock);
 	if (skb_queue_empty(&iucv->backlog_skb_q)) {
 		if (sock_queue_rcv_skb(sk, skb)) {
@@ -2197,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
 		/* fall through and receive zero length data */
 	case 0:
 		/* plain data frame */
-		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
-		       CB_TRGCLS_LEN);
+		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
 		err = afiucv_hs_callback_rx(sk, skb);
 		break;
 	default: