Commit 762dfd1

piastry authored and Steve French committed

CIFS: Cleanup demultiplex thread exiting code

Reviewed-and-Tested-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Pavel Shilovsky <piastryyy@gmail.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
1 parent ad69bae commit 762dfd1

File tree

1 file changed: +96 additions, -77 deletions

fs/cifs/connect.c

Lines changed: 96 additions & 77 deletions
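
In outline, the patch extracts the teardown that previously ran inline at the end of cifs_demultiplex_thread() into a new static helper, clean_demultiplex_info(), which the thread now calls on exit. A condensed, comment-only sketch of the helper's responsibilities (summarized from the first hunk below, not the actual body):

	static void clean_demultiplex_info(struct TCP_Server_Info *server)
	{
		/*
		 * 1. unlink the server from cifs_tcp_ses_list and mark it CifsExiting
		 * 2. wake waiters on response_q and request_q so blocked callers see
		 *    the exiting state and leave SendReceive
		 * 3. release the socket
		 * 4. move any pending mids to a dispose list and run their callbacks
		 * 5. free the hostname and the server, then shrink the request mempool
		 */
	}
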
@@ -531,6 +531,101 @@ find_cifs_mid(struct TCP_Server_Info *server, struct smb_hdr *buf,
 	return ret;
 }
 
+static void clean_demultiplex_info(struct TCP_Server_Info *server)
+{
+	int length;
+
+	/* take it off the list, if it's not already */
+	spin_lock(&cifs_tcp_ses_lock);
+	list_del_init(&server->tcp_ses_list);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	spin_lock(&GlobalMid_Lock);
+	server->tcpStatus = CifsExiting;
+	spin_unlock(&GlobalMid_Lock);
+	wake_up_all(&server->response_q);
+
+	/*
+	 * Check if we have blocked requests that need to free. Note that
+	 * cifs_max_pending is normally 50, but can be set at module install
+	 * time to as little as two.
+	 */
+	spin_lock(&GlobalMid_Lock);
+	if (atomic_read(&server->inFlight) >= cifs_max_pending)
+		atomic_set(&server->inFlight, cifs_max_pending - 1);
+	/*
+	 * We do not want to set the max_pending too low or we could end up
+	 * with the counter going negative.
+	 */
+	spin_unlock(&GlobalMid_Lock);
+	/*
+	 * Although there should not be any requests blocked on this queue it
+	 * can not hurt to be paranoid and try to wake up requests that may
+	 * haven been blocked when more than 50 at time were on the wire to the
+	 * same server - they now will see the session is in exit state and get
+	 * out of SendReceive.
+	 */
+	wake_up_all(&server->request_q);
+	/* give those requests time to exit */
+	msleep(125);
+
+	if (server->ssocket) {
+		sock_release(server->ssocket);
+		server->ssocket = NULL;
+	}
+
+	if (!list_empty(&server->pending_mid_q)) {
+		struct list_head dispose_list;
+		struct mid_q_entry *mid_entry;
+		struct list_head *tmp, *tmp2;
+
+		INIT_LIST_HEAD(&dispose_list);
+		spin_lock(&GlobalMid_Lock);
+		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+			cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
+			mid_entry->midState = MID_SHUTDOWN;
+			list_move(&mid_entry->qhead, &dispose_list);
+		}
+		spin_unlock(&GlobalMid_Lock);
+
+		/* now walk dispose list and issue callbacks */
+		list_for_each_safe(tmp, tmp2, &dispose_list) {
+			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+			cFYI(1, "Callback mid 0x%x", mid_entry->mid);
+			list_del_init(&mid_entry->qhead);
+			mid_entry->callback(mid_entry);
+		}
+		/* 1/8th of sec is more than enough time for them to exit */
+		msleep(125);
+	}
+
+	if (!list_empty(&server->pending_mid_q)) {
+		/*
+		 * mpx threads have not exited yet give them at least the smb
+		 * send timeout time for long ops.
+		 *
+		 * Due to delays on oplock break requests, we need to wait at
+		 * least 45 seconds before giving up on a request getting a
+		 * response and going ahead and killing cifsd.
+		 */
+		cFYI(1, "Wait for exit from demultiplex thread");
+		msleep(46000);
+		/*
+		 * If threads still have not exited they are probably never
+		 * coming home not much else we can do but free the memory.
+		 */
+	}
+
+	kfree(server->hostname);
+	kfree(server);
+
+	length = atomic_dec_return(&tcpSesAllocCount);
+	if (length > 0)
+		mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
+				GFP_KERNEL);
+}
+
 static int
 cifs_demultiplex_thread(void *p)
 {
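
One detail in the helper worth noting is the two-pass mid shutdown: pending entries are moved to a private dispose_list while GlobalMid_Lock is held, and their callbacks run only after the lock is dropped, so the callbacks are never invoked under that spinlock. A minimal stand-alone sketch of the pattern (field and lock names taken from the hunk above; the wrapper name shutdown_pending_mids() is made up for illustration):

	static void shutdown_pending_mids(struct TCP_Server_Info *server)
	{
		struct mid_q_entry *mid_entry;
		struct list_head *tmp, *tmp2;
		LIST_HEAD(dispose_list);

		/* pass 1: under the lock, mark each mid and move it off the
		 * shared pending queue onto a private list */
		spin_lock(&GlobalMid_Lock);
		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			mid_entry->midState = MID_SHUTDOWN;
			list_move(&mid_entry->qhead, &dispose_list);
		}
		spin_unlock(&GlobalMid_Lock);

		/* pass 2: with the lock dropped, walk the private list and
		 * issue the callbacks */
		list_for_each_safe(tmp, tmp2, &dispose_list) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			list_del_init(&mid_entry->qhead);
			mid_entry->callback(mid_entry);
		}
	}
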
@@ -541,7 +636,6 @@ cifs_demultiplex_thread(void *p)
 	struct smb_hdr *smb_buffer = NULL;
 	struct msghdr smb_msg;
 	struct kvec iov;
-	struct list_head *tmp, *tmp2;
 	struct task_struct *task_to_wake = NULL;
 	struct mid_q_entry *mid_entry;
 	bool isLargeBuf = false;
@@ -678,88 +772,13 @@ cifs_demultiplex_thread(void *p)
 		}
 	} /* end while !EXITING */
 
-	/* take it off the list, if it's not already */
-	spin_lock(&cifs_tcp_ses_lock);
-	list_del_init(&server->tcp_ses_list);
-	spin_unlock(&cifs_tcp_ses_lock);
-
-	spin_lock(&GlobalMid_Lock);
-	server->tcpStatus = CifsExiting;
-	spin_unlock(&GlobalMid_Lock);
-	wake_up_all(&server->response_q);
-
-	/* check if we have blocked requests that need to free */
-	/* Note that cifs_max_pending is normally 50, but
-	can be set at module install time to as little as two */
-	spin_lock(&GlobalMid_Lock);
-	if (atomic_read(&server->inFlight) >= cifs_max_pending)
-		atomic_set(&server->inFlight, cifs_max_pending - 1);
-	/* We do not want to set the max_pending too low or we
-	could end up with the counter going negative */
-	spin_unlock(&GlobalMid_Lock);
-	/* Although there should not be any requests blocked on
-	this queue it can not hurt to be paranoid and try to wake up requests
-	that may haven been blocked when more than 50 at time were on the wire
-	to the same server - they now will see the session is in exit state
-	and get out of SendReceive. */
-	wake_up_all(&server->request_q);
-	/* give those requests time to exit */
-	msleep(125);
-
-	if (server->ssocket) {
-		sock_release(server->ssocket);
-		server->ssocket = NULL;
-	}
 	/* buffer usually freed in free_mid - need to free it here on exit */
 	cifs_buf_release(bigbuf);
 	if (smallbuf) /* no sense logging a debug message if NULL */
 		cifs_small_buf_release(smallbuf);
 
-	if (!list_empty(&server->pending_mid_q)) {
-		struct list_head dispose_list;
-
-		INIT_LIST_HEAD(&dispose_list);
-		spin_lock(&GlobalMid_Lock);
-		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
-			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-			cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
-			mid_entry->midState = MID_SHUTDOWN;
-			list_move(&mid_entry->qhead, &dispose_list);
-		}
-		spin_unlock(&GlobalMid_Lock);
-
-		/* now walk dispose list and issue callbacks */
-		list_for_each_safe(tmp, tmp2, &dispose_list) {
-			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-			cFYI(1, "Callback mid 0x%x", mid_entry->mid);
-			list_del_init(&mid_entry->qhead);
-			mid_entry->callback(mid_entry);
-		}
-		/* 1/8th of sec is more than enough time for them to exit */
-		msleep(125);
-	}
-
-	if (!list_empty(&server->pending_mid_q)) {
-		/* mpx threads have not exited yet give them
-		at least the smb send timeout time for long ops */
-		/* due to delays on oplock break requests, we need
-		to wait at least 45 seconds before giving up
-		on a request getting a response and going ahead
-		and killing cifsd */
-		cFYI(1, "Wait for exit from demultiplex thread");
-		msleep(46000);
-		/* if threads still have not exited they are probably never
-		coming home not much else we can do but free the memory */
-	}
-
-	kfree(server->hostname);
 	task_to_wake = xchg(&server->tsk, NULL);
-	kfree(server);
-
-	length = atomic_dec_return(&tcpSesAllocCount);
-	if (length > 0)
-		mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
-				GFP_KERNEL);
+	clean_demultiplex_info(server);
 
 	/* if server->tsk was NULL then wait for a signal before exiting */
 	if (!task_to_wake) {
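
A note on ordering at the call site: clean_demultiplex_info() ends by freeing the server structure, so the thread snapshots server->tsk with xchg() before calling it and must not touch *server afterwards. Simplified from the last hunk above:

	task_to_wake = xchg(&server->tsk, NULL);	/* snapshot while server is still valid */
	clean_demultiplex_info(server);			/* unlinks, drains, and frees server */

	/* if server->tsk was NULL then wait for a signal before exiting */
	if (!task_to_wake) {
		/* sleep until another thread signals this one, as in the existing code */
	}
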
