Commit 41e5a6ac80c600e1f8bda0a4871f0b797e097d78
Committed by: Linus Torvalds
1 parent: 343f1fe6f2
Exists in: master and 7 other branches
[PATCH] v9fs: signal handling fixes
Multiple races can happen when v9fs is interrupted by a signal and a Tflush message is sent to the server. After v9fs sends Tflush, it doesn't wait until it receives Rflush, and possibly the response to the original message. This behavior may leave v9fs confused about which fids are allocated by the file server. This patch fixes the races and the fid allocation.

Signed-off-by: Latchesar Ionkov <lucho@ionkov.net>
Cc: Eric Van Hensbergen <ericvh@hera.kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
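For orientation, here is a minimal user-space sketch (not the kernel code) of the per-request flush state machine this patch introduces. Only the state names None/Flushing/Flushed come from the patch below; the struct fields, helpers, and output are invented for illustration:

/* Illustrative model only -- mirrors the None/Flushing/Flushed states
 * added to struct v9fs_req; all other names are hypothetical. */
#include <stdio.h>

enum flush_state { None, Flushing, Flushed };

struct req {
	int tag;
	enum flush_state flush;
	int have_rcall;		/* has a response already been received? */
};

/* Caller was interrupted by a signal.  Returns 1 if a Tflush is
 * (conceptually) sent, in which case the caller must keep waiting
 * for Rflush before the tag may be reused. */
static int flush_request(struct req *r)
{
	if (r->have_rcall) {
		/* Response raced with the signal: nothing to flush. */
		return 0;
	}
	r->flush = Flushing;	/* late responses for this tag are ignored */
	printf("send Tflush oldtag=%d\n", r->tag);
	return 1;
}

/* A response arrived for this tag. */
static void got_response(struct req *r)
{
	if (r->flush == Flushing) {
		/* Response to an already-flushed request: drop it and keep
		 * waiting for Rflush, so client and server agree on which
		 * tags (and fids) are in use. */
		printf("tag %d: late response dropped\n", r->tag);
		return;
	}
	r->have_rcall = 1;
	printf("tag %d: response delivered to caller\n", r->tag);
}

int main(void)
{
	struct req r = { .tag = 7, .flush = None };

	if (flush_request(&r))
		got_response(&r);	/* arrives after Tflush: dropped */
	r.flush = Flushed;		/* Rflush arrived; tag reusable */
	return 0;
}

The rule the patch enforces is the middle branch: once a request is Flushing, a late response for its tag is dropped, and the tag is recycled only after Rflush arrives.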
Showing 4 changed files with 158 additions and 100 deletions
fs/9p/mux.c
... | ... | @@ -50,15 +50,23 @@ |
50 | 50 | Wpending = 8, /* can write */ |
51 | 51 | }; |
52 | 52 | |
53 | +enum { | |
54 | + None, | |
55 | + Flushing, | |
56 | + Flushed, | |
57 | +}; | |
58 | + | |
53 | 59 | struct v9fs_mux_poll_task; |
54 | 60 | |
55 | 61 | struct v9fs_req { |
62 | + spinlock_t lock; | |
56 | 63 | int tag; |
57 | 64 | struct v9fs_fcall *tcall; |
58 | 65 | struct v9fs_fcall *rcall; |
59 | 66 | int err; |
60 | 67 | v9fs_mux_req_callback cb; |
61 | 68 | void *cba; |
69 | + int flush; | |
62 | 70 | struct list_head req_list; |
63 | 71 | }; |
64 | 72 | |
65 | 73 | |
... | ... | @@ -96,8 +104,8 @@ |
96 | 104 | |
97 | 105 | struct v9fs_mux_rpc { |
98 | 106 | struct v9fs_mux_data *m; |
99 | - struct v9fs_req *req; | |
100 | 107 | int err; |
108 | + struct v9fs_fcall *tcall; | |
101 | 109 | struct v9fs_fcall *rcall; |
102 | 110 | wait_queue_head_t wqueue; |
103 | 111 | }; |
104 | 112 | |
... | ... | @@ -524,10 +532,9 @@ |
524 | 532 | |
525 | 533 | static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req) |
526 | 534 | { |
527 | - int ecode, tag; | |
535 | + int ecode; | |
528 | 536 | struct v9fs_str *ename; |
529 | 537 | |
530 | - tag = req->tag; | |
531 | 538 | if (!req->err && req->rcall->id == RERROR) { |
532 | 539 | ecode = req->rcall->params.rerror.errno; |
533 | 540 | ename = &req->rcall->params.rerror.error; |
... | ... | @@ -553,23 +560,6 @@ |
553 | 560 | if (!req->err) |
554 | 561 | req->err = -EIO; |
555 | 562 | } |
556 | - | |
557 | - if (req->err == ERREQFLUSH) | |
558 | - return; | |
559 | - | |
560 | - if (req->cb) { | |
561 | - dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n", | |
562 | - req->tcall, req->rcall); | |
563 | - | |
564 | - (*req->cb) (req->cba, req->tcall, req->rcall, req->err); | |
565 | - req->cb = NULL; | |
566 | - } else | |
567 | - kfree(req->rcall); | |
568 | - | |
569 | - v9fs_mux_put_tag(m, tag); | |
570 | - | |
571 | - wake_up(&m->equeue); | |
572 | - kfree(req); | |
573 | 563 | } |
574 | 564 | |
575 | 565 | /** |
... | ... | @@ -669,17 +659,26 @@ |
669 | 659 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { |
670 | 660 | if (rreq->tag == rcall->tag) { |
671 | 661 | req = rreq; |
672 | - req->rcall = rcall; | |
673 | - list_del(&req->req_list); | |
674 | - spin_unlock(&m->lock); | |
675 | - process_request(m, req); | |
662 | + if (req->flush != Flushing) | |
663 | + list_del(&req->req_list); | |
676 | 664 | break; |
677 | 665 | } |
678 | - | |
679 | 666 | } |
667 | + spin_unlock(&m->lock); | |
680 | 668 | |
681 | - if (!req) { | |
682 | - spin_unlock(&m->lock); | |
669 | + if (req) { | |
670 | + req->rcall = rcall; | |
671 | + process_request(m, req); | |
672 | + | |
673 | + if (req->flush != Flushing) { | |
674 | + if (req->cb) | |
675 | + (*req->cb) (req, req->cba); | |
676 | + else | |
677 | + kfree(req->rcall); | |
678 | + | |
679 | + wake_up(&m->equeue); | |
680 | + } | |
681 | + } else { | |
683 | 682 | if (err >= 0 && rcall->id != RFLUSH) |
684 | 683 | dprintk(DEBUG_ERROR, |
685 | 684 | "unexpected response mux %p id %d tag %d\n", |
... | ... | @@ -746,7 +745,6 @@ |
746 | 745 | return ERR_PTR(-ENOMEM); |
747 | 746 | |
748 | 747 | v9fs_set_tag(tc, n); |
749 | - | |
750 | 748 | if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) { |
751 | 749 | char buf[150]; |
752 | 750 | |
753 | 751 | |
... | ... | @@ -754,12 +752,14 @@ |
754 | 752 | printk(KERN_NOTICE "<<< %p %s\n", m, buf); |
755 | 753 | } |
756 | 754 | |
755 | + spin_lock_init(&req->lock); | |
757 | 756 | req->tag = n; |
758 | 757 | req->tcall = tc; |
759 | 758 | req->rcall = NULL; |
760 | 759 | req->err = 0; |
761 | 760 | req->cb = cb; |
762 | 761 | req->cba = cba; |
762 | + req->flush = None; | |
763 | 763 | |
764 | 764 | spin_lock(&m->lock); |
765 | 765 | list_add_tail(&req->req_list, &m->unsent_req_list); |
... | ... | @@ -776,72 +776,108 @@ |
776 | 776 | return req; |
777 | 777 | } |
778 | 778 | |
779 | -static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc, | |
780 | - struct v9fs_fcall *rc, int err) | |
779 | +static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req) | |
781 | 780 | { |
781 | + v9fs_mux_put_tag(m, req->tag); | |
782 | + kfree(req); | |
783 | +} | |
784 | + | |
785 | +static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a) | |
786 | +{ | |
782 | 787 | v9fs_mux_req_callback cb; |
783 | 788 | int tag; |
784 | 789 | struct v9fs_mux_data *m; |
785 | - struct v9fs_req *req, *rptr; | |
790 | + struct v9fs_req *req, *rreq, *rptr; | |
786 | 791 | |
787 | 792 | m = a; |
788 | - dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc, | |
789 | - rc, err, tc->params.tflush.oldtag); | |
793 | + dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, | |
794 | + freq->tcall, freq->rcall, freq->err, | |
795 | + freq->tcall->params.tflush.oldtag); | |
790 | 796 | |
791 | 797 | spin_lock(&m->lock); |
792 | 798 | cb = NULL; |
793 | - tag = tc->params.tflush.oldtag; | |
794 | - list_for_each_entry_safe(req, rptr, &m->req_list, req_list) { | |
795 | - if (req->tag == tag) { | |
799 | + tag = freq->tcall->params.tflush.oldtag; | |
800 | + req = NULL; | |
801 | + list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | |
802 | + if (rreq->tag == tag) { | |
803 | + req = rreq; | |
796 | 804 | list_del(&req->req_list); |
797 | - if (req->cb) { | |
798 | - cb = req->cb; | |
799 | - req->cb = NULL; | |
800 | - spin_unlock(&m->lock); | |
801 | - (*cb) (req->cba, req->tcall, req->rcall, | |
802 | - req->err); | |
803 | - } | |
804 | - kfree(req); | |
805 | - wake_up(&m->equeue); | |
806 | 805 | break; |
807 | 806 | } |
808 | 807 | } |
808 | + spin_unlock(&m->lock); | |
809 | 809 | |
810 | - if (!cb) | |
811 | - spin_unlock(&m->lock); | |
810 | + if (req) { | |
811 | + spin_lock(&req->lock); | |
812 | + req->flush = Flushed; | |
813 | + spin_unlock(&req->lock); | |
812 | 814 | |
813 | - v9fs_mux_put_tag(m, tag); | |
814 | - kfree(tc); | |
815 | - kfree(rc); | |
815 | + if (req->cb) | |
816 | + (*req->cb) (req, req->cba); | |
817 | + else | |
818 | + kfree(req->rcall); | |
819 | + | |
820 | + wake_up(&m->equeue); | |
821 | + } | |
822 | + | |
823 | + kfree(freq->tcall); | |
824 | + kfree(freq->rcall); | |
825 | + v9fs_mux_free_request(m, freq); | |
816 | 826 | } |
817 | 827 | |
818 | -static void | |
828 | +static int | |
819 | 829 | v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req) |
820 | 830 | { |
821 | 831 | struct v9fs_fcall *fc; |
832 | + struct v9fs_req *rreq, *rptr; | |
822 | 833 | |
823 | 834 | dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); |
824 | 835 | |
836 | + /* if a response was received for a request, do nothing */ | |
837 | + spin_lock(&req->lock); | |
838 | + if (req->rcall || req->err) { | |
839 | + spin_unlock(&req->lock); | |
840 | + dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req); | |
841 | + return 0; | |
842 | + } | |
843 | + | |
844 | + req->flush = Flushing; | |
845 | + spin_unlock(&req->lock); | |
846 | + | |
847 | + spin_lock(&m->lock); | |
848 | + /* if the request is not sent yet, just remove it from the list */ | |
849 | + list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) { | |
850 | + if (rreq->tag == req->tag) { | |
851 | + dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req); | |
852 | + list_del(&rreq->req_list); | |
853 | + req->flush = Flushed; | |
854 | + spin_unlock(&m->lock); | |
855 | + if (req->cb) | |
856 | + (*req->cb) (req, req->cba); | |
857 | + return 0; | |
858 | + } | |
859 | + } | |
860 | + spin_unlock(&m->lock); | |
861 | + | |
862 | + clear_thread_flag(TIF_SIGPENDING); | |
825 | 863 | fc = v9fs_create_tflush(req->tag); |
826 | 864 | v9fs_send_request(m, fc, v9fs_mux_flush_cb, m); |
865 | + return 1; | |
827 | 866 | } |
828 | 867 | |
829 | 868 | static void |
830 | -v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err) | |
869 | +v9fs_mux_rpc_cb(struct v9fs_req *req, void *a) | |
831 | 870 | { |
832 | 871 | struct v9fs_mux_rpc *r; |
833 | 872 | |
834 | - if (err == ERREQFLUSH) { | |
835 | - kfree(rc); | |
836 | - dprintk(DEBUG_MUX, "err req flush\n"); | |
837 | - return; | |
838 | - } | |
839 | - | |
873 | + dprintk(DEBUG_MUX, "req %p r %p\n", req, a); | |
840 | 874 | r = a; |
841 | - dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req, | |
842 | - tc, rc, err); | |
843 | - r->rcall = rc; | |
844 | - r->err = err; | |
875 | + r->rcall = req->rcall; | |
876 | + r->err = req->err; | |
877 | + | |
878 | + if (req->flush!=None && !req->err) | |
879 | + r->err = -ERESTARTSYS; | |
880 | + | |
845 | 881 | wake_up(&r->wqueue); |
846 | 882 | } |
847 | 883 | |
848 | 884 | |
... | ... | @@ -856,12 +892,13 @@ |
856 | 892 | v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc, |
857 | 893 | struct v9fs_fcall **rc) |
858 | 894 | { |
859 | - int err; | |
895 | + int err, sigpending; | |
860 | 896 | unsigned long flags; |
861 | 897 | struct v9fs_req *req; |
862 | 898 | struct v9fs_mux_rpc r; |
863 | 899 | |
864 | 900 | r.err = 0; |
901 | + r.tcall = tc; | |
865 | 902 | r.rcall = NULL; |
866 | 903 | r.m = m; |
867 | 904 | init_waitqueue_head(&r.wqueue); |
... | ... | @@ -869,49 +906,51 @@ |
869 | 906 | if (rc) |
870 | 907 | *rc = NULL; |
871 | 908 | |
909 | + sigpending = 0; | |
910 | + if (signal_pending(current)) { | |
911 | + sigpending = 1; | |
912 | + clear_thread_flag(TIF_SIGPENDING); | |
913 | + } | |
914 | + | |
872 | 915 | req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r); |
873 | 916 | if (IS_ERR(req)) { |
874 | 917 | err = PTR_ERR(req); |
875 | 918 | dprintk(DEBUG_MUX, "error %d\n", err); |
876 | - return PTR_ERR(req); | |
919 | + return err; | |
877 | 920 | } |
878 | 921 | |
879 | - r.req = req; | |
880 | - dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc, | |
881 | - req->tag, &r, req); | |
882 | 922 | err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0); |
883 | 923 | if (r.err < 0) |
884 | 924 | err = r.err; |
885 | 925 | |
886 | 926 | if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) { |
887 | - spin_lock(&m->lock); | |
888 | - req->tcall = NULL; | |
889 | - req->err = ERREQFLUSH; | |
890 | - spin_unlock(&m->lock); | |
927 | + if (v9fs_mux_flush_request(m, req)) { | |
928 | + /* wait until we get response of the flush message */ | |
929 | + do { | |
930 | + clear_thread_flag(TIF_SIGPENDING); | |
931 | + err = wait_event_interruptible(r.wqueue, | |
932 | + r.rcall || r.err); | |
933 | + } while (!r.rcall && !r.err && err==-ERESTARTSYS && | |
934 | + m->trans->status==Connected && !m->err); | |
935 | + } | |
936 | + sigpending = 1; | |
937 | + } | |
891 | 938 | |
892 | - clear_thread_flag(TIF_SIGPENDING); | |
893 | - v9fs_mux_flush_request(m, req); | |
939 | + if (sigpending) { | |
894 | 940 | spin_lock_irqsave(¤t->sighand->siglock, flags); |
895 | 941 | recalc_sigpending(); |
896 | 942 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); |
897 | 943 | } |
898 | 944 | |
899 | - if (!err) { | |
900 | - if (r.rcall) | |
901 | - dprintk(DEBUG_MUX, "got response id %d tag %d\n", | |
902 | - r.rcall->id, r.rcall->tag); | |
903 | - | |
904 | - if (rc) | |
905 | - *rc = r.rcall; | |
906 | - else | |
907 | - kfree(r.rcall); | |
908 | - } else { | |
945 | + if (rc) | |
946 | + *rc = r.rcall; | |
947 | + else | |
909 | 948 | kfree(r.rcall); |
910 | - dprintk(DEBUG_MUX, "got error %d\n", err); | |
911 | - if (err > 0) | |
912 | - err = -EIO; | |
913 | - } | |
914 | 949 | |
950 | + v9fs_mux_free_request(m, req); | |
951 | + if (err > 0) | |
952 | + err = -EIO; | |
953 | + | |
915 | 954 | return err; |
916 | 955 | } |
917 | 956 | |
918 | 957 | |
... | ... | @@ -951,12 +990,15 @@ |
951 | 990 | struct v9fs_req *req, *rtmp; |
952 | 991 | LIST_HEAD(cancel_list); |
953 | 992 | |
954 | - dprintk(DEBUG_MUX, "mux %p err %d\n", m, err); | |
993 | + dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err); | |
955 | 994 | m->err = err; |
956 | 995 | spin_lock(&m->lock); |
957 | 996 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { |
958 | 997 | list_move(&req->req_list, &cancel_list); |
959 | 998 | } |
999 | + list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { | |
1000 | + list_move(&req->req_list, &cancel_list); | |
1001 | + } | |
960 | 1002 | spin_unlock(&m->lock); |
961 | 1003 | |
962 | 1004 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { |
963 | 1005 | |
... | ... | @@ -965,11 +1007,9 @@ |
965 | 1007 | req->err = err; |
966 | 1008 | |
967 | 1009 | if (req->cb) |
968 | - (*req->cb) (req->cba, req->tcall, req->rcall, req->err); | |
1010 | + (*req->cb) (req, req->cba); | |
969 | 1011 | else |
970 | 1012 | kfree(req->rcall); |
971 | - | |
972 | - kfree(req); | |
973 | 1013 | } |
974 | 1014 | |
975 | 1015 | wake_up(&m->equeue); |
fs/9p/mux.h
... | ... | @@ -24,6 +24,7 @@ |
24 | 24 | */ |
25 | 25 | |
26 | 26 | struct v9fs_mux_data; |
27 | +struct v9fs_req; | |
27 | 28 | |
28 | 29 | /** |
29 | 30 | * v9fs_mux_req_callback - callback function that is called when the |
... | ... | @@ -36,8 +37,7 @@ |
36 | 37 | * @rc - response call |
37 | 38 | * @err - error code (non-zero if error occured) |
38 | 39 | */ |
39 | -typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc, | |
40 | - struct v9fs_fcall *rc, int err); | |
40 | +typedef void (*v9fs_mux_req_callback)(struct v9fs_req *req, void *a); | |
41 | 41 | |
42 | 42 | int v9fs_mux_global_init(void); |
43 | 43 | void v9fs_mux_global_exit(void); |
fs/9p/vfs_file.c
... | ... | @@ -72,11 +72,17 @@ |
72 | 72 | return -ENOSPC; |
73 | 73 | } |
74 | 74 | |
75 | - err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, NULL); | |
75 | + err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, &fcall); | |
76 | 76 | if (err < 0) { |
77 | 77 | dprintk(DEBUG_ERROR, "rewalk didn't work\n"); |
78 | - goto put_fid; | |
78 | + if (fcall && fcall->id == RWALK) | |
79 | + goto clunk_fid; | |
80 | + else { | |
81 | + v9fs_put_idpool(fid, &v9ses->fidpool); | |
82 | + goto free_fcall; | |
83 | + } | |
79 | 84 | } |
85 | + kfree(fcall); | |
80 | 86 | |
81 | 87 | /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */ |
82 | 88 | /* translate open mode appropriately */ |
... | ... | @@ -109,8 +115,7 @@ |
109 | 115 | clunk_fid: |
110 | 116 | v9fs_t_clunk(v9ses, fid); |
111 | 117 | |
112 | -put_fid: | |
113 | - v9fs_put_idpool(fid, &v9ses->fidpool); | |
118 | +free_fcall: | |
114 | 119 | kfree(fcall); |
115 | 120 | |
116 | 121 | return err; |
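The fs/9p/vfs_file.c hunk above, and the fs/9p/vfs_inode.c hunks that follow, all apply one cleanup rule after a failed walk. A tiny user-space model of that rule (illustrative only; the message ids are the standard 9P2000 values, the helper and output are hypothetical):

#include <stdio.h>

enum { RERROR = 107, RWALK = 111 };	/* standard 9P2000 message ids */

struct fcall { int id; };

/* After v9fs_t_walk() fails: if the server still answered with RWALK,
 * the new fid may exist server-side and must be clunked there; with an
 * error reply, or no reply at all (e.g. a flushed request), the fid
 * number only has to go back into the local pool. */
static void cleanup_failed_walk(const struct fcall *rc, int fid)
{
	if (rc && rc->id == RWALK)
		printf("fid %d: send Tclunk to the server\n", fid);
	else
		printf("fid %d: return to local fid pool\n", fid);
}

int main(void)
{
	struct fcall rwalk = { .id = RWALK };
	struct fcall rerror = { .id = RERROR };

	cleanup_failed_walk(&rwalk, 3);		/* walk answered, then failed locally */
	cleanup_failed_walk(&rerror, 4);	/* server rejected the walk */
	cleanup_failed_walk(NULL, 5);		/* no response (flushed request) */
	return 0;
}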
fs/9p/vfs_inode.c
... | ... | @@ -270,7 +270,10 @@ |
270 | 270 | err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall); |
271 | 271 | if (err < 0) { |
272 | 272 | PRINT_FCALL_ERROR("clone error", fcall); |
273 | - goto put_fid; | |
273 | + if (fcall && fcall->id == RWALK) | |
274 | + goto clunk_fid; | |
275 | + else | |
276 | + goto put_fid; | |
274 | 277 | } |
275 | 278 | kfree(fcall); |
276 | 279 | |
... | ... | @@ -322,6 +325,9 @@ |
322 | 325 | &fcall); |
323 | 326 | |
324 | 327 | if (err < 0) { |
328 | + if (fcall && fcall->id == RWALK) | |
329 | + goto clunk_fid; | |
330 | + | |
325 | 331 | PRINT_FCALL_ERROR("walk error", fcall); |
326 | 332 | v9fs_put_idpool(nfid, &v9ses->fidpool); |
327 | 333 | goto error; |
... | ... | @@ -640,19 +646,26 @@ |
640 | 646 | } |
641 | 647 | |
642 | 648 | result = v9fs_t_walk(v9ses, dirfidnum, newfid, |
643 | - (char *)dentry->d_name.name, NULL); | |
649 | + (char *)dentry->d_name.name, &fcall); | |
650 | + | |
644 | 651 | if (result < 0) { |
645 | - v9fs_put_idpool(newfid, &v9ses->fidpool); | |
652 | + if (fcall && fcall->id == RWALK) | |
653 | + v9fs_t_clunk(v9ses, newfid); | |
654 | + else | |
655 | + v9fs_put_idpool(newfid, &v9ses->fidpool); | |
656 | + | |
646 | 657 | if (result == -ENOENT) { |
647 | 658 | d_add(dentry, NULL); |
648 | 659 | dprintk(DEBUG_VFS, |
649 | 660 | "Return negative dentry %p count %d\n", |
650 | 661 | dentry, atomic_read(&dentry->d_count)); |
662 | + kfree(fcall); | |
651 | 663 | return NULL; |
652 | 664 | } |
653 | 665 | dprintk(DEBUG_ERROR, "walk error:%d\n", result); |
654 | 666 | goto FreeFcall; |
655 | 667 | } |
668 | + kfree(fcall); | |
656 | 669 | |
657 | 670 | result = v9fs_t_stat(v9ses, newfid, &fcall); |
658 | 671 | if (result < 0) { |