/* fs/dlm/requestqueue.c */
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"

/*
 * One saved message on ls_requestqueue.  The full message (dlm_header
 * included) is copied into the flexible array member at the tail, so the
 * entry is allocated as sizeof(struct rq_entry) + message length.
 */
struct rq_entry {
	struct list_head list;
	int nodeid;		/* sender of the saved message */
	char request[];		/* C99 flexible array member (was char request[0],
				   a GCC extension with identical layout/sizeof) */
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete.  This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
 */
c36258b59 [DLM] block dlm_r... |
32 |
void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd) |
e7fd41792 [DLM] The core of... |
33 34 35 |
{ struct rq_entry *e; int length = hd->h_length; |
e7fd41792 [DLM] The core of... |
36 37 |
e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); if (!e) { |
c36258b59 [DLM] block dlm_r... |
38 39 |
log_print("dlm_add_requestqueue: out of memory len %d", length); return; |
e7fd41792 [DLM] The core of... |
40 41 42 43 |
} e->nodeid = nodeid; memcpy(e->request, hd, length); |
901359256 [DLM] Update DLM ... |
44 |
mutex_lock(&ls->ls_requestqueue_mutex); |
c36258b59 [DLM] block dlm_r... |
45 |
list_add_tail(&e->list, &ls->ls_requestqueue); |
901359256 [DLM] Update DLM ... |
46 |
mutex_unlock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
47 |
} |
c36258b59 [DLM] block dlm_r... |
48 49 50 51 52 53 54 55 56 57 |
/* * Called by dlm_recoverd to process normal messages saved while recovery was * happening. Normal locking has been enabled before this is called. dlm_recv * upon receiving a message, will wait for all saved messages to be drained * here before processing the message it got. If a new dlm_ls_stop() arrives * while we're processing these saved messages, it may block trying to suspend * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue. In that * case, we don't abort since locking_stopped is still 0. If dlm_recv is not * waiting for us, then this processing may be aborted due to locking_stopped. */ |
e7fd41792 [DLM] The core of... |
58 59 60 |
int dlm_process_requestqueue(struct dlm_ls *ls) { struct rq_entry *e; |
e7fd41792 [DLM] The core of... |
61 |
int error = 0; |
901359256 [DLM] Update DLM ... |
62 |
mutex_lock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
63 64 65 |
for (;;) { if (list_empty(&ls->ls_requestqueue)) { |
901359256 [DLM] Update DLM ... |
66 |
mutex_unlock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
67 68 69 70 |
error = 0; break; } e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); |
901359256 [DLM] Update DLM ... |
71 |
mutex_unlock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
72 |
|
c36258b59 [DLM] block dlm_r... |
73 |
dlm_receive_message_saved(ls, (struct dlm_message *)e->request); |
e7fd41792 [DLM] The core of... |
74 |
|
901359256 [DLM] Update DLM ... |
75 |
mutex_lock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
76 77 78 79 80 |
list_del(&e->list); kfree(e); if (dlm_locking_stopped(ls)) { log_debug(ls, "process_requestqueue abort running"); |
901359256 [DLM] Update DLM ... |
81 |
mutex_unlock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
82 83 84 85 86 87 88 89 90 91 92 |
error = -EINTR; break; } schedule(); } return error; } /* * After recovery is done, locking is resumed and dlm_recoverd takes all the |
c36258b59 [DLM] block dlm_r... |
93 94 95 96 97 98 |
* saved requests and processes them as they would have been by dlm_recv. At * the same time, dlm_recv will start receiving new requests from remote nodes. * We want to delay dlm_recv processing new requests until dlm_recoverd has * finished processing the old saved requests. We don't check for locking * stopped here because dlm_ls_stop won't stop locking until it's suspended us * (dlm_recv). |
e7fd41792 [DLM] The core of... |
99 100 101 102 103 |
*/ void dlm_wait_requestqueue(struct dlm_ls *ls) { for (;;) { |
901359256 [DLM] Update DLM ... |
104 |
mutex_lock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
105 106 |
if (list_empty(&ls->ls_requestqueue)) break; |
901359256 [DLM] Update DLM ... |
107 |
mutex_unlock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
108 109 |
schedule(); } |
901359256 [DLM] Update DLM ... |
110 |
mutex_unlock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
111 112 113 114 115 |
} static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid) { uint32_t type = ms->m_type; |
2896ee37c [DLM] fix add_req... |
116 117 118 |
/* the ls is being cleaned up and freed by release_lockspace */ if (!ls->ls_count) return 1; |
e7fd41792 [DLM] The core of... |
119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 |
if (dlm_is_removed(ls, nodeid)) return 1; /* directory operations are always purged because the directory is always rebuilt during recovery and the lookups resent */ if (type == DLM_MSG_REMOVE || type == DLM_MSG_LOOKUP || type == DLM_MSG_LOOKUP_REPLY) return 1; if (!dlm_no_directory(ls)) return 0; /* with no directory, the master is likely to change as a part of recovery; requests to/from the defunct master need to be purged */ switch (type) { case DLM_MSG_REQUEST: case DLM_MSG_CONVERT: case DLM_MSG_UNLOCK: case DLM_MSG_CANCEL: /* we're no longer the master of this resource, the sender will resend to the new master (see waiter_needs_recovery) */ if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid()) return 1; break; case DLM_MSG_REQUEST_REPLY: case DLM_MSG_CONVERT_REPLY: case DLM_MSG_UNLOCK_REPLY: case DLM_MSG_CANCEL_REPLY: case DLM_MSG_GRANT: /* this reply is from the former master of the resource, we'll resend to the new master if needed */ if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid) return 1; break; } return 0; } void dlm_purge_requestqueue(struct dlm_ls *ls) { struct dlm_message *ms; struct rq_entry *e, *safe; |
901359256 [DLM] Update DLM ... |
168 |
mutex_lock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
169 170 171 172 173 174 175 176 |
list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { ms = (struct dlm_message *) e->request; if (purge_request(ls, ms, e->nodeid)) { list_del(&e->list); kfree(e); } } |
901359256 [DLM] Update DLM ... |
177 |
mutex_unlock(&ls->ls_requestqueue_mutex); |
e7fd41792 [DLM] The core of... |
178 |
} |