Commit 98dcd59dd063dd8099d8dbccd84a40e927dc7138
Committed by
Greg Kroah-Hartman
1 parent
f6a4e494e0
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
misc: hpilo: increase number of max supported channels
Increase number of supported channels from 8 to 24. Make the number of channels configurable via module parameter max_ccb. Signed-off-by: Mark Rusk <mark.rusk@hp.com> Signed-off-by: Tony Camuso <tony.camuso@hp.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Showing 2 changed files with 24 additions and 13 deletions (inline diff view)
drivers/misc/hpilo.c
1 | /* | 1 | /* |
2 | * Driver for the HP iLO management processor. | 2 | * Driver for the HP iLO management processor. |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. | 4 | * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. |
5 | * David Altobelli <david.altobelli@hp.com> | 5 | * David Altobelli <david.altobelli@hp.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/ioport.h> | 17 | #include <linux/ioport.h> |
18 | #include <linux/device.h> | 18 | #include <linux/device.h> |
19 | #include <linux/file.h> | 19 | #include <linux/file.h> |
20 | #include <linux/cdev.h> | 20 | #include <linux/cdev.h> |
21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/uaccess.h> | 24 | #include <linux/uaccess.h> |
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | #include <linux/wait.h> | 26 | #include <linux/wait.h> |
27 | #include <linux/poll.h> | 27 | #include <linux/poll.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include "hpilo.h" | 29 | #include "hpilo.h" |
30 | 30 | ||
/* Driver-wide state shared by all iLO devices. */
static struct class *ilo_class;			/* sysfs class for char devices */
static unsigned int ilo_major;			/* dynamically allocated char major */
static unsigned int max_ccb = MIN_CCB;		/* channels per device; module param */
static char ilo_hwdev[MAX_ILO_DEV];		/* which device indexes are in use */
34 | 35 | ||
35 | static inline int get_entry_id(int entry) | 36 | static inline int get_entry_id(int entry) |
36 | { | 37 | { |
37 | return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR; | 38 | return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR; |
38 | } | 39 | } |
39 | 40 | ||
40 | static inline int get_entry_len(int entry) | 41 | static inline int get_entry_len(int entry) |
41 | { | 42 | { |
42 | return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3; | 43 | return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3; |
43 | } | 44 | } |
44 | 45 | ||
45 | static inline int mk_entry(int id, int len) | 46 | static inline int mk_entry(int id, int len) |
46 | { | 47 | { |
47 | int qlen = len & 7 ? (len >> 3) + 1 : len >> 3; | 48 | int qlen = len & 7 ? (len >> 3) + 1 : len >> 3; |
48 | return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS; | 49 | return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS; |
49 | } | 50 | } |
50 | 51 | ||
/* Bytes of descriptor memory needed for @nr_entry entries
 * (each entry is 2^L2_QENTRY_SZ bytes). */
static inline int desc_mem_sz(int nr_entry)
{
	return nr_entry << L2_QENTRY_SZ;
}
55 | 56 | ||
56 | /* | 57 | /* |
57 | * FIFO queues, shared with hardware. | 58 | * FIFO queues, shared with hardware. |
58 | * | 59 | * |
59 | * If a queue has empty slots, an entry is added to the queue tail, | 60 | * If a queue has empty slots, an entry is added to the queue tail, |
60 | * and that entry is marked as occupied. | 61 | * and that entry is marked as occupied. |
61 | * Entries can be dequeued from the head of the list, when the device | 62 | * Entries can be dequeued from the head of the list, when the device |
62 | * has marked the entry as consumed. | 63 | * has marked the entry as consumed. |
63 | * | 64 | * |
64 | * Returns true on successful queue/dequeue, false on failure. | 65 | * Returns true on successful queue/dequeue, false on failure. |
65 | */ | 66 | */ |
/*
 * Append @entry at the tail of the fifo backing @fifobar.
 * Returns 1 on success, 0 if the slot after the tail is still marked
 * occupied (queue full).
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;

	/* fifo_lock guards all head/tail/slot updates on this device */
	spin_lock_irqsave(&hw->fifo_lock, flags);
	if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
	      & ENTRY_MASK_O)) {
		/* free slot: merge the entry with the occupied-state bits */
		fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
				(entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
		fifo_q->tail += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}
84 | 85 | ||
/*
 * Remove the head entry of the fifo if the device has marked it consumed
 * (ENTRY_MASK_C).  If @entry is non-NULL the state-stripped entry is
 * returned through it.  Returns 1 on success, 0 if nothing was ready.
 */
static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C) {
		if (entry)
			*entry = c & ENTRY_MASK_NOSTATE;

		/* saturate the state bits and advance the slot's sequence */
		fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
							(c | ENTRY_MASK) + 1;
		fifo_q->head += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}
107 | 108 | ||
/*
 * Non-destructive peek: returns 1 if a consumed entry is waiting at the
 * fifo head, 0 otherwise.  Nothing is dequeued.
 */
static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C)
		ret = 1;
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}
123 | 124 | ||
/*
 * Queue packet @id of @len bytes on the ccb's send (@dir == SENDQ) or
 * receive fifo.  Returns the fifo_enqueue() result: 1 queued, 0 full.
 */
static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int id, int len)
{
	char *fifobar;
	int entry;

	if (dir == SENDQ)
		fifobar = ccb->ccb_u1.send_fifobar;
	else
		fifobar = ccb->ccb_u3.recv_fifobar;

	entry = mk_entry(id, len);
	return fifo_enqueue(hw, fifobar, entry);
}
138 | 139 | ||
/*
 * Dequeue one completed packet from the ccb's send or receive fifo.
 * Optionally returns the packet id (@id), its byte length (@len) and a
 * pointer into the matching descriptor memory (@pkt).  Returns 1 on
 * success, 0 if no completed packet was available.
 */
static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int *id, int *len, void **pkt)
{
	char *fifobar, *desc;
	int entry = 0, pkt_id = 0;
	int ret;

	if (dir == SENDQ) {
		fifobar = ccb->ccb_u1.send_fifobar;
		desc = ccb->ccb_u2.send_desc;
	} else {
		fifobar = ccb->ccb_u3.recv_fifobar;
		desc = ccb->ccb_u4.recv_desc;
	}

	ret = fifo_dequeue(hw, fifobar, &entry);
	if (ret) {
		pkt_id = get_entry_id(entry);
		if (id)
			*id = pkt_id;
		if (len)
			*len = get_entry_len(entry);
		if (pkt)
			/* packet id doubles as the descriptor slot index */
			*pkt = (void *)(desc + desc_mem_sz(pkt_id));
	}

	return ret;
}
167 | 168 | ||
168 | static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb) | 169 | static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb) |
169 | { | 170 | { |
170 | char *fifobar = ccb->ccb_u3.recv_fifobar; | 171 | char *fifobar = ccb->ccb_u3.recv_fifobar; |
171 | 172 | ||
172 | return fifo_check_recv(hw, fifobar); | 173 | return fifo_check_recv(hw, fifobar); |
173 | } | 174 | } |
174 | 175 | ||
/* Ring this channel's doorbell register (value 1) to notify the device. */
static inline void doorbell_set(struct ccb *ccb)
{
	iowrite8(1, ccb->ccb_u5.db_base);
}
179 | 180 | ||
/* Write the clear code (value 2) to this channel's doorbell register. */
static inline void doorbell_clr(struct ccb *ccb)
{
	iowrite8(2, ccb->ccb_u5.db_base);
}
184 | 185 | ||
185 | static inline int ctrl_set(int l2sz, int idxmask, int desclim) | 186 | static inline int ctrl_set(int l2sz, int idxmask, int desclim) |
186 | { | 187 | { |
187 | int active = 0, go = 1; | 188 | int active = 0, go = 1; |
188 | return l2sz << CTRL_BITPOS_L2SZ | | 189 | return l2sz << CTRL_BITPOS_L2SZ | |
189 | idxmask << CTRL_BITPOS_FIFOINDEXMASK | | 190 | idxmask << CTRL_BITPOS_FIFOINDEXMASK | |
190 | desclim << CTRL_BITPOS_DESCLIMIT | | 191 | desclim << CTRL_BITPOS_DESCLIMIT | |
191 | active << CTRL_BITPOS_A | | 192 | active << CTRL_BITPOS_A | |
192 | go << CTRL_BITPOS_G; | 193 | go << CTRL_BITPOS_G; |
193 | } | 194 | } |
194 | 195 | ||
/* Initialize both control words of @ccb for @nr_desc descriptors of
 * 2^@l2desc_sz bytes each. */
static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
{
	/* for simplicity, use the same parameters for send and recv ctrls */
	ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
	ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
}
201 | 202 | ||
202 | static inline int fifo_sz(int nr_entry) | 203 | static inline int fifo_sz(int nr_entry) |
203 | { | 204 | { |
204 | /* size of a fifo is determined by the number of entries it contains */ | 205 | /* size of a fifo is determined by the number of entries it contains */ |
205 | return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE; | 206 | return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE; |
206 | } | 207 | } |
207 | 208 | ||
/* Initialize the fifo handle at @base_addr as an empty queue of
 * @nr_entry slots.  nr_entry must be a power of two (imask relies on it). */
static void fifo_setup(void *base_addr, int nr_entry)
{
	struct fifo *fifo_q = base_addr;
	int i;

	/* set up an empty fifo */
	fifo_q->head = 0;
	fifo_q->tail = 0;
	fifo_q->reset = 0;
	fifo_q->nrents = nr_entry;
	fifo_q->imask = nr_entry - 1;	/* index wrap mask */
	fifo_q->merge = ENTRY_MASK_O;	/* state bits OR'd in on enqueue */

	for (i = 0; i < nr_entry; i++)
		fifo_q->fifobar[i] = 0;
}
224 | 225 | ||
/*
 * Shut down one channel: ask the hardware to stop, wait for it to go
 * inactive, then clear the device-side ccb and free the DMA backing.
 */
static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ccb __iomem *device_ccb = data->mapped_ccb;
	int retries;

	/* complicated dance to tell the hw we are stopping */
	doorbell_clr(driver_ccb);
	iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->send_ctrl);
	iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->recv_ctrl);

	/* give iLO some time to process stop request */
	for (retries = MAX_WAIT; retries > 0; retries--) {
		doorbell_set(driver_ccb);
		udelay(WAIT_TIME);
		/* done once both queues drop their Active bit */
		if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
		    &&
		    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
			break;
	}
	if (retries == 0)
		dev_err(&pdev->dev, "Closing, but controller still active\n");

	/* clear the hw ccb */
	memset_io(device_ccb, 0, sizeof(struct ccb));

	/* free resources used to back send/recv queues */
	pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
}
256 | 257 | ||
/*
 * Allocate and carve up the DMA region backing one channel's queues:
 * send fifo, recv fifo (cache aligned) and the two descriptor areas.
 * Fills both the driver ccb (virtual addresses) and the ilo ccb
 * (physical addresses).  Returns 0 or -ENOMEM.
 */
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	char *dma_va;
	dma_addr_t dma_pa;
	struct ccb *driver_ccb, *ilo_ccb;

	driver_ccb = &data->driver_ccb;
	ilo_ccb = &data->ilo_ccb;

	/* extra bytes cover the two alignment round-ups below */
	data->dma_size = 2 * fifo_sz(NR_QENTRY) +
			 2 * desc_mem_sz(NR_QENTRY) +
			 ILO_START_ALIGN + ILO_CACHE_SZ;

	data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
					    &data->dma_pa);
	if (!data->dma_va)
		return -ENOMEM;

	dma_va = (char *)data->dma_va;
	dma_pa = data->dma_pa;

	memset(dma_va, 0, data->dma_size);

	/* va and pa are advanced in lock-step so they always correspond */
	dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
	dma_pa = roundup(dma_pa, ILO_START_ALIGN);

	/*
	 * Create two ccb's, one with virt addrs, one with phys addrs.
	 * Copy the phys addr ccb to device shared mem.
	 */
	ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
	ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

	/* send fifo: handle header precedes the entry array */
	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
	dma_pa = roundup(dma_pa, ILO_CACHE_SZ);

	/* recv fifo */
	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	/* send and recv descriptor areas */
	driver_ccb->ccb_u2.send_desc = dma_va;
	ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
	dma_pa += desc_mem_sz(NR_QENTRY);
	dma_va += desc_mem_sz(NR_QENTRY);

	driver_ccb->ccb_u4.recv_desc = dma_va;
	ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;

	driver_ccb->channel = slot;
	ilo_ccb->channel = slot;

	driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
	ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

	return 0;
}
321 | 322 | ||
/*
 * Publish the channel to the hardware: copy the phys-addr ccb into
 * device memory and prime both queues with empty packets.
 */
static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	int pkt_id, pkt_sz;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* copy the ccb with physical addrs to device memory */
	data->mapped_ccb = (struct ccb __iomem *)
				(hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
	memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));

	/* put packets on the send and receive queues */
	pkt_sz = 0;	/* send side starts with zero-length packets */
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
		ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
		doorbell_set(driver_ccb);
	}

	/* recv side gets full-size buffers for incoming data */
	pkt_sz = desc_mem_sz(1);
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
		ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

	/* the ccb is ready to use */
	doorbell_clr(driver_ccb);
}
346 | 347 | ||
/*
 * Confirm the device is servicing the channel by waiting for it to
 * consume one of the primed send packets, then requeue that packet.
 * Returns 0 on success, -EBUSY if the device never responded.
 */
static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
{
	int pkt_id, i;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* make sure iLO is really handling requests */
	for (i = MAX_WAIT; i > 0; i--) {
		if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
			break;
		udelay(WAIT_TIME);
	}

	if (i == 0) {
		dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
		return -EBUSY;
	}

	/* give the packet back so the send queue stays fully stocked */
	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
	doorbell_set(driver_ccb);
	return 0;
}
368 | 369 | ||
static inline int is_channel_reset(struct ccb *ccb)
{
	/* check for this particular channel needing a reset */
	return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}
374 | 375 | ||
static inline void set_channel_reset(struct ccb *ccb)
{
	/* set a flag indicating this channel needs a reset */
	FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}
380 | 381 | ||
/* Read the device's outbound doorbell register. */
static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
	return ioread32(&hw->mmio_vaddr[DB_OUT]);
}
385 | 386 | ||
/* Test the reset bit in an outbound doorbell value (non-zero if set). */
static inline int is_db_reset(int db_out)
{
	return db_out & (1 << DB_RESET);
}
390 | 391 | ||
static inline int is_device_reset(struct ilo_hwinfo *hw)
{
	/* check for global reset condition */
	return is_db_reset(get_device_outbound(hw));
}
396 | 397 | ||
/* Acknowledge pending doorbell bits by writing @clr back to DB_OUT. */
static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
{
	iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
}
401 | 402 | ||
static inline void clear_device(struct ilo_hwinfo *hw)
{
	/* clear the device (reset bits, pending channel entries) */
	clear_pending_db(hw, -1);	/* -1 == all bits set */
}
407 | 408 | ||
/* Set bit 0 of the DB_IRQ register to enable device interrupts. */
static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
}
412 | 413 | ||
/* Clear bit 0 of the DB_IRQ register to disable device interrupts. */
static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
		 &hw->mmio_vaddr[DB_IRQ]);
}
418 | 419 | ||
/* Flag every allocated channel as needing close/reopen after a device
 * reset.  Iterates up to max_ccb (module-param controlled). */
static void ilo_set_reset(struct ilo_hwinfo *hw)
{
	int slot;

	/*
	 * Mapped memory is zeroed on ilo reset, so set a per ccb flag
	 * to indicate that this ccb needs to be closed and reopened.
	 */
	for (slot = 0; slot < max_ccb; slot++) {
		if (!hw->ccb_alloc[slot])
			continue;
		set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
	}
}
433 | 434 | ||
/*
 * Char-device read: copy one received packet (truncated to @len) to
 * userspace and return the packet buffer to the receive queue.
 * Returns bytes copied, -ENODEV after a device reset, -EAGAIN if no
 * packet arrived within the retry window, or -EFAULT on copy failure.
 */
static ssize_t ilo_read(struct file *fp, char __user *buf,
			size_t len, loff_t *off)
{
	int err, found, cnt, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb)) {
		/*
		 * If the device has been reset, applications
		 * need to close and reopen all ccbs.
		 */
		return -ENODEV;
	}

	/*
	 * This function is to be called when data is expected
	 * in the channel, and will return an error if no packet is found
	 * during the loop below.  The sleep/retry logic is to allow
	 * applications to call read() immediately post write(),
	 * and give iLO some time to process the sent packet.
	 */
	cnt = 20;	/* up to ~2s total (20 * 100ms) */
	do {
		/* look for a received packet */
		found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
					&pkt_len, &pkt);
		if (found)
			break;
		cnt--;
		msleep(100);
	} while (!found && cnt);

	if (!found)
		return -EAGAIN;

	/* only copy the length of the received packet */
	if (pkt_len < len)
		len = pkt_len;

	err = copy_to_user(buf, pkt, len);

	/* return the received packet to the queue */
	ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

	return err ? -EFAULT : len;
}
483 | 484 | ||
/*
 * Char-device write: grab a free send packet, fill it from userspace
 * (truncated to the packet size) and hand it to the device.
 * Returns bytes sent, -ENODEV after a device reset, -EBUSY if no send
 * packet is free, or -EFAULT on copy failure.
 */
static ssize_t ilo_write(struct file *fp, const char __user *buf,
			 size_t len, loff_t *off)
{
	int err, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb))
		return -ENODEV;

	/* get a packet to send the user command */
	if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
		return -EBUSY;

	/* limit the length to the length of the packet */
	if (pkt_len < len)
		len = pkt_len;

	/* on failure, set the len to 0 to return empty packet to the device */
	err = copy_from_user(pkt, buf, len);
	if (err)
		len = 0;

	/* send the packet */
	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
	doorbell_set(driver_ccb);

	return err ? -EFAULT : len;
}
515 | 516 | ||
516 | static unsigned int ilo_poll(struct file *fp, poll_table *wait) | 517 | static unsigned int ilo_poll(struct file *fp, poll_table *wait) |
517 | { | 518 | { |
518 | struct ccb_data *data = fp->private_data; | 519 | struct ccb_data *data = fp->private_data; |
519 | struct ccb *driver_ccb = &data->driver_ccb; | 520 | struct ccb *driver_ccb = &data->driver_ccb; |
520 | 521 | ||
521 | poll_wait(fp, &data->ccb_waitq, wait); | 522 | poll_wait(fp, &data->ccb_waitq, wait); |
522 | 523 | ||
523 | if (is_channel_reset(driver_ccb)) | 524 | if (is_channel_reset(driver_ccb)) |
524 | return POLLERR; | 525 | return POLLERR; |
525 | else if (ilo_pkt_recv(data->ilo_hw, driver_ccb)) | 526 | else if (ilo_pkt_recv(data->ilo_hw, driver_ccb)) |
526 | return POLLIN | POLLRDNORM; | 527 | return POLLIN | POLLRDNORM; |
527 | 528 | ||
528 | return 0; | 529 | return 0; |
529 | } | 530 | } |
530 | 531 | ||
531 | static int ilo_close(struct inode *ip, struct file *fp) | 532 | static int ilo_close(struct inode *ip, struct file *fp) |
532 | { | 533 | { |
533 | int slot; | 534 | int slot; |
534 | struct ccb_data *data; | 535 | struct ccb_data *data; |
535 | struct ilo_hwinfo *hw; | 536 | struct ilo_hwinfo *hw; |
536 | unsigned long flags; | 537 | unsigned long flags; |
537 | 538 | ||
538 | slot = iminor(ip) % MAX_CCB; | 539 | slot = iminor(ip) % max_ccb; |
539 | hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); | 540 | hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); |
540 | 541 | ||
541 | spin_lock(&hw->open_lock); | 542 | spin_lock(&hw->open_lock); |
542 | 543 | ||
543 | if (hw->ccb_alloc[slot]->ccb_cnt == 1) { | 544 | if (hw->ccb_alloc[slot]->ccb_cnt == 1) { |
544 | 545 | ||
545 | data = fp->private_data; | 546 | data = fp->private_data; |
546 | 547 | ||
547 | spin_lock_irqsave(&hw->alloc_lock, flags); | 548 | spin_lock_irqsave(&hw->alloc_lock, flags); |
548 | hw->ccb_alloc[slot] = NULL; | 549 | hw->ccb_alloc[slot] = NULL; |
549 | spin_unlock_irqrestore(&hw->alloc_lock, flags); | 550 | spin_unlock_irqrestore(&hw->alloc_lock, flags); |
550 | 551 | ||
551 | ilo_ccb_close(hw->ilo_dev, data); | 552 | ilo_ccb_close(hw->ilo_dev, data); |
552 | 553 | ||
553 | kfree(data); | 554 | kfree(data); |
554 | } else | 555 | } else |
555 | hw->ccb_alloc[slot]->ccb_cnt--; | 556 | hw->ccb_alloc[slot]->ccb_cnt--; |
556 | 557 | ||
557 | spin_unlock(&hw->open_lock); | 558 | spin_unlock(&hw->open_lock); |
558 | 559 | ||
559 | return 0; | 560 | return 0; |
560 | } | 561 | } |
561 | 562 | ||
562 | static int ilo_open(struct inode *ip, struct file *fp) | 563 | static int ilo_open(struct inode *ip, struct file *fp) |
563 | { | 564 | { |
564 | int slot, error; | 565 | int slot, error; |
565 | struct ccb_data *data; | 566 | struct ccb_data *data; |
566 | struct ilo_hwinfo *hw; | 567 | struct ilo_hwinfo *hw; |
567 | unsigned long flags; | 568 | unsigned long flags; |
568 | 569 | ||
569 | slot = iminor(ip) % MAX_CCB; | 570 | slot = iminor(ip) % max_ccb; |
570 | hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); | 571 | hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev); |
571 | 572 | ||
572 | /* new ccb allocation */ | 573 | /* new ccb allocation */ |
573 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 574 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
574 | if (!data) | 575 | if (!data) |
575 | return -ENOMEM; | 576 | return -ENOMEM; |
576 | 577 | ||
577 | spin_lock(&hw->open_lock); | 578 | spin_lock(&hw->open_lock); |
578 | 579 | ||
579 | /* each fd private_data holds sw/hw view of ccb */ | 580 | /* each fd private_data holds sw/hw view of ccb */ |
580 | if (hw->ccb_alloc[slot] == NULL) { | 581 | if (hw->ccb_alloc[slot] == NULL) { |
581 | /* create a channel control block for this minor */ | 582 | /* create a channel control block for this minor */ |
582 | error = ilo_ccb_setup(hw, data, slot); | 583 | error = ilo_ccb_setup(hw, data, slot); |
583 | if (error) { | 584 | if (error) { |
584 | kfree(data); | 585 | kfree(data); |
585 | goto out; | 586 | goto out; |
586 | } | 587 | } |
587 | 588 | ||
588 | data->ccb_cnt = 1; | 589 | data->ccb_cnt = 1; |
589 | data->ccb_excl = fp->f_flags & O_EXCL; | 590 | data->ccb_excl = fp->f_flags & O_EXCL; |
590 | data->ilo_hw = hw; | 591 | data->ilo_hw = hw; |
591 | init_waitqueue_head(&data->ccb_waitq); | 592 | init_waitqueue_head(&data->ccb_waitq); |
592 | 593 | ||
593 | /* write the ccb to hw */ | 594 | /* write the ccb to hw */ |
594 | spin_lock_irqsave(&hw->alloc_lock, flags); | 595 | spin_lock_irqsave(&hw->alloc_lock, flags); |
595 | ilo_ccb_open(hw, data, slot); | 596 | ilo_ccb_open(hw, data, slot); |
596 | hw->ccb_alloc[slot] = data; | 597 | hw->ccb_alloc[slot] = data; |
597 | spin_unlock_irqrestore(&hw->alloc_lock, flags); | 598 | spin_unlock_irqrestore(&hw->alloc_lock, flags); |
598 | 599 | ||
599 | /* make sure the channel is functional */ | 600 | /* make sure the channel is functional */ |
600 | error = ilo_ccb_verify(hw, data); | 601 | error = ilo_ccb_verify(hw, data); |
601 | if (error) { | 602 | if (error) { |
602 | 603 | ||
603 | spin_lock_irqsave(&hw->alloc_lock, flags); | 604 | spin_lock_irqsave(&hw->alloc_lock, flags); |
604 | hw->ccb_alloc[slot] = NULL; | 605 | hw->ccb_alloc[slot] = NULL; |
605 | spin_unlock_irqrestore(&hw->alloc_lock, flags); | 606 | spin_unlock_irqrestore(&hw->alloc_lock, flags); |
606 | 607 | ||
607 | ilo_ccb_close(hw->ilo_dev, data); | 608 | ilo_ccb_close(hw->ilo_dev, data); |
608 | 609 | ||
609 | kfree(data); | 610 | kfree(data); |
610 | goto out; | 611 | goto out; |
611 | } | 612 | } |
612 | 613 | ||
613 | } else { | 614 | } else { |
614 | kfree(data); | 615 | kfree(data); |
615 | if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) { | 616 | if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) { |
616 | /* | 617 | /* |
617 | * The channel exists, and either this open | 618 | * The channel exists, and either this open |
618 | * or a previous open of this channel wants | 619 | * or a previous open of this channel wants |
619 | * exclusive access. | 620 | * exclusive access. |
620 | */ | 621 | */ |
621 | error = -EBUSY; | 622 | error = -EBUSY; |
622 | } else { | 623 | } else { |
623 | hw->ccb_alloc[slot]->ccb_cnt++; | 624 | hw->ccb_alloc[slot]->ccb_cnt++; |
624 | error = 0; | 625 | error = 0; |
625 | } | 626 | } |
626 | } | 627 | } |
627 | out: | 628 | out: |
628 | spin_unlock(&hw->open_lock); | 629 | spin_unlock(&hw->open_lock); |
629 | 630 | ||
630 | if (!error) | 631 | if (!error) |
631 | fp->private_data = hw->ccb_alloc[slot]; | 632 | fp->private_data = hw->ccb_alloc[slot]; |
632 | 633 | ||
633 | return error; | 634 | return error; |
634 | } | 635 | } |
635 | 636 | ||
636 | static const struct file_operations ilo_fops = { | 637 | static const struct file_operations ilo_fops = { |
637 | .owner = THIS_MODULE, | 638 | .owner = THIS_MODULE, |
638 | .read = ilo_read, | 639 | .read = ilo_read, |
639 | .write = ilo_write, | 640 | .write = ilo_write, |
640 | .poll = ilo_poll, | 641 | .poll = ilo_poll, |
641 | .open = ilo_open, | 642 | .open = ilo_open, |
642 | .release = ilo_close, | 643 | .release = ilo_close, |
643 | .llseek = noop_llseek, | 644 | .llseek = noop_llseek, |
644 | }; | 645 | }; |
645 | 646 | ||
646 | static irqreturn_t ilo_isr(int irq, void *data) | 647 | static irqreturn_t ilo_isr(int irq, void *data) |
647 | { | 648 | { |
648 | struct ilo_hwinfo *hw = data; | 649 | struct ilo_hwinfo *hw = data; |
649 | int pending, i; | 650 | int pending, i; |
650 | 651 | ||
651 | spin_lock(&hw->alloc_lock); | 652 | spin_lock(&hw->alloc_lock); |
652 | 653 | ||
653 | /* check for ccbs which have data */ | 654 | /* check for ccbs which have data */ |
654 | pending = get_device_outbound(hw); | 655 | pending = get_device_outbound(hw); |
655 | if (!pending) { | 656 | if (!pending) { |
656 | spin_unlock(&hw->alloc_lock); | 657 | spin_unlock(&hw->alloc_lock); |
657 | return IRQ_NONE; | 658 | return IRQ_NONE; |
658 | } | 659 | } |
659 | 660 | ||
660 | if (is_db_reset(pending)) { | 661 | if (is_db_reset(pending)) { |
661 | /* wake up all ccbs if the device was reset */ | 662 | /* wake up all ccbs if the device was reset */ |
662 | pending = -1; | 663 | pending = -1; |
663 | ilo_set_reset(hw); | 664 | ilo_set_reset(hw); |
664 | } | 665 | } |
665 | 666 | ||
666 | for (i = 0; i < MAX_CCB; i++) { | 667 | for (i = 0; i < max_ccb; i++) { |
667 | if (!hw->ccb_alloc[i]) | 668 | if (!hw->ccb_alloc[i]) |
668 | continue; | 669 | continue; |
669 | if (pending & (1 << i)) | 670 | if (pending & (1 << i)) |
670 | wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq); | 671 | wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq); |
671 | } | 672 | } |
672 | 673 | ||
673 | /* clear the device of the channels that have been handled */ | 674 | /* clear the device of the channels that have been handled */ |
674 | clear_pending_db(hw, pending); | 675 | clear_pending_db(hw, pending); |
675 | 676 | ||
676 | spin_unlock(&hw->alloc_lock); | 677 | spin_unlock(&hw->alloc_lock); |
677 | 678 | ||
678 | return IRQ_HANDLED; | 679 | return IRQ_HANDLED; |
679 | } | 680 | } |
680 | 681 | ||
681 | static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) | 682 | static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) |
682 | { | 683 | { |
683 | pci_iounmap(pdev, hw->db_vaddr); | 684 | pci_iounmap(pdev, hw->db_vaddr); |
684 | pci_iounmap(pdev, hw->ram_vaddr); | 685 | pci_iounmap(pdev, hw->ram_vaddr); |
685 | pci_iounmap(pdev, hw->mmio_vaddr); | 686 | pci_iounmap(pdev, hw->mmio_vaddr); |
686 | } | 687 | } |
687 | 688 | ||
688 | static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) | 689 | static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw) |
689 | { | 690 | { |
690 | int error = -ENOMEM; | 691 | int error = -ENOMEM; |
691 | 692 | ||
692 | /* map the memory mapped i/o registers */ | 693 | /* map the memory mapped i/o registers */ |
693 | hw->mmio_vaddr = pci_iomap(pdev, 1, 0); | 694 | hw->mmio_vaddr = pci_iomap(pdev, 1, 0); |
694 | if (hw->mmio_vaddr == NULL) { | 695 | if (hw->mmio_vaddr == NULL) { |
695 | dev_err(&pdev->dev, "Error mapping mmio\n"); | 696 | dev_err(&pdev->dev, "Error mapping mmio\n"); |
696 | goto out; | 697 | goto out; |
697 | } | 698 | } |
698 | 699 | ||
699 | /* map the adapter shared memory region */ | 700 | /* map the adapter shared memory region */ |
700 | hw->ram_vaddr = pci_iomap(pdev, 2, MAX_CCB * ILOHW_CCB_SZ); | 701 | hw->ram_vaddr = pci_iomap(pdev, 2, max_ccb * ILOHW_CCB_SZ); |
701 | if (hw->ram_vaddr == NULL) { | 702 | if (hw->ram_vaddr == NULL) { |
702 | dev_err(&pdev->dev, "Error mapping shared mem\n"); | 703 | dev_err(&pdev->dev, "Error mapping shared mem\n"); |
703 | goto mmio_free; | 704 | goto mmio_free; |
704 | } | 705 | } |
705 | 706 | ||
706 | /* map the doorbell aperture */ | 707 | /* map the doorbell aperture */ |
707 | hw->db_vaddr = pci_iomap(pdev, 3, MAX_CCB * ONE_DB_SIZE); | 708 | hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE); |
708 | if (hw->db_vaddr == NULL) { | 709 | if (hw->db_vaddr == NULL) { |
709 | dev_err(&pdev->dev, "Error mapping doorbell\n"); | 710 | dev_err(&pdev->dev, "Error mapping doorbell\n"); |
710 | goto ram_free; | 711 | goto ram_free; |
711 | } | 712 | } |
712 | 713 | ||
713 | return 0; | 714 | return 0; |
714 | ram_free: | 715 | ram_free: |
715 | pci_iounmap(pdev, hw->ram_vaddr); | 716 | pci_iounmap(pdev, hw->ram_vaddr); |
716 | mmio_free: | 717 | mmio_free: |
717 | pci_iounmap(pdev, hw->mmio_vaddr); | 718 | pci_iounmap(pdev, hw->mmio_vaddr); |
718 | out: | 719 | out: |
719 | return error; | 720 | return error; |
720 | } | 721 | } |
721 | 722 | ||
722 | static void ilo_remove(struct pci_dev *pdev) | 723 | static void ilo_remove(struct pci_dev *pdev) |
723 | { | 724 | { |
724 | int i, minor; | 725 | int i, minor; |
725 | struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev); | 726 | struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev); |
726 | 727 | ||
727 | clear_device(ilo_hw); | 728 | clear_device(ilo_hw); |
728 | 729 | ||
729 | minor = MINOR(ilo_hw->cdev.dev); | 730 | minor = MINOR(ilo_hw->cdev.dev); |
730 | for (i = minor; i < minor + MAX_CCB; i++) | 731 | for (i = minor; i < minor + max_ccb; i++) |
731 | device_destroy(ilo_class, MKDEV(ilo_major, i)); | 732 | device_destroy(ilo_class, MKDEV(ilo_major, i)); |
732 | 733 | ||
733 | cdev_del(&ilo_hw->cdev); | 734 | cdev_del(&ilo_hw->cdev); |
734 | ilo_disable_interrupts(ilo_hw); | 735 | ilo_disable_interrupts(ilo_hw); |
735 | free_irq(pdev->irq, ilo_hw); | 736 | free_irq(pdev->irq, ilo_hw); |
736 | ilo_unmap_device(pdev, ilo_hw); | 737 | ilo_unmap_device(pdev, ilo_hw); |
737 | pci_release_regions(pdev); | 738 | pci_release_regions(pdev); |
738 | pci_disable_device(pdev); | 739 | pci_disable_device(pdev); |
739 | kfree(ilo_hw); | 740 | kfree(ilo_hw); |
740 | ilo_hwdev[(minor / MAX_CCB)] = 0; | 741 | ilo_hwdev[(minor / max_ccb)] = 0; |
741 | } | 742 | } |
742 | 743 | ||
743 | static int __devinit ilo_probe(struct pci_dev *pdev, | 744 | static int __devinit ilo_probe(struct pci_dev *pdev, |
744 | const struct pci_device_id *ent) | 745 | const struct pci_device_id *ent) |
745 | { | 746 | { |
746 | int devnum, minor, start, error; | 747 | int devnum, minor, start, error; |
747 | struct ilo_hwinfo *ilo_hw; | 748 | struct ilo_hwinfo *ilo_hw; |
748 | 749 | ||
750 | if (max_ccb > MAX_CCB) | ||
751 | max_ccb = MAX_CCB; | ||
752 | else if (max_ccb < MIN_CCB) | ||
753 | max_ccb = MIN_CCB; | ||
754 | |||
749 | /* find a free range for device files */ | 755 | /* find a free range for device files */ |
750 | for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) { | 756 | for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) { |
751 | if (ilo_hwdev[devnum] == 0) { | 757 | if (ilo_hwdev[devnum] == 0) { |
752 | ilo_hwdev[devnum] = 1; | 758 | ilo_hwdev[devnum] = 1; |
753 | break; | 759 | break; |
754 | } | 760 | } |
755 | } | 761 | } |
756 | 762 | ||
757 | if (devnum == MAX_ILO_DEV) { | 763 | if (devnum == MAX_ILO_DEV) { |
758 | dev_err(&pdev->dev, "Error finding free device\n"); | 764 | dev_err(&pdev->dev, "Error finding free device\n"); |
759 | return -ENODEV; | 765 | return -ENODEV; |
760 | } | 766 | } |
761 | 767 | ||
762 | /* track global allocations for this device */ | 768 | /* track global allocations for this device */ |
763 | error = -ENOMEM; | 769 | error = -ENOMEM; |
764 | ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL); | 770 | ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL); |
765 | if (!ilo_hw) | 771 | if (!ilo_hw) |
766 | goto out; | 772 | goto out; |
767 | 773 | ||
768 | ilo_hw->ilo_dev = pdev; | 774 | ilo_hw->ilo_dev = pdev; |
769 | spin_lock_init(&ilo_hw->alloc_lock); | 775 | spin_lock_init(&ilo_hw->alloc_lock); |
770 | spin_lock_init(&ilo_hw->fifo_lock); | 776 | spin_lock_init(&ilo_hw->fifo_lock); |
771 | spin_lock_init(&ilo_hw->open_lock); | 777 | spin_lock_init(&ilo_hw->open_lock); |
772 | 778 | ||
773 | error = pci_enable_device(pdev); | 779 | error = pci_enable_device(pdev); |
774 | if (error) | 780 | if (error) |
775 | goto free; | 781 | goto free; |
776 | 782 | ||
777 | pci_set_master(pdev); | 783 | pci_set_master(pdev); |
778 | 784 | ||
779 | error = pci_request_regions(pdev, ILO_NAME); | 785 | error = pci_request_regions(pdev, ILO_NAME); |
780 | if (error) | 786 | if (error) |
781 | goto disable; | 787 | goto disable; |
782 | 788 | ||
783 | error = ilo_map_device(pdev, ilo_hw); | 789 | error = ilo_map_device(pdev, ilo_hw); |
784 | if (error) | 790 | if (error) |
785 | goto free_regions; | 791 | goto free_regions; |
786 | 792 | ||
787 | pci_set_drvdata(pdev, ilo_hw); | 793 | pci_set_drvdata(pdev, ilo_hw); |
788 | clear_device(ilo_hw); | 794 | clear_device(ilo_hw); |
789 | 795 | ||
790 | error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw); | 796 | error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw); |
791 | if (error) | 797 | if (error) |
792 | goto unmap; | 798 | goto unmap; |
793 | 799 | ||
794 | ilo_enable_interrupts(ilo_hw); | 800 | ilo_enable_interrupts(ilo_hw); |
795 | 801 | ||
796 | cdev_init(&ilo_hw->cdev, &ilo_fops); | 802 | cdev_init(&ilo_hw->cdev, &ilo_fops); |
797 | ilo_hw->cdev.owner = THIS_MODULE; | 803 | ilo_hw->cdev.owner = THIS_MODULE; |
798 | start = devnum * MAX_CCB; | 804 | start = devnum * max_ccb; |
799 | error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB); | 805 | error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb); |
800 | if (error) { | 806 | if (error) { |
801 | dev_err(&pdev->dev, "Could not add cdev\n"); | 807 | dev_err(&pdev->dev, "Could not add cdev\n"); |
802 | goto remove_isr; | 808 | goto remove_isr; |
803 | } | 809 | } |
804 | 810 | ||
805 | for (minor = 0 ; minor < MAX_CCB; minor++) { | 811 | for (minor = 0 ; minor < max_ccb; minor++) { |
806 | struct device *dev; | 812 | struct device *dev; |
807 | dev = device_create(ilo_class, &pdev->dev, | 813 | dev = device_create(ilo_class, &pdev->dev, |
808 | MKDEV(ilo_major, minor), NULL, | 814 | MKDEV(ilo_major, minor), NULL, |
809 | "hpilo!d%dccb%d", devnum, minor); | 815 | "hpilo!d%dccb%d", devnum, minor); |
810 | if (IS_ERR(dev)) | 816 | if (IS_ERR(dev)) |
811 | dev_err(&pdev->dev, "Could not create files\n"); | 817 | dev_err(&pdev->dev, "Could not create files\n"); |
812 | } | 818 | } |
813 | 819 | ||
814 | return 0; | 820 | return 0; |
815 | remove_isr: | 821 | remove_isr: |
816 | ilo_disable_interrupts(ilo_hw); | 822 | ilo_disable_interrupts(ilo_hw); |
817 | free_irq(pdev->irq, ilo_hw); | 823 | free_irq(pdev->irq, ilo_hw); |
818 | unmap: | 824 | unmap: |
819 | ilo_unmap_device(pdev, ilo_hw); | 825 | ilo_unmap_device(pdev, ilo_hw); |
820 | free_regions: | 826 | free_regions: |
821 | pci_release_regions(pdev); | 827 | pci_release_regions(pdev); |
822 | disable: | 828 | disable: |
823 | pci_disable_device(pdev); | 829 | pci_disable_device(pdev); |
824 | free: | 830 | free: |
825 | kfree(ilo_hw); | 831 | kfree(ilo_hw); |
826 | out: | 832 | out: |
827 | ilo_hwdev[devnum] = 0; | 833 | ilo_hwdev[devnum] = 0; |
828 | return error; | 834 | return error; |
829 | } | 835 | } |
830 | 836 | ||
831 | static struct pci_device_id ilo_devices[] = { | 837 | static struct pci_device_id ilo_devices[] = { |
832 | { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) }, | 838 | { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) }, |
833 | { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) }, | 839 | { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) }, |
834 | { } | 840 | { } |
835 | }; | 841 | }; |
836 | MODULE_DEVICE_TABLE(pci, ilo_devices); | 842 | MODULE_DEVICE_TABLE(pci, ilo_devices); |
837 | 843 | ||
838 | static struct pci_driver ilo_driver = { | 844 | static struct pci_driver ilo_driver = { |
839 | .name = ILO_NAME, | 845 | .name = ILO_NAME, |
840 | .id_table = ilo_devices, | 846 | .id_table = ilo_devices, |
841 | .probe = ilo_probe, | 847 | .probe = ilo_probe, |
842 | .remove = __devexit_p(ilo_remove), | 848 | .remove = __devexit_p(ilo_remove), |
843 | }; | 849 | }; |
844 | 850 | ||
845 | static int __init ilo_init(void) | 851 | static int __init ilo_init(void) |
846 | { | 852 | { |
847 | int error; | 853 | int error; |
848 | dev_t dev; | 854 | dev_t dev; |
849 | 855 | ||
850 | ilo_class = class_create(THIS_MODULE, "iLO"); | 856 | ilo_class = class_create(THIS_MODULE, "iLO"); |
851 | if (IS_ERR(ilo_class)) { | 857 | if (IS_ERR(ilo_class)) { |
852 | error = PTR_ERR(ilo_class); | 858 | error = PTR_ERR(ilo_class); |
853 | goto out; | 859 | goto out; |
854 | } | 860 | } |
855 | 861 | ||
856 | error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME); | 862 | error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME); |
857 | if (error) | 863 | if (error) |
858 | goto class_destroy; | 864 | goto class_destroy; |
859 | 865 | ||
860 | ilo_major = MAJOR(dev); | 866 | ilo_major = MAJOR(dev); |
861 | 867 | ||
862 | error = pci_register_driver(&ilo_driver); | 868 | error = pci_register_driver(&ilo_driver); |
863 | if (error) | 869 | if (error) |
864 | goto chr_remove; | 870 | goto chr_remove; |
865 | 871 | ||
866 | return 0; | 872 | return 0; |
867 | chr_remove: | 873 | chr_remove: |
868 | unregister_chrdev_region(dev, MAX_OPEN); | 874 | unregister_chrdev_region(dev, MAX_OPEN); |
869 | class_destroy: | 875 | class_destroy: |
870 | class_destroy(ilo_class); | 876 | class_destroy(ilo_class); |
871 | out: | 877 | out: |
872 | return error; | 878 | return error; |
873 | } | 879 | } |
874 | 880 | ||
875 | static void __exit ilo_exit(void) | 881 | static void __exit ilo_exit(void) |
876 | { | 882 | { |
877 | pci_unregister_driver(&ilo_driver); | 883 | pci_unregister_driver(&ilo_driver); |
878 | unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN); | 884 | unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN); |
879 | class_destroy(ilo_class); | 885 | class_destroy(ilo_class); |
880 | } | 886 | } |
881 | 887 | ||
882 | MODULE_VERSION("1.2"); | 888 | MODULE_VERSION("1.3"); |
883 | MODULE_ALIAS(ILO_NAME); | 889 | MODULE_ALIAS(ILO_NAME); |
884 | MODULE_DESCRIPTION(ILO_NAME); | 890 | MODULE_DESCRIPTION(ILO_NAME); |
885 | MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>"); | 891 | MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>"); |
886 | MODULE_LICENSE("GPL v2"); | 892 | MODULE_LICENSE("GPL v2"); |
893 | |||
894 | module_param(max_ccb, uint, 0444); | ||
895 | MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8)"); | ||
887 | 896 | ||
888 | module_init(ilo_init); | 897 | module_init(ilo_init); |
889 | module_exit(ilo_exit); | 898 | module_exit(ilo_exit); |
890 | 899 |
drivers/misc/hpilo.h
1 | /* | 1 | /* |
2 | * linux/drivers/char/hpilo.h | 2 | * linux/drivers/char/hpilo.h |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. | 4 | * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. |
5 | * David Altobelli <david.altobelli@hp.com> | 5 | * David Altobelli <david.altobelli@hp.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #ifndef __HPILO_H | 11 | #ifndef __HPILO_H |
12 | #define __HPILO_H | 12 | #define __HPILO_H |
13 | 13 | ||
14 | #define ILO_NAME "hpilo" | 14 | #define ILO_NAME "hpilo" |
15 | 15 | ||
16 | /* max number of open channel control blocks per device, hw limited to 32 */ | 16 | /* max number of open channel control blocks per device, hw limited to 32 */ |
17 | #define MAX_CCB 8 | 17 | #define MAX_CCB 24 |
18 | /* min number of open channel control blocks per device, hw limited to 32 */ | ||
19 | #define MIN_CCB 8 | ||
18 | /* max number of supported devices */ | 20 | /* max number of supported devices */ |
19 | #define MAX_ILO_DEV 1 | 21 | #define MAX_ILO_DEV 1 |
20 | /* max number of files */ | 22 | /* max number of files */ |
21 | #define MAX_OPEN (MAX_CCB * MAX_ILO_DEV) | 23 | #define MAX_OPEN (MAX_CCB * MAX_ILO_DEV) |
22 | /* total wait time in usec */ | 24 | /* total wait time in usec */ |
23 | #define MAX_WAIT_TIME 10000 | 25 | #define MAX_WAIT_TIME 10000 |
24 | /* per spin wait time in usec */ | 26 | /* per spin wait time in usec */ |
25 | #define WAIT_TIME 10 | 27 | #define WAIT_TIME 10 |
26 | /* spin counter for open/close delay */ | 28 | /* spin counter for open/close delay */ |
27 | #define MAX_WAIT (MAX_WAIT_TIME / WAIT_TIME) | 29 | #define MAX_WAIT (MAX_WAIT_TIME / WAIT_TIME) |
28 | 30 | ||
29 | /* | 31 | /* |
30 | * Per device, used to track global memory allocations. | 32 | * Per device, used to track global memory allocations. |
31 | */ | 33 | */ |
32 | struct ilo_hwinfo { | 34 | struct ilo_hwinfo { |
33 | /* mmio registers on device */ | 35 | /* mmio registers on device */ |
34 | char __iomem *mmio_vaddr; | 36 | char __iomem *mmio_vaddr; |
35 | 37 | ||
36 | /* doorbell registers on device */ | 38 | /* doorbell registers on device */ |
37 | char __iomem *db_vaddr; | 39 | char __iomem *db_vaddr; |
38 | 40 | ||
39 | /* shared memory on device used for channel control blocks */ | 41 | /* shared memory on device used for channel control blocks */ |
40 | char __iomem *ram_vaddr; | 42 | char __iomem *ram_vaddr; |
41 | 43 | ||
42 | /* files corresponding to this device */ | 44 | /* files corresponding to this device */ |
43 | struct ccb_data *ccb_alloc[MAX_CCB]; | 45 | struct ccb_data *ccb_alloc[MAX_CCB]; |
44 | 46 | ||
45 | struct pci_dev *ilo_dev; | 47 | struct pci_dev *ilo_dev; |
46 | 48 | ||
47 | /* | 49 | /* |
48 | * open_lock serializes ccb_cnt during open and close | 50 | * open_lock serializes ccb_cnt during open and close |
49 | * [ irq disabled ] | 51 | * [ irq disabled ] |
50 | * -> alloc_lock used when adding/removing/searching ccb_alloc, | 52 | * -> alloc_lock used when adding/removing/searching ccb_alloc, |
51 | * which represents all ccbs open on the device | 53 | * which represents all ccbs open on the device |
52 | * --> fifo_lock controls access to fifo queues shared with hw | 54 | * --> fifo_lock controls access to fifo queues shared with hw |
53 | * | 55 | * |
54 | * Locks must be taken in this order, but open_lock and alloc_lock | 56 | * Locks must be taken in this order, but open_lock and alloc_lock |
55 | * are optional, they do not need to be held in order to take a | 57 | * are optional, they do not need to be held in order to take a |
56 | * lower level lock. | 58 | * lower level lock. |
57 | */ | 59 | */ |
58 | spinlock_t open_lock; | 60 | spinlock_t open_lock; |
59 | spinlock_t alloc_lock; | 61 | spinlock_t alloc_lock; |
60 | spinlock_t fifo_lock; | 62 | spinlock_t fifo_lock; |
61 | 63 | ||
62 | struct cdev cdev; | 64 | struct cdev cdev; |
63 | }; | 65 | }; |
64 | 66 | ||
65 | /* offset from mmio_vaddr for enabling doorbell interrupts */ | 67 | /* offset from mmio_vaddr for enabling doorbell interrupts */ |
66 | #define DB_IRQ 0xB2 | 68 | #define DB_IRQ 0xB2 |
67 | /* offset from mmio_vaddr for outbound communications */ | 69 | /* offset from mmio_vaddr for outbound communications */ |
68 | #define DB_OUT 0xD4 | 70 | #define DB_OUT 0xD4 |
69 | /* DB_OUT reset bit */ | 71 | /* DB_OUT reset bit */ |
70 | #define DB_RESET 26 | 72 | #define DB_RESET 26 |
71 | 73 | ||
72 | /* | 74 | /* |
73 | * Channel control block. Used to manage hardware queues. | 75 | * Channel control block. Used to manage hardware queues. |
74 | * The format must match hw's version. The hw ccb is 128 bytes, | 76 | * The format must match hw's version. The hw ccb is 128 bytes, |
75 | * but the context area shouldn't be touched by the driver. | 77 | * but the context area shouldn't be touched by the driver. |
76 | */ | 78 | */ |
77 | #define ILOSW_CCB_SZ 64 | 79 | #define ILOSW_CCB_SZ 64 |
78 | #define ILOHW_CCB_SZ 128 | 80 | #define ILOHW_CCB_SZ 128 |
79 | struct ccb { | 81 | struct ccb { |
80 | union { | 82 | union { |
81 | char *send_fifobar; | 83 | char *send_fifobar; |
82 | u64 send_fifobar_pa; | 84 | u64 send_fifobar_pa; |
83 | } ccb_u1; | 85 | } ccb_u1; |
84 | union { | 86 | union { |
85 | char *send_desc; | 87 | char *send_desc; |
86 | u64 send_desc_pa; | 88 | u64 send_desc_pa; |
87 | } ccb_u2; | 89 | } ccb_u2; |
88 | u64 send_ctrl; | 90 | u64 send_ctrl; |
89 | 91 | ||
90 | union { | 92 | union { |
91 | char *recv_fifobar; | 93 | char *recv_fifobar; |
92 | u64 recv_fifobar_pa; | 94 | u64 recv_fifobar_pa; |
93 | } ccb_u3; | 95 | } ccb_u3; |
94 | union { | 96 | union { |
95 | char *recv_desc; | 97 | char *recv_desc; |
96 | u64 recv_desc_pa; | 98 | u64 recv_desc_pa; |
97 | } ccb_u4; | 99 | } ccb_u4; |
98 | u64 recv_ctrl; | 100 | u64 recv_ctrl; |
99 | 101 | ||
100 | union { | 102 | union { |
101 | char __iomem *db_base; | 103 | char __iomem *db_base; |
102 | u64 padding5; | 104 | u64 padding5; |
103 | } ccb_u5; | 105 | } ccb_u5; |
104 | 106 | ||
105 | u64 channel; | 107 | u64 channel; |
106 | 108 | ||
107 | /* unused context area (64 bytes) */ | 109 | /* unused context area (64 bytes) */ |
108 | }; | 110 | }; |
109 | 111 | ||
110 | /* ccb queue parameters */ | 112 | /* ccb queue parameters */ |
111 | #define SENDQ 1 | 113 | #define SENDQ 1 |
112 | #define RECVQ 2 | 114 | #define RECVQ 2 |
113 | #define NR_QENTRY 4 | 115 | #define NR_QENTRY 4 |
114 | #define L2_QENTRY_SZ 12 | 116 | #define L2_QENTRY_SZ 12 |
115 | 117 | ||
116 | /* ccb ctrl bitfields */ | 118 | /* ccb ctrl bitfields */ |
117 | #define CTRL_BITPOS_L2SZ 0 | 119 | #define CTRL_BITPOS_L2SZ 0 |
118 | #define CTRL_BITPOS_FIFOINDEXMASK 4 | 120 | #define CTRL_BITPOS_FIFOINDEXMASK 4 |
119 | #define CTRL_BITPOS_DESCLIMIT 18 | 121 | #define CTRL_BITPOS_DESCLIMIT 18 |
120 | #define CTRL_BITPOS_A 30 | 122 | #define CTRL_BITPOS_A 30 |
121 | #define CTRL_BITPOS_G 31 | 123 | #define CTRL_BITPOS_G 31 |
122 | 124 | ||
123 | /* ccb doorbell macros */ | 125 | /* ccb doorbell macros */ |
124 | #define L2_DB_SIZE 14 | 126 | #define L2_DB_SIZE 14 |
125 | #define ONE_DB_SIZE (1 << L2_DB_SIZE) | 127 | #define ONE_DB_SIZE (1 << L2_DB_SIZE) |
126 | 128 | ||
/*
 * Per fd structure used to track the ccb allocated to that dev file.
 */
struct ccb_data {
	/* software version of ccb, using virtual addrs */
	struct ccb driver_ccb;

	/* hardware version of ccb, using physical addrs */
	struct ccb ilo_ccb;

	/* hardware ccb is written to this shared mapped device memory */
	struct ccb __iomem *mapped_ccb;

	/* dma'able memory used for send/recv queues */
	void *dma_va;		/* kernel virtual address of the region */
	dma_addr_t dma_pa;	/* bus address of the same region */
	size_t dma_size;	/* size of the region in bytes */

	/* pointer to hardware device info */
	struct ilo_hwinfo *ilo_hw;

	/* queue for this ccb to wait for recv data */
	wait_queue_head_t ccb_waitq;

	/* usage count, to allow for shared ccb's */
	int ccb_cnt;

	/* non-zero if an open wanted exclusive access to this ccb */
	int ccb_excl;
};
157 | 159 | ||
/*
 * FIFO queue structure, shared with hw.
 *
 * The control fields, the head index, and the tail index are each padded
 * out to ILO_CACHE_SZ bytes so that every index lives in its own
 * cache-line-sized slot.
 */
#define ILO_START_ALIGN	4096	/* required alignment of the fifo start */
#define ILO_CACHE_SZ	128	/* pad granularity; presumably the hw cache-line size -- TODO confirm */
struct fifo {
	u64 nrents;	/* user requested number of fifo entries */
	u64 imask;	/* mask to extract valid fifo index */
	u64 merge;	/* O/C bits to merge in during enqueue operation */
	u64 reset;	/* set to non-zero when the target device resets */
	u8 pad_0[ILO_CACHE_SZ - (sizeof(u64) * 4)];

	u64 head;	/* fifo head index */
	u8 pad_1[ILO_CACHE_SZ - (sizeof(u64))];

	u64 tail;	/* fifo tail index */
	u8 pad_2[ILO_CACHE_SZ - (sizeof(u64))];

	/*
	 * Variable-length entry array.  Deliberately declared [1] rather
	 * than as a C99 flexible array member: FIFOHANDLESIZE (below)
	 * relies on sizeof(struct fifo) including exactly one entry.
	 */
	u64 fifobar[1];
};
178 | 180 | ||
/* convert between struct fifo, and the fifobar, which is saved in the ccb */
/* size of the fifo header, i.e. everything preceding fifobar[] */
#define FIFOHANDLESIZE (sizeof(struct fifo) - sizeof(u64))
/* recover the enclosing struct fifo from a pointer to its fifobar[] */
#define FIFOBARTOHANDLE(_fifo) \
	((struct fifo *)(((char *)(_fifo)) - FIFOHANDLESIZE))
183 | 185 | ||
/*
 * Layout of a fifo entry descriptor, low bits to high:
 * qwords (10 bits) | descriptor index (12 bits) | C bit | O bit.
 */
/* the number of qwords to consume from the entry descriptor */
#define ENTRY_BITPOS_QWORDS      0
/* descriptor index number (within a specified queue) */
#define ENTRY_BITPOS_DESCRIPTOR  10
/* state bit, fifo entry consumed by consumer */
#define ENTRY_BITPOS_C           22
/* state bit, fifo entry is occupied */
#define ENTRY_BITPOS_O           23

/* field widths, in bits */
#define ENTRY_BITS_QWORDS        10
#define ENTRY_BITS_DESCRIPTOR    12
#define ENTRY_BITS_C             1
#define ENTRY_BITS_O             1
#define ENTRY_BITS_TOTAL	\
	(ENTRY_BITS_C + ENTRY_BITS_O + \
	 ENTRY_BITS_QWORDS + ENTRY_BITS_DESCRIPTOR)

/* extract various entry fields */
#define ENTRY_MASK ((1 << ENTRY_BITS_TOTAL) - 1)
#define ENTRY_MASK_C (((1 << ENTRY_BITS_C) - 1) << ENTRY_BITPOS_C)
#define ENTRY_MASK_O (((1 << ENTRY_BITS_O) - 1) << ENTRY_BITPOS_O)
#define ENTRY_MASK_QWORDS \
	(((1 << ENTRY_BITS_QWORDS) - 1) << ENTRY_BITPOS_QWORDS)
#define ENTRY_MASK_DESCRIPTOR \
	(((1 << ENTRY_BITS_DESCRIPTOR) - 1) << ENTRY_BITPOS_DESCRIPTOR)

/* entry with the C and O state bits stripped off */
#define ENTRY_MASK_NOSTATE (ENTRY_MASK >> (ENTRY_BITS_C + ENTRY_BITS_O))
212 | #endif /* __HPILO_H */ | 214 | #endif /* __HPILO_H */ |
213 | 215 |